Mirror of https://github.com/adulau/aha.git — synced 2024-12-28 03:36:19 +00:00
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (46 commits)
  cnic: Fix NETDEV_UP event processing.
  uvesafb/connector: Disallow unpliviged users to send netlink packets
  pohmelfs/connector: Disallow unpliviged users to configure pohmelfs
  dst/connector: Disallow unpliviged users to configure dst
  dm/connector: Only process connector packages from privileged processes
  connector: Removed the destruct_data callback since it is always kfree_skb()
  connector/dm: Fixed a compilation warning
  connector: Provide the sender's credentials to the callback
  connector: Keep the skb in cn_callback_data
  e1000e/igb/ixgbe: Don't report an error if devices don't support AER
  net: Fix wrong sizeof
  net: splice() from tcp to pipe should take into account O_NONBLOCK
  net: Use sk_mark for routing lookup in more places
  sky2: irqname based on pci address
  skge: use unique IRQ name
  IPv4 TCP fails to send window scale option when window scale is zero
  net/ipv4/tcp.c: fix min() type mismatch warning
  Kconfig: STRIP: Remove stale bits of STRIP help text
  NET: mkiss: Fix typo
  tg3: Remove prev_vlan_tag from struct tx_ring_info
  ...
This commit is contained in: commit 90d5ffc729
54 changed files with 2158 additions and 269 deletions
@@ -34,7 +34,7 @@ static char cn_test_name[] = "cn_test";
 static struct sock *nls;
 static struct timer_list cn_test_timer;
 
-static void cn_test_callback(struct cn_msg *msg)
+static void cn_test_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
 {
     pr_info("%s: %lu: idx=%x, val=%x, seq=%u, ack=%u, len=%d: %s.\n",
             __func__, jiffies, msg->id.idx, msg->id.val,
@@ -23,7 +23,7 @@ handling, etc...  The Connector driver allows any kernelspace agents to use
 netlink based networking for inter-process communication in a significantly
 easier way:
 
-int cn_add_callback(struct cb_id *id, char *name, void (*callback) (void *));
+int cn_add_callback(struct cb_id *id, char *name, void (*callback) (struct cn_msg *, struct netlink_skb_parms *));
 void cn_netlink_send(struct cn_msg *msg, u32 __group, int gfp_mask);
 
 struct cb_id
@@ -53,15 +53,15 @@ struct cn_msg
 Connector interfaces.
 /*****************************************/
 
-int cn_add_callback(struct cb_id *id, char *name, void (*callback) (void *));
+int cn_add_callback(struct cb_id *id, char *name, void (*callback) (struct cn_msg *, struct netlink_skb_parms *));
 
 Registers new callback with connector core.
 
 struct cb_id *id       - unique connector's user identifier.
                          It must be registered in connector.h for legal in-kernel users.
 char *name             - connector's callback symbolic name.
-void (*callback) (void *)      - connector's callback.
-                         Argument must be dereferenced to struct cn_msg *.
+void (*callback) (struct cn..) - connector's callback.
+                         cn_msg and the sender's credentials
 
 
 void cn_del_callback(struct cb_id *id);
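For orientation, here is a minimal sketch of a kernel-side connector user written against the two-argument callback documented above. The cb_id values, the module name and the CAP_SYS_ADMIN policy are illustrative assumptions, not part of this merge; only the callback signature and the cn_add_callback()/cn_del_callback() calls come from the text above.

```c
#include <linux/module.h>
#include <linux/connector.h>
#include <linux/capability.h>

/* Hypothetical connector identity; real users register their idx/val in connector.h. */
static struct cb_id example_id = { .idx = 0x123, .val = 0x456 };

/* New-style callback: the parsed message plus the sender's netlink_skb_parms. */
static void example_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
	/* Reject unprivileged senders, in the spirit of the dm/connector change below. */
	if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN))
		return;

	pr_info("cn_example: seq=%u ack=%u len=%u\n", msg->seq, msg->ack, msg->len);
}

static int __init example_init(void)
{
	return cn_add_callback(&example_id, "cn_example", example_callback);
}

static void __exit example_exit(void)
{
	cn_del_callback(&example_id);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
```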
@@ -381,7 +381,7 @@ int main(int argc, char **argv)
     memset(&hwtstamp, 0, sizeof(hwtstamp));
     strncpy(hwtstamp.ifr_name, interface, sizeof(hwtstamp.ifr_name));
     hwtstamp.ifr_data = (void *)&hwconfig;
-    memset(&hwconfig, 0, sizeof(&hwconfig));
+    memset(&hwconfig, 0, sizeof(hwconfig));
     hwconfig.tx_type =
         (so_timestamping_flags & SOF_TIMESTAMPING_TX_HARDWARE) ?
         HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
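The one-line change above ("net: Fix wrong sizeof") fixes a classic C mistake: sizeof(&hwconfig) is the size of a pointer, so only 4 or 8 bytes of the structure would be cleared. A small stand-alone illustration of the difference, using a hypothetical struct name rather than the real hwtstamp_config:

```c
#include <stdio.h>
#include <string.h>

struct hwconfig_example { int flags; int tx_type; int rx_filter; };

int main(void)
{
	struct hwconfig_example hwconfig;

	/* Wrong: clears only sizeof(pointer) bytes (typically 4 or 8). */
	memset(&hwconfig, 0, sizeof(&hwconfig));
	printf("sizeof(&hwconfig) = %zu\n", sizeof(&hwconfig));

	/* Right: clears the whole structure. */
	memset(&hwconfig, 0, sizeof(hwconfig));
	printf("sizeof(hwconfig)  = %zu\n", sizeof(hwconfig));
	return 0;
}
```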
@@ -78,18 +78,20 @@ void cn_queue_wrapper(struct work_struct *work)
     struct cn_callback_entry *cbq =
         container_of(work, struct cn_callback_entry, work);
     struct cn_callback_data *d = &cbq->data;
+    struct cn_msg *msg = NLMSG_DATA(nlmsg_hdr(d->skb));
+    struct netlink_skb_parms *nsp = &NETLINK_CB(d->skb);
 
-    d->callback(d->callback_priv);
+    d->callback(msg, nsp);
 
-    d->destruct_data(d->ddata);
-    d->ddata = NULL;
+    kfree_skb(d->skb);
+    d->skb = NULL;
 
     kfree(d->free);
 }
 
 static struct cn_callback_entry *
 cn_queue_alloc_callback_entry(char *name, struct cb_id *id,
-                              void (*callback)(struct cn_msg *))
+                              void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
 {
     struct cn_callback_entry *cbq;
 
@@ -123,7 +125,7 @@ int cn_cb_equal(struct cb_id *i1, struct cb_id *i2)
 }
 
 int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id,
-                          void (*callback)(struct cn_msg *))
+                          void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
 {
     struct cn_callback_entry *cbq, *__cbq;
     int found = 0;
@@ -129,21 +129,19 @@ EXPORT_SYMBOL_GPL(cn_netlink_send);
 /*
  * Callback helper - queues work and setup destructor for given data.
  */
-static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), void *data)
+static int cn_call_callback(struct sk_buff *skb)
 {
     struct cn_callback_entry *__cbq, *__new_cbq;
     struct cn_dev *dev = &cdev;
+    struct cn_msg *msg = NLMSG_DATA(nlmsg_hdr(skb));
     int err = -ENODEV;
 
     spin_lock_bh(&dev->cbdev->queue_lock);
     list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) {
         if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
             if (likely(!work_pending(&__cbq->work) &&
-                       __cbq->data.ddata == NULL)) {
-                __cbq->data.callback_priv = msg;
+                       __cbq->data.skb == NULL)) {
+                __cbq->data.skb = skb;
 
-                __cbq->data.ddata = data;
-                __cbq->data.destruct_data = destruct_data;
-
                 if (queue_cn_work(__cbq, &__cbq->work))
                     err = 0;
@@ -156,10 +154,8 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v
             __new_cbq = kzalloc(sizeof(struct cn_callback_entry), GFP_ATOMIC);
             if (__new_cbq) {
                 d = &__new_cbq->data;
-                d->callback_priv = msg;
+                d->skb = skb;
                 d->callback = __cbq->data.callback;
-                d->ddata = data;
-                d->destruct_data = destruct_data;
                 d->free = __new_cbq;
 
                 __new_cbq->pdev = __cbq->pdev;
@@ -191,7 +187,6 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v
  */
 static void cn_rx_skb(struct sk_buff *__skb)
 {
-    struct cn_msg *msg;
     struct nlmsghdr *nlh;
     int err;
     struct sk_buff *skb;
@@ -208,8 +203,7 @@ static void cn_rx_skb(struct sk_buff *__skb)
         return;
     }
 
-    msg = NLMSG_DATA(nlh);
-    err = cn_call_callback(msg, (void (*)(void *))kfree_skb, skb);
+    err = cn_call_callback(skb);
     if (err < 0)
         kfree_skb(skb);
 }
@@ -270,7 +264,7 @@ static void cn_notify(struct cb_id *id, u32 notify_event)
  * May sleep.
  */
 int cn_add_callback(struct cb_id *id, char *name,
-                    void (*callback)(struct cn_msg *))
+                    void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
 {
     int err;
     struct cn_dev *dev = &cdev;
@@ -352,7 +346,7 @@ static int cn_ctl_msg_equals(struct cn_ctl_msg *m1, struct cn_ctl_msg *m2)
  *
  * Used for notification of a request's processing.
  */
-static void cn_callback(struct cn_msg *msg)
+static void cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
 {
     struct cn_ctl_msg *ctl;
     struct cn_ctl_entry *ent;
@@ -129,11 +129,13 @@ static int fill_pkg(struct cn_msg *msg, struct dm_ulog_request *tfr)
  * This is the connector callback that delivers data
  * that was sent from userspace.
  */
-static void cn_ulog_callback(void *data)
+static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
 {
-    struct cn_msg *msg = (struct cn_msg *)data;
     struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1);
 
+    if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN))
+        return;
+
     spin_lock(&receiving_list_lock);
     if (msg->len == 0)
         fill_pkg(msg, NULL);
@@ -805,52 +805,54 @@ static void poll_vortex(struct net_device *dev)
 
 #ifdef CONFIG_PM
 
-static int vortex_suspend(struct pci_dev *pdev, pm_message_t state)
+static int vortex_suspend(struct device *dev)
 {
-    struct net_device *dev = pci_get_drvdata(pdev);
+    struct pci_dev *pdev = to_pci_dev(dev);
+    struct net_device *ndev = pci_get_drvdata(pdev);
 
-    if (dev && netdev_priv(dev)) {
-        if (netif_running(dev)) {
-            netif_device_detach(dev);
-            vortex_down(dev, 1);
-            disable_irq(dev->irq);
-        }
-        pci_save_state(pdev);
-        pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
-        pci_disable_device(pdev);
-        pci_set_power_state(pdev, pci_choose_state(pdev, state));
-    }
+    if (!ndev || !netif_running(ndev))
+        return 0;
+
+    netif_device_detach(ndev);
+    vortex_down(ndev, 1);
+
     return 0;
 }
 
-static int vortex_resume(struct pci_dev *pdev)
+static int vortex_resume(struct device *dev)
 {
-    struct net_device *dev = pci_get_drvdata(pdev);
-    struct vortex_private *vp = netdev_priv(dev);
+    struct pci_dev *pdev = to_pci_dev(dev);
+    struct net_device *ndev = pci_get_drvdata(pdev);
     int err;
 
-    if (dev && vp) {
-        pci_set_power_state(pdev, PCI_D0);
-        pci_restore_state(pdev);
-        err = pci_enable_device(pdev);
-        if (err) {
-            pr_warning("%s: Could not enable device\n",
-                dev->name);
-            return err;
-        }
-        pci_set_master(pdev);
-        if (netif_running(dev)) {
-            err = vortex_up(dev);
-            if (err)
-                return err;
-            enable_irq(dev->irq);
-            netif_device_attach(dev);
-        }
-    }
+    if (!ndev || !netif_running(ndev))
+        return 0;
+
+    err = vortex_up(ndev);
+    if (err)
+        return err;
+
+    netif_device_attach(ndev);
+
     return 0;
 }
 
-#endif /* CONFIG_PM */
+static struct dev_pm_ops vortex_pm_ops = {
+    .suspend = vortex_suspend,
+    .resume = vortex_resume,
+    .freeze = vortex_suspend,
+    .thaw = vortex_resume,
+    .poweroff = vortex_suspend,
+    .restore = vortex_resume,
+};
+
+#define VORTEX_PM_OPS (&vortex_pm_ops)
+
+#else /* !CONFIG_PM */
+
+#define VORTEX_PM_OPS NULL
+
+#endif /* !CONFIG_PM */
 
 #ifdef CONFIG_EISA
 static struct eisa_device_id vortex_eisa_ids[] = {
@@ -3199,10 +3201,7 @@ static struct pci_driver vortex_driver = {
     .probe = vortex_init_one,
     .remove = __devexit_p(vortex_remove_one),
     .id_table = vortex_pci_tbl,
-#ifdef CONFIG_PM
-    .suspend = vortex_suspend,
-    .resume = vortex_resume,
-#endif
+    .driver.pm = VORTEX_PM_OPS,
 };
 
 
@@ -1738,6 +1738,13 @@ config KS8851
 	help
 	  SPI driver for Micrel KS8851 SPI attached network chip.
 
+config KS8851_MLL
+	tristate "Micrel KS8851 MLL"
+	depends on HAS_IOMEM
+	help
+	  This platform driver is for Micrel KS8851 Address/data bus
+	  multiplexed network chip.
+
 config VIA_RHINE
 	tristate "VIA Rhine support"
 	depends on NET_PCI && PCI
@@ -89,6 +89,7 @@ obj-$(CONFIG_SKY2) += sky2.o
 obj-$(CONFIG_SKFP) += skfp/
 obj-$(CONFIG_KS8842) += ks8842.o
 obj-$(CONFIG_KS8851) += ks8851.o
+obj-$(CONFIG_KS8851_MLL) += ks8851_mll.o
 obj-$(CONFIG_VIA_RHINE) += via-rhine.o
 obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
 obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
@@ -90,7 +90,7 @@ static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
         if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
             break;
         udelay(1);
-    } while (limit-- >= 0);
+    } while (limit-- > 0);
 
     return (limit < 0) ? 1 : 0;
 }
@@ -258,6 +258,7 @@ struct be_adapter {
     bool link_up;
     u32 port_num;
     bool promiscuous;
+    u32 cap;
 };
 
 extern const struct ethtool_ops be_ethtool_ops;
@@ -1068,7 +1068,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
 }
 
 /* Uses mbox */
-int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num)
+int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
 {
     struct be_mcc_wrb *wrb;
     struct be_cmd_req_query_fw_cfg *req;
@@ -1088,6 +1088,7 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num)
     if (!status) {
         struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
         *port_num = le32_to_cpu(resp->phys_port);
+        *cap = le32_to_cpu(resp->function_cap);
     }
 
     spin_unlock(&adapter->mbox_lock);
@@ -760,7 +760,8 @@ extern int be_cmd_set_flow_control(struct be_adapter *adapter,
             u32 tx_fc, u32 rx_fc);
 extern int be_cmd_get_flow_control(struct be_adapter *adapter,
             u32 *tx_fc, u32 *rx_fc);
-extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num);
+extern int be_cmd_query_fw_cfg(struct be_adapter *adapter,
+            u32 *port_num, u32 *cap);
 extern int be_cmd_reset_function(struct be_adapter *adapter);
 extern int be_process_mcc(struct be_adapter *adapter);
 extern int be_cmd_write_flashrom(struct be_adapter *adapter,
@@ -747,9 +747,16 @@ static void be_rx_compl_process(struct be_adapter *adapter,
             struct be_eth_rx_compl *rxcp)
 {
     struct sk_buff *skb;
-    u32 vtp, vid;
+    u32 vlanf, vid;
+    u8 vtm;
 
-    vtp = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
+    vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
+    vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
+
+    /* vlanf could be wrongly set in some cards.
+     * ignore if vtm is not set */
+    if ((adapter->cap == 0x400) && !vtm)
+        vlanf = 0;
 
     skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN);
     if (!skb) {
@@ -772,7 +779,7 @@ static void be_rx_compl_process(struct be_adapter *adapter,
     skb->protocol = eth_type_trans(skb, adapter->netdev);
     skb->dev = adapter->netdev;
 
-    if (vtp) {
+    if (vlanf) {
         if (!adapter->vlan_grp || adapter->num_vlans == 0) {
             kfree_skb(skb);
             return;
@@ -797,11 +804,18 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
     struct be_eq_obj *eq_obj = &adapter->rx_eq;
     u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
     u16 i, rxq_idx = 0, vid, j;
+    u8 vtm;
 
     num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
     pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
     vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
     rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
+    vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
+
+    /* vlanf could be wrongly set in some cards.
+     * ignore if vtm is not set */
+    if ((adapter->cap == 0x400) && !vtm)
+        vlanf = 0;
 
     skb = napi_get_frags(&eq_obj->napi);
     if (!skb) {
@@ -2045,7 +2059,8 @@ static int be_hw_up(struct be_adapter *adapter)
     if (status)
         return status;
 
-    status = be_cmd_query_fw_cfg(adapter, &adapter->port_num);
+    status = be_cmd_query_fw_cfg(adapter,
+                &adapter->port_num, &adapter->cap);
     return status;
 }
 
@@ -1182,6 +1182,7 @@ static ssize_t bonding_store_primary(struct device *d,
                ": %s: Setting %s as primary slave.\n",
                bond->dev->name, slave->dev->name);
             bond->primary_slave = slave;
+            strcpy(bond->params.primary, slave->dev->name);
             bond_select_active_slave(bond);
             goto out;
         }
@@ -2733,7 +2733,8 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
         cnic_ulp_init(dev);
     else if (event == NETDEV_UNREGISTER)
         cnic_ulp_exit(dev);
-    else if (event == NETDEV_UP) {
+
+    if (event == NETDEV_UP) {
         if (cnic_register_netdev(dev) != 0) {
             cnic_put(dev);
             goto done;
@@ -12,8 +12,8 @@
 #ifndef CNIC_IF_H
 #define CNIC_IF_H
 
-#define CNIC_MODULE_VERSION	"2.0.0"
-#define CNIC_MODULE_RELDATE	"May 21, 2009"
+#define CNIC_MODULE_VERSION	"2.0.1"
+#define CNIC_MODULE_RELDATE	"Oct 01, 2009"
 
 #define CNIC_ULP_RDMA		0
 #define CNIC_ULP_ISCSI		1
@@ -4982,12 +4982,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
         goto err_pci_reg;
 
     /* AER (Advanced Error Reporting) hooks */
-    err = pci_enable_pcie_error_reporting(pdev);
-    if (err) {
-        dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
-                "0x%x\n", err);
-        /* non-fatal, continue */
-    }
+    pci_enable_pcie_error_reporting(pdev);
 
     pci_set_master(pdev);
     /* PCI config space info */
@@ -5263,7 +5258,6 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
 {
     struct net_device *netdev = pci_get_drvdata(pdev);
     struct e1000_adapter *adapter = netdev_priv(netdev);
-    int err;
 
     /*
      * flush_scheduled work may reschedule our watchdog task, so
@@ -5299,10 +5293,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
     free_netdev(netdev);
 
     /* AER disable */
-    err = pci_disable_pcie_error_reporting(pdev);
-    if (err)
-        dev_err(&pdev->dev,
-            "pci_disable_pcie_error_reporting failed 0x%x\n", err);
+    pci_disable_pcie_error_reporting(pdev);
 
     pci_disable_device(pdev);
 }
@@ -258,7 +258,7 @@ static void ax_bump(struct mkiss *ax)
         }
         if (ax->crcmode != CRC_MODE_SMACK && ax->crcauto) {
             printk(KERN_INFO
-                   "mkiss: %s: Switchting to crc-smack\n",
+                   "mkiss: %s: Switching to crc-smack\n",
                    ax->dev->name);
             ax->crcmode = CRC_MODE_SMACK;
         }
@@ -272,7 +272,7 @@ static void ax_bump(struct mkiss *ax)
         }
         if (ax->crcmode != CRC_MODE_FLEX && ax->crcauto) {
             printk(KERN_INFO
-                   "mkiss: %s: Switchting to crc-flexnet\n",
+                   "mkiss: %s: Switching to crc-flexnet\n",
                    ax->dev->name);
             ax->crcmode = CRC_MODE_FLEX;
         }
@@ -1246,12 +1246,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
     if (err)
         goto err_pci_reg;
 
-    err = pci_enable_pcie_error_reporting(pdev);
-    if (err) {
-        dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
-                "0x%x\n", err);
-        /* non-fatal, continue */
-    }
+    pci_enable_pcie_error_reporting(pdev);
 
     pci_set_master(pdev);
     pci_save_state(pdev);
@@ -1628,7 +1623,6 @@ static void __devexit igb_remove(struct pci_dev *pdev)
     struct net_device *netdev = pci_get_drvdata(pdev);
     struct igb_adapter *adapter = netdev_priv(netdev);
     struct e1000_hw *hw = &adapter->hw;
-    int err;
 
     /* flush_scheduled work may reschedule our watchdog task, so
      * explicitly disable watchdog tasks from being rescheduled */
@@ -1682,10 +1676,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
 
     free_netdev(netdev);
 
-    err = pci_disable_pcie_error_reporting(pdev);
-    if (err)
-        dev_err(&pdev->dev,
-            "pci_disable_pcie_error_reporting failed 0x%x\n", err);
+    pci_disable_pcie_error_reporting(pdev);
 
     pci_disable_device(pdev);
 }
@@ -495,7 +495,7 @@ static void veth_take_cap_ack(struct veth_lpar_connection *cnx,
                cnx->remote_lp);
     } else {
         memcpy(&cnx->cap_ack_event, event,
-               sizeof(&cnx->cap_ack_event));
+               sizeof(cnx->cap_ack_event));
         cnx->state |= VETH_STATE_GOTCAPACK;
         veth_kick_statemachine(cnx);
     }
@@ -425,7 +425,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
 #endif /* CONFIG_DCB */
     default:
         hw_dbg(hw, "Flow control param set incorrectly\n");
-        ret_val = -IXGBE_ERR_CONFIG;
+        ret_val = IXGBE_ERR_CONFIG;
         goto out;
         break;
     }
@@ -1355,9 +1355,7 @@ static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
 /**
  * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
  * @hw: pointer to hardware structure
- * @addr_list: the list of new addresses
- * @addr_count: number of addresses
- * @next: iterator function to walk the address list
+ * @uc_list: the list of new addresses
  *
  * The given list replaces any existing list. Clears the secondary addrs from
  * receive address registers. Uses unused receive address registers for the
|
@ -1663,7 +1661,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
|
||||||
#endif /* CONFIG_DCB */
|
#endif /* CONFIG_DCB */
|
||||||
default:
|
default:
|
||||||
hw_dbg(hw, "Flow control param set incorrectly\n");
|
hw_dbg(hw, "Flow control param set incorrectly\n");
|
||||||
ret_val = -IXGBE_ERR_CONFIG;
|
ret_val = IXGBE_ERR_CONFIG;
|
||||||
goto out;
|
goto out;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -1734,75 +1732,140 @@ s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
|
||||||
s32 ret_val = 0;
|
s32 ret_val = 0;
|
||||||
ixgbe_link_speed speed;
|
ixgbe_link_speed speed;
|
||||||
u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
|
u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
|
||||||
|
u32 links2, anlp1_reg, autoc_reg, links;
|
||||||
bool link_up;
|
bool link_up;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* AN should have completed when the cable was plugged in.
|
* AN should have completed when the cable was plugged in.
|
||||||
* Look for reasons to bail out. Bail out if:
|
* Look for reasons to bail out. Bail out if:
|
||||||
* - FC autoneg is disabled, or if
|
* - FC autoneg is disabled, or if
|
||||||
* - we don't have multispeed fiber, or if
|
* - link is not up.
|
||||||
* - we're not running at 1G, or if
|
|
||||||
* - link is not up, or if
|
|
||||||
* - link is up but AN did not complete, or if
|
|
||||||
* - link is up and AN completed but timed out
|
|
||||||
*
|
*
|
||||||
* Since we're being called from an LSC, link is already know to be up.
|
* Since we're being called from an LSC, link is already known to be up.
|
||||||
* So use link_up_wait_to_complete=false.
|
* So use link_up_wait_to_complete=false.
|
||||||
*/
|
*/
|
||||||
hw->mac.ops.check_link(hw, &speed, &link_up, false);
|
hw->mac.ops.check_link(hw, &speed, &link_up, false);
|
||||||
linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
|
|
||||||
|
|
||||||
if (hw->fc.disable_fc_autoneg ||
|
if (hw->fc.disable_fc_autoneg || (!link_up)) {
|
||||||
!hw->phy.multispeed_fiber ||
|
|
||||||
(speed != IXGBE_LINK_SPEED_1GB_FULL) ||
|
|
||||||
!link_up ||
|
|
||||||
((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
|
|
||||||
((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
|
|
||||||
hw->fc.fc_was_autonegged = false;
|
hw->fc.fc_was_autonegged = false;
|
||||||
hw->fc.current_mode = hw->fc.requested_mode;
|
hw->fc.current_mode = hw->fc.requested_mode;
|
||||||
hw_dbg(hw, "Autoneg FC was skipped.\n");
|
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* On backplane, bail out if
|
||||||
|
* - backplane autoneg was not completed, or if
|
||||||
|
* - link partner is not AN enabled
|
||||||
|
*/
|
||||||
|
if (hw->phy.media_type == ixgbe_media_type_backplane) {
|
||||||
|
links = IXGBE_READ_REG(hw, IXGBE_LINKS);
|
||||||
|
links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
|
||||||
|
if (((links & IXGBE_LINKS_KX_AN_COMP) == 0) ||
|
||||||
|
((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)) {
|
||||||
|
hw->fc.fc_was_autonegged = false;
|
||||||
|
hw->fc.current_mode = hw->fc.requested_mode;
|
||||||
|
goto out;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* On multispeed fiber at 1g, bail out if
|
||||||
|
* - link is up but AN did not complete, or if
|
||||||
|
* - link is up and AN completed but timed out
|
||||||
|
*/
|
||||||
|
if (hw->phy.multispeed_fiber && (speed == IXGBE_LINK_SPEED_1GB_FULL)) {
|
||||||
|
linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
|
||||||
|
if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
|
||||||
|
((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
|
||||||
|
hw->fc.fc_was_autonegged = false;
|
||||||
|
hw->fc.current_mode = hw->fc.requested_mode;
|
||||||
|
goto out;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Read the AN advertisement and LP ability registers and resolve
|
* Read the AN advertisement and LP ability registers and resolve
|
||||||
* local flow control settings accordingly
|
* local flow control settings accordingly
|
||||||
*/
|
*/
|
||||||
pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
|
if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
|
||||||
pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
|
(hw->phy.media_type != ixgbe_media_type_backplane)) {
|
||||||
if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
|
pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
|
||||||
(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) {
|
pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
|
||||||
/*
|
if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
|
||||||
* Now we need to check if the user selected Rx ONLY
|
(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) {
|
||||||
* of pause frames. In this case, we had to advertise
|
/*
|
||||||
* FULL flow control because we could not advertise RX
|
* Now we need to check if the user selected Rx ONLY
|
||||||
* ONLY. Hence, we must now check to see if we need to
|
* of pause frames. In this case, we had to advertise
|
||||||
* turn OFF the TRANSMISSION of PAUSE frames.
|
* FULL flow control because we could not advertise RX
|
||||||
*/
|
* ONLY. Hence, we must now check to see if we need to
|
||||||
if (hw->fc.requested_mode == ixgbe_fc_full) {
|
* turn OFF the TRANSMISSION of PAUSE frames.
|
||||||
hw->fc.current_mode = ixgbe_fc_full;
|
*/
|
||||||
hw_dbg(hw, "Flow Control = FULL.\n");
|
if (hw->fc.requested_mode == ixgbe_fc_full) {
|
||||||
} else {
|
hw->fc.current_mode = ixgbe_fc_full;
|
||||||
|
hw_dbg(hw, "Flow Control = FULL.\n");
|
||||||
|
} else {
|
||||||
|
hw->fc.current_mode = ixgbe_fc_rx_pause;
|
||||||
|
hw_dbg(hw, "Flow Control=RX PAUSE only\n");
|
||||||
|
}
|
||||||
|
} else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
|
||||||
|
(pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
|
||||||
|
(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
|
||||||
|
(pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
|
||||||
|
hw->fc.current_mode = ixgbe_fc_tx_pause;
|
||||||
|
hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
|
||||||
|
} else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
|
||||||
|
(pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
|
||||||
|
!(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
|
||||||
|
(pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
|
||||||
hw->fc.current_mode = ixgbe_fc_rx_pause;
|
hw->fc.current_mode = ixgbe_fc_rx_pause;
|
||||||
hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
|
hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
|
||||||
|
} else {
|
||||||
|
hw->fc.current_mode = ixgbe_fc_none;
|
||||||
|
hw_dbg(hw, "Flow Control = NONE.\n");
|
||||||
}
|
}
|
||||||
} else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
|
|
||||||
(pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
|
|
||||||
(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
|
|
||||||
(pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
|
|
||||||
hw->fc.current_mode = ixgbe_fc_tx_pause;
|
|
||||||
hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
|
|
||||||
} else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
|
|
||||||
(pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
|
|
||||||
!(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
|
|
||||||
(pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
|
|
||||||
hw->fc.current_mode = ixgbe_fc_rx_pause;
|
|
||||||
hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
|
|
||||||
} else {
|
|
||||||
hw->fc.current_mode = ixgbe_fc_none;
|
|
||||||
hw_dbg(hw, "Flow Control = NONE.\n");
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (hw->phy.media_type == ixgbe_media_type_backplane) {
|
||||||
|
/*
|
||||||
|
* Read the 10g AN autoc and LP ability registers and resolve
|
||||||
|
* local flow control settings accordingly
|
||||||
|
*/
|
||||||
|
autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
|
||||||
|
anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
|
||||||
|
|
||||||
|
if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
|
||||||
|
(anlp1_reg & IXGBE_ANLP1_SYM_PAUSE)) {
|
||||||
|
/*
|
||||||
|
* Now we need to check if the user selected Rx ONLY
|
||||||
|
* of pause frames. In this case, we had to advertise
|
||||||
|
* FULL flow control because we could not advertise RX
|
||||||
|
* ONLY. Hence, we must now check to see if we need to
|
||||||
|
* turn OFF the TRANSMISSION of PAUSE frames.
|
||||||
|
*/
|
||||||
|
if (hw->fc.requested_mode == ixgbe_fc_full) {
|
||||||
|
hw->fc.current_mode = ixgbe_fc_full;
|
||||||
|
hw_dbg(hw, "Flow Control = FULL.\n");
|
||||||
|
} else {
|
||||||
|
hw->fc.current_mode = ixgbe_fc_rx_pause;
|
||||||
|
hw_dbg(hw, "Flow Control=RX PAUSE only\n");
|
||||||
|
}
|
||||||
|
} else if (!(autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
|
||||||
|
(autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
|
||||||
|
(anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
|
||||||
|
(anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
|
||||||
|
hw->fc.current_mode = ixgbe_fc_tx_pause;
|
||||||
|
hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
|
||||||
|
} else if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
|
||||||
|
(autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
|
||||||
|
!(anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
|
||||||
|
(anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
|
||||||
|
hw->fc.current_mode = ixgbe_fc_rx_pause;
|
||||||
|
hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
|
||||||
|
} else {
|
||||||
|
hw->fc.current_mode = ixgbe_fc_none;
|
||||||
|
hw_dbg(hw, "Flow Control = NONE.\n");
|
||||||
|
}
|
||||||
|
}
|
||||||
/* Record that current_mode is the result of a successful autoneg */
|
/* Record that current_mode is the result of a successful autoneg */
|
||||||
hw->fc.fc_was_autonegged = true;
|
hw->fc.fc_was_autonegged = true;
|
||||||
|
|
||||||
|
@ -1919,7 +1982,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
|
||||||
#endif /* CONFIG_DCB */
|
#endif /* CONFIG_DCB */
|
||||||
default:
|
default:
|
||||||
hw_dbg(hw, "Flow control param set incorrectly\n");
|
hw_dbg(hw, "Flow control param set incorrectly\n");
|
||||||
ret_val = -IXGBE_ERR_CONFIG;
|
ret_val = IXGBE_ERR_CONFIG;
|
||||||
goto out;
|
goto out;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -1927,9 +1990,6 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
|
||||||
IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
|
IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
|
||||||
reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
|
reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
|
||||||
|
|
||||||
/* Enable and restart autoneg to inform the link partner */
|
|
||||||
reg |= IXGBE_PCS1GLCTL_AN_ENABLE | IXGBE_PCS1GLCTL_AN_RESTART;
|
|
||||||
|
|
||||||
/* Disable AN timeout */
|
/* Disable AN timeout */
|
||||||
if (hw->fc.strict_ieee)
|
if (hw->fc.strict_ieee)
|
||||||
reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
|
reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
|
||||||
|
@ -1937,6 +1997,70 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
|
||||||
IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
|
IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
|
||||||
hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
|
hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Set up the 10G flow control advertisement registers so the HW
|
||||||
|
* can do fc autoneg once the cable is plugged in. If we end up
|
||||||
|
* using 1g instead, this is harmless.
|
||||||
|
*/
|
||||||
|
reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The possible values of fc.requested_mode are:
|
||||||
|
* 0: Flow control is completely disabled
|
||||||
|
* 1: Rx flow control is enabled (we can receive pause frames,
|
||||||
|
* but not send pause frames).
|
||||||
|
* 2: Tx flow control is enabled (we can send pause frames but
|
||||||
|
* we do not support receiving pause frames).
|
||||||
|
* 3: Both Rx and Tx flow control (symmetric) are enabled.
|
||||||
|
* other: Invalid.
|
||||||
|
*/
|
||||||
|
switch (hw->fc.requested_mode) {
|
||||||
|
case ixgbe_fc_none:
|
||||||
|
/* Flow control completely disabled by software override. */
|
||||||
|
reg &= ~(IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
|
||||||
|
break;
|
||||||
|
case ixgbe_fc_rx_pause:
|
||||||
|
/*
|
||||||
|
* Rx Flow control is enabled and Tx Flow control is
|
||||||
|
* disabled by software override. Since there really
|
||||||
|
* isn't a way to advertise that we are capable of RX
|
||||||
|
* Pause ONLY, we will advertise that we support both
|
||||||
|
* symmetric and asymmetric Rx PAUSE. Later, we will
|
||||||
|
* disable the adapter's ability to send PAUSE frames.
|
||||||
|
*/
|
||||||
|
reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
|
||||||
|
break;
|
||||||
|
case ixgbe_fc_tx_pause:
|
||||||
|
/*
|
||||||
|
* Tx Flow control is enabled, and Rx Flow control is
|
||||||
|
* disabled by software override.
|
||||||
|
*/
|
||||||
|
reg |= (IXGBE_AUTOC_ASM_PAUSE);
|
||||||
|
reg &= ~(IXGBE_AUTOC_SYM_PAUSE);
|
||||||
|
break;
|
||||||
|
case ixgbe_fc_full:
|
||||||
|
/* Flow control (both Rx and Tx) is enabled by SW override. */
|
||||||
|
reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
|
||||||
|
break;
|
||||||
|
#ifdef CONFIG_DCB
|
||||||
|
case ixgbe_fc_pfc:
|
||||||
|
goto out;
|
||||||
|
break;
|
||||||
|
#endif /* CONFIG_DCB */
|
||||||
|
default:
|
||||||
|
hw_dbg(hw, "Flow control param set incorrectly\n");
|
||||||
|
ret_val = IXGBE_ERR_CONFIG;
|
||||||
|
goto out;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
/*
|
||||||
|
* AUTOC restart handles negotiation of 1G and 10G. There is
|
||||||
|
* no need to set the PCS1GCTL register.
|
||||||
|
*/
|
||||||
|
reg |= IXGBE_AUTOC_AN_RESTART;
|
||||||
|
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg);
|
||||||
|
hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
|
||||||
|
|
||||||
out:
|
out:
|
||||||
return ret_val;
|
return ret_val;
|
||||||
}
|
}
|
||||||
|
@ -2000,7 +2124,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
|
||||||
|
|
||||||
while (timeout) {
|
while (timeout) {
|
||||||
if (ixgbe_get_eeprom_semaphore(hw))
|
if (ixgbe_get_eeprom_semaphore(hw))
|
||||||
return -IXGBE_ERR_SWFW_SYNC;
|
return IXGBE_ERR_SWFW_SYNC;
|
||||||
|
|
||||||
gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
|
gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
|
||||||
if (!(gssr & (fwmask | swmask)))
|
if (!(gssr & (fwmask | swmask)))
|
||||||
|
@@ -2017,7 +2141,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
 
     if (!timeout) {
         hw_dbg(hw, "Driver can't access resource, GSSR timeout.\n");
-        return -IXGBE_ERR_SWFW_SYNC;
+        return IXGBE_ERR_SWFW_SYNC;
     }
 
     gssr |= swmask;
|
@ -53,6 +53,10 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
|
||||||
{"tx_packets", IXGBE_STAT(net_stats.tx_packets)},
|
{"tx_packets", IXGBE_STAT(net_stats.tx_packets)},
|
||||||
{"rx_bytes", IXGBE_STAT(net_stats.rx_bytes)},
|
{"rx_bytes", IXGBE_STAT(net_stats.rx_bytes)},
|
||||||
{"tx_bytes", IXGBE_STAT(net_stats.tx_bytes)},
|
{"tx_bytes", IXGBE_STAT(net_stats.tx_bytes)},
|
||||||
|
{"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
|
||||||
|
{"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
|
||||||
|
{"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
|
||||||
|
{"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
|
||||||
{"lsc_int", IXGBE_STAT(lsc_int)},
|
{"lsc_int", IXGBE_STAT(lsc_int)},
|
||||||
{"tx_busy", IXGBE_STAT(tx_busy)},
|
{"tx_busy", IXGBE_STAT(tx_busy)},
|
||||||
{"non_eop_descs", IXGBE_STAT(non_eop_descs)},
|
{"non_eop_descs", IXGBE_STAT(non_eop_descs)},
|
||||||
|
|
|
@ -49,7 +49,7 @@ char ixgbe_driver_name[] = "ixgbe";
|
||||||
static const char ixgbe_driver_string[] =
|
static const char ixgbe_driver_string[] =
|
||||||
"Intel(R) 10 Gigabit PCI Express Network Driver";
|
"Intel(R) 10 Gigabit PCI Express Network Driver";
|
||||||
|
|
||||||
#define DRV_VERSION "2.0.37-k2"
|
#define DRV_VERSION "2.0.44-k2"
|
||||||
const char ixgbe_driver_version[] = DRV_VERSION;
|
const char ixgbe_driver_version[] = DRV_VERSION;
|
||||||
static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";
|
static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";
|
||||||
|
|
||||||
|
@ -1885,12 +1885,29 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
|
||||||
IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
|
IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
|
||||||
adapter->tx_ring[i].head = IXGBE_TDH(j);
|
adapter->tx_ring[i].head = IXGBE_TDH(j);
|
||||||
adapter->tx_ring[i].tail = IXGBE_TDT(j);
|
adapter->tx_ring[i].tail = IXGBE_TDT(j);
|
||||||
/* Disable Tx Head Writeback RO bit, since this hoses
|
/*
|
||||||
|
* Disable Tx Head Writeback RO bit, since this hoses
|
||||||
* bookkeeping if things aren't delivered in order.
|
* bookkeeping if things aren't delivered in order.
|
||||||
*/
|
*/
|
||||||
txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
|
switch (hw->mac.type) {
|
||||||
|
case ixgbe_mac_82598EB:
|
||||||
|
txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
|
||||||
|
break;
|
||||||
|
case ixgbe_mac_82599EB:
|
||||||
|
default:
|
||||||
|
txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
|
||||||
|
break;
|
||||||
|
}
|
||||||
txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
|
txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
|
||||||
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
|
switch (hw->mac.type) {
|
||||||
|
case ixgbe_mac_82598EB:
|
||||||
|
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
|
||||||
|
break;
|
||||||
|
case ixgbe_mac_82599EB:
|
||||||
|
default:
|
||||||
|
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
|
||||||
|
break;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if (hw->mac.type == ixgbe_mac_82599EB) {
|
if (hw->mac.type == ixgbe_mac_82599EB) {
|
||||||
/* We enable 8 traffic classes, DCB only */
|
/* We enable 8 traffic classes, DCB only */
|
||||||
|
@ -4432,10 +4449,13 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
|
||||||
|
|
||||||
/* 82598 hardware only has a 32 bit counter in the high register */
|
/* 82598 hardware only has a 32 bit counter in the high register */
|
||||||
if (hw->mac.type == ixgbe_mac_82599EB) {
|
if (hw->mac.type == ixgbe_mac_82599EB) {
|
||||||
|
u64 tmp;
|
||||||
adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
|
adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
|
||||||
IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
|
tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF; /* 4 high bits of GORC */
|
||||||
|
adapter->stats.gorc += (tmp << 32);
|
||||||
adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
|
adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
|
||||||
IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
|
tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF; /* 4 high bits of GOTC */
|
||||||
|
adapter->stats.gotc += (tmp << 32);
|
||||||
adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL);
|
adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL);
|
||||||
IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
|
IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
|
||||||
adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
|
adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
|
||||||
|
@ -5071,7 +5091,6 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
|
||||||
/* Right now, we support IPv4 only */
|
/* Right now, we support IPv4 only */
|
||||||
struct ixgbe_atr_input atr_input;
|
struct ixgbe_atr_input atr_input;
|
||||||
struct tcphdr *th;
|
struct tcphdr *th;
|
||||||
struct udphdr *uh;
|
|
||||||
struct iphdr *iph = ip_hdr(skb);
|
struct iphdr *iph = ip_hdr(skb);
|
||||||
struct ethhdr *eth = (struct ethhdr *)skb->data;
|
struct ethhdr *eth = (struct ethhdr *)skb->data;
|
||||||
u16 vlan_id, src_port, dst_port, flex_bytes;
|
u16 vlan_id, src_port, dst_port, flex_bytes;
|
||||||
|
@ -5085,12 +5104,6 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
|
||||||
dst_port = th->dest;
|
dst_port = th->dest;
|
||||||
l4type |= IXGBE_ATR_L4TYPE_TCP;
|
l4type |= IXGBE_ATR_L4TYPE_TCP;
|
||||||
/* l4type IPv4 type is 0, no need to assign */
|
/* l4type IPv4 type is 0, no need to assign */
|
||||||
} else if(iph->protocol == IPPROTO_UDP) {
|
|
||||||
uh = udp_hdr(skb);
|
|
||||||
src_port = uh->source;
|
|
||||||
dst_port = uh->dest;
|
|
||||||
l4type |= IXGBE_ATR_L4TYPE_UDP;
|
|
||||||
/* l4type IPv4 type is 0, no need to assign */
|
|
||||||
} else {
|
} else {
|
||||||
/* Unsupported L4 header, just bail here */
|
/* Unsupported L4 header, just bail here */
|
||||||
return;
|
return;
|
||||||
|
@ -5494,12 +5507,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
|
||||||
goto err_pci_reg;
|
goto err_pci_reg;
|
||||||
}
|
}
|
||||||
|
|
||||||
err = pci_enable_pcie_error_reporting(pdev);
|
pci_enable_pcie_error_reporting(pdev);
|
||||||
if (err) {
|
|
||||||
dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
|
|
||||||
"0x%x\n", err);
|
|
||||||
/* non-fatal, continue */
|
|
||||||
}
|
|
||||||
|
|
||||||
pci_set_master(pdev);
|
pci_set_master(pdev);
|
||||||
pci_save_state(pdev);
|
pci_save_state(pdev);
|
||||||
|
@ -5808,7 +5816,6 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
|
||||||
{
|
{
|
||||||
struct net_device *netdev = pci_get_drvdata(pdev);
|
struct net_device *netdev = pci_get_drvdata(pdev);
|
||||||
struct ixgbe_adapter *adapter = netdev_priv(netdev);
|
struct ixgbe_adapter *adapter = netdev_priv(netdev);
|
||||||
int err;
|
|
||||||
|
|
||||||
set_bit(__IXGBE_DOWN, &adapter->state);
|
set_bit(__IXGBE_DOWN, &adapter->state);
|
||||||
/* clear the module not found bit to make sure the worker won't
|
/* clear the module not found bit to make sure the worker won't
|
||||||
|
@ -5859,10 +5866,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
|
||||||
|
|
||||||
free_netdev(netdev);
|
free_netdev(netdev);
|
||||||
|
|
||||||
err = pci_disable_pcie_error_reporting(pdev);
|
pci_disable_pcie_error_reporting(pdev);
|
||||||
if (err)
|
|
||||||
dev_err(&pdev->dev,
|
|
||||||
"pci_disable_pcie_error_reporting failed 0x%x\n", err);
|
|
||||||
|
|
||||||
pci_disable_device(pdev);
|
pci_disable_device(pdev);
|
||||||
}
|
}
|
||||||
|
|
|
@@ -1336,6 +1336,8 @@
 #define IXGBE_AUTOC_KX4_SUPP    0x80000000
 #define IXGBE_AUTOC_KX_SUPP     0x40000000
 #define IXGBE_AUTOC_PAUSE       0x30000000
+#define IXGBE_AUTOC_ASM_PAUSE   0x20000000
+#define IXGBE_AUTOC_SYM_PAUSE   0x10000000
 #define IXGBE_AUTOC_RF          0x08000000
 #define IXGBE_AUTOC_PD_TMR      0x06000000
 #define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000
|
@ -1404,6 +1406,8 @@
|
||||||
#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */
|
#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */
|
||||||
#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */
|
#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */
|
||||||
|
|
||||||
|
#define IXGBE_LINKS2_AN_SUPPORTED 0x00000040
|
||||||
|
|
||||||
/* PCS1GLSTA Bit Masks */
|
/* PCS1GLSTA Bit Masks */
|
||||||
#define IXGBE_PCS1GLSTA_LINK_OK 1
|
#define IXGBE_PCS1GLSTA_LINK_OK 1
|
||||||
#define IXGBE_PCS1GLSTA_SYNK_OK 0x10
|
#define IXGBE_PCS1GLSTA_SYNK_OK 0x10
|
||||||
|
@ -1424,6 +1428,11 @@
|
||||||
#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000
|
#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000
|
||||||
#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000
|
#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000
|
||||||
|
|
||||||
|
/* ANLP1 Bit Masks */
|
||||||
|
#define IXGBE_ANLP1_PAUSE 0x0C00
|
||||||
|
#define IXGBE_ANLP1_SYM_PAUSE 0x0400
|
||||||
|
#define IXGBE_ANLP1_ASM_PAUSE 0x0800
|
||||||
|
|
||||||
/* SW Semaphore Register bitmasks */
|
/* SW Semaphore Register bitmasks */
|
||||||
#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
|
#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
|
||||||
#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
|
#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
|
||||||
|
|
drivers/net/ks8851_mll.c — new file, 1697 lines (diff suppressed because it is too large).
@@ -828,7 +828,7 @@ static int __exit meth_remove(struct platform_device *pdev)
 
 static struct platform_driver meth_driver = {
     .probe = meth_probe,
-    .remove = __devexit_p(meth_remove),
+    .remove = __exit_p(meth_remove),
     .driver = {
         .name = "meth",
         .owner = THIS_MODULE,
@@ -1381,15 +1381,15 @@ struct intr_context {
 
 /* adapter flags definitions. */
 enum {
-    QL_ADAPTER_UP = (1 << 0),   /* Adapter has been brought up. */
-    QL_LEGACY_ENABLED = (1 << 3),
-    QL_MSI_ENABLED = (1 << 3),
-    QL_MSIX_ENABLED = (1 << 4),
-    QL_DMA64 = (1 << 5),
-    QL_PROMISCUOUS = (1 << 6),
-    QL_ALLMULTI = (1 << 7),
-    QL_PORT_CFG = (1 << 8),
-    QL_CAM_RT_SET = (1 << 9),
+    QL_ADAPTER_UP = 0,          /* Adapter has been brought up. */
+    QL_LEGACY_ENABLED = 1,
+    QL_MSI_ENABLED = 2,
+    QL_MSIX_ENABLED = 3,
+    QL_DMA64 = 4,
+    QL_PROMISCUOUS = 5,
+    QL_ALLMULTI = 6,
+    QL_PORT_CFG = 7,
+    QL_CAM_RT_SET = 8,
 };
 
 /* link_status bit definitions */
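The likely motivation for the enum change above is that qdev->flags is driven through the kernel's bit operations — set_bit(QL_ADAPTER_UP, &qdev->flags) appears in the ql_asic_reset_work hunk further down — and those helpers take a bit *number*, not a mask; the old mask-style values also collided, with QL_LEGACY_ENABLED and QL_MSI_ENABLED both defined as (1 << 3). A small stand-alone sketch of the semantics, with simplified stand-ins for set_bit()/test_bit() (the enumerator values here are copied from the new header, everything else is illustrative):

```c
#include <stdio.h>

/* Bit numbers, as in the new qlge.h enum. */
enum { QL_ADAPTER_UP = 0, QL_MSI_ENABLED = 2 };

/* Minimal user-space stand-ins for the kernel's set_bit()/test_bit(),
 * which address a single bit by its index. */
static void set_bit(int nr, unsigned long *addr)        { *addr |= 1UL << nr; }
static int  test_bit(int nr, const unsigned long *addr) { return (*addr >> nr) & 1UL; }

int main(void)
{
	unsigned long flags = 0;

	set_bit(QL_ADAPTER_UP, &flags);   /* sets bit 0 */
	set_bit(QL_MSI_ENABLED, &flags);  /* sets bit 2 */

	/* With the old mask-style value (1 << 2) == 4, set_bit(4, ...) would
	 * have touched bit 4 instead of bit 2. */
	printf("flags = %#lx, MSI enabled? %d\n", flags, test_bit(QL_MSI_ENABLED, &flags));
	return 0;
}
```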
@@ -3142,14 +3142,14 @@ static int ql_route_initialize(struct ql_adapter *qdev)
 {
     int status = 0;
 
-    status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
-    if (status)
-        return status;
-
     /* Clear all the entries in the routing table. */
     status = ql_clear_routing_entries(qdev);
     if (status)
-        goto exit;
+        return status;
+
+    status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+    if (status)
+        return status;
 
     status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
     if (status) {
@@ -3380,12 +3380,10 @@ static int ql_adapter_down(struct ql_adapter *qdev)
 
     ql_free_rx_buffers(qdev);
 
-    spin_lock(&qdev->hw_lock);
     status = ql_adapter_reset(qdev);
     if (status)
         QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
             qdev->func);
-    spin_unlock(&qdev->hw_lock);
 
     return status;
 }
@@ -3705,7 +3703,7 @@ static void ql_asic_reset_work(struct work_struct *work)
     struct ql_adapter *qdev =
         container_of(work, struct ql_adapter, asic_reset_work.work);
     int status;
-
+    rtnl_lock();
     status = ql_adapter_down(qdev);
     if (status)
         goto error;
@@ -3713,12 +3711,12 @@ static void ql_asic_reset_work(struct work_struct *work)
     status = ql_adapter_up(qdev);
     if (status)
         goto error;
-
+    rtnl_unlock();
     return;
 error:
     QPRINTK(qdev, IFUP, ALERT,
         "Driver up/down cycle failed, closing device\n");
-    rtnl_lock();
+
     set_bit(QL_ADAPTER_UP, &qdev->flags);
     dev_close(qdev->ndev);
     rtnl_unlock();
@@ -3834,11 +3832,14 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
 		return err;
 	}
 
+	qdev->ndev = ndev;
+	qdev->pdev = pdev;
+	pci_set_drvdata(pdev, ndev);
 	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
 	if (pos <= 0) {
 		dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
			"aborting.\n");
-		goto err_out;
+		return pos;
 	} else {
 		pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
 		val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
@@ -3851,7 +3852,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
 	err = pci_request_regions(pdev, DRV_NAME);
 	if (err) {
 		dev_err(&pdev->dev, "PCI region request failed.\n");
-		goto err_out;
+		return err;
 	}
 
 	pci_set_master(pdev);
@@ -3869,7 +3870,6 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
 		goto err_out;
 	}
 
-	pci_set_drvdata(pdev, ndev);
 	qdev->reg_base =
 	    ioremap_nocache(pci_resource_start(pdev, 1),
			    pci_resource_len(pdev, 1));
@@ -3889,8 +3889,6 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
 		goto err_out;
 	}
 
-	qdev->ndev = ndev;
-	qdev->pdev = pdev;
 	err = ql_get_board_info(qdev);
 	if (err) {
 		dev_err(&pdev->dev, "Register access failed.\n");
@@ -826,7 +826,7 @@ static int __exit sgiseeq_remove(struct platform_device *pdev)
 
 static struct platform_driver sgiseeq_driver = {
 	.probe	= sgiseeq_probe,
-	.remove	= __devexit_p(sgiseeq_remove),
+	.remove	= __exit_p(sgiseeq_remove),
 	.driver = {
 		.name	= "sgiseeq",
 		.owner	= THIS_MODULE,
@@ -3935,11 +3935,14 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 #endif
 
 	err = -ENOMEM;
-	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
+	/* space for skge@pci:0000:04:00.0 */
+	hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:" )
+		     + strlen(pci_name(pdev)) + 1, GFP_KERNEL);
 	if (!hw) {
 		dev_err(&pdev->dev, "cannot allocate hardware struct\n");
 		goto err_out_free_regions;
 	}
+	sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
 
 	hw->pdev = pdev;
 	spin_lock_init(&hw->hw_lock);
@@ -3974,7 +3977,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 		goto err_out_free_netdev;
 	}
 
-	err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, dev->name, hw);
+	err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, hw->irq_name, hw);
 	if (err) {
 		dev_err(&pdev->dev, "%s: cannot assign irq %d\n",
			dev->name, pdev->irq);
@@ -3982,14 +3985,17 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 	}
 	skge_show_addr(dev);
 
-	if (hw->ports > 1 && (dev1 = skge_devinit(hw, 1, using_dac))) {
-		if (register_netdev(dev1) == 0)
+	if (hw->ports > 1) {
+		dev1 = skge_devinit(hw, 1, using_dac);
+		if (dev1 && register_netdev(dev1) == 0)
 			skge_show_addr(dev1);
 		else {
 			/* Failure to register second port need not be fatal */
 			dev_warn(&pdev->dev, "register of second port failed\n");
 			hw->dev[1] = NULL;
-			free_netdev(dev1);
+			hw->ports = 1;
+			if (dev1)
+				free_netdev(dev1);
 		}
 	}
 	pci_set_drvdata(pdev, hw);
@@ -2423,6 +2423,8 @@ struct skge_hw {
 	u16		     phy_addr;
 	spinlock_t	     phy_lock;
 	struct tasklet_struct phy_task;
+
+	char		     irq_name[0]; /* skge@pci:000:04:00.0 */
 };
 
 enum pause_control {
@@ -4487,13 +4487,16 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
 	wol_default = device_may_wakeup(&pdev->dev) ? WAKE_MAGIC : 0;
 
 	err = -ENOMEM;
-	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
+
+	hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:")
+		     + strlen(pci_name(pdev)) + 1, GFP_KERNEL);
 	if (!hw) {
 		dev_err(&pdev->dev, "cannot allocate hardware struct\n");
 		goto err_out_free_regions;
 	}
 
 	hw->pdev = pdev;
+	sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
 
 	hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
 	if (!hw->regs) {
@@ -4539,7 +4542,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
 
 	err = request_irq(pdev->irq, sky2_intr,
			  (hw->flags & SKY2_HW_USE_MSI) ? 0 : IRQF_SHARED,
-			  dev->name, hw);
+			  hw->irq_name, hw);
 	if (err) {
 		dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
 		goto err_out_unregister;
@@ -2085,6 +2085,8 @@ struct sky2_hw {
 	struct timer_list    watchdog_timer;
 	struct work_struct   restart_work;
 	wait_queue_head_t    msi_wait;
+
+	char		     irq_name[0];
 };
 
 static inline int sky2_is_copper(const struct sky2_hw *hw)
@@ -2412,7 +2412,6 @@ struct ring_info {
 
 struct tx_ring_info {
 	struct sk_buff			*skb;
-	u32				prev_vlan_tag;
 };
 
 struct tg3_config_info {
@@ -948,7 +948,7 @@ free:
 	return err;
 }
 
-static void virtnet_remove(struct virtio_device *vdev)
+static void __devexit virtnet_remove(struct virtio_device *vdev)
 {
 	struct virtnet_info *vi = vdev->priv;
 	struct sk_buff *skb;
@@ -31,13 +31,12 @@ config STRIP
 	---help---
 	  Say Y if you have a Metricom radio and intend to use Starmode Radio
 	  IP. STRIP is a radio protocol developed for the MosquitoNet project
-	  (on the WWW at <http://mosquitonet.stanford.edu/>) to send Internet
-	  traffic using Metricom radios. Metricom radios are small, battery
-	  powered, 100kbit/sec packet radio transceivers, about the size and
-	  weight of a cellular telephone. (You may also have heard them called
-	  "Metricom modems" but we avoid the term "modem" because it misleads
-	  many people into thinking that you can plug a Metricom modem into a
-	  phone line and use it as a modem.)
+	  to send Internet traffic using Metricom radios. Metricom radios are
+	  small, battery powered, 100kbit/sec packet radio transceivers, about
+	  the size and weight of a cellular telephone. (You may also have heard
+	  them called "Metricom modems" but we avoid the term "modem" because
+	  it misleads many people into thinking that you can plug a Metricom
+	  modem into a phone line and use it as a modem.)
 
 	  You can use STRIP on any Linux machine with a serial port, although
 	  it is obviously most useful for people with laptop computers. If you
@@ -1141,7 +1141,8 @@ static int ar9170_set_freq_cal_data(struct ar9170 *ar,
 	u8 vpds[2][AR5416_PD_GAIN_ICEPTS];
 	u8 pwrs[2][AR5416_PD_GAIN_ICEPTS];
 	int chain, idx, i;
-	u8 f;
+	u32 phy_data = 0;
+	u8 f, tmp;
 
 	switch (channel->band) {
 	case IEEE80211_BAND_2GHZ:
@@ -1208,9 +1209,6 @@ static int ar9170_set_freq_cal_data(struct ar9170 *ar,
 	}
 
 	for (i = 0; i < 76; i++) {
-		u32 phy_data;
-		u8 tmp;
-
 		if (i < 25) {
 			tmp = ar9170_interpolate_val(i, &pwrs[0][0],
						     &vpds[0][0]);
@@ -340,10 +340,15 @@ static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q,
			q->mmio_base + B43_PIO_TXDATA,
			sizeof(u16));
 	if (data_len & 1) {
+		u8 tail[2] = { 0, };
+
 		/* Write the last byte. */
 		ctl &= ~B43_PIO_TXCTL_WRITEHI;
 		b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
-		b43_piotx_write16(q, B43_PIO_TXDATA, data[data_len - 1]);
+		tail[0] = data[data_len - 1];
+		ssb_block_write(dev->dev, tail, 2,
+				q->mmio_base + B43_PIO_TXDATA,
+				sizeof(u16));
 	}
 
 	return ctl;
@@ -386,26 +391,31 @@ static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q,
			q->mmio_base + B43_PIO8_TXDATA,
			sizeof(u32));
 	if (data_len & 3) {
-		u32 value = 0;
+		u8 tail[4] = { 0, };
 
 		/* Write the last few bytes. */
 		ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 |
			 B43_PIO8_TXCTL_24_31);
-		data = &(data[data_len - 1]);
 		switch (data_len & 3) {
 		case 3:
-			ctl |= B43_PIO8_TXCTL_16_23;
-			value |= (u32)(*data) << 16;
-			data--;
+			ctl |= B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_8_15;
+			tail[0] = data[data_len - 3];
+			tail[1] = data[data_len - 2];
+			tail[2] = data[data_len - 1];
+			break;
 		case 2:
 			ctl |= B43_PIO8_TXCTL_8_15;
-			value |= (u32)(*data) << 8;
-			data--;
+			tail[0] = data[data_len - 2];
+			tail[1] = data[data_len - 1];
+			break;
 		case 1:
-			value |= (u32)(*data);
+			tail[0] = data[data_len - 1];
+			break;
 		}
 		b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);
-		b43_piotx_write32(q, B43_PIO8_TXDATA, value);
+		ssb_block_write(dev->dev, tail, 4,
+				q->mmio_base + B43_PIO8_TXDATA,
+				sizeof(u32));
 	}
 
 	return ctl;
@@ -693,21 +703,25 @@ data_ready:
			       q->mmio_base + B43_PIO8_RXDATA,
			       sizeof(u32));
 		if (len & 3) {
-			u32 value;
-			char *data;
+			u8 tail[4] = { 0, };
 
 			/* Read the last few bytes. */
-			value = b43_piorx_read32(q, B43_PIO8_RXDATA);
-			data = &(skb->data[len + padding - 1]);
+			ssb_block_read(dev->dev, tail, 4,
+				       q->mmio_base + B43_PIO8_RXDATA,
+				       sizeof(u32));
 			switch (len & 3) {
 			case 3:
-				*data = (value >> 16);
-				data--;
+				skb->data[len + padding - 3] = tail[0];
+				skb->data[len + padding - 2] = tail[1];
+				skb->data[len + padding - 1] = tail[2];
+				break;
 			case 2:
-				*data = (value >> 8);
-				data--;
+				skb->data[len + padding - 2] = tail[0];
+				skb->data[len + padding - 1] = tail[1];
+				break;
 			case 1:
-				*data = value;
+				skb->data[len + padding - 1] = tail[0];
+				break;
 			}
 		}
 	} else {
@@ -715,11 +729,13 @@ data_ready:
			       q->mmio_base + B43_PIO_RXDATA,
			       sizeof(u16));
 		if (len & 1) {
-			u16 value;
+			u8 tail[2] = { 0, };
 
 			/* Read the last byte. */
-			value = b43_piorx_read16(q, B43_PIO_RXDATA);
-			skb->data[len + padding - 1] = value;
+			ssb_block_read(dev->dev, tail, 2,
+				       q->mmio_base + B43_PIO_RXDATA,
+				       sizeof(u16));
+			skb->data[len + padding - 1] = tail[0];
 		}
 	}
 
@@ -631,6 +631,9 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
 		data->beacon_int = 1024 * info->beacon_int / 1000 * HZ / 1000;
 		if (WARN_ON(!data->beacon_int))
 			data->beacon_int = 1;
+		if (data->started)
+			mod_timer(&data->beacon_timer,
+				  jiffies + data->beacon_int);
 	}
 
 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
@@ -2381,6 +2381,7 @@ static struct usb_device_id rt73usb_device_table[] = {
 	/* Huawei-3Com */
 	{ USB_DEVICE(0x1472, 0x0009), USB_DEVICE_DATA(&rt73usb_ops) },
 	/* Hercules */
+	{ USB_DEVICE(0x06f8, 0xe002), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x06f8, 0xe010), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x06f8, 0xe020), USB_DEVICE_DATA(&rt73usb_ops) },
 	/* Linksys */
@@ -847,7 +847,7 @@ static dst_command_func dst_commands[] = {
 /*
  * Configuration parser.
  */
-static void cn_dst_callback(struct cn_msg *msg)
+static void cn_dst_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
 {
 	struct dst_ctl *ctl;
 	int err;
@@ -855,6 +855,11 @@ static void cn_dst_callback(struct cn_msg *msg)
 	struct dst_node *n = NULL, *tmp;
 	unsigned int hash;
 
+	if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) {
+		err = -EPERM;
+		goto out;
+	}
+
 	if (msg->len < sizeof(struct dst_ctl)) {
 		err = -EBADMSG;
 		goto out;
@@ -527,10 +527,13 @@ out_unlock:
 	return err;
 }
 
-static void pohmelfs_cn_callback(struct cn_msg *msg)
+static void pohmelfs_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
 {
 	int err;
 
+	if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN))
+		return;
+
 	switch (msg->flags) {
 	case POHMELFS_FLAGS_ADD:
 	case POHMELFS_FLAGS_DEL:
@@ -67,11 +67,14 @@ static DEFINE_MUTEX(uvfb_lock);
  * find the kernel part of the task struct, copy the registers and
  * the buffer contents and then complete the task.
  */
-static void uvesafb_cn_callback(struct cn_msg *msg)
+static void uvesafb_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
 {
 	struct uvesafb_task *utask;
 	struct uvesafb_ktask *task;
 
+	if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN))
+		return;
+
 	if (msg->seq >= UVESAFB_TASKS_MAX)
 		return;
 
@@ -306,7 +306,7 @@ static int w1_netlink_send_error(struct cn_msg *rcmsg, struct w1_netlink_msg *rm
 	return error;
 }
 
-static void w1_cn_callback(struct cn_msg *msg)
+static void w1_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
 {
 	struct w1_netlink_msg *m = (struct w1_netlink_msg *)(msg + 1);
 	struct w1_netlink_cmd *cmd;
@@ -132,11 +132,8 @@ struct cn_callback_id {
 };
 
 struct cn_callback_data {
-	void (*destruct_data) (void *);
-	void *ddata;
-
-	void *callback_priv;
-	void (*callback) (struct cn_msg *);
+	struct sk_buff *skb;
+	void (*callback) (struct cn_msg *, struct netlink_skb_parms *);
 
 	void *free;
 };
@@ -167,11 +164,11 @@ struct cn_dev {
 	struct cn_queue_dev *cbdev;
 };
 
-int cn_add_callback(struct cb_id *, char *, void (*callback) (struct cn_msg *));
+int cn_add_callback(struct cb_id *, char *, void (*callback) (struct cn_msg *, struct netlink_skb_parms *));
 void cn_del_callback(struct cb_id *);
 int cn_netlink_send(struct cn_msg *, u32, gfp_t);
 
-int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(struct cn_msg *));
+int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(struct cn_msg *, struct netlink_skb_parms *));
 void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id);
 
 int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work);
@@ -2105,15 +2105,17 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
 static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
 {
 	ktime_t start_time, end_time;
-	s32 remaining;
+	s64 remaining;
 	struct hrtimer_sleeper t;
 
 	hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 	hrtimer_set_expires(&t.timer, spin_until);
 
 	remaining = ktime_to_us(hrtimer_expires_remaining(&t.timer));
-	if (remaining <= 0)
+	if (remaining <= 0) {
+		pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
 		return;
+	}
 
 	start_time = ktime_now();
 	if (remaining < 100)
@@ -1119,6 +1119,7 @@ int inet_sk_rebuild_header(struct sock *sk)
 {
 	struct flowi fl = {
 		.oif = sk->sk_bound_dev_if,
+		.mark = sk->sk_mark,
 		.nl_u = {
 			.ip4_u = {
 				.daddr = daddr,
@@ -335,6 +335,7 @@ int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
 
 		{
 			struct flowi fl = { .oif = sk->sk_bound_dev_if,
+					    .mark = sk->sk_mark,
					    .nl_u = { .ip4_u =
						      { .daddr = daddr,
							.saddr = inet->saddr,
@@ -580,7 +580,7 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
 
 	lock_sock(sk);
 
-	timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK);
+	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
 	while (tss.len) {
 		ret = __tcp_splice_read(sk, &tss);
 		if (ret < 0)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
val = strncpy_from_user(name, optval,
|
val = strncpy_from_user(name, optval,
|
||||||
min(TCP_CA_NAME_MAX-1, optlen));
|
min_t(long, TCP_CA_NAME_MAX-1, optlen));
|
||||||
if (val < 0)
|
if (val < 0)
|
||||||
return -EFAULT;
|
return -EFAULT;
|
||||||
name[val] = 0;
|
name[val] = 0;
|
||||||
|
|
|
@ -361,6 +361,7 @@ static inline int tcp_urg_mode(const struct tcp_sock *tp)
|
||||||
#define OPTION_SACK_ADVERTISE (1 << 0)
|
#define OPTION_SACK_ADVERTISE (1 << 0)
|
||||||
#define OPTION_TS (1 << 1)
|
#define OPTION_TS (1 << 1)
|
||||||
#define OPTION_MD5 (1 << 2)
|
#define OPTION_MD5 (1 << 2)
|
||||||
|
#define OPTION_WSCALE (1 << 3)
|
||||||
|
|
||||||
struct tcp_out_options {
|
struct tcp_out_options {
|
||||||
u8 options; /* bit field of OPTION_* */
|
u8 options; /* bit field of OPTION_* */
|
||||||
|
@ -427,7 +428,7 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
|
||||||
TCPOLEN_SACK_PERM);
|
TCPOLEN_SACK_PERM);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (unlikely(opts->ws)) {
|
if (unlikely(OPTION_WSCALE & opts->options)) {
|
||||||
*ptr++ = htonl((TCPOPT_NOP << 24) |
|
*ptr++ = htonl((TCPOPT_NOP << 24) |
|
||||||
(TCPOPT_WINDOW << 16) |
|
(TCPOPT_WINDOW << 16) |
|
||||||
(TCPOLEN_WINDOW << 8) |
|
(TCPOLEN_WINDOW << 8) |
|
||||||
|
@ -494,8 +495,8 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
|
||||||
}
|
}
|
||||||
if (likely(sysctl_tcp_window_scaling)) {
|
if (likely(sysctl_tcp_window_scaling)) {
|
||||||
opts->ws = tp->rx_opt.rcv_wscale;
|
opts->ws = tp->rx_opt.rcv_wscale;
|
||||||
if (likely(opts->ws))
|
opts->options |= OPTION_WSCALE;
|
||||||
size += TCPOLEN_WSCALE_ALIGNED;
|
size += TCPOLEN_WSCALE_ALIGNED;
|
||||||
}
|
}
|
||||||
if (likely(sysctl_tcp_sack)) {
|
if (likely(sysctl_tcp_sack)) {
|
||||||
opts->options |= OPTION_SACK_ADVERTISE;
|
opts->options |= OPTION_SACK_ADVERTISE;
|
||||||
|
@ -537,8 +538,8 @@ static unsigned tcp_synack_options(struct sock *sk,
|
||||||
|
|
||||||
if (likely(ireq->wscale_ok)) {
|
if (likely(ireq->wscale_ok)) {
|
||||||
opts->ws = ireq->rcv_wscale;
|
opts->ws = ireq->rcv_wscale;
|
||||||
if (likely(opts->ws))
|
opts->options |= OPTION_WSCALE;
|
||||||
size += TCPOLEN_WSCALE_ALIGNED;
|
size += TCPOLEN_WSCALE_ALIGNED;
|
||||||
}
|
}
|
||||||
if (likely(doing_ts)) {
|
if (likely(doing_ts)) {
|
||||||
opts->options |= OPTION_TS;
|
opts->options |= OPTION_TS;
|
||||||
|
|
|
@@ -696,6 +696,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
 	if (rt == NULL) {
 		struct flowi fl = { .oif = ipc.oif,
+				    .mark = sk->sk_mark,
				    .nl_u = { .ip4_u =
					      { .daddr = faddr,
						.saddr = saddr,
@@ -367,7 +367,10 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
 	u32 staflags;
 
-	if (unlikely(!sta || ieee80211_is_probe_resp(hdr->frame_control)))
+	if (unlikely(!sta || ieee80211_is_probe_resp(hdr->frame_control)
+			|| ieee80211_is_auth(hdr->frame_control)
+			|| ieee80211_is_assoc_resp(hdr->frame_control)
+			|| ieee80211_is_reassoc_resp(hdr->frame_control)))
 		return TX_CONTINUE;
 
 	staflags = get_sta_flags(sta);