Mirror of https://github.com/adulau/aha.git (synced 2024-12-27 19:26:25 +00:00)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (31 commits)
  [BRIDGE]: Fix crash in __ip_route_output_key with bridge netfilter
  [NETFILTER]: ipt_CLUSTERIP: fix race between clusterip_config_find_get and _entry_put
  [IPV6] ADDRCONF: Don't generate temporary address for ip6-ip6 interface.
  [IPV6] ADDRCONF: Ensure disabling multicast RS even if privacy extensions are disabled.
  [IPV6]: Use appropriate sock tclass setting for routing lookup.
  [IPV6]: IPv6 extension header structures need to be packed.
  [IPV6]: Fix ipv6 address fetching in raw6_icmp_error().
  [NET]: Return more appropriate error from eth_validate_addr().
  [ISDN]: Do not validate ISDN net device address prior to interface-up
  [NET]: Fix kernel-doc for skb_segment
  [SOCK] sk_stamp: should be initialized to ktime_set(-1L, 0)
  net: check for underlength tap writes
  net: make struct tun_struct private to tun.c
  [SCTP]: IPv4 vs IPv6 addresses mess in sctp_inet[6]addr_event.
  [SCTP]: Fix compiler warning about const qualifiers
  [SCTP]: Fix protocol violation when receiving an error length INIT-ACK
  [SCTP]: Add check for hmac_algo parameter in sctp_verify_param()
  [NET_SCHED] cls_u32: refcounting fix for u32_delete()
  [DCCP]: Fix skb->cb conflicts with IP
  [AX25]: Potential ax25_uid_assoc-s leaks on module unload.
  ...
commit 533bb8a4d7
40 changed files with 250 additions and 116 deletions
@@ -2010,6 +2010,7 @@ isdn_net_init(struct net_device *ndev)
         ndev->flags = IFF_NOARP|IFF_POINTOPOINT;
         ndev->type = ARPHRD_ETHER;
         ndev->addr_len = ETH_ALEN;
+        ndev->validate_addr = NULL;
 
         /* for clients with MPPP maybe higher values better */
         ndev->tx_queue_len = 30;

@@ -40,7 +40,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME "ehea"
-#define DRV_VERSION "EHEA_0089"
+#define DRV_VERSION "EHEA_0090"
 
 /* eHEA capability flags */
 #define DLPAR_PORT_ADD_REM 1
@@ -371,6 +371,7 @@ struct ehea_port_res {
         struct ehea_q_skb_arr rq2_skba;
         struct ehea_q_skb_arr rq3_skba;
         struct ehea_q_skb_arr sq_skba;
+        int sq_skba_size;
         spinlock_t netif_queue;
         int queue_stopped;
         int swqe_refill_th;

@@ -349,7 +349,8 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
         pr->rq1_skba.os_skbs = 0;
 
         if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
-                pr->rq1_skba.index = index;
+                if (nr_of_wqes > 0)
+                        pr->rq1_skba.index = index;
                 pr->rq1_skba.os_skbs = fill_wqes;
                 return;
         }
@@ -1464,7 +1465,9 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
                  init_attr->act_nr_rwqes_rq2,
                  init_attr->act_nr_rwqes_rq3);
 
-        ret = ehea_init_q_skba(&pr->sq_skba, init_attr->act_nr_send_wqes + 1);
+        pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;
+
+        ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
         ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
         ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
         ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
@@ -2621,6 +2624,22 @@ void ehea_purge_sq(struct ehea_qp *orig_qp)
         }
 }
 
+void ehea_flush_sq(struct ehea_port *port)
+{
+        int i;
+
+        for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+                struct ehea_port_res *pr = &port->port_res[i];
+                int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
+                int k = 0;
+                while (atomic_read(&pr->swqe_avail) < swqe_max) {
+                        msleep(5);
+                        if (++k == 20)
+                                break;
+                }
+        }
+}
+
 int ehea_stop_qps(struct net_device *dev)
 {
         struct ehea_port *port = netdev_priv(dev);
@@ -2845,6 +2864,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
                 if (dev->flags & IFF_UP) {
                         down(&port->port_lock);
                         netif_stop_queue(dev);
+                        ehea_flush_sq(port);
                         ret = ehea_stop_qps(dev);
                         if (ret) {
                                 up(&port->port_lock);

@@ -5316,8 +5316,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 
         /* check the workaround bit for correct mac address order */
         txreg = readl(base + NvRegTransmitPoll);
-        if ((txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) ||
-            (id->driver_data & DEV_HAS_CORRECT_MACADDR)) {
+        if (id->driver_data & DEV_HAS_CORRECT_MACADDR) {
                 /* mac address is already in correct order */
                 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
                 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
@@ -5325,6 +5324,22 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
                 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
                 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
                 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
+        } else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
+                /* mac address is already in correct order */
+                dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
+                dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
+                dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
+                dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
+                dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
+                dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
+                /*
+                 * Set orig mac address back to the reversed version.
+                 * This flag will be cleared during low power transition.
+                 * Therefore, we should always put back the reversed address.
+                 */
+                np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) +
+                        (dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24);
+                np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8);
         } else {
                 /* need to reverse mac address to correct order */
                 dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
@@ -5595,7 +5610,9 @@ out:
 static int nv_resume(struct pci_dev *pdev)
 {
         struct net_device *dev = pci_get_drvdata(pdev);
+        u8 __iomem *base = get_hwbase(dev);
         int rc = 0;
+        u32 txreg;
 
         if (!netif_running(dev))
                 goto out;
@@ -5606,6 +5623,11 @@ static int nv_resume(struct pci_dev *pdev)
         pci_restore_state(pdev);
         pci_enable_wake(pdev, PCI_D0, 0);
 
+        /* restore mac address reverse flag */
+        txreg = readl(base + NvRegTransmitPoll);
+        txreg |= NVREG_TRANSMITPOLL_MAC_ADDR_REV;
+        writel(txreg, base + NvRegTransmitPoll);
+
         rc = nv_open(dev);
 out:
         return rc;

@@ -835,7 +835,8 @@ static int fs_enet_close(struct net_device *dev)
 
         netif_stop_queue(dev);
         netif_carrier_off(dev);
-        napi_disable(&fep->napi);
+        if (fep->fpi->use_napi)
+                napi_disable(&fep->napi);
         phy_stop(fep->phydev);
 
         spin_lock_irqsave(&fep->lock, flags);

@@ -242,12 +242,12 @@ static int macb_mii_init(struct macb *bp)
         /* Enable managment port */
         macb_writel(bp, NCR, MACB_BIT(MPE));
 
-        bp->mii_bus.name = "MACB_mii_bus",
-        bp->mii_bus.read = &macb_mdio_read,
-        bp->mii_bus.write = &macb_mdio_write,
-        bp->mii_bus.reset = &macb_mdio_reset,
-        bp->mii_bus.id = bp->pdev->id,
-        bp->mii_bus.priv = bp,
+        bp->mii_bus.name = "MACB_mii_bus";
+        bp->mii_bus.read = &macb_mdio_read;
+        bp->mii_bus.write = &macb_mdio_write;
+        bp->mii_bus.reset = &macb_mdio_reset;
+        bp->mii_bus.id = bp->pdev->id;
+        bp->mii_bus.priv = bp;
         bp->mii_bus.dev = &bp->dev->dev;
         pdata = bp->pdev->dev.platform_data;
 
@@ -1257,6 +1257,8 @@ static int __exit macb_remove(struct platform_device *pdev)
 
         if (dev) {
                 bp = netdev_priv(dev);
+                if (bp->phy_dev)
+                        phy_disconnect(bp->phy_dev);
                 mdiobus_unregister(&bp->mii_bus);
                 kfree(bp->mii_bus.irq);
                 unregister_netdev(dev);

@@ -1446,6 +1446,7 @@ static int __devinit sc92031_probe(struct pci_dev *pdev,
         }
 
         pci_set_drvdata(pdev, dev);
+        SET_NETDEV_DEV(dev, &pdev->dev);
 
 #if SC92031_USE_BAR == 0
         dev->mem_start = pci_resource_start(pdev, SC92031_USE_BAR);

@@ -154,6 +154,7 @@ static const char *yukon2_name[] = {
         "EC",        /* 0xb6 */
         "FE",        /* 0xb7 */
         "FE+",       /* 0xb8 */
+        "Supreme",   /* 0xb9 */
 };
 
 static void sky2_set_multicast(struct net_device *dev);

@@ -1326,9 +1326,11 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
         SMC_SET_INT_MASK(mask);
         spin_unlock(&lp->lock);
 
+#ifndef CONFIG_NET_POLL_CONTROLLER
         if (timeout == MAX_IRQ_LOOPS)
                 PRINTK("%s: spurious interrupt (mask = 0x%02x)\n",
                        dev->name, mask);
+#endif
         DBG(3, "%s: Interrupt done (%d loops)\n",
                dev->name, MAX_IRQ_LOOPS - timeout);
 

@@ -64,8 +64,8 @@
 
 #define DRV_MODULE_NAME "tg3"
 #define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.89"
-#define DRV_MODULE_RELDATE "April 03, 2008"
+#define DRV_MODULE_VERSION "3.90"
+#define DRV_MODULE_RELDATE "April 12, 2008"
 
 #define TG3_DEF_MAC_MODE 0
 #define TG3_DEF_RX_MODE 0
@@ -12578,7 +12578,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
                                   const struct pci_device_id *ent)
 {
         static int tg3_version_printed = 0;
-        unsigned long tg3reg_base, tg3reg_len;
+        resource_size_t tg3reg_base;
+        unsigned long tg3reg_len;
         struct net_device *dev;
         struct tg3 *tp;
         int err, pm_cap;

@@ -67,10 +67,43 @@
 #include <asm/system.h>
 #include <asm/uaccess.h>
 
+/* Uncomment to enable debugging */
+/* #define TUN_DEBUG 1 */
+
 #ifdef TUN_DEBUG
 static int debug;
+
+#define DBG  if(tun->debug)printk
+#define DBG1 if(debug==2)printk
+#else
+#define DBG( a... )
+#define DBG1( a... )
 #endif
 
+struct tun_struct {
+        struct list_head        list;
+        unsigned long           flags;
+        int                     attached;
+        uid_t                   owner;
+        gid_t                   group;
+
+        wait_queue_head_t       read_wait;
+        struct sk_buff_head     readq;
+
+        struct net_device       *dev;
+
+        struct fasync_struct    *fasync;
+
+        unsigned long if_flags;
+        u8 dev_addr[ETH_ALEN];
+        u32 chr_filter[2];
+        u32 net_filter[2];
+
+#ifdef TUN_DEBUG
+        int debug;
+#endif
+};
+
 /* Network device part of the driver */
 
 static LIST_HEAD(tun_dev_list);
@@ -253,8 +286,11 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv,
                         return -EFAULT;
         }
 
-        if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV)
+        if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
                 align = NET_IP_ALIGN;
+                if (unlikely(len < ETH_HLEN))
+                        return -EINVAL;
+        }
 
         if (!(skb = alloc_skb(len + align, GFP_KERNEL))) {
                 tun->dev->stats.rx_dropped++;

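The new TAP-side check rejects writes shorter than an Ethernet header (ETH_HLEN, 14 bytes) with -EINVAL before any skb is allocated, instead of passing an under-length buffer further into the receive path. A minimal user-space sketch of the behaviour this guards, assuming CAP_NET_ADMIN, a /dev/net/tun node and the standard TUNSETIFF ioctl; the interface name "tap-test" is only an illustration:

/* Sketch: write an undersized frame to a TAP device and expect EINVAL. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <net/if.h>
#include <linux/if_tun.h>

int main(void)
{
        struct ifreq ifr;
        char runt[4] = { 0 };                   /* far shorter than ETH_HLEN (14) */
        int fd = open("/dev/net/tun", O_RDWR);

        if (fd < 0)
                return 1;

        memset(&ifr, 0, sizeof(ifr));
        ifr.ifr_flags = IFF_TAP | IFF_NO_PI;    /* raw Ethernet frames, no extra header */
        strncpy(ifr.ifr_name, "tap-test", IFNAMSIZ - 1);
        if (ioctl(fd, TUNSETIFF, &ifr) < 0)
                return 1;

        /* With the check above, this write fails cleanly with EINVAL. */
        if (write(fd, runt, sizeof(runt)) < 0)
                printf("short TAP write rejected: %s\n", strerror(errno));

        close(fd);
        return 0;
}
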
@@ -3833,6 +3833,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
         struct device_node *phy;
         int err, ucc_num, max_speed = 0;
         const phandle *ph;
+        const u32 *fixed_link;
         const unsigned int *prop;
         const char *sprop;
         const void *mac_addr;
@@ -3923,18 +3924,38 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
 
         ug_info->uf_info.regs = res.start;
         ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
-        ph = of_get_property(np, "phy-handle", NULL);
-        phy = of_find_node_by_phandle(*ph);
-
-        if (phy == NULL)
-                return -ENODEV;
-
-        /* set the PHY address */
-        prop = of_get_property(phy, "reg", NULL);
-        if (prop == NULL)
-                return -1;
-        ug_info->phy_address = *prop;
-
+        fixed_link = of_get_property(np, "fixed-link", NULL);
+        if (fixed_link) {
+                ug_info->mdio_bus = 0;
+                ug_info->phy_address = fixed_link[0];
+                phy = NULL;
+        } else {
+                ph = of_get_property(np, "phy-handle", NULL);
+                phy = of_find_node_by_phandle(*ph);
+
+                if (phy == NULL)
+                        return -ENODEV;
+
+                /* set the PHY address */
+                prop = of_get_property(phy, "reg", NULL);
+                if (prop == NULL)
+                        return -1;
+                ug_info->phy_address = *prop;
+
+                /* Set the bus id */
+                mdio = of_get_parent(phy);
+
+                if (mdio == NULL)
+                        return -1;
+
+                err = of_address_to_resource(mdio, 0, &res);
+                of_node_put(mdio);
+
+                if (err)
+                        return -1;
+
+                ug_info->mdio_bus = res.start;
+        }
 
         /* get the phy interface type, or default to MII */
         prop = of_get_property(np, "phy-connection-type", NULL);
@@ -3979,20 +4000,6 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
                 ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_4;
         }
 
-        /* Set the bus id */
-        mdio = of_get_parent(phy);
-
-        if (mdio == NULL)
-                return -1;
-
-        err = of_address_to_resource(mdio, 0, &res);
-        of_node_put(mdio);
-
-        if (err)
-                return -1;
-
-        ug_info->mdio_bus = res.start;
-
         if (netif_msg_probe(&debug))
                 printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d) \n",
                         ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,

@@ -150,9 +150,13 @@ config HDLC_FR
 
 config HDLC_PPP
         tristate "Synchronous Point-to-Point Protocol (PPP) support"
-        depends on HDLC
+        depends on HDLC && BROKEN
         help
           Generic HDLC driver supporting PPP over WAN connections.
+          This module is currently broken and will cause a kernel panic
+          when a device configured in PPP mode is activated.
+
+          It will be replaced by new PPP implementation in Linux 2.6.26.
 
           If unsure, say N.
 

@@ -86,6 +86,7 @@ header-y += if_plip.h
 header-y += if_ppp.h
 header-y += if_slip.h
 header-y += if_strip.h
+header-y += if_tun.h
 header-y += if_tunnel.h
 header-y += in6.h
 header-y += in_route.h
@@ -229,7 +230,6 @@ unifdef-y += if_link.h
 unifdef-y += if_pppol2tp.h
 unifdef-y += if_pppox.h
 unifdef-y += if_tr.h
-unifdef-y += if_tun.h
 unifdef-y += if_vlan.h
 unifdef-y += if_wanpipe.h
 unifdef-y += igmp.h

@@ -18,47 +18,8 @@
 #ifndef __IF_TUN_H
 #define __IF_TUN_H
 
-/* Uncomment to enable debugging */
-/* #define TUN_DEBUG 1 */
-
 #include <linux/types.h>
 
-#ifdef __KERNEL__
-
-#ifdef TUN_DEBUG
-#define DBG  if(tun->debug)printk
-#define DBG1 if(debug==2)printk
-#else
-#define DBG( a... )
-#define DBG1( a... )
-#endif
-
-struct tun_struct {
-        struct list_head        list;
-        unsigned long           flags;
-        int                     attached;
-        uid_t                   owner;
-        gid_t                   group;
-
-        wait_queue_head_t       read_wait;
-        struct sk_buff_head     readq;
-
-        struct net_device       *dev;
-
-        struct fasync_struct    *fasync;
-
-        unsigned long if_flags;
-        u8 dev_addr[ETH_ALEN];
-        u32 chr_filter[2];
-        u32 net_filter[2];
-
-#ifdef TUN_DEBUG
-        int debug;
-#endif
-};
-
-#endif /* __KERNEL__ */
-
 /* Read queue size */
 #define TUN_READQ_SIZE 500
 

@@ -53,7 +53,7 @@ struct ipv6_opt_hdr {
         /*
          * TLV encoded option data follows.
          */
-};
+} __attribute__ ((packed)); /* required for some archs */
 
 #define ipv6_destopt_hdr ipv6_opt_hdr
 #define ipv6_hopopt_hdr ipv6_opt_hdr

@@ -47,7 +47,7 @@ static inline __u8 INET_ECN_encapsulate(__u8 outer, __u8 inner)
     } while (0)
 
 #define IP6_ECN_flow_xmit(sk, label) do { \
-        if (INET_ECN_is_capable(inet_sk(sk)->tos)) \
+        if (INET_ECN_is_capable(inet6_sk(sk)->tclass)) \
                 (label) |= htonl(INET_ECN_ECT_0 << 20); \
     } while (0)
 

@@ -104,6 +104,7 @@ typedef enum {
         SCTP_CMD_ADAPTATION_IND, /* generate and send adaptation event */
         SCTP_CMD_ASSOC_SHKEY,    /* generate the association shared keys */
         SCTP_CMD_T1_RETRAN,      /* Mark for retransmission after T1 timeout */
+        SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */
         SCTP_CMD_LAST
 } sctp_verb_t;
 

@@ -67,7 +67,7 @@ struct sctp_ulpevent {
 };
 
 /* Retrieve the skb this event sits inside of. */
-static inline struct sk_buff *sctp_event2skb(struct sctp_ulpevent *ev)
+static inline struct sk_buff *sctp_event2skb(const struct sctp_ulpevent *ev)
 {
         return container_of((void *)ev, struct sk_buff, cb);
 }

@@ -218,9 +218,11 @@ void __exit ax25_uid_free(void)
         struct hlist_node *node;
 
         write_lock(&ax25_uid_lock);
+again:
         ax25_uid_for_each(ax25_uid, node, &ax25_uid_list) {
                 hlist_del_init(&ax25_uid->uid_node);
                 ax25_uid_put(ax25_uid);
+                goto again;
         }
         write_unlock(&ax25_uid_lock);
 }

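The leak fix empties the uid list by restarting the walk from the head after every removal, because ax25_uid_put() can free the node the iterator is standing on. A simplified user-space sketch of the same restart-after-delete idiom on a plain singly linked list; the names are illustrative, not kernel API:

#include <stdlib.h>

struct node {
        struct node *next;
        int value;
};

/* Free every element. Rather than walking with a saved 'next' pointer that
 * may become stale once release callbacks run, restart from the head after
 * each removal, mirroring the goto-again loop in ax25_uid_free(). */
static void free_all(struct node **head)
{
again:
        if (*head) {
                struct node *victim = *head;

                *head = victim->next;   /* unlink first */
                free(victim);           /* then release */
                goto again;
        }
}
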
@@ -110,7 +110,8 @@ static inline __be16 pppoe_proto(const struct sk_buff *skb)
  * ipt_REJECT needs it. Future netfilter modules might
  * require us to fill additional fields. */
 static struct net_device __fake_net_device = {
-        .hard_header_len = ETH_HLEN
+        .hard_header_len = ETH_HLEN,
+        .nd_net          = &init_net,
 };
 
 static struct rtable __fake_rtable = {

@@ -2131,8 +2131,8 @@ EXPORT_SYMBOL_GPL(skb_pull_rcsum);
  *      @features: features for the output path (see dev->features)
  *
  *      This function performs segmentation on the given skb. It returns
- *      the segment at the given position. It returns NULL if there are
- *      no more segments to generate, or when an error is encountered.
+ *      a pointer to the first in a list of new skbs for the segments.
+ *      In case of error it returns ERR_PTR(err).
  */
 struct sk_buff *skb_segment(struct sk_buff *skb, int features)
 {

@@ -1725,7 +1725,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
         sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
         sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
 
-        sk->sk_stamp = ktime_set(-1L, -1L);
+        sk->sk_stamp = ktime_set(-1L, 0);
 
         atomic_set(&sk->sk_refcnt, 1);
         atomic_set(&sk->sk_drops, 0);

@@ -325,6 +325,12 @@ static inline int dccp_bad_service_code(const struct sock *sk,
  * This is used for transmission as well as for reception.
  */
 struct dccp_skb_cb {
+        union {
+                struct inet_skb_parm h4;
+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+                struct inet6_skb_parm h6;
+#endif
+        } header;
         __u8 dccpd_type:4;
         __u8 dccpd_ccval:4;
         __u8 dccpd_reset_code,

@@ -489,7 +489,6 @@ static int dccp_v4_send_response(struct sock *sk, struct request_sock *req,
 
                 dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->loc_addr,
                                                          ireq->rmt_addr);
-                memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
                 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
                                             ireq->rmt_addr,
                                             ireq->opt);

@@ -126,7 +126,6 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
 
                 DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
 
-                memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
                 err = icsk->icsk_af_ops->queue_xmit(skb, 0);
                 return net_xmit_eval(err);
         }

@@ -1057,6 +1057,9 @@ static int __init dccp_init(void)
         int ehash_order, bhash_order, i;
         int rc = -ENOBUFS;
 
+        BUILD_BUG_ON(sizeof(struct dccp_skb_cb) >
+                     FIELD_SIZEOF(struct sk_buff, cb));
+
         dccp_hashinfo.bind_bucket_cachep =
                 kmem_cache_create("dccp_bind_bucket",
                                   sizeof(struct inet_bind_bucket), 0,

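BUILD_BUG_ON() turns the size requirement into a compile-time failure: the DCCP control block, including the embedded inet_skb_parm or inet6_skb_parm, has to fit inside the fixed skb->cb[] scratch area. A stand-alone sketch of the same style of compile-time guard, assuming C11 static_assert and a 48-byte scratch size standing in for sizeof(skb->cb); the struct names are illustrative only:

#include <assert.h>     /* static_assert (C11) */

#define SCRATCH_SIZE 48 /* stand-in for sizeof(((struct sk_buff *)0)->cb) */

struct ip_private { unsigned char data[40]; };  /* stand-in for inet_skb_parm */

struct proto_private {
        struct ip_private header;       /* the lower layer's state comes first */
        unsigned char type;
        unsigned char flags;
};

/* Fail the build, rather than corrupt a neighbour's state at run time,
 * if the per-protocol block ever outgrows the scratch area. */
static_assert(sizeof(struct proto_private) <= SCRATCH_SIZE,
              "private control block must fit the scratch area");

int main(void)
{
        return 0;
}
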
@@ -301,7 +301,7 @@ static int eth_change_mtu(struct net_device *dev, int new_mtu)
 static int eth_validate_addr(struct net_device *dev)
 {
         if (!is_valid_ether_addr(dev->dev_addr))
-                return -EINVAL;
+                return -EADDRNOTAVAIL;
 
         return 0;
 }

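eth_validate_addr() delegates to is_valid_ether_addr(), which refuses the all-zeros address and any address with the group (multicast/broadcast) bit set; -EADDRNOTAVAIL names that condition more precisely than the generic -EINVAL. A stand-alone sketch of the same validity rule, not the kernel's implementation:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define MAC_LEN 6

/* An Ethernet address is usable as a device address only if it is not
 * all-zeros and the group bit in the first octet (which also covers the
 * broadcast address ff:ff:ff:ff:ff:ff) is clear. */
static bool mac_is_valid(const uint8_t addr[MAC_LEN])
{
        static const uint8_t zero[MAC_LEN];

        if (memcmp(addr, zero, MAC_LEN) == 0)
                return false;           /* 00:00:00:00:00:00 */
        if (addr[0] & 0x01)
                return false;           /* multicast or broadcast */
        return true;
}

int main(void)
{
        const uint8_t good[MAC_LEN] = { 0x02, 0x00, 0x5e, 0x00, 0x00, 0x01 };
        const uint8_t bad[MAC_LEN]  = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

        return (mac_is_valid(good) && !mac_is_valid(bad)) ? 0 : 1;
}
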
@@ -82,8 +82,8 @@ clusterip_config_put(struct clusterip_config *c)
 static inline void
 clusterip_config_entry_put(struct clusterip_config *c)
 {
+        write_lock_bh(&clusterip_lock);
         if (atomic_dec_and_test(&c->entries)) {
-                write_lock_bh(&clusterip_lock);
                 list_del(&c->list);
                 write_unlock_bh(&clusterip_lock);
 
@@ -96,7 +96,9 @@ clusterip_config_entry_put(struct clusterip_config *c)
 #ifdef CONFIG_PROC_FS
                 remove_proc_entry(c->pde->name, c->pde->parent);
 #endif
+                return;
         }
+        write_unlock_bh(&clusterip_lock);
 }
 
 static struct clusterip_config *

@@ -371,25 +371,26 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
          */
         in6_dev_hold(ndev);
 
+#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
+        if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) {
+                printk(KERN_INFO
+                       "%s: Disabled Multicast RS\n",
+                       dev->name);
+                ndev->cnf.rtr_solicits = 0;
+        }
+#endif
+
 #ifdef CONFIG_IPV6_PRIVACY
         setup_timer(&ndev->regen_timer, ipv6_regen_rndid, (unsigned long)ndev);
         if ((dev->flags&IFF_LOOPBACK) ||
             dev->type == ARPHRD_TUNNEL ||
-#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
+            dev->type == ARPHRD_TUNNEL6 ||
             dev->type == ARPHRD_SIT ||
-#endif
             dev->type == ARPHRD_NONE) {
                 printk(KERN_INFO
                        "%s: Disabled Privacy Extensions\n",
                        dev->name);
                 ndev->cnf.use_tempaddr = -1;
-
-                if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) {
-                        printk(KERN_INFO
-                               "%s: Disabled Multicast RS\n",
-                               dev->name);
-                        ndev->cnf.rtr_solicits = 0;
-                }
         } else {
                 in6_dev_hold(ndev);
                 ipv6_regen_rndid((unsigned long) ndev);

@@ -372,8 +372,10 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
         read_lock(&raw_v6_hashinfo.lock);
         sk = sk_head(&raw_v6_hashinfo.ht[hash]);
         if (sk != NULL) {
-                saddr = &ipv6_hdr(skb)->saddr;
-                daddr = &ipv6_hdr(skb)->daddr;
+                struct ipv6hdr *hdr = (struct ipv6hdr *) skb->data;
+
+                saddr = &hdr->saddr;
+                daddr = &hdr->daddr;
                 net = skb->dev->nd_net;
 
                 while ((sk = __raw_v6_lookup(net, sk, nexthdr, saddr, daddr,

@@ -411,8 +411,10 @@ static void u32_destroy(struct tcf_proto *tp)
                 }
         }
 
-        for (ht=tp_c->hlist; ht; ht = ht->next)
+        for (ht = tp_c->hlist; ht; ht = ht->next) {
+                ht->refcnt--;
                 u32_clear_hnode(tp, ht);
+        }
 
         while ((ht = tp_c->hlist) != NULL) {
                 tp_c->hlist = ht->next;
@@ -441,8 +443,12 @@ static int u32_delete(struct tcf_proto *tp, unsigned long arg)
         if (tp->root == ht)
                 return -EINVAL;
 
-        if (--ht->refcnt == 0)
+        if (ht->refcnt == 1) {
+                ht->refcnt--;
                 u32_destroy_hnode(tp, ht);
+        } else {
+                return -EBUSY;
+        }
 
         return 0;
 }
@@ -568,7 +574,7 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
         if (ht == NULL)
                 return -ENOBUFS;
         ht->tp_c = tp_c;
-        ht->refcnt = 0;
+        ht->refcnt = 1;
         ht->divisor = divisor;
         ht->handle = handle;
         ht->prio = tp->prio;

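With this fix a hash node created by u32_change() starts with refcnt 1 (the table's own reference), u32_destroy() drops that reference while clearing each node, and u32_delete() only tears a node down when that table reference is the last one left, otherwise reporting -EBUSY. A simplified stand-alone sketch of that ownership rule; the names are illustrative, not the cls_u32 API:

#include <stdio.h>
#include <stdlib.h>

struct hnode {
        int refcnt;             /* 1 means: owned only by the table */
};

static struct hnode *hnode_create(void)
{
        struct hnode *ht = calloc(1, sizeof(*ht));

        if (ht)
                ht->refcnt = 1; /* creation takes the table's reference */
        return ht;
}

/* Deletion succeeds only when nothing but the table still holds the node. */
static int hnode_delete(struct hnode *ht)
{
        if (ht->refcnt != 1)
                return -1;      /* still referenced elsewhere: refuse (-EBUSY) */
        ht->refcnt--;
        free(ht);
        return 0;
}

int main(void)
{
        struct hnode *ht = hnode_create();

        if (!ht)
                return 1;
        ht->refcnt++;                                   /* a filter links to the node */
        printf("busy delete: %d\n", hnode_delete(ht));  /* refused */
        ht->refcnt--;                                   /* filter unlinked again */
        printf("final delete: %d\n", hnode_delete(ht)); /* succeeds */
        return 0;
}
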
@@ -110,8 +110,9 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
                 spin_lock_bh(&sctp_local_addr_lock);
                 list_for_each_entry_safe(addr, temp,
                                         &sctp_local_addr_list, list) {
-                        if (ipv6_addr_equal(&addr->a.v6.sin6_addr,
-                                        &ifa->addr)) {
+                        if (addr->a.sa.sa_family == AF_INET6 &&
+                            ipv6_addr_equal(&addr->a.v6.sin6_addr,
+                                        &ifa->addr)) {
                                 found = 1;
                                 addr->valid = 0;
                                 list_del_rcu(&addr->list);

@@ -793,6 +793,9 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
                                 break;
 
                         case SCTP_CID_ABORT:
+                                if (sctp_test_T_bit(chunk)) {
+                                        packet->vtag = asoc->c.my_vtag;
+                                }
                         case SCTP_CID_SACK:
                         case SCTP_CID_HEARTBEAT:
                         case SCTP_CID_HEARTBEAT_ACK:

@@ -647,7 +647,9 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
                 spin_lock_bh(&sctp_local_addr_lock);
                 list_for_each_entry_safe(addr, temp,
                                         &sctp_local_addr_list, list) {
-                        if (addr->a.v4.sin_addr.s_addr == ifa->ifa_local) {
+                        if (addr->a.sa.sa_family == AF_INET &&
+                                        addr->a.v4.sin_addr.s_addr ==
+                                        ifa->ifa_local) {
                                 found = 1;
                                 addr->valid = 0;
                                 list_del_rcu(&addr->list);

@@ -1982,7 +1982,10 @@ static sctp_ierror_t sctp_verify_param(const struct sctp_association *asoc,
                                         struct sctp_chunk *chunk,
                                         struct sctp_chunk **err_chunk)
 {
+        struct sctp_hmac_algo_param *hmacs;
         int retval = SCTP_IERROR_NO_ERROR;
+        __u16 n_elt, id = 0;
+        int i;
 
         /* FIXME - This routine is not looking at each parameter per the
          * chunk type, i.e., unrecognized parameters should be further
@@ -2056,9 +2059,29 @@ static sctp_ierror_t sctp_verify_param(const struct sctp_association *asoc,
                 break;
 
         case SCTP_PARAM_HMAC_ALGO:
-                if (sctp_auth_enable)
-                        break;
-                /* Fall Through */
+                if (!sctp_auth_enable)
+                        goto fallthrough;
+
+                hmacs = (struct sctp_hmac_algo_param *)param.p;
+                n_elt = (ntohs(param.p->length) - sizeof(sctp_paramhdr_t)) >> 1;
+
+                /* SCTP-AUTH: Section 6.1
+                 * The HMAC algorithm based on SHA-1 MUST be supported and
+                 * included in the HMAC-ALGO parameter.
+                 */
+                for (i = 0; i < n_elt; i++) {
+                        id = ntohs(hmacs->hmac_ids[i]);
+
+                        if (id == SCTP_AUTH_HMAC_ID_SHA1)
+                                break;
+                }
+
+                if (id != SCTP_AUTH_HMAC_ID_SHA1) {
+                        sctp_process_inv_paramlength(asoc, param.p, chunk,
+                                                     err_chunk);
+                        retval = SCTP_IERROR_ABORT;
+                }
+                break;
+fallthrough:
         default:
                 SCTP_DEBUG_PRINTK("Unrecognized param: %d for chunk %d.\n",

@@ -1536,6 +1536,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
                         error = sctp_auth_asoc_init_active_key(asoc,
                                                 GFP_ATOMIC);
                         break;
+                case SCTP_CMD_UPDATE_INITTAG:
+                        asoc->peer.i.init_tag = cmd->obj.u32;
+                        break;
 
                 default:
                         printk(KERN_WARNING "Impossible command: %u, %p\n",

@@ -4144,6 +4144,24 @@ static sctp_disposition_t sctp_sf_abort_violation(
                 goto nomem;
 
         if (asoc) {
+                /* Treat INIT-ACK as a special case during COOKIE-WAIT. */
+                if (chunk->chunk_hdr->type == SCTP_CID_INIT_ACK &&
+                    !asoc->peer.i.init_tag) {
+                        sctp_initack_chunk_t *initack;
+
+                        initack = (sctp_initack_chunk_t *)chunk->chunk_hdr;
+                        if (!sctp_chunk_length_valid(chunk,
+                                                     sizeof(sctp_initack_chunk_t)))
+                                abort->chunk_hdr->flags |= SCTP_CHUNK_FLAG_T;
+                        else {
+                                unsigned int inittag;
+
+                                inittag = ntohl(initack->init_hdr.init_tag);
+                                sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_INITTAG,
+                                                SCTP_U32(inittag));
+                        }
+                }
+
                 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
                 SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
 
@@ -4349,6 +4367,7 @@ sctp_disposition_t sctp_sf_do_prm_asoc(const struct sctp_endpoint *ep,
                                sctp_cmd_seq_t *commands)
 {
         struct sctp_chunk *repl;
+        struct sctp_association* my_asoc;
 
         /* The comment below says that we enter COOKIE-WAIT AFTER
          * sending the INIT, but that doesn't actually work in our
@@ -4372,8 +4391,8 @@ sctp_disposition_t sctp_sf_do_prm_asoc(const struct sctp_endpoint *ep,
         /* Cast away the const modifier, as we want to just
          * rerun it through as a sideffect.
          */
-        sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC,
-                        SCTP_ASOC((struct sctp_association *) asoc));
+        my_asoc = (struct sctp_association *)asoc;
+        sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(my_asoc));
 
         /* Choose transport for INIT. */
         sctp_add_cmd_sf(commands, SCTP_CMD_INIT_CHOOSE_TRANSPORT,

@@ -5868,11 +5868,12 @@ SCTP_STATIC int sctp_msghdr_parse(const struct msghdr *msg,
                                   sctp_cmsgs_t *cmsgs)
 {
         struct cmsghdr *cmsg;
+        struct msghdr *my_msg = (struct msghdr *)msg;
 
         for (cmsg = CMSG_FIRSTHDR(msg);
              cmsg != NULL;
-             cmsg = CMSG_NXTHDR((struct msghdr*)msg, cmsg)) {
-                if (!CMSG_OK(msg, cmsg))
+             cmsg = CMSG_NXTHDR(my_msg, cmsg)) {
+                if (!CMSG_OK(my_msg, cmsg))
                         return -EINVAL;
 
                 /* Should we parse this header or ignore? */

@@ -859,7 +859,7 @@ __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event)
         union sctp_notification *notification;
         struct sk_buff *skb;
 
-        skb = sctp_event2skb((struct sctp_ulpevent *)event);
+        skb = sctp_event2skb(event);
         notification = (union sctp_notification *) skb->data;
         return notification->sn_header.sn_type;
 }