Mirror of https://github.com/adulau/aha.git (synced 2024-12-28 03:36:19 +00:00)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (66 commits)
  be2net: fix some cmds to use mccq instead of mbox
  atl1e: fix 2.6.31-git4 -- ATL1E 0000:03:00.0: DMA-API: device driver frees DMA
  pkt_sched: Fix qstats.qlen updating in dump_stats
  ipv6: Log the affected address when DAD failure occurs
  wl12xx: Fix print_mac() conversion.
  af_iucv: fix race when queueing skbs on the backlog queue
  af_iucv: do not call iucv_sock_kill() twice
  af_iucv: handle non-accepted sockets after resuming from suspend
  af_iucv: fix race in __iucv_sock_wait()
  iucv: use correct output register in iucv_query_maxconn()
  iucv: fix iucv_buffer_cpumask check when calling IUCV functions
  iucv: suspend/resume error msg for left over pathes
  wl12xx: switch to %pM to print the mac address
  b44: the poll handler b44_poll must not enable IRQ unconditionally
  ipv6: Ignore route option with ROUTER_PREF_INVALID
  bonding: make ab_arp select active slaves as other modes
  cfg80211: fix SME connect
  rc80211_minstrel: fix contention window calculation
  ssb/sdio: fix printk format warnings
  p54usb: add Zcomax XG-705A usbid
  ...
commit f205ce83a7
132 changed files with 2012 additions and 808 deletions
@@ -377,10 +377,19 @@ struct atl1e_hw {
 */
struct atl1e_tx_buffer {
	struct sk_buff *skb;
	u16 flags;
#define ATL1E_TX_PCIMAP_SINGLE		0x0001
#define ATL1E_TX_PCIMAP_PAGE		0x0002
#define ATL1E_TX_PCIMAP_TYPE_MASK	0x0003
	u16 length;
	dma_addr_t dma;
};

#define ATL1E_SET_PCIMAP_TYPE(tx_buff, type) do {		\
	((tx_buff)->flags) &= ~ATL1E_TX_PCIMAP_TYPE_MASK;	\
	((tx_buff)->flags) |= (type);				\
	} while (0)

struct atl1e_rx_page {
	dma_addr_t dma;	/* receive rage DMA address */
	u8 *addr;	/* receive rage virtual address */
@@ -635,7 +635,11 @@ static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter)
	for (index = 0; index < ring_count; index++) {
		tx_buffer = &tx_ring->tx_buffer[index];
		if (tx_buffer->dma) {
			pci_unmap_page(pdev, tx_buffer->dma,
			if (tx_buffer->flags & ATL1E_TX_PCIMAP_SINGLE)
				pci_unmap_single(pdev, tx_buffer->dma,
					tx_buffer->length, PCI_DMA_TODEVICE);
			else if (tx_buffer->flags & ATL1E_TX_PCIMAP_PAGE)
				pci_unmap_page(pdev, tx_buffer->dma,
					tx_buffer->length, PCI_DMA_TODEVICE);
			tx_buffer->dma = 0;
		}

@@ -1220,7 +1224,11 @@ static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter)
	while (next_to_clean != hw_next_to_clean) {
		tx_buffer = &tx_ring->tx_buffer[next_to_clean];
		if (tx_buffer->dma) {
			pci_unmap_page(adapter->pdev, tx_buffer->dma,
			if (tx_buffer->flags & ATL1E_TX_PCIMAP_SINGLE)
				pci_unmap_single(adapter->pdev, tx_buffer->dma,
					tx_buffer->length, PCI_DMA_TODEVICE);
			else if (tx_buffer->flags & ATL1E_TX_PCIMAP_PAGE)
				pci_unmap_page(adapter->pdev, tx_buffer->dma,
					tx_buffer->length, PCI_DMA_TODEVICE);
			tx_buffer->dma = 0;
		}

@@ -1741,6 +1749,7 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
		tx_buffer->length = map_len;
		tx_buffer->dma = pci_map_single(adapter->pdev,
				skb->data, hdr_len, PCI_DMA_TODEVICE);
		ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE);
		mapped_len += map_len;
		use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
		use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |

@@ -1766,6 +1775,7 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
		tx_buffer->dma =
			pci_map_single(adapter->pdev, skb->data + mapped_len,
					map_len, PCI_DMA_TODEVICE);
		ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE);
		mapped_len += map_len;
		use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
		use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |

@@ -1801,6 +1811,7 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
					(i * MAX_TX_BUF_LEN),
					tx_buffer->length,
					PCI_DMA_TODEVICE);
		ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_PAGE);
		use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
		use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |
			((cpu_to_le32(tx_buffer->length) &
@@ -847,23 +847,22 @@ static int b44_poll(struct napi_struct *napi, int budget)
{
	struct b44 *bp = container_of(napi, struct b44, napi);
	int work_done;
	unsigned long flags;

	spin_lock_irq(&bp->lock);
	spin_lock_irqsave(&bp->lock, flags);

	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
		/* spin_lock(&bp->tx_lock); */
		b44_tx(bp);
		/* spin_unlock(&bp->tx_lock); */
	}
	spin_unlock_irq(&bp->lock);
	spin_unlock_irqrestore(&bp->lock, flags);

	work_done = 0;
	if (bp->istat & ISTAT_RX)
		work_done += b44_rx(bp, budget);

	if (bp->istat & ISTAT_ERRORS) {
		unsigned long flags;

		spin_lock_irqsave(&bp->lock, flags);
		b44_halt(bp);
		b44_init_rings(bp);
@@ -362,5 +362,6 @@ static inline u8 is_udp_pkt(struct sk_buff *skb)
extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
		u16 num_popped);
extern void be_link_status_update(struct be_adapter *adapter, bool link_up);
extern void netdev_stats_update(struct be_adapter *adapter);
extern int be_load_fw(struct be_adapter *adapter, u8 *func);
#endif /* BE_H */
@ -59,15 +59,22 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
|
|||
|
||||
compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
|
||||
CQE_STATUS_COMPL_MASK;
|
||||
if (compl_status != MCC_STATUS_SUCCESS) {
|
||||
if (compl_status == MCC_STATUS_SUCCESS) {
|
||||
if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
|
||||
struct be_cmd_resp_get_stats *resp =
|
||||
adapter->stats.cmd.va;
|
||||
be_dws_le_to_cpu(&resp->hw_stats,
|
||||
sizeof(resp->hw_stats));
|
||||
netdev_stats_update(adapter);
|
||||
}
|
||||
} else if (compl_status != MCC_STATUS_NOT_SUPPORTED) {
|
||||
extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
|
||||
CQE_STATUS_EXTD_MASK;
|
||||
dev_warn(&adapter->pdev->dev,
|
||||
"Error in cmd completion: status(compl/extd)=%d/%d\n",
|
||||
compl_status, extd_status);
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
return compl_status;
|
||||
}
|
||||
|
||||
/* Link state evt is a string of bytes; no need for endian swapping */
|
||||
|
@ -97,10 +104,10 @@ static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
void be_process_mcc(struct be_adapter *adapter)
|
||||
int be_process_mcc(struct be_adapter *adapter)
|
||||
{
|
||||
struct be_mcc_compl *compl;
|
||||
int num = 0;
|
||||
int num = 0, status = 0;
|
||||
|
||||
spin_lock_bh(&adapter->mcc_cq_lock);
|
||||
while ((compl = be_mcc_compl_get(adapter))) {
|
||||
|
@ -111,38 +118,47 @@ void be_process_mcc(struct be_adapter *adapter)
|
|||
/* Interpret compl as a async link evt */
|
||||
be_async_link_state_process(adapter,
|
||||
(struct be_async_event_link_state *) compl);
|
||||
} else {
|
||||
be_mcc_compl_process(adapter, compl);
|
||||
atomic_dec(&adapter->mcc_obj.q.used);
|
||||
} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
|
||||
status = be_mcc_compl_process(adapter, compl);
|
||||
atomic_dec(&adapter->mcc_obj.q.used);
|
||||
}
|
||||
be_mcc_compl_use(compl);
|
||||
num++;
|
||||
}
|
||||
|
||||
if (num)
|
||||
be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, num);
|
||||
|
||||
spin_unlock_bh(&adapter->mcc_cq_lock);
|
||||
return status;
|
||||
}
|
||||
|
||||
/* Wait till no more pending mcc requests are present */
|
||||
static void be_mcc_wait_compl(struct be_adapter *adapter)
|
||||
static int be_mcc_wait_compl(struct be_adapter *adapter)
|
||||
{
|
||||
#define mcc_timeout 50000 /* 5s timeout */
|
||||
int i;
|
||||
#define mcc_timeout 120000 /* 12s timeout */
|
||||
int i, status;
|
||||
for (i = 0; i < mcc_timeout; i++) {
|
||||
be_process_mcc(adapter);
|
||||
status = be_process_mcc(adapter);
|
||||
if (status)
|
||||
return status;
|
||||
|
||||
if (atomic_read(&adapter->mcc_obj.q.used) == 0)
|
||||
break;
|
||||
udelay(100);
|
||||
}
|
||||
if (i == mcc_timeout)
|
||||
if (i == mcc_timeout) {
|
||||
dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Notify MCC requests and wait for completion */
|
||||
static void be_mcc_notify_wait(struct be_adapter *adapter)
|
||||
static int be_mcc_notify_wait(struct be_adapter *adapter)
|
||||
{
|
||||
be_mcc_notify(adapter);
|
||||
be_mcc_wait_compl(adapter);
|
||||
return be_mcc_wait_compl(adapter);
|
||||
}
|
||||
|
||||
static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
|
||||
|
@ -173,7 +189,7 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
|
|||
* Insert the mailbox address into the doorbell in two steps
|
||||
* Polls on the mbox doorbell till a command completion (or a timeout) occurs
|
||||
*/
|
||||
static int be_mbox_notify(struct be_adapter *adapter)
|
||||
static int be_mbox_notify_wait(struct be_adapter *adapter)
|
||||
{
|
||||
int status;
|
||||
u32 val = 0;
|
||||
|
@ -182,8 +198,6 @@ static int be_mbox_notify(struct be_adapter *adapter)
|
|||
struct be_mcc_mailbox *mbox = mbox_mem->va;
|
||||
struct be_mcc_compl *compl = &mbox->compl;
|
||||
|
||||
memset(compl, 0, sizeof(*compl));
|
||||
|
||||
val |= MPU_MAILBOX_DB_HI_MASK;
|
||||
/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
|
||||
val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
|
||||
|
@ -310,34 +324,40 @@ static u32 eq_delay_to_mult(u32 usec_delay)
|
|||
return multiplier;
|
||||
}
|
||||
|
||||
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
|
||||
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
|
||||
{
|
||||
return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
|
||||
struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
|
||||
struct be_mcc_wrb *wrb
|
||||
= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
|
||||
memset(wrb, 0, sizeof(*wrb));
|
||||
return wrb;
|
||||
}
|
||||
|
||||
static inline struct be_mcc_wrb *wrb_from_mcc(struct be_queue_info *mccq)
|
||||
static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
|
||||
{
|
||||
struct be_mcc_wrb *wrb = NULL;
|
||||
if (atomic_read(&mccq->used) < mccq->len) {
|
||||
wrb = queue_head_node(mccq);
|
||||
queue_head_inc(mccq);
|
||||
atomic_inc(&mccq->used);
|
||||
memset(wrb, 0, sizeof(*wrb));
|
||||
}
|
||||
struct be_queue_info *mccq = &adapter->mcc_obj.q;
|
||||
struct be_mcc_wrb *wrb;
|
||||
|
||||
BUG_ON(atomic_read(&mccq->used) >= mccq->len);
|
||||
wrb = queue_head_node(mccq);
|
||||
queue_head_inc(mccq);
|
||||
atomic_inc(&mccq->used);
|
||||
memset(wrb, 0, sizeof(*wrb));
|
||||
return wrb;
|
||||
}
|
||||
|
||||
int be_cmd_eq_create(struct be_adapter *adapter,
|
||||
struct be_queue_info *eq, int eq_delay)
|
||||
{
|
||||
struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
|
||||
struct be_cmd_req_eq_create *req = embedded_payload(wrb);
|
||||
struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
|
||||
struct be_mcc_wrb *wrb;
|
||||
struct be_cmd_req_eq_create *req;
|
||||
struct be_dma_mem *q_mem = &eq->dma_mem;
|
||||
int status;
|
||||
|
||||
spin_lock(&adapter->mbox_lock);
|
||||
memset(wrb, 0, sizeof(*wrb));
|
||||
|
||||
wrb = wrb_from_mbox(adapter);
|
||||
req = embedded_payload(wrb);
|
||||
|
||||
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
|
||||
|
||||
|
@ -359,25 +379,29 @@ int be_cmd_eq_create(struct be_adapter *adapter,
|
|||
|
||||
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
|
||||
|
||||
status = be_mbox_notify(adapter);
|
||||
status = be_mbox_notify_wait(adapter);
|
||||
if (!status) {
|
||||
struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
|
||||
eq->id = le16_to_cpu(resp->eq_id);
|
||||
eq->created = true;
|
||||
}
|
||||
|
||||
spin_unlock(&adapter->mbox_lock);
|
||||
return status;
|
||||
}
|
||||
|
||||
/* Uses mbox */
|
||||
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
|
||||
u8 type, bool permanent, u32 if_handle)
|
||||
{
|
||||
struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
|
||||
struct be_cmd_req_mac_query *req = embedded_payload(wrb);
|
||||
struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
|
||||
struct be_mcc_wrb *wrb;
|
||||
struct be_cmd_req_mac_query *req;
|
||||
int status;
|
||||
|
||||
spin_lock(&adapter->mbox_lock);
|
||||
memset(wrb, 0, sizeof(*wrb));
|
||||
|
||||
wrb = wrb_from_mbox(adapter);
|
||||
req = embedded_payload(wrb);
|
||||
|
||||
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
|
||||
|
||||
|
@ -388,27 +412,32 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
|
|||
if (permanent) {
|
||||
req->permanent = 1;
|
||||
} else {
|
||||
req->if_id = cpu_to_le16((u16)if_handle);
|
||||
req->if_id = cpu_to_le16((u16) if_handle);
|
||||
req->permanent = 0;
|
||||
}
|
||||
|
||||
status = be_mbox_notify(adapter);
|
||||
if (!status)
|
||||
status = be_mbox_notify_wait(adapter);
|
||||
if (!status) {
|
||||
struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
|
||||
memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
|
||||
}
|
||||
|
||||
spin_unlock(&adapter->mbox_lock);
|
||||
return status;
|
||||
}
|
||||
|
||||
/* Uses synchronous MCCQ */
|
||||
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
|
||||
u32 if_id, u32 *pmac_id)
|
||||
{
|
||||
struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
|
||||
struct be_cmd_req_pmac_add *req = embedded_payload(wrb);
|
||||
struct be_mcc_wrb *wrb;
|
||||
struct be_cmd_req_pmac_add *req;
|
||||
int status;
|
||||
|
||||
spin_lock(&adapter->mbox_lock);
|
||||
memset(wrb, 0, sizeof(*wrb));
|
||||
spin_lock_bh(&adapter->mcc_lock);
|
||||
|
||||
wrb = wrb_from_mccq(adapter);
|
||||
req = embedded_payload(wrb);
|
||||
|
||||
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
|
||||
|
||||
|
@ -418,24 +447,27 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
|
|||
req->if_id = cpu_to_le32(if_id);
|
||||
memcpy(req->mac_address, mac_addr, ETH_ALEN);
|
||||
|
||||
status = be_mbox_notify(adapter);
|
||||
status = be_mcc_notify_wait(adapter);
|
||||
if (!status) {
|
||||
struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
|
||||
*pmac_id = le32_to_cpu(resp->pmac_id);
|
||||
}
|
||||
|
||||
spin_unlock(&adapter->mbox_lock);
|
||||
spin_unlock_bh(&adapter->mcc_lock);
|
||||
return status;
|
||||
}
|
||||
|
||||
/* Uses synchronous MCCQ */
|
||||
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
|
||||
{
|
||||
struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
|
||||
struct be_cmd_req_pmac_del *req = embedded_payload(wrb);
|
||||
struct be_mcc_wrb *wrb;
|
||||
struct be_cmd_req_pmac_del *req;
|
||||
int status;
|
||||
|
||||
spin_lock(&adapter->mbox_lock);
|
||||
memset(wrb, 0, sizeof(*wrb));
|
||||
spin_lock_bh(&adapter->mcc_lock);
|
||||
|
||||
wrb = wrb_from_mccq(adapter);
|
||||
req = embedded_payload(wrb);
|
||||
|
||||
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
|
||||
|
||||
|
@ -445,25 +477,29 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
|
|||
req->if_id = cpu_to_le32(if_id);
|
||||
req->pmac_id = cpu_to_le32(pmac_id);
|
||||
|
||||
status = be_mbox_notify(adapter);
|
||||
spin_unlock(&adapter->mbox_lock);
|
||||
status = be_mcc_notify_wait(adapter);
|
||||
|
||||
spin_unlock_bh(&adapter->mcc_lock);
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
/* Uses Mbox */
|
||||
int be_cmd_cq_create(struct be_adapter *adapter,
|
||||
struct be_queue_info *cq, struct be_queue_info *eq,
|
||||
bool sol_evts, bool no_delay, int coalesce_wm)
|
||||
{
|
||||
struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
|
||||
struct be_cmd_req_cq_create *req = embedded_payload(wrb);
|
||||
struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
|
||||
struct be_mcc_wrb *wrb;
|
||||
struct be_cmd_req_cq_create *req;
|
||||
struct be_dma_mem *q_mem = &cq->dma_mem;
|
||||
void *ctxt = &req->context;
|
||||
void *ctxt;
|
||||
int status;
|
||||
|
||||
spin_lock(&adapter->mbox_lock);
|
||||
memset(wrb, 0, sizeof(*wrb));
|
||||
|
||||
wrb = wrb_from_mbox(adapter);
|
||||
req = embedded_payload(wrb);
|
||||
ctxt = &req->context;
|
||||
|
||||
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
|
||||
|
||||
|
@ -486,11 +522,13 @@ int be_cmd_cq_create(struct be_adapter *adapter,
|
|||
|
||||
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
|
||||
|
||||
status = be_mbox_notify(adapter);
|
||||
status = be_mbox_notify_wait(adapter);
|
||||
if (!status) {
|
||||
struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
|
||||
cq->id = le16_to_cpu(resp->cq_id);
|
||||
cq->created = true;
|
||||
}
|
||||
|
||||
spin_unlock(&adapter->mbox_lock);
|
||||
|
||||
return status;
|
||||
|
@ -508,14 +546,17 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
|
|||
struct be_queue_info *mccq,
|
||||
struct be_queue_info *cq)
|
||||
{
|
||||
struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
|
||||
struct be_cmd_req_mcc_create *req = embedded_payload(wrb);
|
||||
struct be_mcc_wrb *wrb;
|
||||
struct be_cmd_req_mcc_create *req;
|
||||
struct be_dma_mem *q_mem = &mccq->dma_mem;
|
||||
void *ctxt = &req->context;
|
||||
void *ctxt;
|
||||
int status;
|
||||
|
||||
spin_lock(&adapter->mbox_lock);
|
||||
memset(wrb, 0, sizeof(*wrb));
|
||||
|
||||
wrb = wrb_from_mbox(adapter);
|
||||
req = embedded_payload(wrb);
|
||||
ctxt = &req->context;
|
||||
|
||||
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
|
||||
|
||||
|
@ -534,7 +575,7 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
|
|||
|
||||
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
|
||||
|
||||
status = be_mbox_notify(adapter);
|
||||
status = be_mbox_notify_wait(adapter);
|
||||
if (!status) {
|
||||
struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
|
||||
mccq->id = le16_to_cpu(resp->id);
|
||||
|
@ -549,15 +590,17 @@ int be_cmd_txq_create(struct be_adapter *adapter,
|
|||
struct be_queue_info *txq,
|
||||
struct be_queue_info *cq)
|
||||
{
|
||||
struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
|
||||
struct be_cmd_req_eth_tx_create *req = embedded_payload(wrb);
|
||||
struct be_mcc_wrb *wrb;
|
||||
struct be_cmd_req_eth_tx_create *req;
|
||||
struct be_dma_mem *q_mem = &txq->dma_mem;
|
||||
void *ctxt = &req->context;
|
||||
void *ctxt;
|
||||
int status;
|
||||
u32 len_encoded;
|
||||
|
||||
spin_lock(&adapter->mbox_lock);
|
||||
memset(wrb, 0, sizeof(*wrb));
|
||||
|
||||
wrb = wrb_from_mbox(adapter);
|
||||
req = embedded_payload(wrb);
|
||||
ctxt = &req->context;
|
||||
|
||||
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
|
||||
|
||||
|
@ -568,10 +611,8 @@ int be_cmd_txq_create(struct be_adapter *adapter,
|
|||
req->ulp_num = BE_ULP1_NUM;
|
||||
req->type = BE_ETH_TX_RING_TYPE_STANDARD;
|
||||
|
||||
len_encoded = fls(txq->len); /* log2(len) + 1 */
|
||||
if (len_encoded == 16)
|
||||
len_encoded = 0;
|
||||
AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt, len_encoded);
|
||||
AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
|
||||
be_encoded_q_len(txq->len));
|
||||
AMAP_SET_BITS(struct amap_tx_context, pci_func_id, ctxt,
|
||||
be_pci_func(adapter));
|
||||
AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
|
||||
|
@ -581,28 +622,32 @@ int be_cmd_txq_create(struct be_adapter *adapter,
|
|||
|
||||
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
|
||||
|
||||
status = be_mbox_notify(adapter);
|
||||
status = be_mbox_notify_wait(adapter);
|
||||
if (!status) {
|
||||
struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
|
||||
txq->id = le16_to_cpu(resp->cid);
|
||||
txq->created = true;
|
||||
}
|
||||
|
||||
spin_unlock(&adapter->mbox_lock);
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
/* Uses mbox */
|
||||
int be_cmd_rxq_create(struct be_adapter *adapter,
|
||||
struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
|
||||
u16 max_frame_size, u32 if_id, u32 rss)
|
||||
{
|
||||
struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
|
||||
struct be_cmd_req_eth_rx_create *req = embedded_payload(wrb);
|
||||
struct be_mcc_wrb *wrb;
|
||||
struct be_cmd_req_eth_rx_create *req;
|
||||
struct be_dma_mem *q_mem = &rxq->dma_mem;
|
||||
int status;
|
||||
|
||||
spin_lock(&adapter->mbox_lock);
|
||||
memset(wrb, 0, sizeof(*wrb));
|
||||
|
||||
wrb = wrb_from_mbox(adapter);
|
||||
req = embedded_payload(wrb);
|
||||
|
||||
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
|
||||
|
||||
|
@ -617,29 +662,34 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
|
|||
req->max_frame_size = cpu_to_le16(max_frame_size);
|
||||
req->rss_queue = cpu_to_le32(rss);
|
||||
|
||||
status = be_mbox_notify(adapter);
|
||||
status = be_mbox_notify_wait(adapter);
|
||||
if (!status) {
|
||||
struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
|
||||
rxq->id = le16_to_cpu(resp->id);
|
||||
rxq->created = true;
|
||||
}
|
||||
|
||||
spin_unlock(&adapter->mbox_lock);
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
/* Generic destroyer function for all types of queues */
|
||||
/* Generic destroyer function for all types of queues
|
||||
* Uses Mbox
|
||||
*/
|
||||
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
|
||||
int queue_type)
|
||||
{
|
||||
struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
|
||||
struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
|
||||
struct be_mcc_wrb *wrb;
|
||||
struct be_cmd_req_q_destroy *req;
|
||||
u8 subsys = 0, opcode = 0;
|
||||
int status;
|
||||
|
||||
spin_lock(&adapter->mbox_lock);
|
||||
|
||||
memset(wrb, 0, sizeof(*wrb));
|
||||
wrb = wrb_from_mbox(adapter);
|
||||
req = embedded_payload(wrb);
|
||||
|
||||
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
|
||||
|
||||
switch (queue_type) {
|
||||
|
@ -669,23 +719,27 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
|
|||
be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
|
||||
req->id = cpu_to_le16(q->id);
|
||||
|
||||
status = be_mbox_notify(adapter);
|
||||
status = be_mbox_notify_wait(adapter);
|
||||
|
||||
spin_unlock(&adapter->mbox_lock);
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
/* Create an rx filtering policy configuration on an i/f */
|
||||
/* Create an rx filtering policy configuration on an i/f
|
||||
* Uses mbox
|
||||
*/
|
||||
int be_cmd_if_create(struct be_adapter *adapter, u32 flags, u8 *mac,
|
||||
bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
|
||||
{
|
||||
struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
|
||||
struct be_cmd_req_if_create *req = embedded_payload(wrb);
|
||||
struct be_mcc_wrb *wrb;
|
||||
struct be_cmd_req_if_create *req;
|
||||
int status;
|
||||
|
||||
spin_lock(&adapter->mbox_lock);
|
||||
memset(wrb, 0, sizeof(*wrb));
|
||||
|
||||
wrb = wrb_from_mbox(adapter);
|
||||
req = embedded_payload(wrb);
|
||||
|
||||
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
|
||||
|
||||
|
@ -694,10 +748,11 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 flags, u8 *mac,
|
|||
|
||||
req->capability_flags = cpu_to_le32(flags);
|
||||
req->enable_flags = cpu_to_le32(flags);
|
||||
req->pmac_invalid = pmac_invalid;
|
||||
if (!pmac_invalid)
|
||||
memcpy(req->mac_addr, mac, ETH_ALEN);
|
||||
|
||||
status = be_mbox_notify(adapter);
|
||||
status = be_mbox_notify_wait(adapter);
|
||||
if (!status) {
|
||||
struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
|
||||
*if_handle = le32_to_cpu(resp->interface_id);
|
||||
|
@ -709,14 +764,17 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 flags, u8 *mac,
|
|||
return status;
|
||||
}
|
||||
|
||||
/* Uses mbox */
|
||||
int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
|
||||
{
|
||||
struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
|
||||
struct be_cmd_req_if_destroy *req = embedded_payload(wrb);
|
||||
struct be_mcc_wrb *wrb;
|
||||
struct be_cmd_req_if_destroy *req;
|
||||
int status;
|
||||
|
||||
spin_lock(&adapter->mbox_lock);
|
||||
memset(wrb, 0, sizeof(*wrb));
|
||||
|
||||
wrb = wrb_from_mbox(adapter);
|
||||
req = embedded_payload(wrb);
|
||||
|
||||
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
|
||||
|
||||
|
@ -724,7 +782,8 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
|
|||
OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
|
||||
|
||||
req->interface_id = cpu_to_le32(interface_id);
|
||||
status = be_mbox_notify(adapter);
|
||||
|
||||
status = be_mbox_notify_wait(adapter);
|
||||
|
||||
spin_unlock(&adapter->mbox_lock);
|
||||
|
||||
|
@ -733,20 +792,22 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
|
|||
|
||||
/* Get stats is a non embedded command: the request is not embedded inside
|
||||
* WRB but is a separate dma memory block
|
||||
* Uses asynchronous MCC
|
||||
*/
|
||||
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
|
||||
{
|
||||
struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
|
||||
struct be_cmd_req_get_stats *req = nonemb_cmd->va;
|
||||
struct be_sge *sge = nonembedded_sgl(wrb);
|
||||
int status;
|
||||
struct be_mcc_wrb *wrb;
|
||||
struct be_cmd_req_get_stats *req;
|
||||
struct be_sge *sge;
|
||||
|
||||
spin_lock(&adapter->mbox_lock);
|
||||
memset(wrb, 0, sizeof(*wrb));
|
||||
spin_lock_bh(&adapter->mcc_lock);
|
||||
|
||||
memset(req, 0, sizeof(*req));
|
||||
wrb = wrb_from_mccq(adapter);
|
||||
req = nonemb_cmd->va;
|
||||
sge = nonembedded_sgl(wrb);
|
||||
|
||||
be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
|
||||
wrb->tag0 = OPCODE_ETH_GET_STATISTICS;
|
||||
|
||||
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
|
||||
OPCODE_ETH_GET_STATISTICS, sizeof(*req));
|
||||
|
@ -754,59 +815,61 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
|
|||
sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
|
||||
sge->len = cpu_to_le32(nonemb_cmd->size);
|
||||
|
||||
status = be_mbox_notify(adapter);
|
||||
if (!status) {
|
||||
struct be_cmd_resp_get_stats *resp = nonemb_cmd->va;
|
||||
be_dws_le_to_cpu(&resp->hw_stats, sizeof(resp->hw_stats));
|
||||
}
|
||||
be_mcc_notify(adapter);
|
||||
|
||||
spin_unlock(&adapter->mbox_lock);
|
||||
return status;
|
||||
spin_unlock_bh(&adapter->mcc_lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Uses synchronous mcc */
|
||||
int be_cmd_link_status_query(struct be_adapter *adapter,
|
||||
bool *link_up)
|
||||
{
|
||||
struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
|
||||
struct be_cmd_req_link_status *req = embedded_payload(wrb);
|
||||
struct be_mcc_wrb *wrb;
|
||||
struct be_cmd_req_link_status *req;
|
||||
int status;
|
||||
|
||||
spin_lock(&adapter->mbox_lock);
|
||||
spin_lock_bh(&adapter->mcc_lock);
|
||||
|
||||
wrb = wrb_from_mccq(adapter);
|
||||
req = embedded_payload(wrb);
|
||||
|
||||
*link_up = false;
|
||||
memset(wrb, 0, sizeof(*wrb));
|
||||
|
||||
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
|
||||
|
||||
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
|
||||
OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));
|
||||
|
||||
status = be_mbox_notify(adapter);
|
||||
status = be_mcc_notify_wait(adapter);
|
||||
if (!status) {
|
||||
struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
|
||||
if (resp->mac_speed != PHY_LINK_SPEED_ZERO)
|
||||
*link_up = true;
|
||||
}
|
||||
|
||||
spin_unlock(&adapter->mbox_lock);
|
||||
spin_unlock_bh(&adapter->mcc_lock);
|
||||
return status;
|
||||
}
|
||||
|
||||
/* Uses Mbox */
|
||||
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
|
||||
{
|
||||
struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
|
||||
struct be_cmd_req_get_fw_version *req = embedded_payload(wrb);
|
||||
struct be_mcc_wrb *wrb;
|
||||
struct be_cmd_req_get_fw_version *req;
|
||||
int status;
|
||||
|
||||
spin_lock(&adapter->mbox_lock);
|
||||
memset(wrb, 0, sizeof(*wrb));
|
||||
|
||||
wrb = wrb_from_mbox(adapter);
|
||||
req = embedded_payload(wrb);
|
||||
|
||||
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
|
||||
|
||||
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
|
||||
OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));
|
||||
|
||||
status = be_mbox_notify(adapter);
|
||||
status = be_mbox_notify_wait(adapter);
|
||||
if (!status) {
|
||||
struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
|
||||
strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
|
||||
|
@ -816,15 +879,18 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
|
|||
return status;
|
||||
}
|
||||
|
||||
/* set the EQ delay interval of an EQ to specified value */
|
||||
/* set the EQ delay interval of an EQ to specified value
|
||||
* Uses async mcc
|
||||
*/
|
||||
int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
|
||||
{
|
||||
struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
|
||||
struct be_cmd_req_modify_eq_delay *req = embedded_payload(wrb);
|
||||
int status;
|
||||
struct be_mcc_wrb *wrb;
|
||||
struct be_cmd_req_modify_eq_delay *req;
|
||||
|
||||
spin_lock(&adapter->mbox_lock);
|
||||
memset(wrb, 0, sizeof(*wrb));
|
||||
spin_lock_bh(&adapter->mcc_lock);
|
||||
|
||||
wrb = wrb_from_mccq(adapter);
|
||||
req = embedded_payload(wrb);
|
||||
|
||||
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
|
||||
|
||||
|
@ -836,21 +902,24 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
|
|||
req->delay[0].phase = 0;
|
||||
req->delay[0].delay_multiplier = cpu_to_le32(eqd);
|
||||
|
||||
status = be_mbox_notify(adapter);
|
||||
be_mcc_notify(adapter);
|
||||
|
||||
spin_unlock(&adapter->mbox_lock);
|
||||
return status;
|
||||
spin_unlock_bh(&adapter->mcc_lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Uses sycnhronous mcc */
|
||||
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
|
||||
u32 num, bool untagged, bool promiscuous)
|
||||
{
|
||||
struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
|
||||
struct be_cmd_req_vlan_config *req = embedded_payload(wrb);
|
||||
struct be_mcc_wrb *wrb;
|
||||
struct be_cmd_req_vlan_config *req;
|
||||
int status;
|
||||
|
||||
spin_lock(&adapter->mbox_lock);
|
||||
memset(wrb, 0, sizeof(*wrb));
|
||||
spin_lock_bh(&adapter->mcc_lock);
|
||||
|
||||
wrb = wrb_from_mccq(adapter);
|
||||
req = embedded_payload(wrb);
|
||||
|
||||
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
|
||||
|
||||
|
@ -866,23 +935,24 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
|
|||
req->num_vlan * sizeof(vtag_array[0]));
|
||||
}
|
||||
|
||||
status = be_mbox_notify(adapter);
|
||||
status = be_mcc_notify_wait(adapter);
|
||||
|
||||
spin_unlock(&adapter->mbox_lock);
|
||||
spin_unlock_bh(&adapter->mcc_lock);
|
||||
return status;
|
||||
}
|
||||
|
||||
/* Use MCC for this command as it may be called in BH context */
|
||||
/* Uses MCC for this command as it may be called in BH context
|
||||
* Uses synchronous mcc
|
||||
*/
|
||||
int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
|
||||
{
|
||||
struct be_mcc_wrb *wrb;
|
||||
struct be_cmd_req_promiscuous_config *req;
|
||||
int status;
|
||||
|
||||
spin_lock_bh(&adapter->mcc_lock);
|
||||
|
||||
wrb = wrb_from_mcc(&adapter->mcc_obj.q);
|
||||
BUG_ON(!wrb);
|
||||
|
||||
wrb = wrb_from_mccq(adapter);
|
||||
req = embedded_payload(wrb);
|
||||
|
||||
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
|
||||
|
@ -895,14 +965,14 @@ int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
|
|||
else
|
||||
req->port0_promiscuous = en;
|
||||
|
||||
be_mcc_notify_wait(adapter);
|
||||
status = be_mcc_notify_wait(adapter);
|
||||
|
||||
spin_unlock_bh(&adapter->mcc_lock);
|
||||
return 0;
|
||||
return status;
|
||||
}
|
||||
|
||||
/*
|
||||
* Use MCC for this command as it may be called in BH context
|
||||
* Uses MCC for this command as it may be called in BH context
|
||||
* (mc == NULL) => multicast promiscous
|
||||
*/
|
||||
int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
|
||||
|
@ -914,9 +984,7 @@ int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
|
|||
|
||||
spin_lock_bh(&adapter->mcc_lock);
|
||||
|
||||
wrb = wrb_from_mcc(&adapter->mcc_obj.q);
|
||||
BUG_ON(!wrb);
|
||||
|
||||
wrb = wrb_from_mccq(adapter);
|
||||
req = embedded_payload(wrb);
|
||||
|
||||
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
|
||||
|
@ -944,15 +1012,17 @@ int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/* Uses synchrounous mcc */
|
||||
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
|
||||
{
|
||||
struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
|
||||
struct be_cmd_req_set_flow_control *req = embedded_payload(wrb);
|
||||
struct be_mcc_wrb *wrb;
|
||||
struct be_cmd_req_set_flow_control *req;
|
||||
int status;
|
||||
|
||||
spin_lock(&adapter->mbox_lock);
|
||||
spin_lock_bh(&adapter->mcc_lock);
|
||||
|
||||
memset(wrb, 0, sizeof(*wrb));
|
||||
wrb = wrb_from_mccq(adapter);
|
||||
req = embedded_payload(wrb);
|
||||
|
||||
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
|
||||
|
||||
|
@ -962,28 +1032,30 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
|
|||
req->tx_flow_control = cpu_to_le16((u16)tx_fc);
|
||||
req->rx_flow_control = cpu_to_le16((u16)rx_fc);
|
||||
|
||||
status = be_mbox_notify(adapter);
|
||||
status = be_mcc_notify_wait(adapter);
|
||||
|
||||
spin_unlock(&adapter->mbox_lock);
|
||||
spin_unlock_bh(&adapter->mcc_lock);
|
||||
return status;
|
||||
}
|
||||
|
||||
/* Uses sycn mcc */
|
||||
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
|
||||
{
|
||||
struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
|
||||
struct be_cmd_req_get_flow_control *req = embedded_payload(wrb);
|
||||
struct be_mcc_wrb *wrb;
|
||||
struct be_cmd_req_get_flow_control *req;
|
||||
int status;
|
||||
|
||||
spin_lock(&adapter->mbox_lock);
|
||||
spin_lock_bh(&adapter->mcc_lock);
|
||||
|
||||
memset(wrb, 0, sizeof(*wrb));
|
||||
wrb = wrb_from_mccq(adapter);
|
||||
req = embedded_payload(wrb);
|
||||
|
||||
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
|
||||
|
||||
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
|
||||
OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));
|
||||
|
||||
status = be_mbox_notify(adapter);
|
||||
status = be_mcc_notify_wait(adapter);
|
||||
if (!status) {
|
||||
struct be_cmd_resp_get_flow_control *resp =
|
||||
embedded_payload(wrb);
|
||||
|
@ -991,26 +1063,28 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
|
|||
*rx_fc = le16_to_cpu(resp->rx_flow_control);
|
||||
}
|
||||
|
||||
spin_unlock(&adapter->mbox_lock);
|
||||
spin_unlock_bh(&adapter->mcc_lock);
|
||||
return status;
|
||||
}
|
||||
|
||||
/* Uses mbox */
|
||||
int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num)
|
||||
{
|
||||
struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
|
||||
struct be_cmd_req_query_fw_cfg *req = embedded_payload(wrb);
|
||||
struct be_mcc_wrb *wrb;
|
||||
struct be_cmd_req_query_fw_cfg *req;
|
||||
int status;
|
||||
|
||||
spin_lock(&adapter->mbox_lock);
|
||||
|
||||
memset(wrb, 0, sizeof(*wrb));
|
||||
wrb = wrb_from_mbox(adapter);
|
||||
req = embedded_payload(wrb);
|
||||
|
||||
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
|
||||
|
||||
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
|
||||
OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));
|
||||
|
||||
status = be_mbox_notify(adapter);
|
||||
status = be_mbox_notify_wait(adapter);
|
||||
if (!status) {
|
||||
struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
|
||||
*port_num = le32_to_cpu(resp->phys_port);
|
||||
|
@ -1020,22 +1094,24 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num)
|
|||
return status;
|
||||
}
|
||||
|
||||
/* Uses mbox */
|
||||
int be_cmd_reset_function(struct be_adapter *adapter)
|
||||
{
|
||||
struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
|
||||
struct be_cmd_req_hdr *req = embedded_payload(wrb);
|
||||
struct be_mcc_wrb *wrb;
|
||||
struct be_cmd_req_hdr *req;
|
||||
int status;
|
||||
|
||||
spin_lock(&adapter->mbox_lock);
|
||||
|
||||
memset(wrb, 0, sizeof(*wrb));
|
||||
wrb = wrb_from_mbox(adapter);
|
||||
req = embedded_payload(wrb);
|
||||
|
||||
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
|
||||
|
||||
be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
|
||||
OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
|
||||
|
||||
status = be_mbox_notify(adapter);
|
||||
status = be_mbox_notify_wait(adapter);
|
||||
|
||||
spin_unlock(&adapter->mbox_lock);
|
||||
return status;
|
||||
|
@ -1044,13 +1120,17 @@ int be_cmd_reset_function(struct be_adapter *adapter)
|
|||
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
|
||||
u32 flash_type, u32 flash_opcode, u32 buf_size)
|
||||
{
|
||||
struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
|
||||
struct be_mcc_wrb *wrb;
|
||||
struct be_cmd_write_flashrom *req = cmd->va;
|
||||
struct be_sge *sge = nonembedded_sgl(wrb);
|
||||
struct be_sge *sge;
|
||||
int status;
|
||||
|
||||
spin_lock(&adapter->mbox_lock);
|
||||
memset(wrb, 0, sizeof(*wrb));
|
||||
spin_lock_bh(&adapter->mcc_lock);
|
||||
|
||||
wrb = wrb_from_mccq(adapter);
|
||||
req = embedded_payload(wrb);
|
||||
sge = nonembedded_sgl(wrb);
|
||||
|
||||
be_wrb_hdr_prepare(wrb, cmd->size, false, 1);
|
||||
|
||||
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
|
||||
|
@ -1063,8 +1143,8 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
|
|||
req->params.op_code = cpu_to_le32(flash_opcode);
|
||||
req->params.data_buf_size = cpu_to_le32(buf_size);
|
||||
|
||||
status = be_mbox_notify(adapter);
|
||||
status = be_mcc_notify_wait(adapter);
|
||||
|
||||
spin_unlock(&adapter->mbox_lock);
|
||||
spin_unlock_bh(&adapter->mcc_lock);
|
||||
return status;
|
||||
}
|
||||
|
|
|
@@ -61,7 +61,8 @@ enum {
	/* The command is completing because the queue was getting flushed */
	MCC_STATUS_QUEUE_FLUSHING = 0x4,
	/* The command is completing with a DMA error */
	MCC_STATUS_DMA_FAILED = 0x5
	MCC_STATUS_DMA_FAILED = 0x5,
	MCC_STATUS_NOT_SUPPORTED = 0x66
};

#define CQE_STATUS_COMPL_MASK		0xFFFF

@@ -761,7 +762,7 @@ extern int be_cmd_get_flow_control(struct be_adapter *adapter,
			u32 *tx_fc, u32 *rx_fc);
extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num);
extern int be_cmd_reset_function(struct be_adapter *adapter);
extern void be_process_mcc(struct be_adapter *adapter);
extern int be_process_mcc(struct be_adapter *adapter);
extern int be_cmd_write_flashrom(struct be_adapter *adapter,
			struct be_dma_mem *cmd, u32 flash_oper,
			u32 flash_opcode, u32 buf_size);
|
|
@ -135,7 +135,7 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
|
|||
return status;
|
||||
}
|
||||
|
||||
static void netdev_stats_update(struct be_adapter *adapter)
|
||||
void netdev_stats_update(struct be_adapter *adapter)
|
||||
{
|
||||
struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
|
||||
struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
|
||||
|
@ -431,8 +431,7 @@ static int make_tx_wrbs(struct be_adapter *adapter,
|
|||
}
|
||||
|
||||
static netdev_tx_t be_xmit(struct sk_buff *skb,
|
||||
struct net_device *netdev)
|
||||
|
||||
struct net_device *netdev)
|
||||
{
|
||||
struct be_adapter *adapter = netdev_priv(netdev);
|
||||
struct be_tx_obj *tx_obj = &adapter->tx_obj;
|
||||
|
@ -490,11 +489,11 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
|
|||
* program them in BE. If more than BE_NUM_VLANS_SUPPORTED are configured,
|
||||
* set the BE in promiscuous VLAN mode.
|
||||
*/
|
||||
static void be_vid_config(struct net_device *netdev)
|
||||
static int be_vid_config(struct be_adapter *adapter)
|
||||
{
|
||||
struct be_adapter *adapter = netdev_priv(netdev);
|
||||
u16 vtag[BE_NUM_VLANS_SUPPORTED];
|
||||
u16 ntags = 0, i;
|
||||
int status;
|
||||
|
||||
if (adapter->num_vlans <= BE_NUM_VLANS_SUPPORTED) {
|
||||
/* Construct VLAN Table to give to HW */
|
||||
|
@ -504,12 +503,13 @@ static void be_vid_config(struct net_device *netdev)
|
|||
ntags++;
|
||||
}
|
||||
}
|
||||
be_cmd_vlan_config(adapter, adapter->if_handle,
|
||||
vtag, ntags, 1, 0);
|
||||
status = be_cmd_vlan_config(adapter, adapter->if_handle,
|
||||
vtag, ntags, 1, 0);
|
||||
} else {
|
||||
be_cmd_vlan_config(adapter, adapter->if_handle,
|
||||
NULL, 0, 1, 1);
|
||||
status = be_cmd_vlan_config(adapter, adapter->if_handle,
|
||||
NULL, 0, 1, 1);
|
||||
}
|
||||
return status;
|
||||
}
|
||||
|
||||
static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
|
||||
|
@ -532,7 +532,7 @@ static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
|
|||
adapter->num_vlans++;
|
||||
adapter->vlan_tag[vid] = 1;
|
||||
|
||||
be_vid_config(netdev);
|
||||
be_vid_config(adapter);
|
||||
}
|
||||
|
||||
static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
|
||||
|
@ -543,7 +543,7 @@ static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
|
|||
adapter->vlan_tag[vid] = 0;
|
||||
|
||||
vlan_group_set_device(adapter->vlan_grp, vid, NULL);
|
||||
be_vid_config(netdev);
|
||||
be_vid_config(adapter);
|
||||
}
|
||||
|
||||
static void be_set_multicast_list(struct net_device *netdev)
|
||||
|
@ -1444,12 +1444,8 @@ static void be_worker(struct work_struct *work)
|
|||
{
|
||||
struct be_adapter *adapter =
|
||||
container_of(work, struct be_adapter, work.work);
|
||||
int status;
|
||||
|
||||
/* Get Stats */
|
||||
status = be_cmd_get_stats(adapter, &adapter->stats.cmd);
|
||||
if (!status)
|
||||
netdev_stats_update(adapter);
|
||||
be_cmd_get_stats(adapter, &adapter->stats.cmd);
|
||||
|
||||
/* Set EQ delay */
|
||||
be_rx_eqd_update(adapter);
|
||||
|
@ -1622,11 +1618,6 @@ static int be_setup(struct be_adapter *adapter)
|
|||
if (status != 0)
|
||||
goto do_none;
|
||||
|
||||
be_vid_config(netdev);
|
||||
|
||||
status = be_cmd_set_flow_control(adapter, true, true);
|
||||
if (status != 0)
|
||||
goto if_destroy;
|
||||
|
||||
status = be_tx_queues_create(adapter);
|
||||
if (status != 0)
|
||||
|
@ -1640,8 +1631,17 @@ static int be_setup(struct be_adapter *adapter)
|
|||
if (status != 0)
|
||||
goto rx_qs_destroy;
|
||||
|
||||
status = be_vid_config(adapter);
|
||||
if (status != 0)
|
||||
goto mccqs_destroy;
|
||||
|
||||
status = be_cmd_set_flow_control(adapter, true, true);
|
||||
if (status != 0)
|
||||
goto mccqs_destroy;
|
||||
return 0;
|
||||
|
||||
mccqs_destroy:
|
||||
be_mcc_queues_destroy(adapter);
|
||||
rx_qs_destroy:
|
||||
be_rx_queues_destroy(adapter);
|
||||
tx_qs_destroy:
|
||||
|
|
|
@ -1093,15 +1093,8 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
|
|||
return NULL; /* still no slave, return NULL */
|
||||
}
|
||||
|
||||
/*
|
||||
* first try the primary link; if arping, a link must tx/rx
|
||||
* traffic before it can be considered the curr_active_slave.
|
||||
* also, we would skip slaves between the curr_active_slave
|
||||
* and primary_slave that may be up and able to arp
|
||||
*/
|
||||
if ((bond->primary_slave) &&
|
||||
(!bond->params.arp_interval) &&
|
||||
(IS_UP(bond->primary_slave->dev))) {
|
||||
bond->primary_slave->link == BOND_LINK_UP) {
|
||||
new_active = bond->primary_slave;
|
||||
}
|
||||
|
||||
|
@ -1109,15 +1102,14 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
|
|||
old_active = new_active;
|
||||
|
||||
bond_for_each_slave_from(bond, new_active, i, old_active) {
|
||||
if (IS_UP(new_active->dev)) {
|
||||
if (new_active->link == BOND_LINK_UP) {
|
||||
return new_active;
|
||||
} else if (new_active->link == BOND_LINK_BACK) {
|
||||
/* link up, but waiting for stabilization */
|
||||
if (new_active->delay < mintime) {
|
||||
mintime = new_active->delay;
|
||||
bestslave = new_active;
|
||||
}
|
||||
if (new_active->link == BOND_LINK_UP) {
|
||||
return new_active;
|
||||
} else if (new_active->link == BOND_LINK_BACK &&
|
||||
IS_UP(new_active->dev)) {
|
||||
/* link up, but waiting for stabilization */
|
||||
if (new_active->delay < mintime) {
|
||||
mintime = new_active->delay;
|
||||
bestslave = new_active;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1211,7 +1203,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
|
|||
write_unlock_bh(&bond->curr_slave_lock);
|
||||
read_unlock(&bond->lock);
|
||||
|
||||
netdev_bonding_change(bond->dev);
|
||||
netdev_bonding_change(bond->dev, NETDEV_BONDING_FAILOVER);
|
||||
|
||||
read_lock(&bond->lock);
|
||||
write_lock_bh(&bond->curr_slave_lock);
|
||||
|
@ -1469,14 +1461,17 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
|
|||
*/
|
||||
if (bond->slave_cnt == 0) {
|
||||
if (bond_dev->type != slave_dev->type) {
|
||||
dev_close(bond_dev);
|
||||
pr_debug("%s: change device type from %d to %d\n",
|
||||
bond_dev->name, bond_dev->type, slave_dev->type);
|
||||
|
||||
netdev_bonding_change(bond_dev, NETDEV_BONDING_OLDTYPE);
|
||||
|
||||
if (slave_dev->type != ARPHRD_ETHER)
|
||||
bond_setup_by_slave(bond_dev, slave_dev);
|
||||
else
|
||||
ether_setup(bond_dev);
|
||||
dev_open(bond_dev);
|
||||
|
||||
netdev_bonding_change(bond_dev, NETDEV_BONDING_NEWTYPE);
|
||||
}
|
||||
} else if (bond_dev->type != slave_dev->type) {
|
||||
pr_err(DRV_NAME ": %s ether type (%d) is different "
|
||||
|
@ -2929,18 +2924,6 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
|
|||
}
|
||||
}
|
||||
|
||||
read_lock(&bond->curr_slave_lock);
|
||||
|
||||
/*
|
||||
* Trigger a commit if the primary option setting has changed.
|
||||
*/
|
||||
if (bond->primary_slave &&
|
||||
(bond->primary_slave != bond->curr_active_slave) &&
|
||||
(bond->primary_slave->link == BOND_LINK_UP))
|
||||
commit++;
|
||||
|
||||
read_unlock(&bond->curr_slave_lock);
|
||||
|
||||
return commit;
|
||||
}
|
||||
|
||||
|
@ -2961,90 +2944,58 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
|
|||
continue;
|
||||
|
||||
case BOND_LINK_UP:
|
||||
write_lock_bh(&bond->curr_slave_lock);
|
||||
|
||||
if (!bond->curr_active_slave &&
|
||||
time_before_eq(jiffies, dev_trans_start(slave->dev) +
|
||||
delta_in_ticks)) {
|
||||
if ((!bond->curr_active_slave &&
|
||||
time_before_eq(jiffies,
|
||||
dev_trans_start(slave->dev) +
|
||||
delta_in_ticks)) ||
|
||||
bond->curr_active_slave != slave) {
|
||||
slave->link = BOND_LINK_UP;
|
||||
bond_change_active_slave(bond, slave);
|
||||
bond->current_arp_slave = NULL;
|
||||
|
||||
pr_info(DRV_NAME
|
||||
": %s: %s is up and now the "
|
||||
"active interface\n",
|
||||
bond->dev->name, slave->dev->name);
|
||||
": %s: link status definitely "
|
||||
"up for interface %s.\n",
|
||||
bond->dev->name, slave->dev->name);
|
||||
|
||||
} else if (bond->curr_active_slave != slave) {
|
||||
/* this slave has just come up but we
|
||||
* already have a current slave; this can
|
||||
* also happen if bond_enslave adds a new
|
||||
* slave that is up while we are searching
|
||||
* for a new slave
|
||||
*/
|
||||
slave->link = BOND_LINK_UP;
|
||||
bond_set_slave_inactive_flags(slave);
|
||||
bond->current_arp_slave = NULL;
|
||||
if (!bond->curr_active_slave ||
|
||||
(slave == bond->primary_slave))
|
||||
goto do_failover;
|
||||
|
||||
pr_info(DRV_NAME
|
||||
": %s: backup interface %s is now up\n",
|
||||
bond->dev->name, slave->dev->name);
|
||||
}
|
||||
|
||||
write_unlock_bh(&bond->curr_slave_lock);
|
||||
|
||||
break;
|
||||
continue;
|
||||
|
||||
case BOND_LINK_DOWN:
|
||||
if (slave->link_failure_count < UINT_MAX)
|
||||
slave->link_failure_count++;
|
||||
|
||||
slave->link = BOND_LINK_DOWN;
|
||||
bond_set_slave_inactive_flags(slave);
|
||||
|
||||
pr_info(DRV_NAME
|
||||
": %s: link status definitely down for "
|
||||
"interface %s, disabling it\n",
|
||||
bond->dev->name, slave->dev->name);
|
||||
|
||||
if (slave == bond->curr_active_slave) {
|
||||
pr_info(DRV_NAME
|
||||
": %s: link status down for active "
|
||||
"interface %s, disabling it\n",
|
||||
bond->dev->name, slave->dev->name);
|
||||
|
||||
bond_set_slave_inactive_flags(slave);
|
||||
|
||||
write_lock_bh(&bond->curr_slave_lock);
|
||||
|
||||
bond_select_active_slave(bond);
|
||||
if (bond->curr_active_slave)
|
||||
bond->curr_active_slave->jiffies =
|
||||
jiffies;
|
||||
|
||||
write_unlock_bh(&bond->curr_slave_lock);
|
||||
|
||||
bond->current_arp_slave = NULL;
|
||||
|
||||
} else if (slave->state == BOND_STATE_BACKUP) {
|
||||
pr_info(DRV_NAME
|
||||
": %s: backup interface %s is now down\n",
|
||||
bond->dev->name, slave->dev->name);
|
||||
|
||||
bond_set_slave_inactive_flags(slave);
|
||||
goto do_failover;
|
||||
}
|
||||
break;
|
||||
|
||||
continue;
|
||||
|
||||
default:
|
||||
pr_err(DRV_NAME
|
||||
": %s: impossible: new_link %d on slave %s\n",
|
||||
bond->dev->name, slave->new_link,
|
||||
slave->dev->name);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* No race with changes to primary via sysfs, as we hold rtnl.
|
||||
*/
|
||||
if (bond->primary_slave &&
|
||||
(bond->primary_slave != bond->curr_active_slave) &&
|
||||
(bond->primary_slave->link == BOND_LINK_UP)) {
|
||||
do_failover:
|
||||
ASSERT_RTNL();
|
||||
write_lock_bh(&bond->curr_slave_lock);
|
||||
bond_change_active_slave(bond, bond->primary_slave);
|
||||
bond_select_active_slave(bond);
|
||||
write_unlock_bh(&bond->curr_slave_lock);
|
||||
}
|
||||
|
||||
|
|
|
@@ -80,7 +80,7 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
	skb->dev = dev;
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	netif_rx(skb);
	netif_rx_ni(skb);
}

static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
@ -49,11 +49,10 @@ static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *);
|
|||
static s32 igb_reset_hw_82575(struct e1000_hw *);
|
||||
static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool);
|
||||
static s32 igb_setup_copper_link_82575(struct e1000_hw *);
|
||||
static s32 igb_setup_fiber_serdes_link_82575(struct e1000_hw *);
|
||||
static s32 igb_setup_serdes_link_82575(struct e1000_hw *);
|
||||
static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16);
|
||||
static void igb_clear_hw_cntrs_82575(struct e1000_hw *);
|
||||
static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *, u16);
|
||||
static void igb_configure_pcs_link_82575(struct e1000_hw *);
|
||||
static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *,
|
||||
u16 *);
|
||||
static s32 igb_get_phy_id_82575(struct e1000_hw *);
|
||||
|
@ -105,16 +104,20 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
|
|||
dev_spec->sgmii_active = false;
|
||||
|
||||
ctrl_ext = rd32(E1000_CTRL_EXT);
|
||||
if ((ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) ==
|
||||
E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES) {
|
||||
hw->phy.media_type = e1000_media_type_internal_serdes;
|
||||
ctrl_ext |= E1000_CTRL_I2C_ENA;
|
||||
} else if (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII) {
|
||||
switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
|
||||
case E1000_CTRL_EXT_LINK_MODE_SGMII:
|
||||
dev_spec->sgmii_active = true;
|
||||
ctrl_ext |= E1000_CTRL_I2C_ENA;
|
||||
} else {
|
||||
break;
|
||||
case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
|
||||
hw->phy.media_type = e1000_media_type_internal_serdes;
|
||||
ctrl_ext |= E1000_CTRL_I2C_ENA;
|
||||
break;
|
||||
default:
|
||||
ctrl_ext &= ~E1000_CTRL_I2C_ENA;
|
||||
break;
|
||||
}
|
||||
|
||||
wr32(E1000_CTRL_EXT, ctrl_ext);
|
||||
|
||||
/* Set mta register count */
|
||||
|
@ -134,7 +137,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
|
|||
mac->ops.setup_physical_interface =
|
||||
(hw->phy.media_type == e1000_media_type_copper)
|
||||
? igb_setup_copper_link_82575
|
||||
: igb_setup_fiber_serdes_link_82575;
|
||||
: igb_setup_serdes_link_82575;
|
||||
|
||||
/* NVM initialization */
|
||||
eecd = rd32(E1000_EECD);
|
||||
|
@ -379,6 +382,7 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
|
|||
struct e1000_phy_info *phy = &hw->phy;
|
||||
s32 ret_val = 0;
|
||||
u16 phy_id;
|
||||
u32 ctrl_ext;
|
||||
|
||||
/*
|
||||
* For SGMII PHYs, we try the list of possible addresses until
|
||||
|
@ -393,6 +397,12 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
|
|||
goto out;
|
||||
}
|
||||
|
||||
/* Power on sgmii phy if it is disabled */
|
||||
ctrl_ext = rd32(E1000_CTRL_EXT);
|
||||
wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
|
||||
wrfl();
|
||||
msleep(300);
|
||||
|
||||
/*
|
||||
* The address field in the I2CCMD register is 3 bits and 0 is invalid.
|
||||
* Therefore, we need to test 1-7
|
||||
|
@ -418,9 +428,12 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
|
|||
phy->addr = 0;
|
||||
ret_val = -E1000_ERR_PHY;
|
||||
goto out;
|
||||
} else {
|
||||
ret_val = igb_get_phy_id(hw);
|
||||
}
|
||||
|
||||
ret_val = igb_get_phy_id(hw);
|
||||
/* restore previous sfp cage power state */
|
||||
wr32(E1000_CTRL_EXT, ctrl_ext);
|
||||
|
||||
out:
|
||||
return ret_val;
|
||||
|
@ -766,17 +779,18 @@ static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
|
|||
}
|
||||
|
||||
/**
|
||||
* igb_shutdown_fiber_serdes_link_82575 - Remove link during power down
|
||||
* igb_shutdown_serdes_link_82575 - Remove link during power down
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* In the case of fiber serdes, shut down optics and PCS on driver unload
|
||||
* when management pass thru is not enabled.
|
||||
**/
|
||||
void igb_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw)
|
||||
void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
|
||||
{
|
||||
u32 reg;
|
||||
|
||||
if (hw->phy.media_type != e1000_media_type_internal_serdes)
|
||||
if (hw->phy.media_type != e1000_media_type_internal_serdes ||
|
||||
igb_sgmii_active_82575(hw))
|
||||
return;
|
||||
|
||||
/* if the management interface is not enabled, then power down */
|
||||
|
@ -788,7 +802,7 @@ void igb_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw)
|
|||
|
||||
/* shutdown the laser */
|
||||
reg = rd32(E1000_CTRL_EXT);
|
||||
reg |= E1000_CTRL_EXT_SDP7_DATA;
|
||||
reg |= E1000_CTRL_EXT_SDP3_DATA;
|
||||
wr32(E1000_CTRL_EXT, reg);
|
||||
|
||||
/* flush the write to verify completion */
|
||||
|
@ -927,6 +941,17 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
|
|||
ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
|
||||
wr32(E1000_CTRL, ctrl);
|
||||
|
||||
ret_val = igb_setup_serdes_link_82575(hw);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
||||
if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
|
||||
ret_val = hw->phy.ops.reset(hw);
|
||||
if (ret_val) {
|
||||
hw_dbg("Error resetting the PHY.\n");
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
switch (hw->phy.type) {
|
||||
case e1000_phy_m88:
|
||||
ret_val = igb_copper_link_setup_m88(hw);
|
||||
|
@ -963,8 +988,6 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
|
|||
}
|
||||
}
|
||||
|
||||
igb_configure_pcs_link_82575(hw);
|
||||
|
||||
/*
|
||||
* Check link status. Wait up to 100 microseconds for link to become
|
||||
* valid.
|
||||
|
@ -987,14 +1010,18 @@ out:
|
|||
}
|
||||
|
||||
/**
|
||||
* igb_setup_fiber_serdes_link_82575 - Setup link for fiber/serdes
|
||||
* igb_setup_serdes_link_82575 - Setup link for fiber/serdes
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Configures speed and duplex for fiber and serdes links.
|
||||
**/
|
||||
static s32 igb_setup_fiber_serdes_link_82575(struct e1000_hw *hw)
|
||||
static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
|
||||
{
|
||||
u32 reg;
|
||||
u32 ctrl_reg, reg;
|
||||
|
||||
if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
|
||||
!igb_sgmii_active_82575(hw))
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* On the 82575, SerDes loopback mode persists until it is
|
||||
|
@ -1004,26 +1031,38 @@ static s32 igb_setup_fiber_serdes_link_82575(struct e1000_hw *hw)
|
|||
*/
|
||||
wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
|
||||
|
||||
/* Force link up, set 1gb, set both sw defined pins */
|
||||
reg = rd32(E1000_CTRL);
|
||||
reg |= E1000_CTRL_SLU |
|
||||
E1000_CTRL_SPD_1000 |
|
||||
E1000_CTRL_FRCSPD |
|
||||
E1000_CTRL_SWDPIN0 |
|
||||
E1000_CTRL_SWDPIN1;
|
||||
wr32(E1000_CTRL, reg);
|
||||
/* power on the sfp cage if present */
|
||||
reg = rd32(E1000_CTRL_EXT);
|
||||
reg &= ~E1000_CTRL_EXT_SDP3_DATA;
|
||||
wr32(E1000_CTRL_EXT, reg);
|
||||
|
||||
/* Power on phy for 82576 fiber adapters */
|
||||
if (hw->mac.type == e1000_82576) {
|
||||
reg = rd32(E1000_CTRL_EXT);
|
||||
reg &= ~E1000_CTRL_EXT_SDP7_DATA;
|
||||
wr32(E1000_CTRL_EXT, reg);
|
||||
ctrl_reg = rd32(E1000_CTRL);
|
||||
ctrl_reg |= E1000_CTRL_SLU;
|
||||
|
||||
if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) {
|
||||
/* set both sw defined pins */
|
||||
ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
|
||||
|
||||
/* Set switch control to serdes energy detect */
|
||||
reg = rd32(E1000_CONNSW);
|
||||
reg |= E1000_CONNSW_ENRGSRC;
|
||||
wr32(E1000_CONNSW, reg);
|
||||
}
|
||||
|
||||
/* Set switch control to serdes energy detect */
|
||||
reg = rd32(E1000_CONNSW);
|
||||
reg |= E1000_CONNSW_ENRGSRC;
|
||||
wr32(E1000_CONNSW, reg);
|
||||
reg = rd32(E1000_PCS_LCTL);
|
||||
|
||||
if (igb_sgmii_active_82575(hw)) {
|
||||
/* allow time for SFP cage to power up phy */
|
||||
msleep(300);
|
||||
|
||||
/* AN time out should be disabled for SGMII mode */
|
||||
reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
|
||||
} else {
|
||||
ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
|
||||
E1000_CTRL_FD | E1000_CTRL_FRCDPX;
|
||||
}
|
||||
|
||||
wr32(E1000_CTRL, ctrl_reg);
|
||||
|
||||
/*
|
||||
* New SerDes mode allows for forcing speed or autonegotiating speed
|
||||
|
@ -1031,12 +1070,21 @@ static s32 igb_setup_fiber_serdes_link_82575(struct e1000_hw *hw)
|
|||
* mode that will be compatible with older link partners and switches.
|
||||
* However, both are supported by the hardware and some drivers/tools.
|
||||
*/
|
||||
reg = rd32(E1000_PCS_LCTL);
|
||||
|
||||
reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
|
||||
E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
|
||||
|
||||
if (hw->mac.autoneg) {
|
||||
/*
|
||||
* We force flow control to prevent the CTRL register values from being
|
||||
* overwritten by the autonegotiated flow control values
|
||||
*/
|
||||
reg |= E1000_PCS_LCTL_FORCE_FCTRL;
|
||||
|
||||
/*
|
||||
* we always set sgmii to autoneg since it is the phy that will be
|
||||
* forcing the link and the serdes is just a go-between
|
||||
*/
|
||||
if (hw->mac.autoneg || igb_sgmii_active_82575(hw)) {
|
||||
/* Set PCS register for autoneg */
|
||||
reg |= E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */
|
||||
E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */
|
||||
|
@ -1053,77 +1101,14 @@ static s32 igb_setup_fiber_serdes_link_82575(struct e1000_hw *hw)
|
|||
hw_dbg("Configuring Forced Link; PCS_LCTL = 0x%08X\n", reg);
|
||||
}
|
||||
|
||||
if (hw->mac.type == e1000_82576) {
|
||||
reg |= E1000_PCS_LCTL_FORCE_FCTRL;
|
||||
igb_force_mac_fc(hw);
|
||||
}
|
||||
|
||||
wr32(E1000_PCS_LCTL, reg);
|
||||
|
||||
if (!igb_sgmii_active_82575(hw))
|
||||
igb_force_mac_fc(hw);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* igb_configure_pcs_link_82575 - Configure PCS link
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Configure the physical coding sub-layer (PCS) link. The PCS link is
|
||||
* only used on copper connections where the serialized gigabit media
|
||||
* independent interface (sgmii) is being used. Configures the link
|
||||
* for auto-negotiation or forces speed/duplex.
|
||||
**/
|
||||
static void igb_configure_pcs_link_82575(struct e1000_hw *hw)
|
||||
{
|
||||
struct e1000_mac_info *mac = &hw->mac;
|
||||
u32 reg = 0;
|
||||
|
||||
if (hw->phy.media_type != e1000_media_type_copper ||
|
||||
!(igb_sgmii_active_82575(hw)))
|
||||
return;
|
||||
|
||||
/* For SGMII, we need to issue a PCS autoneg restart */
|
||||
reg = rd32(E1000_PCS_LCTL);
|
||||
|
||||
/* AN time out should be disabled for SGMII mode */
|
||||
reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
|
||||
|
||||
if (mac->autoneg) {
|
||||
/* Make sure forced speed and force link are not set */
|
||||
reg &= ~(E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
|
||||
|
||||
/*
|
||||
* The PHY should be setup prior to calling this function.
|
||||
* All we need to do is restart autoneg and enable autoneg.
|
||||
*/
|
||||
reg |= E1000_PCS_LCTL_AN_RESTART | E1000_PCS_LCTL_AN_ENABLE;
|
||||
} else {
|
||||
/* Set PCS register for forced speed */
|
||||
|
||||
/* Turn off bits for full duplex, speed, and autoneg */
|
||||
reg &= ~(E1000_PCS_LCTL_FSV_1000 |
|
||||
E1000_PCS_LCTL_FSV_100 |
|
||||
E1000_PCS_LCTL_FDV_FULL |
|
||||
E1000_PCS_LCTL_AN_ENABLE);
|
||||
|
||||
/* Check for duplex first */
|
||||
if (mac->forced_speed_duplex & E1000_ALL_FULL_DUPLEX)
|
||||
reg |= E1000_PCS_LCTL_FDV_FULL;
|
||||
|
||||
/* Now set speed */
|
||||
if (mac->forced_speed_duplex & E1000_ALL_100_SPEED)
|
||||
reg |= E1000_PCS_LCTL_FSV_100;
|
||||
|
||||
/* Force speed and force link */
|
||||
reg |= E1000_PCS_LCTL_FSD |
|
||||
E1000_PCS_LCTL_FORCE_LINK |
|
||||
E1000_PCS_LCTL_FLV_LINK_UP;
|
||||
|
||||
hw_dbg("Wrote 0x%08X to PCS_LCTL to configure forced link\n",
|
||||
reg);
|
||||
}
|
||||
wr32(E1000_PCS_LCTL, reg);
|
||||
}
|
||||
|
||||
/**
|
||||
* igb_sgmii_active_82575 - Return sgmii state
|
||||
* @hw: pointer to the HW structure
|
||||
|
@ -1248,7 +1233,8 @@ static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw)
|
|||
temp = rd32(E1000_LENERRS);
|
||||
|
||||
/* This register should not be read in copper configurations */
|
||||
if (hw->phy.media_type == e1000_media_type_internal_serdes)
|
||||
if (hw->phy.media_type == e1000_media_type_internal_serdes ||
|
||||
igb_sgmii_active_82575(hw))
|
||||
temp = rd32(E1000_SCVPC);
|
||||
}
@ -28,7 +28,7 @@
|
|||
#ifndef _E1000_82575_H_
|
||||
#define _E1000_82575_H_
|
||||
|
||||
extern void igb_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw);
|
||||
extern void igb_shutdown_serdes_link_82575(struct e1000_hw *hw);
|
||||
extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
|
||||
|
||||
#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
@ -44,7 +44,7 @@
|
|||
#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
|
||||
|
||||
/* Extended Device Control */
|
||||
#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */
|
||||
#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Defineable Pin 3 */
|
||||
/* Physical Func Reset Done Indication */
|
||||
#define E1000_CTRL_EXT_PFRSTD 0x00004000
|
||||
#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
@ -1565,9 +1565,12 @@ out:
|
|||
**/
|
||||
s32 igb_phy_sw_reset(struct e1000_hw *hw)
|
||||
{
|
||||
s32 ret_val;
|
||||
s32 ret_val = 0;
|
||||
u16 phy_ctrl;
|
||||
|
||||
if (!(hw->phy.ops.read_reg))
|
||||
goto out;
|
||||
|
||||
ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
|
||||
if (ret_val)
|
||||
goto out;
@ -5320,7 +5320,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
|
|||
|
||||
*enable_wake = wufc || adapter->en_mng_pt;
|
||||
if (!*enable_wake)
|
||||
igb_shutdown_fiber_serdes_link_82575(hw);
|
||||
igb_shutdown_serdes_link_82575(hw);
|
||||
|
||||
/* Release control of h/w to f/w. If f/w is AMT enabled, this
|
||||
* would have already happened in close and is redundant. */
@ -322,14 +322,16 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
|
|||
break;
|
||||
case IXGBE_DEV_ID_82598AF_DUAL_PORT:
|
||||
case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
|
||||
case IXGBE_DEV_ID_82598EB_CX4:
|
||||
case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
|
||||
case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
|
||||
case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
|
||||
case IXGBE_DEV_ID_82598EB_XF_LR:
|
||||
case IXGBE_DEV_ID_82598EB_SFP_LOM:
|
||||
media_type = ixgbe_media_type_fiber;
|
||||
break;
|
||||
case IXGBE_DEV_ID_82598EB_CX4:
|
||||
case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
|
||||
media_type = ixgbe_media_type_cx4;
|
||||
break;
|
||||
case IXGBE_DEV_ID_82598AT:
|
||||
case IXGBE_DEV_ID_82598AT2:
|
||||
media_type = ixgbe_media_type_copper;
@ -337,6 +337,9 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
|
|||
case IXGBE_DEV_ID_82599_SFP:
|
||||
media_type = ixgbe_media_type_fiber;
|
||||
break;
|
||||
case IXGBE_DEV_ID_82599_CX4:
|
||||
media_type = ixgbe_media_type_cx4;
|
||||
break;
|
||||
default:
|
||||
media_type = ixgbe_media_type_unknown;
|
||||
break;
@ -97,6 +97,8 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
|
|||
board_82599 },
|
||||
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
|
||||
board_82599 },
|
||||
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
|
||||
board_82599 },
|
||||
|
||||
/* required last entry */
|
||||
{0, }
|
||||
|
@ -2055,6 +2057,8 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
|
|||
|
||||
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
|
||||
rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED;
|
||||
else
|
||||
rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
|
||||
|
||||
#ifdef IXGBE_FCOE
|
||||
if (netdev->features & NETIF_F_FCOE_MTU) {
|
||||
|
|
|
@ -49,6 +49,7 @@
|
|||
#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1
|
||||
#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4
|
||||
#define IXGBE_DEV_ID_82599_KX4 0x10F7
|
||||
#define IXGBE_DEV_ID_82599_CX4 0x10F9
|
||||
#define IXGBE_DEV_ID_82599_SFP 0x10FB
|
||||
#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
|
||||
|
||||
|
@ -2143,6 +2144,7 @@ enum ixgbe_media_type {
|
|||
ixgbe_media_type_fiber,
|
||||
ixgbe_media_type_copper,
|
||||
ixgbe_media_type_backplane,
|
||||
ixgbe_media_type_cx4,
|
||||
ixgbe_media_type_virtual
|
||||
};
|
||||
|
||||
|
|
|
@ -96,12 +96,17 @@ static void catas_reset(struct work_struct *work)
|
|||
spin_unlock_irq(&catas_lock);
|
||||
|
||||
list_for_each_entry_safe(priv, tmppriv, &tlist, catas_err.list) {
|
||||
struct pci_dev *pdev = priv->dev.pdev;
|
||||
|
||||
ret = mlx4_restart_one(priv->dev.pdev);
|
||||
dev = &priv->dev;
|
||||
/* 'priv' now is not valid */
|
||||
if (ret)
|
||||
mlx4_err(dev, "Reset failed (%d)\n", ret);
|
||||
else
|
||||
printk(KERN_ERR "mlx4 %s: Reset failed (%d)\n",
|
||||
pci_name(pdev), ret);
|
||||
else {
|
||||
dev = pci_get_drvdata(pdev);
|
||||
mlx4_dbg(dev, "Reset succeeded\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1750,11 +1750,11 @@ static struct pcmcia_device_id pcnet_ids[] = {
|
|||
PCMCIA_DEVICE_PROD_ID2("EN-6200P2", 0xa996d078),
|
||||
/* too generic! */
|
||||
/* PCMCIA_DEVICE_PROD_ID12("PCMCIA", "10/100 Ethernet Card", 0x281f1c5d, 0x11b0ffc0), */
|
||||
PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "PCMCIA", "EN2218-LAN/MODEM", 0x281f1c5d, 0x570f348e, "PCMLM28.cis"),
|
||||
PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "PCMCIA", "UE2218-LAN/MODEM", 0x281f1c5d, 0x6fdcacee, "PCMLM28.cis"),
|
||||
PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "PCMLM28.cis"),
|
||||
PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "PCMLM28.cis"),
|
||||
PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "PCMLM28.cis"),
|
||||
PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "PCMCIA", "EN2218-LAN/MODEM", 0x281f1c5d, 0x570f348e, "cis/PCMLM28.cis"),
|
||||
PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "PCMCIA", "UE2218-LAN/MODEM", 0x281f1c5d, 0x6fdcacee, "cis/PCMLM28.cis"),
|
||||
PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"),
|
||||
PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"),
|
||||
PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"),
|
||||
PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "DP83903.cis"),
|
||||
PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "DP83903.cis"),
|
||||
PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "DP83903.cis"),
|
||||
|
|
|
@ -229,7 +229,7 @@ static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel);
|
|||
static atomic_t pppol2tp_tunnel_count;
|
||||
static atomic_t pppol2tp_session_count;
|
||||
static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit , NULL };
|
||||
static struct proto_ops pppol2tp_ops;
|
||||
static const struct proto_ops pppol2tp_ops;
|
||||
|
||||
/* per-net private data for this module */
|
||||
static int pppol2tp_net_id;
|
||||
|
@ -2574,7 +2574,7 @@ static const struct file_operations pppol2tp_proc_fops = {
|
|||
* Init and cleanup
|
||||
*****************************************************************************/
|
||||
|
||||
static struct proto_ops pppol2tp_ops = {
|
||||
static const struct proto_ops pppol2tp_ops = {
|
||||
.family = AF_PPPOX,
|
||||
.owner = THIS_MODULE,
|
||||
.release = pppol2tp_release,
|
||||
|
|
|
@ -65,8 +65,8 @@
|
|||
#define RX_DEF_PENDING RX_MAX_PENDING
|
||||
|
||||
/* This is the worst case number of transmit list elements for a single skb:
|
||||
VLAN + TSO + CKSUM + Data + skb_frags * DMA */
|
||||
#define MAX_SKB_TX_LE (4 + (sizeof(dma_addr_t)/sizeof(u32))*MAX_SKB_FRAGS)
|
||||
VLAN:GSO + CKSUM + Data + skb_frags * DMA */
|
||||
#define MAX_SKB_TX_LE (2 + (sizeof(dma_addr_t)/sizeof(u32))*(MAX_SKB_FRAGS+1))
|
||||
#define TX_MIN_PENDING (MAX_SKB_TX_LE+1)
|
||||
#define TX_MAX_PENDING 4096
|
||||
#define TX_DEF_PENDING 127
|
||||
|
@ -1567,11 +1567,13 @@ static unsigned tx_le_req(const struct sk_buff *skb)
|
|||
{
|
||||
unsigned count;
|
||||
|
||||
count = sizeof(dma_addr_t) / sizeof(u32);
|
||||
count += skb_shinfo(skb)->nr_frags * count;
|
||||
count = (skb_shinfo(skb)->nr_frags + 1)
|
||||
* (sizeof(dma_addr_t) / sizeof(u32));
|
||||
|
||||
if (skb_is_gso(skb))
|
||||
++count;
|
||||
else if (sizeof(dma_addr_t) == sizeof(u32))
|
||||
++count; /* possible vlan */
|
||||
|
||||
if (skb->ip_summed == CHECKSUM_PARTIAL)
|
||||
++count;
|
||||
|
@ -4548,16 +4550,18 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
|
|||
if (hw->ports > 1) {
|
||||
struct net_device *dev1;
|
||||
|
||||
err = -ENOMEM;
|
||||
dev1 = sky2_init_netdev(hw, 1, using_dac, wol_default);
|
||||
if (!dev1)
|
||||
dev_warn(&pdev->dev, "allocation for second device failed\n");
|
||||
else if ((err = register_netdev(dev1))) {
|
||||
if (dev1 && (err = register_netdev(dev1)) == 0)
|
||||
sky2_show_addr(dev1);
|
||||
else {
|
||||
dev_warn(&pdev->dev,
|
||||
"register of second port failed (%d)\n", err);
|
||||
hw->dev[1] = NULL;
|
||||
free_netdev(dev1);
|
||||
} else
|
||||
sky2_show_addr(dev1);
|
||||
hw->ports = 1;
|
||||
if (dev1)
|
||||
free_netdev(dev1);
|
||||
}
|
||||
}
|
||||
|
||||
setup_timer(&hw->watchdog_timer, sky2_watchdog, (unsigned long) hw);
|
||||
|
|
|
@ -83,34 +83,6 @@ static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
|
|||
}
|
||||
}
|
||||
|
||||
#elif defined(CONFIG_BLACKFIN)
|
||||
|
||||
#define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH
|
||||
#define RPC_LSA_DEFAULT RPC_LED_100_10
|
||||
#define RPC_LSB_DEFAULT RPC_LED_TX_RX
|
||||
|
||||
#define SMC_CAN_USE_8BIT 0
|
||||
#define SMC_CAN_USE_16BIT 1
|
||||
# if defined(CONFIG_BF561)
|
||||
#define SMC_CAN_USE_32BIT 1
|
||||
# else
|
||||
#define SMC_CAN_USE_32BIT 0
|
||||
# endif
|
||||
#define SMC_IO_SHIFT 0
|
||||
#define SMC_NOWAIT 1
|
||||
#define SMC_USE_BFIN_DMA 0
|
||||
|
||||
#define SMC_inw(a, r) readw((a) + (r))
|
||||
#define SMC_outw(v, a, r) writew(v, (a) + (r))
|
||||
#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
|
||||
#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
|
||||
# if SMC_CAN_USE_32BIT
|
||||
#define SMC_inl(a, r) readl((a) + (r))
|
||||
#define SMC_outl(v, a, r) writel(v, (a) + (r))
|
||||
#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
|
||||
#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
|
||||
# endif
|
||||
|
||||
#elif defined(CONFIG_REDWOOD_5) || defined(CONFIG_REDWOOD_6)
|
||||
|
||||
/* We can only do 16-bit reads and writes in the static memory space. */
|
||||
|
|
|
@ -264,7 +264,6 @@ static int usbpn_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
|
|||
switch (cmd) {
|
||||
case SIOCPNGAUTOCONF:
|
||||
req->ifr_phonet_autoconf.device = PN_DEV_PC;
|
||||
printk(KERN_CRIT"device is PN_DEV_PC\n");
|
||||
return 0;
|
||||
}
|
||||
return -ENOIOCTLCMD;
|
||||
|
|
|
@ -5,6 +5,7 @@
|
|||
menuconfig WLAN
|
||||
bool "Wireless LAN"
|
||||
depends on !S390
|
||||
default y
|
||||
---help---
|
||||
This section contains all the pre 802.11 and 802.11 wireless
|
||||
device drivers. For a complete list of drivers and documentation
|
||||
|
|
|
@ -327,7 +327,8 @@ static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah)
|
|||
aniState->firstepLevel + 1);
|
||||
return;
|
||||
} else {
|
||||
if (conf->channel->band == IEEE80211_BAND_2GHZ) {
|
||||
if ((conf->channel->band == IEEE80211_BAND_2GHZ) &&
|
||||
!conf_is_ht(conf)) {
|
||||
if (!aniState->ofdmWeakSigDetectOff)
|
||||
ath9k_hw_ani_control(ah,
|
||||
ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
|
||||
|
@ -369,7 +370,8 @@ static void ath9k_hw_ani_cck_err_trigger(struct ath_hw *ah)
|
|||
ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
|
||||
aniState->firstepLevel + 1);
|
||||
} else {
|
||||
if (conf->channel->band == IEEE80211_BAND_2GHZ) {
|
||||
if ((conf->channel->band == IEEE80211_BAND_2GHZ) &&
|
||||
!conf_is_ht(conf)) {
|
||||
if (aniState->firstepLevel > 0)
|
||||
ath9k_hw_ani_control(ah,
|
||||
ATH9K_ANI_FIRSTEP_LEVEL, 0);
|
||||
|
|
|
@ -2289,11 +2289,7 @@ static int b43_upload_microcode(struct b43_wldev *dev)
|
|||
err = -ENODEV;
|
||||
goto error;
|
||||
}
|
||||
msleep_interruptible(50);
|
||||
if (signal_pending(current)) {
|
||||
err = -EINTR;
|
||||
goto error;
|
||||
}
|
||||
msleep(50);
|
||||
}
|
||||
b43_read32(dev, B43_MMIO_GEN_IRQ_REASON); /* dummy read */
|
||||
|
||||
|
@ -4287,6 +4283,8 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
|
|||
if (!dev->suspend_in_progress)
|
||||
b43_rng_init(wl);
|
||||
|
||||
ieee80211_wake_queues(dev->wl->hw);
|
||||
|
||||
b43_set_status(dev, B43_STAT_INITIALIZED);
|
||||
|
||||
if (!dev->suspend_in_progress)
|
||||
|
|
|
@ -875,15 +875,16 @@ void hostap_setup_dev(struct net_device *dev, local_info_t *local,
|
|||
|
||||
switch(type) {
|
||||
case HOSTAP_INTERFACE_AP:
|
||||
dev->tx_queue_len = 0; /* use main radio device queue */
|
||||
dev->netdev_ops = &hostap_mgmt_netdev_ops;
|
||||
dev->type = ARPHRD_IEEE80211;
|
||||
dev->header_ops = &hostap_80211_ops;
|
||||
break;
|
||||
case HOSTAP_INTERFACE_MASTER:
|
||||
dev->tx_queue_len = 0; /* use main radio device queue */
|
||||
dev->netdev_ops = &hostap_master_ops;
|
||||
break;
|
||||
default:
|
||||
dev->tx_queue_len = 0; /* use main radio device queue */
|
||||
dev->netdev_ops = &hostap_netdev_ops;
|
||||
}
|
||||
|
||||
|
|
|
@ -2346,6 +2346,7 @@ struct iwl_cfg iwl4965_agn_cfg = {
|
|||
.mod_params = &iwl4965_mod_params,
|
||||
.use_isr_legacy = true,
|
||||
.ht_greenfield_support = false,
|
||||
.broken_powersave = true,
|
||||
};
|
||||
|
||||
/* Module firmware */
|
||||
|
|
|
@ -760,6 +760,7 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
|
|||
u16 high_low;
|
||||
u8 switch_to_legacy = 0;
|
||||
u8 is_green = lq_sta->is_green;
|
||||
struct iwl_priv *priv = lq_sta->drv;
|
||||
|
||||
/* check if we need to switch from HT to legacy rates.
|
||||
* assumption is that mandatory rates (1Mbps or 6Mbps)
|
||||
|
@ -773,7 +774,8 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
|
|||
tbl->lq_type = LQ_G;
|
||||
|
||||
if (num_of_ant(tbl->ant_type) > 1)
|
||||
tbl->ant_type = ANT_A;/*FIXME:RS*/
|
||||
tbl->ant_type =
|
||||
first_antenna(priv->hw_params.valid_tx_ant);
|
||||
|
||||
tbl->is_ht40 = 0;
|
||||
tbl->is_SGI = 0;
|
||||
|
@ -883,6 +885,12 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
|
|||
mac_index &= RATE_MCS_CODE_MSK; /* Remove # of streams */
|
||||
if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
|
||||
mac_index++;
|
||||
/*
|
||||
* mac80211 HT index is always zero-indexed; we need to move
|
||||
* HT OFDM rates after CCK rates in 2.4 GHz band
|
||||
*/
|
||||
if (priv->band == IEEE80211_BAND_2GHZ)
|
||||
mac_index += IWL_FIRST_OFDM_RATE;
|
||||
}
|
||||
|
||||
if ((mac_index < 0) ||
|
||||
|
|
|
@ -1585,9 +1585,12 @@ int iwl_setup_mac(struct iwl_priv *priv)
|
|||
hw->flags = IEEE80211_HW_SIGNAL_DBM |
|
||||
IEEE80211_HW_NOISE_DBM |
|
||||
IEEE80211_HW_AMPDU_AGGREGATION |
|
||||
IEEE80211_HW_SPECTRUM_MGMT |
|
||||
IEEE80211_HW_SUPPORTS_PS |
|
||||
IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
|
||||
IEEE80211_HW_SPECTRUM_MGMT;
|
||||
|
||||
if (!priv->cfg->broken_powersave)
|
||||
hw->flags |= IEEE80211_HW_SUPPORTS_PS |
|
||||
IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
|
||||
|
||||
hw->wiphy->interface_modes =
|
||||
BIT(NL80211_IFTYPE_STATION) |
|
||||
BIT(NL80211_IFTYPE_ADHOC);
|
||||
|
|
|
@ -252,6 +252,7 @@ struct iwl_cfg {
|
|||
const u16 max_ll_items;
|
||||
const bool shadow_ram_support;
|
||||
const bool ht_greenfield_support;
|
||||
const bool broken_powersave;
|
||||
};
|
||||
|
||||
/***************************
|
||||
|
|
|
@ -292,8 +292,9 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
|
|||
else
|
||||
dtimper = 1;
|
||||
|
||||
/* TT power setting overwrites everything */
|
||||
if (tt->state >= IWL_TI_1)
|
||||
if (priv->cfg->broken_powersave)
|
||||
iwl_power_sleep_cam_cmd(priv, &cmd);
|
||||
else if (tt->state >= IWL_TI_1)
|
||||
iwl_static_sleep_cmd(priv, &cmd, tt->tt_power_mode, dtimper);
|
||||
else if (!enabled)
|
||||
iwl_power_sleep_cam_cmd(priv, &cmd);
|
||||
|
|
|
@ -239,13 +239,34 @@ void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
|
|||
struct iwl_rx_queue *rxq = &priv->rxq;
|
||||
struct list_head *element;
|
||||
struct iwl_rx_mem_buffer *rxb;
|
||||
struct sk_buff *skb;
|
||||
unsigned long flags;
|
||||
|
||||
while (1) {
|
||||
spin_lock_irqsave(&rxq->lock, flags);
|
||||
if (list_empty(&rxq->rx_used)) {
|
||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||
return;
|
||||
}
|
||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||
|
||||
/* Alloc a new receive buffer */
|
||||
skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
|
||||
priority);
|
||||
|
||||
if (!skb) {
|
||||
IWL_CRIT(priv, "Can not allocate SKB buffers\n");
|
||||
/* We don't reschedule replenish work here -- we will
|
||||
* call the restock method and if it still needs
|
||||
* more buffers it will schedule replenish */
|
||||
break;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&rxq->lock, flags);
|
||||
|
||||
if (list_empty(&rxq->rx_used)) {
|
||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||
dev_kfree_skb_any(skb);
|
||||
return;
|
||||
}
|
||||
element = rxq->rx_used.next;
|
||||
|
@ -254,18 +275,7 @@ void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
|
|||
|
||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||
|
||||
/* Alloc a new receive buffer */
|
||||
rxb->skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
|
||||
priority);
|
||||
|
||||
if (!rxb->skb) {
|
||||
IWL_CRIT(priv, "Can not allocate SKB buffers\n");
|
||||
/* We don't reschedule replenish work here -- we will
|
||||
* call the restock method and if it still needs
|
||||
* more buffers it will schedule replenish */
|
||||
break;
|
||||
}
|
||||
|
||||
rxb->skb = skb;
|
||||
/* Get physical address of RB/SKB */
|
||||
rxb->real_dma_addr = pci_map_single(
|
||||
priv->pci_dev,
|
||||
|
|
|
@ -1134,6 +1134,7 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
|
|||
struct iwl_rx_queue *rxq = &priv->rxq;
|
||||
struct list_head *element;
|
||||
struct iwl_rx_mem_buffer *rxb;
|
||||
struct sk_buff *skb;
|
||||
unsigned long flags;
|
||||
|
||||
while (1) {
|
||||
|
@ -1143,17 +1144,11 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
|
|||
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||
return;
|
||||
}
|
||||
|
||||
element = rxq->rx_used.next;
|
||||
rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
|
||||
list_del(element);
|
||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||
|
||||
/* Alloc a new receive buffer */
|
||||
rxb->skb =
|
||||
alloc_skb(priv->hw_params.rx_buf_size,
|
||||
priority);
|
||||
if (!rxb->skb) {
|
||||
skb = alloc_skb(priv->hw_params.rx_buf_size, priority);
|
||||
if (!skb) {
|
||||
if (net_ratelimit())
|
||||
IWL_CRIT(priv, ": Can not allocate SKB buffers\n");
|
||||
/* We don't reschedule replenish work here -- we will
|
||||
|
@ -1162,6 +1157,19 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
|
|||
break;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&rxq->lock, flags);
|
||||
if (list_empty(&rxq->rx_used)) {
|
||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||
dev_kfree_skb_any(skb);
|
||||
return;
|
||||
}
|
||||
element = rxq->rx_used.next;
|
||||
rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
|
||||
list_del(element);
|
||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||
|
||||
rxb->skb = skb;
|
||||
|
||||
/* If radiotap head is required, reserve some headroom here.
|
||||
* The physical head count is a variable rx_stats->phy_count.
|
||||
* We reserve 4 bytes here. Plus these extra bytes, the
|
||||
|
|
|
@ -67,6 +67,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
|
|||
{USB_DEVICE(0x0bf8, 0x1009)}, /* FUJITSU E-5400 USB D1700*/
|
||||
{USB_DEVICE(0x0cde, 0x0006)}, /* Medion MD40900 */
|
||||
{USB_DEVICE(0x0cde, 0x0008)}, /* Sagem XG703A */
|
||||
{USB_DEVICE(0x0cde, 0x0015)}, /* Zcomax XG-705A */
|
||||
{USB_DEVICE(0x0d8e, 0x3762)}, /* DLink DWL-G120 Cohiba */
|
||||
{USB_DEVICE(0x124a, 0x4025)}, /* IOGear GWU513 (GW3887IK chip) */
|
||||
{USB_DEVICE(0x1260, 0xee22)}, /* SMC 2862W-G version 2 */
|
||||
|
|
|
@ -644,11 +644,10 @@ static int wl1271_op_config_interface(struct ieee80211_hw *hw,
|
|||
{
|
||||
struct wl1271 *wl = hw->priv;
|
||||
struct sk_buff *beacon;
|
||||
DECLARE_MAC_BUF(mac);
|
||||
int ret;
|
||||
|
||||
wl1271_debug(DEBUG_MAC80211, "mac80211 config_interface bssid %s",
|
||||
print_mac(mac, conf->bssid));
|
||||
wl1271_debug(DEBUG_MAC80211, "mac80211 config_interface bssid %pM",
|
||||
conf->bssid);
|
||||
wl1271_dump_ascii(DEBUG_MAC80211, "ssid: ", conf->ssid,
|
||||
conf->ssid_len);
|
||||
|
||||
|
|
|
@ -868,11 +868,11 @@ static struct pcmcia_device_id serial_ids[] = {
|
|||
PCMCIA_DEVICE_PROD_ID12("PCMCIA ", "C336MX ", 0x99bcafe9, 0xaa25bcab),
|
||||
PCMCIA_DEVICE_PROD_ID12("Quatech Inc", "PCMCIA Dual RS-232 Serial Port Card", 0xc4420b35, 0x92abc92f),
|
||||
PCMCIA_DEVICE_PROD_ID12("Quatech Inc", "Dual RS-232 Serial Port PC Card", 0xc4420b35, 0x031a380d),
|
||||
PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "PCMCIA", "EN2218-LAN/MODEM", 0x281f1c5d, 0x570f348e, "PCMLM28.cis"),
|
||||
PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "PCMCIA", "UE2218-LAN/MODEM", 0x281f1c5d, 0x6fdcacee, "PCMLM28.cis"),
|
||||
PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "PCMLM28.cis"),
|
||||
PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "PCMLM28.cis"),
|
||||
PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "PCMLM28.cis"),
|
||||
PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "PCMCIA", "EN2218-LAN/MODEM", 0x281f1c5d, 0x570f348e, "cis/PCMLM28.cis"),
|
||||
PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "PCMCIA", "UE2218-LAN/MODEM", 0x281f1c5d, 0x6fdcacee, "cis/PCMLM28.cis"),
|
||||
PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"),
|
||||
PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"),
|
||||
PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"),
|
||||
PCMCIA_MFC_DEVICE_CIS_PROD_ID12(1, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "DP83903.cis"),
|
||||
PCMCIA_MFC_DEVICE_CIS_PROD_ID4(1, "NSC MF LAN/Modem", 0x58fc6056, "DP83903.cis"),
|
||||
PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0556, "cis/3CCFEM556.cis"),
|
||||
|
@ -883,10 +883,10 @@ static struct pcmcia_device_id serial_ids[] = {
|
|||
PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0x0710, "SW_7xx_SER.cis"), /* Sierra Wireless AC710/AC750 GPRS Network Adapter R1 */
|
||||
PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0xa555, "SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- pre update */
|
||||
PCMCIA_DEVICE_CIS_MANF_CARD(0x013f, 0xa555, "SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- post update */
|
||||
PCMCIA_DEVICE_CIS_PROD_ID12("MultiTech", "PCMCIA 56K DataFax", 0x842047ee, 0xc2efcf03, "MT5634ZLX.cis"),
|
||||
PCMCIA_DEVICE_CIS_PROD_ID12("MultiTech", "PCMCIA 56K DataFax", 0x842047ee, 0xc2efcf03, "cis/MT5634ZLX.cis"),
|
||||
PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "COMpad4.cis"),
|
||||
PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "COMpad2.cis"),
|
||||
PCMCIA_DEVICE_CIS_PROD_ID2("RS-COM 2P", 0xad20b156, "RS-COM-2P.cis"),
|
||||
PCMCIA_DEVICE_CIS_PROD_ID2("RS-COM 2P", 0xad20b156, "cis/RS-COM-2P.cis"),
|
||||
PCMCIA_DEVICE_CIS_MANF_CARD(0x0013, 0x0000, "GLOBETROTTER.cis"),
|
||||
PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100 1.00.",0x19ca78af,0xf964f42b),
|
||||
PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100",0x19ca78af,0x71d98e83),
|
||||
|
|
|
@ -600,6 +600,7 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out,
|
|||
ssb_printk(KERN_WARNING PFX "Unsupported SPROM"
|
||||
" revision %d detected. Will extract"
|
||||
" v1\n", out->revision);
|
||||
out->revision = 1;
|
||||
sprom_extract_r123(out, in);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -21,7 +21,7 @@
|
|||
#include "ssb_private.h"
|
||||
|
||||
/* Define the following to 1 to enable a printk on each coreswitch. */
|
||||
#define SSB_VERBOSE_SDIOCORESWITCH_DEBUG 1
|
||||
#define SSB_VERBOSE_SDIOCORESWITCH_DEBUG 0
|
||||
|
||||
|
||||
/* Hardware invariants CIS tuples */
|
||||
|
@ -333,7 +333,7 @@ static void ssb_sdio_block_read(struct ssb_device *dev, void *buffer,
|
|||
goto out;
|
||||
|
||||
err_out:
|
||||
dev_dbg(ssb_sdio_dev(bus), "%04X:%04X (width=%u, len=%u), error %d\n",
|
||||
dev_dbg(ssb_sdio_dev(bus), "%04X:%04X (width=%u, len=%zu), error %d\n",
|
||||
bus->sdio_sbaddr >> 16, offset, reg_width, saved_count, error);
|
||||
out:
|
||||
sdio_release_host(bus->host_sdio);
|
||||
|
@ -440,7 +440,7 @@ static void ssb_sdio_block_write(struct ssb_device *dev, const void *buffer,
|
|||
goto out;
|
||||
|
||||
err_out:
|
||||
dev_dbg(ssb_sdio_dev(bus), "%04X:%04X (width=%u, len=%u), error %d\n",
|
||||
dev_dbg(ssb_sdio_dev(bus), "%04X:%04X (width=%u, len=%zu), error %d\n",
|
||||
bus->sdio_sbaddr >> 16, offset, reg_width, saved_count, error);
|
||||
out:
|
||||
sdio_release_host(bus->host_sdio);
|
||||
|
|
|
@ -51,9 +51,10 @@ fw-shipped-$(CONFIG_DVB_TTUSB_BUDGET) += ttusb-budget/dspbootcode.bin
|
|||
fw-shipped-$(CONFIG_E100) += e100/d101m_ucode.bin e100/d101s_ucode.bin \
|
||||
e100/d102e_ucode.bin
|
||||
fw-shipped-$(CONFIG_MYRI_SBUS) += myricom/lanai.bin
|
||||
fw-shipped-$(CONFIG_PCMCIA_PCNET) += cis/LA-PCM.cis
|
||||
fw-shipped-$(CONFIG_PCMCIA_PCNET) += cis/LA-PCM.cis cis/PCMLM28.cis
|
||||
fw-shipped-$(CONFIG_PCMCIA_3C589) += cis/3CXEM556.cis
|
||||
fw-shipped-$(CONFIG_PCMCIA_3C574) += cis/3CCFEM556.cis
|
||||
fw-shipped-$(CONFIG_SERIAL_8250_CS) += cis/MT5634ZLX.cis cis/RS-COM-2P.cis
|
||||
fw-shipped-$(CONFIG_PCMCIA_SMC91C92) += ositech/Xilinx7OD.bin
|
||||
fw-shipped-$(CONFIG_SCSI_ADVANSYS) += advansys/mcode.bin advansys/38C1600.bin \
|
||||
advansys/3550.bin advansys/38C0800.bin
|
||||
|
|
|
@ -596,6 +596,7 @@ Found in hex form in kernel source.
|
|||
Driver: PCMCIA_PCNET - NE2000 compatible PCMCIA adapter
|
||||
|
||||
File: cis/LA-PCM.cis
|
||||
cis/PCMLM28.cis
|
||||
|
||||
Licence: GPL
|
||||
|
||||
|
@ -623,6 +624,17 @@ Originally developed by the pcmcia-cs project
|
|||
|
||||
--------------------------------------------------------------------------
|
||||
|
||||
Driver: SERIAL_8250_CS - Serial PCMCIA adapter
|
||||
|
||||
File: cis/MT5634ZLX.cis
|
||||
cis/RS-COM-2P.cis
|
||||
|
||||
Licence: GPL
|
||||
|
||||
Originally developed by the pcmcia-cs project
|
||||
|
||||
--------------------------------------------------------------------------
|
||||
|
||||
Driver: PCMCIA_SMC91C92 - SMC 91Cxx PCMCIA
|
||||
|
||||
File: ositech/Xilinx7OD.bin
|
||||
|
|
firmware/cis/MT5634ZLX.cis.ihex
|
@ -0,0 +1,11 @@
|
|||
:100000000101FF152204014D756C74695465636824
|
||||
:100010000050434D4349412035364B2044617461C3
|
||||
:10002000466178000000FF20040002010021020266
|
||||
:10003000001A05012780FF671B0FCF418B01550177
|
||||
:10004000550155AA60F80307281B08970108AA6004
|
||||
:10005000F802071B089F0108AA60E803071B08A70E
|
||||
:0B0060000108AA60E802071400FF007E
|
||||
:00000001FF
|
||||
#
|
||||
# Replacement CIS for Multitech MT5634ZLX modems
|
||||
#
|
firmware/cis/PCMLM28.cis.ihex
|
@ -0,0 +1,18 @@
|
|||
:1000000001030000FF151504014C494E4B53595391
|
||||
:100010000050434D4C4D3238000000FF2004430196
|
||||
:10002000ABC0210200001A05012FF803031B10E4E6
|
||||
:1000300001190155E06100031FF8020730FFFF1BA3
|
||||
:100040000BA50108E06120031FF802071B0BA601A6
|
||||
:1000500008E06140031FF802071B0BA70108E061DD
|
||||
:1000600060031FF802071B0BA80108E06100031FD3
|
||||
:10007000E803071B0BA90108E06120031FE8030741
|
||||
:100080001B0BAA0108E06140031FE803071B0BAB31
|
||||
:100090000108E06160031FE803071B0BAC0108E0E7
|
||||
:1000A0006100031FE802071B0BAD0108E06120039C
|
||||
:1000B0001FE802071B0BAE0108E06140031FE802C6
|
||||
:1000C000071B0BAF0108E06160031FE80207140083
|
||||
:0200D000FF002F
|
||||
:00000001FF
|
||||
#
|
||||
# The on-card CIS says it is MFC-compliant, but it is not
|
||||
#
|
firmware/cis/RS-COM-2P.cis.ihex
|
@ -0,0 +1,10 @@
|
|||
:1000000001030000FF1516040150434D4349410010
|
||||
:1000100052532D434F4D203250000000FF21020269
|
||||
:10002000011A0501030001011B0EC18118AA61E834
|
||||
:100030000307E8020730B89E1B0B820108AA615033
|
||||
:1000400002075802071B0B830108AA6160020768B8
|
||||
:0600500002071400FF008E
|
||||
:00000001FF
|
||||
#
|
||||
# Replacement CIS for dual-serial-port IO card
|
||||
#
|
|
@ -21,4 +21,111 @@ extern struct key_type key_type_rxrpc;
|
|||
|
||||
extern struct key *rxrpc_get_null_key(const char *);
|
||||
|
||||
/*
|
||||
* RxRPC key for Kerberos IV (type-2 security)
|
||||
*/
|
||||
struct rxkad_key {
|
||||
u32 vice_id;
|
||||
u32 start; /* time at which ticket starts */
|
||||
u32 expiry; /* time at which ticket expires */
|
||||
u32 kvno; /* key version number */
|
||||
u8 primary_flag; /* T if key for primary cell for this user */
|
||||
u16 ticket_len; /* length of ticket[] */
|
||||
u8 session_key[8]; /* DES session key */
|
||||
u8 ticket[0]; /* the encrypted ticket */
|
||||
};
|
||||
|
||||
/*
|
||||
* Kerberos 5 principal
|
||||
* name/name/name@realm
|
||||
*/
|
||||
struct krb5_principal {
|
||||
u8 n_name_parts; /* N of parts of the name part of the principal */
|
||||
char **name_parts; /* parts of the name part of the principal */
|
||||
char *realm; /* parts of the realm part of the principal */
|
||||
};
|
||||
|
||||
/*
|
||||
* Kerberos 5 tagged data
|
||||
*/
|
||||
struct krb5_tagged_data {
|
||||
/* for tag value, see /usr/include/krb5/krb5.h
|
||||
* - KRB5_AUTHDATA_* for auth data
|
||||
* -
|
||||
*/
|
||||
s32 tag;
|
||||
u32 data_len;
|
||||
u8 *data;
|
||||
};
|
||||
|
||||
/*
|
||||
* RxRPC key for Kerberos V (type-5 security)
|
||||
*/
|
||||
struct rxk5_key {
|
||||
u64 authtime; /* time at which auth token generated */
|
||||
u64 starttime; /* time at which auth token starts */
|
||||
u64 endtime; /* time at which auth token expired */
|
||||
u64 renew_till; /* time to which auth token can be renewed */
|
||||
s32 is_skey; /* T if ticket is encrypted in another ticket's
|
||||
* skey */
|
||||
s32 flags; /* mask of TKT_FLG_* bits (krb5/krb5.h) */
|
||||
struct krb5_principal client; /* client principal name */
|
||||
struct krb5_principal server; /* server principal name */
|
||||
u16 ticket_len; /* length of ticket */
|
||||
u16 ticket2_len; /* length of second ticket */
|
||||
u8 n_authdata; /* number of authorisation data elements */
|
||||
u8 n_addresses; /* number of addresses */
|
||||
struct krb5_tagged_data session; /* session data; tag is enctype */
|
||||
struct krb5_tagged_data *addresses; /* addresses */
|
||||
u8 *ticket; /* krb5 ticket */
|
||||
u8 *ticket2; /* second krb5 ticket, if related to ticket (via
|
||||
* DUPLICATE-SKEY or ENC-TKT-IN-SKEY) */
|
||||
struct krb5_tagged_data *authdata; /* authorisation data */
|
||||
};
|
||||
|
||||
/*
|
||||
* list of tokens attached to an rxrpc key
|
||||
*/
|
||||
struct rxrpc_key_token {
|
||||
u16 security_index; /* RxRPC header security index */
|
||||
struct rxrpc_key_token *next; /* the next token in the list */
|
||||
union {
|
||||
struct rxkad_key *kad;
|
||||
struct rxk5_key *k5;
|
||||
};
|
||||
};
|
||||
|
||||
/*
|
||||
* structure of raw payloads passed to add_key() or instantiate key
|
||||
*/
|
||||
struct rxrpc_key_data_v1 {
|
||||
u32 kif_version; /* 1 */
|
||||
u16 security_index;
|
||||
u16 ticket_length;
|
||||
u32 expiry; /* time_t */
|
||||
u32 kvno;
|
||||
u8 session_key[8];
|
||||
u8 ticket[0];
|
||||
};
|
||||
|
||||
/*
|
||||
* AF_RXRPC key payload derived from XDR format
|
||||
* - based on openafs-1.4.10/src/auth/afs_token.xg
|
||||
*/
|
||||
#define AFSTOKEN_LENGTH_MAX 16384 /* max payload size */
|
||||
#define AFSTOKEN_STRING_MAX 256 /* max small string length */
|
||||
#define AFSTOKEN_DATA_MAX 64 /* max small data length */
|
||||
#define AFSTOKEN_CELL_MAX 64 /* max cellname length */
|
||||
#define AFSTOKEN_MAX 8 /* max tokens per payload */
|
||||
#define AFSTOKEN_BDATALN_MAX 16384 /* max big data length */
|
||||
#define AFSTOKEN_RK_TIX_MAX 12000 /* max RxKAD ticket size */
|
||||
#define AFSTOKEN_GK_KEY_MAX 64 /* max GSSAPI key size */
|
||||
#define AFSTOKEN_GK_TOKEN_MAX 16384 /* max GSSAPI token size */
|
||||
#define AFSTOKEN_K5_COMPONENTS_MAX 16 /* max K5 components */
|
||||
#define AFSTOKEN_K5_NAME_MAX 128 /* max K5 name length */
|
||||
#define AFSTOKEN_K5_REALM_MAX 64 /* max K5 realm name length */
|
||||
#define AFSTOKEN_K5_TIX_MAX 16384 /* max K5 ticket size */
|
||||
#define AFSTOKEN_K5_ADDRESSES_MAX 16 /* max K5 addresses */
|
||||
#define AFSTOKEN_K5_AUTHDATA_MAX 16 /* max K5 pieces of auth data */
|
||||
|
||||
#endif /* _KEYS_RXRPC_TYPE_H */
|
||||
|
|
|
@ -233,6 +233,8 @@ extern void ip_mc_init_dev(struct in_device *);
|
|||
extern void ip_mc_destroy_dev(struct in_device *);
|
||||
extern void ip_mc_up(struct in_device *);
|
||||
extern void ip_mc_down(struct in_device *);
|
||||
extern void ip_mc_unmap(struct in_device *);
|
||||
extern void ip_mc_remap(struct in_device *);
|
||||
extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr);
|
||||
extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr);
|
||||
extern void ip_mc_rejoin_group(struct ip_mc_list *im);
|
||||
|
|
|
@ -57,6 +57,7 @@ typedef enum {
|
|||
#include <linux/random.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/fcntl.h> /* For O_CLOEXEC and O_NONBLOCK */
|
||||
#include <linux/kmemcheck.h>
|
||||
|
||||
struct poll_table_struct;
|
||||
struct pipe_inode_info;
|
||||
|
@ -127,7 +128,11 @@ enum sock_shutdown_cmd {
|
|||
*/
|
||||
struct socket {
|
||||
socket_state state;
|
||||
|
||||
kmemcheck_bitfield_begin(type);
|
||||
short type;
|
||||
kmemcheck_bitfield_end(type);
|
||||
|
||||
unsigned long flags;
|
||||
/*
|
||||
* Please keep fasync_list & wait fields in the same cache line
|
||||
|
|
|
@ -1873,7 +1873,8 @@ extern void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct
|
|||
extern int dev_set_promiscuity(struct net_device *dev, int inc);
|
||||
extern int dev_set_allmulti(struct net_device *dev, int inc);
|
||||
extern void netdev_state_change(struct net_device *dev);
|
||||
extern void netdev_bonding_change(struct net_device *dev);
|
||||
extern void netdev_bonding_change(struct net_device *dev,
|
||||
unsigned long event);
|
||||
extern void netdev_features_change(struct net_device *dev);
|
||||
/* Load a device via the kmod */
|
||||
extern void dev_load(struct net *net, const char *name);
|
||||
|
|
|
@ -176,12 +176,16 @@ struct netlink_skb_parms
|
|||
#define NETLINK_CREDS(skb) (&NETLINK_CB((skb)).creds)
|
||||
|
||||
|
||||
extern void netlink_table_grab(void);
|
||||
extern void netlink_table_ungrab(void);
|
||||
|
||||
extern struct sock *netlink_kernel_create(struct net *net,
|
||||
int unit,unsigned int groups,
|
||||
void (*input)(struct sk_buff *skb),
|
||||
struct mutex *cb_mutex,
|
||||
struct module *module);
|
||||
extern void netlink_kernel_release(struct sock *sk);
|
||||
extern int __netlink_change_ngroups(struct sock *sk, unsigned int groups);
|
||||
extern int netlink_change_ngroups(struct sock *sk, unsigned int groups);
|
||||
extern void netlink_clear_multicast_users(struct sock *sk, unsigned int group);
|
||||
extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
|
||||
|
|
|
@ -199,6 +199,8 @@ static inline int notifier_to_errno(int ret)
|
|||
#define NETDEV_FEAT_CHANGE 0x000B
|
||||
#define NETDEV_BONDING_FAILOVER 0x000C
|
||||
#define NETDEV_PRE_UP 0x000D
|
||||
#define NETDEV_BONDING_OLDTYPE 0x000E
|
||||
#define NETDEV_BONDING_NEWTYPE 0x000F
|
||||
|
||||
#define SYS_DOWN 0x0001 /* Notify of system down */
|
||||
#define SYS_RESTART SYS_DOWN
|
||||
|
|
|
@ -58,5 +58,12 @@ struct sockaddr_rxrpc {
|
|||
#define RXRPC_SECURITY_AUTH 1 /* authenticated packets */
|
||||
#define RXRPC_SECURITY_ENCRYPT 2 /* encrypted packets */
|
||||
|
||||
/*
|
||||
* RxRPC security indices
|
||||
*/
|
||||
#define RXRPC_SECURITY_NONE 0 /* no security protocol */
|
||||
#define RXRPC_SECURITY_RXKAD 2 /* kaserver or kerberos 4 */
|
||||
#define RXRPC_SECURITY_RXGK 4 /* gssapi-based */
|
||||
#define RXRPC_SECURITY_RXK5 5 /* kerberos 5 */
|
||||
|
||||
#endif /* _LINUX_RXRPC_H */
|
||||
|
|
|
@ -143,6 +143,8 @@ extern int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr
|
|||
extern int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr);
|
||||
extern void ipv6_mc_up(struct inet6_dev *idev);
|
||||
extern void ipv6_mc_down(struct inet6_dev *idev);
|
||||
extern void ipv6_mc_unmap(struct inet6_dev *idev);
|
||||
extern void ipv6_mc_remap(struct inet6_dev *idev);
|
||||
extern void ipv6_mc_init_dev(struct inet6_dev *idev);
|
||||
extern void ipv6_mc_destroy_dev(struct inet6_dev *idev);
|
||||
extern void addrconf_dad_failure(struct inet6_ifaddr *ifp);
|
||||
|
|
|
@ -94,21 +94,20 @@ struct inet_protosw {
|
|||
#define INET_PROTOSW_PERMANENT 0x02 /* Permanent protocols are unremovable. */
|
||||
#define INET_PROTOSW_ICSK 0x04 /* Is this an inet_connection_sock? */
|
||||
|
||||
extern struct net_protocol *inet_protocol_base;
|
||||
extern struct net_protocol *inet_protos[MAX_INET_PROTOS];
|
||||
extern const struct net_protocol *inet_protos[MAX_INET_PROTOS];
|
||||
|
||||
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
|
||||
extern struct inet6_protocol *inet6_protos[MAX_INET_PROTOS];
|
||||
extern const struct inet6_protocol *inet6_protos[MAX_INET_PROTOS];
|
||||
#endif
|
||||
|
||||
extern int inet_add_protocol(struct net_protocol *prot, unsigned char num);
|
||||
extern int inet_del_protocol(struct net_protocol *prot, unsigned char num);
|
||||
extern int inet_add_protocol(const struct net_protocol *prot, unsigned char num);
|
||||
extern int inet_del_protocol(const struct net_protocol *prot, unsigned char num);
|
||||
extern void inet_register_protosw(struct inet_protosw *p);
|
||||
extern void inet_unregister_protosw(struct inet_protosw *p);
|
||||
|
||||
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
|
||||
extern int inet6_add_protocol(struct inet6_protocol *prot, unsigned char num);
|
||||
extern int inet6_del_protocol(struct inet6_protocol *prot, unsigned char num);
|
||||
extern int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char num);
|
||||
extern int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char num);
|
||||
extern int inet6_register_protosw(struct inet_protosw *p);
|
||||
extern void inet6_unregister_protosw(struct inet_protosw *p);
|
||||
#endif
|
||||
|
|
|
@ -81,7 +81,7 @@ struct Qdisc
|
|||
struct Qdisc_class_ops
|
||||
{
|
||||
/* Child qdisc manipulation */
|
||||
unsigned int (*select_queue)(struct Qdisc *, struct tcmsg *);
|
||||
struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *);
|
||||
int (*graft)(struct Qdisc *, unsigned long cl,
|
||||
struct Qdisc *, struct Qdisc **);
|
||||
struct Qdisc * (*leaf)(struct Qdisc *, unsigned long cl);
|
||||
|
|
|
@ -793,6 +793,13 @@ static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
|
|||
return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
|
||||
}
|
||||
|
||||
#define TCP_INFINITE_SSTHRESH 0x7fffffff
|
||||
|
||||
static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
|
||||
{
|
||||
return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
|
||||
}
|
||||
|
||||
/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
|
||||
* The exception is rate halving phase, when cwnd is decreasing towards
|
||||
* ssthresh.
|
||||
|
|
|
@ -1372,7 +1372,7 @@ static int atalk_route_packet(struct sk_buff *skb, struct net_device *dev,
|
|||
|
||||
if (aarp_send_ddp(rt->dev, skb, &ta, NULL) == NET_XMIT_DROP)
|
||||
return NET_RX_DROP;
|
||||
return NET_XMIT_SUCCESS;
|
||||
return NET_RX_SUCCESS;
|
||||
free_it:
|
||||
kfree_skb(skb);
|
||||
drop:
|
||||
|
|
|
@ -199,6 +199,8 @@ static int can_create(struct net *net, struct socket *sock, int protocol)
|
|||
* @skb: pointer to socket buffer with CAN frame in data section
|
||||
* @loop: loopback for listeners on local CAN sockets (recommended default!)
|
||||
*
|
||||
* Due to the loopback this routine must not be called from hardirq context.
|
||||
*
|
||||
* Return:
|
||||
* 0 on success
|
||||
* -ENETDOWN when the selected interface is down
|
||||
|
@ -278,7 +280,7 @@ int can_send(struct sk_buff *skb, int loop)
|
|||
}
|
||||
|
||||
if (newskb)
|
||||
netif_rx(newskb);
|
||||
netif_rx_ni(newskb);
|
||||
|
||||
/* update statistics */
|
||||
can_stats.tx_frames++;
|
||||
|
|
|
@ -1017,9 +1017,9 @@ void netdev_state_change(struct net_device *dev)
|
|||
}
|
||||
EXPORT_SYMBOL(netdev_state_change);
|
||||
|
||||
void netdev_bonding_change(struct net_device *dev)
|
||||
void netdev_bonding_change(struct net_device *dev, unsigned long event)
|
||||
{
|
||||
call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
|
||||
call_netdevice_notifiers(event, dev);
|
||||
}
|
||||
EXPORT_SYMBOL(netdev_bonding_change);
|
||||
|
||||
|
|
|
@ -66,9 +66,9 @@ config IP_DCCP_CCID3_RTO
|
|||
A value of 0 disables this feature by enforcing the value specified
|
||||
in RFC 3448. The following values have been suggested as bounds for
|
||||
experimental use:
|
||||
* 16-20ms to match the typical multimedia inter-frame interval
|
||||
* 100ms as a reasonable compromise [default]
|
||||
* 1000ms corresponds to the lower TCP RTO bound (RFC 2988, 2.4)
|
||||
* 16-20ms to match the typical multimedia inter-frame interval
|
||||
* 100ms as a reasonable compromise [default]
|
||||
* 1000ms corresponds to the lower TCP RTO bound (RFC 2988, 2.4)
|
||||
|
||||
The default of 100ms is a compromise between a large value for
|
||||
efficient DCCP implementations, and a small value to avoid disrupting
|
||||
|
|
|
@ -1,6 +1,4 @@
|
|||
/*
|
||||
* net/dccp/ccids/ccid2.c
|
||||
*
|
||||
* Copyright (c) 2005, 2006 Andrea Bittau <a.bittau@cs.ucl.ac.uk>
|
||||
*
|
||||
* Changes to meet Linux coding standards, and DCCP infrastructure fixes.
|
||||
|
|
|
@ -1,6 +1,4 @@
|
|||
/*
|
||||
* net/dccp/ccids/ccid2.h
|
||||
*
|
||||
* Copyright (c) 2005 Andrea Bittau <a.bittau@cs.ucl.ac.uk>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
|
@ -40,14 +38,14 @@ struct ccid2_seq {
|
|||
#define CCID2_SEQBUF_LEN 1024
|
||||
#define CCID2_SEQBUF_MAX 128
|
||||
|
||||
/** struct ccid2_hc_tx_sock - CCID2 TX half connection
|
||||
*
|
||||
/**
|
||||
* struct ccid2_hc_tx_sock - CCID2 TX half connection
|
||||
* @ccid2hctx_{cwnd,ssthresh,pipe}: as per RFC 4341, section 5
|
||||
* @ccid2hctx_packets_acked - Ack counter for deriving cwnd growth (RFC 3465)
|
||||
* @ccid2hctx_lastrtt -time RTT was last measured
|
||||
* @ccid2hctx_rpseq - last consecutive seqno
|
||||
* @ccid2hctx_rpdupack - dupacks since rpseq
|
||||
*/
|
||||
*/
|
||||
struct ccid2_hc_tx_sock {
|
||||
u32 ccid2hctx_cwnd;
|
||||
u32 ccid2hctx_ssthresh;
|
||||
|
|
|
@ -1,6 +1,4 @@
|
|||
/*
|
||||
* net/dccp/ccids/ccid3.c
|
||||
*
|
||||
* Copyright (c) 2007 The University of Aberdeen, Scotland, UK
|
||||
* Copyright (c) 2005-7 The University of Waikato, Hamilton, New Zealand.
|
||||
* Copyright (c) 2005-7 Ian McDonald <ian.mcdonald@jandi.co.nz>
|
||||
|
@ -750,7 +748,8 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/** ccid3_first_li - Implements [RFC 3448, 6.3.1]
|
||||
/**
|
||||
* ccid3_first_li - Implements [RFC 5348, 6.3.1]
|
||||
*
|
||||
* Determine the length of the first loss interval via inverse lookup.
|
||||
* Assume that X_recv can be computed by the throughput equation
|
||||
|
|
|
@ -1,6 +1,4 @@
|
|||
/*
|
||||
* net/dccp/ccids/ccid3.h
|
||||
*
|
||||
* Copyright (c) 2005-7 The University of Waikato, Hamilton, New Zealand.
|
||||
* Copyright (c) 2007 The University of Aberdeen, Scotland, UK
|
||||
*
|
||||
|
@ -75,8 +73,8 @@ enum ccid3_hc_tx_states {
|
|||
TFRC_SSTATE_TERM,
|
||||
};
|
||||
|
||||
/** struct ccid3_hc_tx_sock - CCID3 sender half-connection socket
|
||||
*
|
||||
/**
|
||||
* struct ccid3_hc_tx_sock - CCID3 sender half-connection socket
|
||||
* @ccid3hctx_x - Current sending rate in 64 * bytes per second
|
||||
* @ccid3hctx_x_recv - Receive rate in 64 * bytes per second
|
||||
* @ccid3hctx_x_calc - Calculated rate in bytes per second
|
||||
|
@ -119,9 +117,9 @@ struct ccid3_hc_tx_sock {
|
|||
|
||||
static inline struct ccid3_hc_tx_sock *ccid3_hc_tx_sk(const struct sock *sk)
|
||||
{
|
||||
struct ccid3_hc_tx_sock *hctx = ccid_priv(dccp_sk(sk)->dccps_hc_tx_ccid);
|
||||
BUG_ON(hctx == NULL);
|
||||
return hctx;
|
||||
struct ccid3_hc_tx_sock *hctx = ccid_priv(dccp_sk(sk)->dccps_hc_tx_ccid);
|
||||
BUG_ON(hctx == NULL);
|
||||
return hctx;
|
||||
}
|
||||
|
||||
/* TFRC receiver states */
|
||||
|
@ -131,22 +129,22 @@ enum ccid3_hc_rx_states {
|
|||
TFRC_RSTATE_TERM = 127,
|
||||
};
|
||||
|
||||
/** struct ccid3_hc_rx_sock - CCID3 receiver half-connection socket
|
||||
*
|
||||
* @ccid3hcrx_x_recv - Receiver estimate of send rate (RFC 3448 4.3)
|
||||
* @ccid3hcrx_rtt - Receiver estimate of rtt (non-standard)
|
||||
* @ccid3hcrx_p - Current loss event rate (RFC 3448 5.4)
|
||||
* @ccid3hcrx_last_counter - Tracks window counter (RFC 4342, 8.1)
|
||||
* @ccid3hcrx_state - Receiver state, one of %ccid3_hc_rx_states
|
||||
* @ccid3hcrx_bytes_recv - Total sum of DCCP payload bytes
|
||||
* @ccid3hcrx_x_recv - Receiver estimate of send rate (RFC 3448, sec. 4.3)
|
||||
* @ccid3hcrx_rtt - Receiver estimate of RTT
|
||||
* @ccid3hcrx_tstamp_last_feedback - Time at which last feedback was sent
|
||||
* @ccid3hcrx_tstamp_last_ack - Time at which last feedback was sent
|
||||
* @ccid3hcrx_hist - Packet history (loss detection + RTT sampling)
|
||||
* @ccid3hcrx_li_hist - Loss Interval database
|
||||
* @ccid3hcrx_s - Received packet size in bytes
|
||||
* @ccid3hcrx_pinv - Inverse of Loss Event Rate (RFC 4342, sec. 8.5)
|
||||
/**
|
||||
* struct ccid3_hc_rx_sock - CCID3 receiver half-connection socket
|
||||
* @ccid3hcrx_x_recv - Receiver estimate of send rate (RFC 3448 4.3)
|
||||
* @ccid3hcrx_rtt - Receiver estimate of rtt (non-standard)
|
||||
* @ccid3hcrx_p - Current loss event rate (RFC 3448 5.4)
|
||||
* @ccid3hcrx_last_counter - Tracks window counter (RFC 4342, 8.1)
|
||||
* @ccid3hcrx_state - Receiver state, one of %ccid3_hc_rx_states
|
||||
* @ccid3hcrx_bytes_recv - Total sum of DCCP payload bytes
|
||||
* @ccid3hcrx_x_recv - Receiver estimate of send rate (RFC 3448, sec. 4.3)
|
||||
* @ccid3hcrx_rtt - Receiver estimate of RTT
|
||||
* @ccid3hcrx_tstamp_last_feedback - Time at which last feedback was sent
|
||||
* @ccid3hcrx_tstamp_last_ack - Time at which last feedback was sent
|
||||
* @ccid3hcrx_hist - Packet history (loss detection + RTT sampling)
|
||||
* @ccid3hcrx_li_hist - Loss Interval database
|
||||
* @ccid3hcrx_s - Received packet size in bytes
|
||||
* @ccid3hcrx_pinv - Inverse of Loss Event Rate (RFC 4342, sec. 8.5)
|
||||
*/
|
||||
struct ccid3_hc_rx_sock {
|
||||
u8 ccid3hcrx_last_counter:4;
|
||||
|
@ -163,9 +161,9 @@ struct ccid3_hc_rx_sock {
|
|||
|
||||
static inline struct ccid3_hc_rx_sock *ccid3_hc_rx_sk(const struct sock *sk)
|
||||
{
|
||||
struct ccid3_hc_rx_sock *hcrx = ccid_priv(dccp_sk(sk)->dccps_hc_rx_ccid);
|
||||
BUG_ON(hcrx == NULL);
|
||||
return hcrx;
|
||||
struct ccid3_hc_rx_sock *hcrx = ccid_priv(dccp_sk(sk)->dccps_hc_rx_ccid);
|
||||
BUG_ON(hcrx == NULL);
|
||||
return hcrx;
|
||||
}
|
||||
|
||||
#endif /* _DCCP_CCID3_H_ */
|
||||
|
|
|
@ -1,6 +1,4 @@
|
|||
/*
|
||||
* net/dccp/ccids/lib/loss_interval.c
|
||||
*
|
||||
* Copyright (c) 2007 The University of Aberdeen, Scotland, UK
|
||||
* Copyright (c) 2005-7 The University of Waikato, Hamilton, New Zealand.
|
||||
* Copyright (c) 2005-7 Ian McDonald <ian.mcdonald@jandi.co.nz>
|
||||
|
@ -21,7 +19,7 @@ static const int tfrc_lh_weights[NINTERVAL] = { 10, 10, 10, 10, 8, 6, 4, 2 };
|
|||
/* implements LIFO semantics on the array */
|
||||
static inline u8 LIH_INDEX(const u8 ctr)
|
||||
{
|
||||
return (LIH_SIZE - 1 - (ctr % LIH_SIZE));
|
||||
return LIH_SIZE - 1 - (ctr % LIH_SIZE);
|
||||
}
|
||||
|
||||
/* the `counter' index always points at the next entry to be populated */
|
||||
|
@ -129,7 +127,8 @@ static inline u8 tfrc_lh_is_new_loss(struct tfrc_loss_interval *cur,
|
|||
(cur->li_is_closed || SUB16(new_loss->tfrchrx_ccval, cur->li_ccval) > 4);
|
||||
}
|
||||
|
||||
/** tfrc_lh_interval_add - Insert new record into the Loss Interval database
|
||||
/**
|
||||
* tfrc_lh_interval_add - Insert new record into the Loss Interval database
|
||||
* @lh: Loss Interval database
|
||||
* @rh: Receive history containing a fresh loss event
|
||||
* @calc_first_li: Caller-dependent routine to compute length of first interval
|
||||
|
|
|
@ -1,8 +1,6 @@
|
|||
#ifndef _DCCP_LI_HIST_
|
||||
#define _DCCP_LI_HIST_
|
||||
/*
|
||||
* net/dccp/ccids/lib/loss_interval.h
|
||||
*
|
||||
* Copyright (c) 2007 The University of Aberdeen, Scotland, UK
|
||||
* Copyright (c) 2005-7 The University of Waikato, Hamilton, New Zealand.
|
||||
* Copyright (c) 2005-7 Ian McDonald <ian.mcdonald@jandi.co.nz>
|
||||
|
|
|
@ -1,6 +1,4 @@
|
|||
/*
|
||||
* net/dccp/packet_history.c
|
||||
*
|
||||
* Copyright (c) 2007 The University of Aberdeen, Scotland, UK
|
||||
* Copyright (c) 2005-7 The University of Waikato, Hamilton, New Zealand.
|
||||
*
|
||||
|
@ -128,7 +126,7 @@ u32 tfrc_tx_hist_rtt(struct tfrc_tx_hist_entry *head, const u64 seqno,
|
|||
|
||||
|
||||
/*
|
||||
* Receiver History Routines
|
||||
* Receiver History Routines
|
||||
*/
|
||||
static struct kmem_cache *tfrc_rx_hist_slab;
|
||||
|
||||
|
|
|
@ -70,7 +70,6 @@ struct tfrc_rx_hist_entry {
|
|||
|
||||
/**
|
||||
* tfrc_rx_hist - RX history structure for TFRC-based protocols
|
||||
*
|
||||
* @ring: Packet history for RTT sampling and loss detection
|
||||
* @loss_count: Number of entries in circular history
|
||||
* @loss_start: Movable index (for loss detection)
|
||||
|
|
|
@@ -1,8 +1,6 @@
#ifndef _TFRC_H_
#define _TFRC_H_
/*
 * net/dccp/ccids/lib/tfrc.h
 *
 * Copyright (c) 2007 The University of Aberdeen, Scotland, UK
 * Copyright (c) 2005-6 The University of Waikato, Hamilton, New Zealand.
 * Copyright (c) 2005-6 Ian McDonald <ian.mcdonald@jandi.co.nz>
@@ -32,7 +30,7 @@ extern int tfrc_debug;
/* integer-arithmetic divisions of type (a * 1000000)/b */
static inline u64 scaled_div(u64 a, u64 b)
{
BUG_ON(b==0);
BUG_ON(b == 0);
return div64_u64(a * 1000000, b);
}
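The scaling convention in scaled_div() recurs throughout the TFRC code: a fractional quotient is kept as an integer by pre-multiplying with 1000000. A minimal user-space sketch, illustrative only; the kernel inline above uses div64_u64() instead of a plain division:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* User-space mirror of scaled_div(): returns (a / b) scaled by 10^6. */
static uint64_t scaled_div(uint64_t a, uint64_t b)
{
	assert(b != 0);			/* stands in for BUG_ON(b == 0) */
	return a * 1000000 / b;
}

int main(void)
{
	/* 3/4 scaled by 10^6 -> 750000, i.e. 0.75 with six decimal digits */
	printf("%llu\n", (unsigned long long)scaled_div(3, 4));
	return 0;
}
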
@@ -1,6 +1,4 @@
/*
 * net/dccp/ccids/lib/tfrc_equation.c
 *
 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
 * Copyright (c) 2005 Ian McDonald <ian.mcdonald@jandi.co.nz>
 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
@@ -79,10 +77,10 @@
}

With the given configuration, we have, with M = TFRC_CALC_X_ARRSIZE-1,
lookup[0][0] = g(1000000/(M+1)) = 1000000 * f(0.2%)
lookup[M][0] = g(1000000) = 1000000 * f(100%)
lookup[0][1] = g(TFRC_SMALLEST_P) = 1000000 * f(0.01%)
lookup[M][1] = g(TFRC_CALC_X_SPLIT) = 1000000 * f(5%)
lookup[0][0] = g(1000000/(M+1)) = 1000000 * f(0.2%)
lookup[M][0] = g(1000000) = 1000000 * f(100%)
lookup[0][1] = g(TFRC_SMALLEST_P) = 1000000 * f(0.01%)
lookup[M][1] = g(TFRC_CALC_X_SPLIT) = 1000000 * f(5%)

In summary, the two columns represent f(p) for the following ranges:
* The first column is for 0.002 <= p <= 1.0
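The split between the two columns is easier to see with concrete numbers. A user-space sketch of the row/column selection, assuming TFRC_CALC_X_ARRSIZE = 500 and TFRC_CALC_X_SPLIT = 50000, which is what the 0.01% granularity and the 5% split quoted above imply (the same indexing reappears in tfrc_calc_x() further down):

#include <stdint.h>
#include <stdio.h>

/* Assumed values, implied by the 0.01% resolution and the 5% split above. */
#define TFRC_CALC_X_ARRSIZE	500
#define TFRC_CALC_X_SPLIT	50000	/* 5% scaled by 10^6 */
#define TFRC_SMALLEST_P		(TFRC_CALC_X_SPLIT / TFRC_CALC_X_ARRSIZE)

/* Report which (row, column) of the lookup table a scaled loss rate p uses.
 * The kernel additionally clamps p below TFRC_SMALLEST_P to index 0, which
 * this illustration skips. */
static void locate(uint32_t p)
{
	if (p <= TFRC_CALC_X_SPLIT)	/* fine-grained column: 0.01% .. 5% */
		printf("p = %u -> row %u, column 1\n", p, p / TFRC_SMALLEST_P - 1);
	else				/* coarse column: 0.2% .. 100% */
		printf("p = %u -> row %u, column 0\n",
		       p, p / (1000000 / TFRC_CALC_X_ARRSIZE) - 1);
}

int main(void)
{
	locate(10000);	/* p = 1%  -> row 99,  column 1 */
	locate(500000);	/* p = 50% -> row 249, column 0 */
	return 0;
}
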
@@ -610,11 +608,10 @@ static inline u32 tfrc_binsearch(u32 fval, u8 small)

/**
 * tfrc_calc_x - Calculate the send rate as per section 3.1 of RFC3448
 *
 * @s: packet size in bytes
 * @R: RTT scaled by 1000000 (i.e., microseconds)
 * @p: loss ratio estimate scaled by 1000000
 * Returns X_calc in bytes per second (not scaled).
 * @s: packet size in bytes
 * @R: RTT scaled by 1000000 (i.e., microseconds)
 * @p: loss ratio estimate scaled by 1000000
 * Returns X_calc in bytes per second (not scaled).
 */
u32 tfrc_calc_x(u16 s, u32 R, u32 p)
{
@@ -630,17 +627,17 @@ u32 tfrc_calc_x(u16 s, u32 R, u32 p)
return ~0U;
}

if (p <= TFRC_CALC_X_SPLIT) { /* 0.0000 < p <= 0.05 */
if (p <= TFRC_CALC_X_SPLIT) { /* 0.0000 < p <= 0.05 */
if (p < TFRC_SMALLEST_P) { /* 0.0000 < p < 0.0001 */
DCCP_WARN("Value of p (%d) below resolution. "
"Substituting %d\n", p, TFRC_SMALLEST_P);
index = 0;
} else /* 0.0001 <= p <= 0.05 */
} else /* 0.0001 <= p <= 0.05 */
index = p/TFRC_SMALLEST_P - 1;

f = tfrc_calc_x_lookup[index][1];

} else { /* 0.05 < p <= 1.00 */
} else { /* 0.05 < p <= 1.00 */
index = p/(1000000/TFRC_CALC_X_ARRSIZE) - 1;

f = tfrc_calc_x_lookup[index][0];
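To make the scaling in that interface concrete: X_calc = s / (R_seconds * f(p)), and the table stores f(p) scaled by 1000000. A rough user-space cross-check against the closed-form TCP throughput equation from RFC 3448 (the numbers are purely illustrative and are not taken from this diff):

#include <math.h>
#include <stdio.h>

/*
 * Cross-check of the equation the lookup table approximates:
 *   X = s / (R * f(p)),  f(p) = sqrt(2p/3) + 12*sqrt(3p/8)*p*(1 + 32p^2)
 * With s = 1460 bytes, R = 200 ms and p = 1%, f(p) is about 0.089 and
 * X comes out around 82 kB/s.  Compile with -lm.
 */
int main(void)
{
	double s = 1460.0;	/* packet size in bytes         */
	double R = 0.2;		/* RTT in seconds (200000 usec) */
	double p = 0.01;	/* loss event rate (1%)         */
	double f = sqrt(2 * p / 3) + 12 * sqrt(3 * p / 8) * p * (1 + 32 * p * p);

	printf("f(p) = %.4f, X = %.0f bytes/s\n", f, s / (R * f));
	return 0;
}
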
@@ -661,7 +658,6 @@ u32 tfrc_calc_x(u16 s, u32 R, u32 p)

/**
 * tfrc_calc_x_reverse_lookup - try to find p given f(p)
 *
 * @fvalue: function value to match, scaled by 1000000
 * Returns closest match for p, also scaled by 1000000
 */

@@ -948,7 +948,7 @@ static struct proto dccp_v4_prot = {
#endif
};

static struct net_protocol dccp_v4_protocol = {
static const struct net_protocol dccp_v4_protocol = {
.handler = dccp_v4_rcv,
.err_handler = dccp_v4_err,
.no_policy = 1,
@@ -1152,13 +1152,13 @@ static struct proto dccp_v6_prot = {
#endif
};

static struct inet6_protocol dccp_v6_protocol = {
static const struct inet6_protocol dccp_v6_protocol = {
.handler = dccp_v6_rcv,
.err_handler = dccp_v6_err,
.flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};

static struct proto_ops inet6_dccp_ops = {
static const struct proto_ops inet6_dccp_ops = {
.family = PF_INET6,
.owner = THIS_MODULE,
.release = inet6_release,

@@ -414,7 +414,7 @@ static int dgram_getsockopt(struct sock *sk, int level, int optname,
}

static int dgram_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user optlen)
char __user *optval, int optlen)
{
struct dgram_sock *ro = dgram_sk(sk);
int val;
@@ -35,6 +35,7 @@
#include <net/ieee802154_netdev.h>

static unsigned int ieee802154_seq_num;
static DEFINE_SPINLOCK(ieee802154_seq_lock);

static struct genl_family ieee802154_coordinator_family = {
.id = GENL_ID_GENERATE,
@@ -57,12 +58,15 @@ static struct sk_buff *ieee802154_nl_create(int flags, u8 req)
{
void *hdr;
struct sk_buff *msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
unsigned long f;

if (!msg)
return NULL;

spin_lock_irqsave(&ieee802154_seq_lock, f);
hdr = genlmsg_put(msg, 0, ieee802154_seq_num++,
&ieee802154_coordinator_family, flags, req);
spin_unlock_irqrestore(&ieee802154_seq_lock, f);
if (!hdr) {
nlmsg_free(msg);
return NULL;

@@ -244,7 +244,7 @@ static int raw_getsockopt(struct sock *sk, int level, int optname,
}

static int raw_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user optlen)
char __user *optval, int optlen)
{
return -EOPNOTSUPP;
}
@@ -244,7 +244,7 @@ EXPORT_SYMBOL(build_ehash_secret);
static inline int inet_netns_ok(struct net *net, int protocol)
{
int hash;
struct net_protocol *ipprot;
const struct net_protocol *ipprot;

if (net_eq(net, &init_net))
return 1;
@@ -1162,7 +1162,7 @@ EXPORT_SYMBOL(inet_sk_rebuild_header);
static int inet_gso_send_check(struct sk_buff *skb)
{
struct iphdr *iph;
struct net_protocol *ops;
const struct net_protocol *ops;
int proto;
int ihl;
int err = -EINVAL;
@@ -1198,7 +1198,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
struct iphdr *iph;
struct net_protocol *ops;
const struct net_protocol *ops;
int proto;
int ihl;
int id;
@@ -1265,7 +1265,7 @@ out:
static struct sk_buff **inet_gro_receive(struct sk_buff **head,
struct sk_buff *skb)
{
struct net_protocol *ops;
const struct net_protocol *ops;
struct sk_buff **pp = NULL;
struct sk_buff *p;
struct iphdr *iph;
@@ -1342,7 +1342,7 @@ out:

static int inet_gro_complete(struct sk_buff *skb)
{
struct net_protocol *ops;
const struct net_protocol *ops;
struct iphdr *iph = ip_hdr(skb);
int proto = iph->protocol & (MAX_INET_PROTOS - 1);
int err = -ENOSYS;
@@ -1427,13 +1427,13 @@ void snmp_mib_free(void *ptr[2])
EXPORT_SYMBOL_GPL(snmp_mib_free);

#ifdef CONFIG_IP_MULTICAST
static struct net_protocol igmp_protocol = {
static const struct net_protocol igmp_protocol = {
.handler = igmp_rcv,
.netns_ok = 1,
};
#endif

static struct net_protocol tcp_protocol = {
static const struct net_protocol tcp_protocol = {
.handler = tcp_v4_rcv,
.err_handler = tcp_v4_err,
.gso_send_check = tcp_v4_gso_send_check,
@@ -1444,7 +1444,7 @@ static struct net_protocol tcp_protocol = {
.netns_ok = 1,
};

static struct net_protocol udp_protocol = {
static const struct net_protocol udp_protocol = {
.handler = udp_rcv,
.err_handler = udp_err,
.gso_send_check = udp4_ufo_send_check,
@@ -1453,7 +1453,7 @@ static struct net_protocol udp_protocol = {
.netns_ok = 1,
};

static struct net_protocol icmp_protocol = {
static const struct net_protocol icmp_protocol = {
.handler = icmp_rcv,
.no_policy = 1,
.netns_ok = 1,
@@ -311,7 +311,7 @@ static const struct xfrm_type ah_type =
.output = ah_output
};

static struct net_protocol ah4_protocol = {
static const struct net_protocol ah4_protocol = {
.handler = xfrm4_rcv,
.err_handler = ah4_err,
.no_policy = 1,

@@ -1087,6 +1087,12 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
case NETDEV_DOWN:
ip_mc_down(in_dev);
break;
case NETDEV_BONDING_OLDTYPE:
ip_mc_unmap(in_dev);
break;
case NETDEV_BONDING_NEWTYPE:
ip_mc_remap(in_dev);
break;
case NETDEV_CHANGEMTU:
if (inetdev_valid_mtu(dev->mtu))
break;

@@ -615,7 +615,7 @@ static const struct xfrm_type esp_type =
.output = esp_output
};

static struct net_protocol esp4_protocol = {
static const struct net_protocol esp4_protocol = {
.handler = xfrm4_rcv,
.err_handler = esp4_err,
.no_policy = 1,
@@ -655,7 +655,7 @@ static void icmp_unreach(struct sk_buff *skb)
struct iphdr *iph;
struct icmphdr *icmph;
int hash, protocol;
struct net_protocol *ipprot;
const struct net_protocol *ipprot;
u32 info = 0;
struct net *net;

@@ -1298,6 +1298,28 @@ void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
}
}

/* Device changing type */

void ip_mc_unmap(struct in_device *in_dev)
{
struct ip_mc_list *i;

ASSERT_RTNL();

for (i = in_dev->mc_list; i; i = i->next)
igmp_group_dropped(i);
}

void ip_mc_remap(struct in_device *in_dev)
{
struct ip_mc_list *i;

ASSERT_RTNL();

for (i = in_dev->mc_list; i; i = i->next)
igmp_group_added(i);
}

/* Device going down */

void ip_mc_down(struct in_device *in_dev)
@@ -1288,7 +1288,7 @@ static void ipgre_fb_tunnel_init(struct net_device *dev)
}


static struct net_protocol ipgre_protocol = {
static const struct net_protocol ipgre_protocol = {
.handler = ipgre_rcv,
.err_handler = ipgre_err,
.netns_ok = 1,

@@ -202,7 +202,7 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
{
int protocol = ip_hdr(skb)->protocol;
int hash, raw;
struct net_protocol *ipprot;
const struct net_protocol *ipprot;

resubmit:
raw = raw_local_deliver(skb, protocol);

@@ -146,7 +146,7 @@ static const struct xfrm_type ipcomp_type = {
.output = ipcomp_output
};

static struct net_protocol ipcomp4_protocol = {
static const struct net_protocol ipcomp4_protocol = {
.handler = xfrm4_rcv,
.err_handler = ipcomp4_err,
.no_policy = 1,
@@ -99,10 +99,6 @@ static int ipmr_cache_report(struct net *net,
struct sk_buff *pkt, vifi_t vifi, int assert);
static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm);

#ifdef CONFIG_IP_PIMSM_V2
static struct net_protocol pim_protocol;
#endif

static struct timer_list ipmr_expire_timer;

/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
@@ -1945,7 +1941,7 @@ static const struct file_operations ipmr_mfc_fops = {
#endif

#ifdef CONFIG_IP_PIMSM_V2
static struct net_protocol pim_protocol = {
static const struct net_protocol pim_protocol = {
.handler = pim_rcv,
.netns_ok = 1,
};
@@ -28,14 +28,14 @@
#include <linux/spinlock.h>
#include <net/protocol.h>

struct net_protocol *inet_protos[MAX_INET_PROTOS] ____cacheline_aligned_in_smp;
const struct net_protocol *inet_protos[MAX_INET_PROTOS] ____cacheline_aligned_in_smp;
static DEFINE_SPINLOCK(inet_proto_lock);

/*
 * Add a protocol handler to the hash tables
 */

int inet_add_protocol(struct net_protocol *prot, unsigned char protocol)
int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol)
{
int hash, ret;

@@ -57,7 +57,7 @@ int inet_add_protocol(struct net_protocol *prot, unsigned char protocol)
 * Remove a protocol from the hash tables.
 */

int inet_del_protocol(struct net_protocol *prot, unsigned char protocol)
int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol)
{
int hash, ret;
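The const-ification of inet_protos[] and its add/del helpers is the pattern repeated across the protocol tables in this merge. A minimal sketch of what a registration looks like after the change; the handler and the experimental protocol number 253 are placeholders and are not part of this diff:

#include <linux/init.h>
#include <linux/skbuff.h>
#include <net/protocol.h>

/* Hypothetical handler: drops everything (placeholder only). */
static int example_rcv(struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}

/* The table entry itself can now live in read-only data. */
static const struct net_protocol example_protocol = {
	.handler   = example_rcv,
	.no_policy = 1,
	.netns_ok  = 1,
};

static int __init example_init(void)
{
	/* Returns 0 on success, non-zero if the slot is already taken. */
	return inet_add_protocol(&example_protocol, 253);
}
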
@@ -2012,7 +2012,7 @@ int tcp_disconnect(struct sock *sk, int flags)
tp->snd_cwnd = 2;
icsk->icsk_probes_out = 0;
tp->packets_out = 0;
tp->snd_ssthresh = 0x7fffffff;
tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
tp->snd_cwnd_cnt = 0;
tp->bytes_acked = 0;
tcp_set_ca_state(sk, TCP_CA_Open);

@@ -761,7 +761,7 @@ void tcp_update_metrics(struct sock *sk)
set_dst_metric_rtt(dst, RTAX_RTTVAR, var);
}

if (tp->snd_ssthresh >= 0xFFFF) {
if (tcp_in_initial_slowstart(tp)) {
/* Slow start still did not finish. */
if (dst_metric(dst, RTAX_SSTHRESH) &&
!dst_metric_locked(dst, RTAX_SSTHRESH) &&
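The helper replacing the magic 0xFFFF comparison is introduced elsewhere in this series and is not shown in this diff; presumably it is little more than a comparison against the new TCP_INFINITE_SSTHRESH constant, along these lines (a sketch, not the actual hunk):

/* Sketch only: ssthresh stays "infinite" until the first loss event ends
 * the initial slow start, so this test identifies connections that are
 * still in that phase. */
#define TCP_INFINITE_SSTHRESH	0x7fffffff

static inline int tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}
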
@@ -1808,7 +1808,7 @@ static int tcp_v4_init_sock(struct sock *sk)
/* See draft-stevens-tcpca-spec-01 for discussion of the
 * initialization of these values.
 */
tp->snd_ssthresh = 0x7fffffff; /* Infinity */
tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
tp->snd_cwnd_clamp = ~0;
tp->mss_cache = 536;

@@ -2284,7 +2284,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
jiffies_to_clock_t(icsk->icsk_ack.ato),
(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
tp->snd_cwnd,
tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh,
tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
len);
}
@@ -363,7 +363,7 @@ void tcp_twsk_destructor(struct sock *sk)
#ifdef CONFIG_TCP_MD5SIG
struct tcp_timewait_sock *twsk = tcp_twsk(sk);
if (twsk->tw_md5_keylen)
tcp_put_md5sig_pool();
tcp_free_md5sig_pool();
#endif
}

@@ -410,7 +410,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
newtp->retrans_out = 0;
newtp->sacked_out = 0;
newtp->fackets_out = 0;
newtp->snd_ssthresh = 0x7fffffff;
newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;

/* So many TCP implementations out there (incorrectly) count the
 * initial SYN frame in their delayed-ACK and congestion control
@@ -132,7 +132,7 @@ static void tunnel64_err(struct sk_buff *skb, u32 info)
}
#endif

static struct net_protocol tunnel4_protocol = {
static const struct net_protocol tunnel4_protocol = {
.handler = tunnel4_rcv,
.err_handler = tunnel4_err,
.no_policy = 1,
@@ -140,7 +140,7 @@ static struct net_protocol tunnel4_protocol = {
};

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static struct net_protocol tunnel64_protocol = {
static const struct net_protocol tunnel64_protocol = {
.handler = tunnel64_rcv,
.err_handler = tunnel64_err,
.no_policy = 1,

@@ -25,7 +25,7 @@ static void udplite_err(struct sk_buff *skb, u32 info)
__udp4_lib_err(skb, info, &udplite_table);
}

static struct net_protocol udplite_protocol = {
static const struct net_protocol udplite_protocol = {
.handler = udplite_rcv,
.err_handler = udplite_err,
.no_policy = 1,
@@ -137,6 +137,8 @@ static DEFINE_SPINLOCK(addrconf_verify_lock);
static void addrconf_join_anycast(struct inet6_ifaddr *ifp);
static void addrconf_leave_anycast(struct inet6_ifaddr *ifp);

static void addrconf_bonding_change(struct net_device *dev,
unsigned long event);
static int addrconf_ifdown(struct net_device *dev, int how);

static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags);
@@ -1405,8 +1407,8 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
struct inet6_dev *idev = ifp->idev;

if (net_ratelimit())
printk(KERN_INFO "%s: IPv6 duplicate address detected!\n",
ifp->idev->dev->name);
printk(KERN_INFO "%s: IPv6 duplicate address %pI6c detected!\n",
ifp->idev->dev->name, &ifp->addr);

if (idev->cnf.accept_dad > 1 && !idev->cnf.disable_ipv6) {
struct in6_addr addr;
@@ -2582,6 +2584,10 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
return notifier_from_errno(err);
}
break;
case NETDEV_BONDING_OLDTYPE:
case NETDEV_BONDING_NEWTYPE:
addrconf_bonding_change(dev, event);
break;
}

return NOTIFY_OK;
@@ -2595,6 +2601,19 @@ static struct notifier_block ipv6_dev_notf = {
.priority = 0
};

static void addrconf_bonding_change(struct net_device *dev, unsigned long event)
{
struct inet6_dev *idev;
ASSERT_RTNL();

idev = __in6_dev_get(dev);

if (event == NETDEV_BONDING_NEWTYPE)
ipv6_mc_remap(idev);
else if (event == NETDEV_BONDING_OLDTYPE)
ipv6_mc_unmap(idev);
}

static int addrconf_ifdown(struct net_device *dev, int how)
{
struct inet6_dev *idev;
@@ -710,7 +710,7 @@ EXPORT_SYMBOL_GPL(ipv6_opt_accepted);

static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
{
struct inet6_protocol *ops = NULL;
const struct inet6_protocol *ops = NULL;

for (;;) {
struct ipv6_opt_hdr *opth;
@@ -745,7 +745,7 @@ static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
static int ipv6_gso_send_check(struct sk_buff *skb)
{
struct ipv6hdr *ipv6h;
struct inet6_protocol *ops;
const struct inet6_protocol *ops;
int err = -EINVAL;

if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
@@ -773,7 +773,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
struct ipv6hdr *ipv6h;
struct inet6_protocol *ops;
const struct inet6_protocol *ops;
int proto;
struct frag_hdr *fptr;
unsigned int unfrag_ip6hlen;
@@ -840,7 +840,7 @@ struct ipv6_gro_cb {
static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
struct sk_buff *skb)
{
struct inet6_protocol *ops;
const struct inet6_protocol *ops;
struct sk_buff **pp = NULL;
struct sk_buff *p;
struct ipv6hdr *iph;
@@ -926,7 +926,7 @@ out:

static int ipv6_gro_complete(struct sk_buff *skb)
{
struct inet6_protocol *ops;
const struct inet6_protocol *ops;
struct ipv6hdr *iph = ipv6_hdr(skb);
int err = -ENOSYS;
@@ -527,7 +527,7 @@ static const struct xfrm_type ah6_type =
.hdr_offset = xfrm6_find_1stfragopt,
};

static struct inet6_protocol ah6_protocol = {
static const struct inet6_protocol ah6_protocol = {
.handler = xfrm6_rcv,
.err_handler = ah6_err,
.flags = INET6_PROTO_NOPOLICY,

@@ -558,7 +558,7 @@ static const struct xfrm_type esp6_type =
.hdr_offset = xfrm6_find_1stfragopt,
};

static struct inet6_protocol esp6_protocol = {
static const struct inet6_protocol esp6_protocol = {
.handler = xfrm6_rcv,
.err_handler = esp6_err,
.flags = INET6_PROTO_NOPOLICY,

@@ -500,17 +500,17 @@ unknown_rh:
return -1;
}

static struct inet6_protocol rthdr_protocol = {
static const struct inet6_protocol rthdr_protocol = {
.handler = ipv6_rthdr_rcv,
.flags = INET6_PROTO_NOPOLICY | INET6_PROTO_GSO_EXTHDR,
};

static struct inet6_protocol destopt_protocol = {
static const struct inet6_protocol destopt_protocol = {
.handler = ipv6_destopt_rcv,
.flags = INET6_PROTO_NOPOLICY | INET6_PROTO_GSO_EXTHDR,
};

static struct inet6_protocol nodata_protocol = {
static const struct inet6_protocol nodata_protocol = {
.handler = dst_discard,
.flags = INET6_PROTO_NOPOLICY,
};
@@ -86,7 +86,7 @@ static inline struct sock *icmpv6_sk(struct net *net)

static int icmpv6_rcv(struct sk_buff *skb);

static struct inet6_protocol icmpv6_protocol = {
static const struct inet6_protocol icmpv6_protocol = {
.handler = icmpv6_rcv,
.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
@@ -583,7 +583,7 @@ out:

static void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
{
struct inet6_protocol *ipprot;
const struct inet6_protocol *ipprot;
int inner_offset;
int hash;
u8 nexthdr;

@@ -159,7 +159,7 @@ drop:

static int ip6_input_finish(struct sk_buff *skb)
{
struct inet6_protocol *ipprot;
const struct inet6_protocol *ipprot;
unsigned int nhoff;
int nexthdr, raw;
u8 hash;
@@ -83,10 +83,6 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt,
static int ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm);
static void mroute_clean_tables(struct net *net);

#ifdef CONFIG_IPV6_PIMSM_V2
static struct inet6_protocol pim6_protocol;
#endif

static struct timer_list ipmr_expire_timer;

@@ -410,7 +406,7 @@ static int pim6_rcv(struct sk_buff *skb)
return 0;
}

static struct inet6_protocol pim6_protocol = {
static const struct inet6_protocol pim6_protocol = {
.handler = pim6_rcv,
};