[SCSI] fnic: Add new Cisco PCI-Express FCoE HBA

fnic is a driver for the Cisco PCI-Express FCoE HBA

Signed-off-by: Abhijeet Joglekar <abjoglek@cisco.com>
Signed-off-by: Joe Eykholt <jeykholt@cisco.com>
Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Author: Abhijeet Joglekar, 2009-04-17 18:33:26 -07:00 (committed by James Bottomley)
parent 210af919c9
commit 5df6d737dd
37 changed files with 9199 additions and 0 deletions

MAINTAINERS
@@ -1431,6 +1431,14 @@ P: Russell King
M: linux@arm.linux.org.uk
F: include/linux/clk.h
CISCO FCOE HBA DRIVER
P: Abhijeet Joglekar
M: abjoglek@cisco.com
P: Joe Eykholt
M: jeykholt@cisco.com
L: linux-scsi@vger.kernel.org
S: Supported
CODA FILE SYSTEM
P: Jan Harkes
M: jaharkes@cs.cmu.edu

drivers/scsi/Kconfig
@@ -628,6 +628,17 @@ config FCOE
---help---
Fibre Channel over Ethernet module
config FCOE_FNIC
tristate "Cisco FNIC Driver"
depends on PCI && X86
select LIBFC
help
This is support for the Cisco PCI-Express FCoE HBA.
To compile this driver as a module, choose M here and read
<file:Documentation/scsi/scsi.txt>.
The module will be called fnic.
config SCSI_DMX3191D
tristate "DMX3191D SCSI support"
depends on PCI && SCSI

drivers/scsi/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_SCSI_DH) += device_handler/
obj-$(CONFIG_LIBFC) += libfc/
obj-$(CONFIG_LIBFCOE) += fcoe/
obj-$(CONFIG_FCOE) += fcoe/
obj-$(CONFIG_FCOE_FNIC) += fnic/
obj-$(CONFIG_ISCSI_TCP) += libiscsi.o libiscsi_tcp.o iscsi_tcp.o
obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
obj-$(CONFIG_SCSI_A4000T) += 53c700.o a4000t.o

drivers/scsi/fnic/Makefile (new file)
@@ -0,0 +1,15 @@
obj-$(CONFIG_FCOE_FNIC) += fnic.o
fnic-y := \
fnic_attrs.o \
fnic_isr.o \
fnic_main.o \
fnic_res.o \
fnic_fcs.o \
fnic_scsi.o \
vnic_cq.o \
vnic_dev.o \
vnic_intr.o \
vnic_rq.o \
vnic_wq_copy.o \
vnic_wq.o

drivers/scsi/fnic/cq_desc.h (new file)
@@ -0,0 +1,78 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _CQ_DESC_H_
#define _CQ_DESC_H_
/*
* Completion queue descriptor types
*/
enum cq_desc_types {
CQ_DESC_TYPE_WQ_ENET = 0,
CQ_DESC_TYPE_DESC_COPY = 1,
CQ_DESC_TYPE_WQ_EXCH = 2,
CQ_DESC_TYPE_RQ_ENET = 3,
CQ_DESC_TYPE_RQ_FCP = 4,
};
/* Completion queue descriptor: 16B
*
* All completion queues have this basic layout. The
* type_specfic area is unique for each completion
* queue type.
*/
struct cq_desc {
__le16 completed_index;
__le16 q_number;
u8 type_specfic[11];
u8 type_color;
};
#define CQ_DESC_TYPE_BITS 4
#define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1)
#define CQ_DESC_COLOR_MASK 1
#define CQ_DESC_COLOR_SHIFT 7
#define CQ_DESC_Q_NUM_BITS 10
#define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1)
#define CQ_DESC_COMP_NDX_BITS 12
#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
static inline void cq_desc_dec(const struct cq_desc *desc_arg,
u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
{
const struct cq_desc *desc = desc_arg;
const u8 type_color = desc->type_color;
*color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
/*
* Make sure color bit is read from desc *before* other fields
* are read from desc. Hardware guarantees color bit is last
* bit (byte) written. Adding the rmb() prevents the compiler
* and/or CPU from reordering the reads which would potentially
* result in reading stale values.
*/
rmb();
*type = type_color & CQ_DESC_TYPE_MASK;
*q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
*completed_index = le16_to_cpu(desc->completed_index) &
CQ_DESC_COMP_NDX_MASK;
}
#endif /* _CQ_DESC_H_ */
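
The color bit that cq_desc_dec() extracts is how software distinguishes freshly written completion entries from stale ones: hardware flips the bit on each pass over the completion ring, so the consumer only needs to remember which color it expects for the current pass. Below is a minimal illustrative sketch of such a poll loop; the ring pointer, to_clean index, and expected-color bookkeeping are assumptions made for the example and are not part of this patch (the driver's real service loops are in the vnic_cq/vnic_wq_copy code added alongside it).

/*
 * Illustrative sketch only (not part of this patch): consume completion
 * descriptors until the color bit stops matching the color expected for
 * the current pass over the ring.
 */
static inline unsigned int example_cq_poll(struct cq_desc *ring,
					   unsigned int ring_size,
					   unsigned int *to_clean,
					   u8 *expected_color)
{
	unsigned int done = 0;
	u8 type, color;
	u16 q_number, completed_index;

	for (;;) {
		cq_desc_dec(&ring[*to_clean], &type, &color,
			    &q_number, &completed_index);
		if (color != *expected_color)
			break;		/* entry not yet written by hardware */

		/* dispatch on 'type' (one of CQ_DESC_TYPE_*) here */

		done++;
		if (++(*to_clean) == ring_size) {
			*to_clean = 0;
			*expected_color ^= 1;	/* color flips on every wrap */
		}
	}
	return done;
}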

drivers/scsi/fnic/cq_enet_desc.h (new file)
@@ -0,0 +1,167 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _CQ_ENET_DESC_H_
#define _CQ_ENET_DESC_H_
#include "cq_desc.h"
/* Ethernet completion queue descriptor: 16B */
struct cq_enet_wq_desc {
__le16 completed_index;
__le16 q_number;
u8 reserved[11];
u8 type_color;
};
static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc,
u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
{
cq_desc_dec((struct cq_desc *)desc, type,
color, q_number, completed_index);
}
/* Completion queue descriptor: Ethernet receive queue, 16B */
struct cq_enet_rq_desc {
__le16 completed_index_flags;
__le16 q_number_rss_type_flags;
__le32 rss_hash;
__le16 bytes_written_flags;
__le16 vlan;
__le16 checksum_fcoe;
u8 flags;
u8 type_color;
};
#define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT (0x1 << 12)
#define CQ_ENET_RQ_DESC_FLAGS_FCOE (0x1 << 13)
#define CQ_ENET_RQ_DESC_FLAGS_EOP (0x1 << 14)
#define CQ_ENET_RQ_DESC_FLAGS_SOP (0x1 << 15)
#define CQ_ENET_RQ_DESC_RSS_TYPE_BITS 4
#define CQ_ENET_RQ_DESC_RSS_TYPE_MASK \
((1 << CQ_ENET_RQ_DESC_RSS_TYPE_BITS) - 1)
#define CQ_ENET_RQ_DESC_RSS_TYPE_NONE 0
#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv4 1
#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4 2
#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6 3
#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6 4
#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX 5
#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX 6
#define CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC (0x1 << 14)
#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS 14
#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK \
((1 << CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS) - 1)
#define CQ_ENET_RQ_DESC_FLAGS_TRUNCATED (0x1 << 14)
#define CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED (0x1 << 15)
#define CQ_ENET_RQ_DESC_FCOE_SOF_BITS 4
#define CQ_ENET_RQ_DESC_FCOE_SOF_MASK \
((1 << CQ_ENET_RQ_DESC_FCOE_SOF_BITS) - 1)
#define CQ_ENET_RQ_DESC_FCOE_EOF_BITS 8
#define CQ_ENET_RQ_DESC_FCOE_EOF_MASK \
((1 << CQ_ENET_RQ_DESC_FCOE_EOF_BITS) - 1)
#define CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT 8
#define CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK (0x1 << 0)
#define CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK (0x1 << 0)
#define CQ_ENET_RQ_DESC_FLAGS_UDP (0x1 << 1)
#define CQ_ENET_RQ_DESC_FCOE_ENC_ERROR (0x1 << 1)
#define CQ_ENET_RQ_DESC_FLAGS_TCP (0x1 << 2)
#define CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK (0x1 << 3)
#define CQ_ENET_RQ_DESC_FLAGS_IPV6 (0x1 << 4)
#define CQ_ENET_RQ_DESC_FLAGS_IPV4 (0x1 << 5)
#define CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT (0x1 << 6)
#define CQ_ENET_RQ_DESC_FLAGS_FCS_OK (0x1 << 7)
static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
u8 *type, u8 *color, u16 *q_number, u16 *completed_index,
u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error,
u8 *vlan_stripped, u16 *vlan, u16 *checksum, u8 *fcoe_sof,
u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof,
u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
{
u16 completed_index_flags = le16_to_cpu(desc->completed_index_flags);
u16 q_number_rss_type_flags =
le16_to_cpu(desc->q_number_rss_type_flags);
u16 bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
cq_desc_dec((struct cq_desc *)desc, type,
color, q_number, completed_index);
*ingress_port = (completed_index_flags &
CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
*fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
1 : 0;
*eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
1 : 0;
*sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
1 : 0;
*rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
*csum_not_calc = (q_number_rss_type_flags &
CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
*rss_hash = le32_to_cpu(desc->rss_hash);
*bytes_written = bytes_written_flags &
CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
*packet_error = (bytes_written_flags &
CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
*vlan_stripped = (bytes_written_flags &
CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
*vlan = le16_to_cpu(desc->vlan);
if (*fcoe) {
*fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
*fcoe_fc_crc_ok = (desc->flags &
CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
*fcoe_enc_error = (desc->flags &
CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
*fcoe_eof = (u8)((desc->checksum_fcoe >>
CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
*checksum = 0;
} else {
*fcoe_sof = 0;
*fcoe_fc_crc_ok = 0;
*fcoe_enc_error = 0;
*fcoe_eof = 0;
*checksum = le16_to_cpu(desc->checksum_fcoe);
}
*tcp_udp_csum_ok =
(desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
*udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
*tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
*ipv4_csum_ok =
(desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
*ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
*ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
*ipv4_fragment =
(desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
*fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
}
#endif /* _CQ_ENET_DESC_H_ */

drivers/scsi/fnic/cq_exch_desc.h (new file)
@@ -0,0 +1,182 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _CQ_EXCH_DESC_H_
#define _CQ_EXCH_DESC_H_
#include "cq_desc.h"
/* Exchange completion queue descriptor: 16B */
struct cq_exch_wq_desc {
u16 completed_index;
u16 q_number;
u16 exchange_id;
u8 tmpl;
u8 reserved0;
u32 reserved1;
u8 exch_status;
u8 reserved2[2];
u8 type_color;
};
#define CQ_EXCH_WQ_STATUS_BITS 2
#define CQ_EXCH_WQ_STATUS_MASK ((1 << CQ_EXCH_WQ_STATUS_BITS) - 1)
enum cq_exch_status_types {
CQ_EXCH_WQ_STATUS_TYPE_COMPLETE = 0,
CQ_EXCH_WQ_STATUS_TYPE_ABORT = 1,
CQ_EXCH_WQ_STATUS_TYPE_SGL_EOF = 2,
CQ_EXCH_WQ_STATUS_TYPE_TMPL_ERR = 3,
};
static inline void cq_exch_wq_desc_dec(struct cq_exch_wq_desc *desc_ptr,
u8 *type,
u8 *color,
u16 *q_number,
u16 *completed_index,
u8 *exch_status)
{
cq_desc_dec((struct cq_desc *)desc_ptr, type,
color, q_number, completed_index);
*exch_status = desc_ptr->exch_status & CQ_EXCH_WQ_STATUS_MASK;
}
struct cq_fcp_rq_desc {
u16 completed_index_eop_sop_prt;
u16 q_number;
u16 exchange_id;
u16 tmpl;
u16 bytes_written;
u16 vlan;
u8 sof;
u8 eof;
u8 fcs_fer_fck;
u8 type_color;
};
#define CQ_FCP_RQ_DESC_FLAGS_SOP (1 << 15)
#define CQ_FCP_RQ_DESC_FLAGS_EOP (1 << 14)
#define CQ_FCP_RQ_DESC_FLAGS_PRT (1 << 12)
#define CQ_FCP_RQ_DESC_TMPL_MASK 0x1f
#define CQ_FCP_RQ_DESC_BYTES_WRITTEN_MASK 0x3fff
#define CQ_FCP_RQ_DESC_PACKET_ERR_SHIFT 14
#define CQ_FCP_RQ_DESC_PACKET_ERR_MASK (1 << CQ_FCP_RQ_DESC_PACKET_ERR_SHIFT)
#define CQ_FCP_RQ_DESC_VS_STRIPPED_SHIFT 15
#define CQ_FCP_RQ_DESC_VS_STRIPPED_MASK (1 << CQ_FCP_RQ_DESC_VS_STRIPPED_SHIFT)
#define CQ_FCP_RQ_DESC_FC_CRC_OK_MASK 0x1
#define CQ_FCP_RQ_DESC_FCOE_ERR_SHIFT 1
#define CQ_FCP_RQ_DESC_FCOE_ERR_MASK (1 << CQ_FCP_RQ_DESC_FCOE_ERR_SHIFT)
#define CQ_FCP_RQ_DESC_FCS_OK_SHIFT 7
#define CQ_FCP_RQ_DESC_FCS_OK_MASK (1 << CQ_FCP_RQ_DESC_FCS_OK_SHIFT)
static inline void cq_fcp_rq_desc_dec(struct cq_fcp_rq_desc *desc_ptr,
u8 *type,
u8 *color,
u16 *q_number,
u16 *completed_index,
u8 *eop,
u8 *sop,
u8 *fck,
u16 *exchange_id,
u16 *tmpl,
u32 *bytes_written,
u8 *sof,
u8 *eof,
u8 *ingress_port,
u8 *packet_err,
u8 *fcoe_err,
u8 *fcs_ok,
u8 *vlan_stripped,
u16 *vlan)
{
cq_desc_dec((struct cq_desc *)desc_ptr, type,
color, q_number, completed_index);
*eop = (desc_ptr->completed_index_eop_sop_prt &
CQ_FCP_RQ_DESC_FLAGS_EOP) ? 1 : 0;
*sop = (desc_ptr->completed_index_eop_sop_prt &
CQ_FCP_RQ_DESC_FLAGS_SOP) ? 1 : 0;
*ingress_port =
(desc_ptr->completed_index_eop_sop_prt &
CQ_FCP_RQ_DESC_FLAGS_PRT) ? 1 : 0;
*exchange_id = desc_ptr->exchange_id;
*tmpl = desc_ptr->tmpl & CQ_FCP_RQ_DESC_TMPL_MASK;
*bytes_written =
desc_ptr->bytes_written & CQ_FCP_RQ_DESC_BYTES_WRITTEN_MASK;
*packet_err =
(desc_ptr->bytes_written & CQ_FCP_RQ_DESC_PACKET_ERR_MASK) >>
CQ_FCP_RQ_DESC_PACKET_ERR_SHIFT;
*vlan_stripped =
(desc_ptr->bytes_written & CQ_FCP_RQ_DESC_VS_STRIPPED_MASK) >>
CQ_FCP_RQ_DESC_VS_STRIPPED_SHIFT;
*vlan = desc_ptr->vlan;
*sof = desc_ptr->sof;
*fck = desc_ptr->fcs_fer_fck & CQ_FCP_RQ_DESC_FC_CRC_OK_MASK;
*fcoe_err = (desc_ptr->fcs_fer_fck & CQ_FCP_RQ_DESC_FCOE_ERR_MASK) >>
CQ_FCP_RQ_DESC_FCOE_ERR_SHIFT;
*eof = desc_ptr->eof;
*fcs_ok =
(desc_ptr->fcs_fer_fck & CQ_FCP_RQ_DESC_FCS_OK_MASK) >>
CQ_FCP_RQ_DESC_FCS_OK_SHIFT;
}
struct cq_sgl_desc {
u16 exchange_id;
u16 q_number;
u32 active_burst_offset;
u32 tot_data_bytes;
u16 tmpl;
u8 sgl_err;
u8 type_color;
};
enum cq_sgl_err_types {
CQ_SGL_ERR_NO_ERROR = 0,
CQ_SGL_ERR_OVERFLOW, /* data ran beyond end of SGL */
CQ_SGL_ERR_SGL_LCL_ADDR_ERR, /* sgl access to local vnic addr illegal*/
CQ_SGL_ERR_ADDR_RSP_ERR, /* sgl address error */
CQ_SGL_ERR_DATA_RSP_ERR, /* sgl data rsp error */
CQ_SGL_ERR_CNT_ZERO_ERR, /* SGL count is 0 */
CQ_SGL_ERR_CNT_MAX_ERR, /* SGL count is larger than supported */
CQ_SGL_ERR_ORDER_ERR, /* frames recv on both ports, order err */
CQ_SGL_ERR_DATA_LCL_ADDR_ERR,/* sgl data buf to local vnic addr ill */
CQ_SGL_ERR_HOST_CQ_ERR, /* host cq entry to local vnic addr ill */
};
#define CQ_SGL_SGL_ERR_MASK 0x1f
#define CQ_SGL_TMPL_MASK 0x1f
static inline void cq_sgl_desc_dec(struct cq_sgl_desc *desc_ptr,
u8 *type,
u8 *color,
u16 *q_number,
u16 *exchange_id,
u32 *active_burst_offset,
u32 *tot_data_bytes,
u16 *tmpl,
u8 *sgl_err)
{
/* Cheat a little by assuming exchange_id is the same as completed
index */
cq_desc_dec((struct cq_desc *)desc_ptr, type, color, q_number,
exchange_id);
*active_burst_offset = desc_ptr->active_burst_offset;
*tot_data_bytes = desc_ptr->tot_data_bytes;
*tmpl = desc_ptr->tmpl & CQ_SGL_TMPL_MASK;
*sgl_err = desc_ptr->sgl_err & CQ_SGL_SGL_ERR_MASK;
}
#endif /* _CQ_EXCH_DESC_H_ */

drivers/scsi/fnic/fcpio.h (new file)
@@ -0,0 +1,780 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _FCPIO_H_
#define _FCPIO_H_
#include <linux/if_ether.h>
/*
* This header file includes all of the data structures used for
* communication by the host driver to the fcp firmware.
*/
/*
* Exchange and sequence id space allocated to the host driver
*/
#define FCPIO_HOST_EXCH_RANGE_START 0x1000
#define FCPIO_HOST_EXCH_RANGE_END 0x1fff
#define FCPIO_HOST_SEQ_ID_RANGE_START 0x80
#define FCPIO_HOST_SEQ_ID_RANGE_END 0xff
/*
* Command entry type
*/
enum fcpio_type {
/*
* Initiator request types
*/
FCPIO_ICMND_16 = 0x1,
FCPIO_ICMND_32,
FCPIO_ICMND_CMPL,
FCPIO_ITMF,
FCPIO_ITMF_CMPL,
/*
* Target request types
*/
FCPIO_TCMND_16 = 0x11,
FCPIO_TCMND_32,
FCPIO_TDATA,
FCPIO_TXRDY,
FCPIO_TRSP,
FCPIO_TDRSP_CMPL,
FCPIO_TTMF,
FCPIO_TTMF_ACK,
FCPIO_TABORT,
FCPIO_TABORT_CMPL,
/*
* Misc request types
*/
FCPIO_ACK = 0x20,
FCPIO_RESET,
FCPIO_RESET_CMPL,
FCPIO_FLOGI_REG,
FCPIO_FLOGI_REG_CMPL,
FCPIO_ECHO,
FCPIO_ECHO_CMPL,
FCPIO_LUNMAP_CHNG,
FCPIO_LUNMAP_REQ,
FCPIO_LUNMAP_REQ_CMPL,
FCPIO_FLOGI_FIP_REG,
FCPIO_FLOGI_FIP_REG_CMPL,
};
/*
* Header status codes from the firmware
*/
enum fcpio_status {
FCPIO_SUCCESS = 0, /* request was successful */
/*
* If a request to the firmware is rejected, the original request
* header will be returned with the status set to one of the following:
*/
FCPIO_INVALID_HEADER, /* header contains invalid data */
FCPIO_OUT_OF_RESOURCE, /* out of resources to complete request */
FCPIO_INVALID_PARAM, /* some parameter in request is invalid */
FCPIO_REQ_NOT_SUPPORTED, /* request type is not supported */
FCPIO_IO_NOT_FOUND, /* requested I/O was not found */
/*
* Once a request is processed, the firmware will usually return
* a cmpl message type. In cases where errors occurred,
* the header status field would be filled in with one of the following:
*/
FCPIO_ABORTED = 0x41, /* request was aborted */
FCPIO_TIMEOUT, /* request was timed out */
FCPIO_SGL_INVALID, /* request was aborted due to sgl error */
FCPIO_MSS_INVALID, /* request was aborted due to mss error */
FCPIO_DATA_CNT_MISMATCH, /* recv/sent more/less data than exp. */
FCPIO_FW_ERR, /* request was terminated due to fw error */
FCPIO_ITMF_REJECTED, /* itmf req was rejected by remote node */
FCPIO_ITMF_FAILED, /* itmf req was failed by remote node */
FCPIO_ITMF_INCORRECT_LUN, /* itmf req targeted incorrect LUN */
FCPIO_CMND_REJECTED, /* request was invalid and rejected */
FCPIO_NO_PATH_AVAIL, /* no paths to the lun were available */
FCPIO_PATH_FAILED, /* i/o sent to current path failed */
FCPIO_LUNMAP_CHNG_PEND, /* i/o rejected due to lunmap change */
};
/*
* The header command tag. All host requests will use the "tag" field
* to mark commands with a unique tag. When the firmware responds to
* a host request, it will copy the tag field into the response.
*
* The only firmware requests that will use the rx_id/ox_id fields instead
* of the tag field will be the target command and target task management
* requests. These two requests do not have corresponding host requests
* since they come directly from the FC initiator on the network.
*/
struct fcpio_tag {
union {
u32 req_id;
struct {
u16 rx_id;
u16 ox_id;
} ex_id;
} u;
};
static inline void
fcpio_tag_id_enc(struct fcpio_tag *tag, u32 id)
{
tag->u.req_id = id;
}
static inline void
fcpio_tag_id_dec(struct fcpio_tag *tag, u32 *id)
{
*id = tag->u.req_id;
}
static inline void
fcpio_tag_exid_enc(struct fcpio_tag *tag, u16 ox_id, u16 rx_id)
{
tag->u.ex_id.rx_id = rx_id;
tag->u.ex_id.ox_id = ox_id;
}
static inline void
fcpio_tag_exid_dec(struct fcpio_tag *tag, u16 *ox_id, u16 *rx_id)
{
*rx_id = tag->u.ex_id.rx_id;
*ox_id = tag->u.ex_id.ox_id;
}
/*
* The header for an fcpio request, whether from the firmware or from the
* host driver
*/
struct fcpio_header {
u8 type; /* enum fcpio_type */
u8 status; /* header status entry */
u16 _resvd; /* reserved */
struct fcpio_tag tag; /* header tag */
};
static inline void
fcpio_header_enc(struct fcpio_header *hdr,
u8 type, u8 status,
struct fcpio_tag tag)
{
hdr->type = type;
hdr->status = status;
hdr->_resvd = 0;
hdr->tag = tag;
}
static inline void
fcpio_header_dec(struct fcpio_header *hdr,
u8 *type, u8 *status,
struct fcpio_tag *tag)
{
*type = hdr->type;
*status = hdr->status;
*tag = hdr->tag;
}
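
As a concrete (and purely illustrative) example of the tagging scheme described above: the host stamps each request header with a tag of its choosing, and the firmware copies that tag back into the completion, so the driver can map the completion to the original command. The helper name and the scsi_tag parameter below are invented for the sketch; only the fcpio_tag/fcpio_header helpers come from this file.

/*
 * Illustrative sketch only (not part of this patch): encode a driver
 * tag into an outgoing request header and recover it from the header
 * the firmware echoes back in its completion.
 */
static inline void example_tag_round_trip(struct fcpio_header *req_hdr,
					  struct fcpio_header *cmpl_hdr,
					  u32 scsi_tag)
{
	struct fcpio_tag tag;
	u8 type, status;
	u32 id;

	/* host -> firmware: stamp the request with our tag */
	fcpio_tag_id_enc(&tag, scsi_tag);
	fcpio_header_enc(req_hdr, FCPIO_ICMND_16, 0, tag);

	/* firmware -> host: the completion carries the same tag back */
	fcpio_header_dec(cmpl_hdr, &type, &status, &tag);
	fcpio_tag_id_dec(&tag, &id);	/* id == scsi_tag for host-tagged requests */
}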
#define CDB_16 16
#define CDB_32 32
#define LUN_ADDRESS 8
/*
* fcpio_icmnd_16: host -> firmware request
*
* used for sending out an initiator SCSI 16-byte command
*/
struct fcpio_icmnd_16 {
u32 lunmap_id; /* index into lunmap table */
u8 special_req_flags; /* special exchange request flags */
u8 _resvd0[3]; /* reserved */
u32 sgl_cnt; /* scatter-gather list count */
u32 sense_len; /* sense buffer length */
u64 sgl_addr; /* scatter-gather list addr */
u64 sense_addr; /* sense buffer address */
u8 crn; /* SCSI Command Reference No. */
u8 pri_ta; /* SCSI Priority and Task attribute */
u8 _resvd1; /* reserved: should be 0 */
u8 flags; /* command flags */
u8 scsi_cdb[CDB_16]; /* SCSI Cmnd Descriptor Block */
u32 data_len; /* length of data expected */
u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */
u8 _resvd2; /* reserved */
u8 d_id[3]; /* FC vNIC only: Target D_ID */
u16 mss; /* FC vNIC only: max burst */
u16 _resvd3; /* reserved */
u32 r_a_tov; /* FC vNIC only: Res. Alloc Timeout */
u32 e_d_tov; /* FC vNIC only: Err Detect Timeout */
};
/*
* Special request flags
*/
#define FCPIO_ICMND_SRFLAG_RETRY 0x01 /* Enable Retry handling on exchange */
/*
* Priority/Task Attribute settings
*/
#define FCPIO_ICMND_PTA_SIMPLE 0 /* simple task attribute */
#define FCPIO_ICMND_PTA_HEADQ 1 /* head of queue task attribute */
#define FCPIO_ICMND_PTA_ORDERED 2 /* ordered task attribute */
#define FCPIO_ICMND_PTA_ACA 4 /* auto contingent allegiance */
#define FCPIO_ICMND_PRI_SHIFT 3 /* priority field starts in bit 3 */
/*
* Command flags
*/
#define FCPIO_ICMND_RDDATA 0x02 /* read data */
#define FCPIO_ICMND_WRDATA 0x01 /* write data */
/*
* fcpio_icmnd_32: host -> firmware request
*
* used for sending out an initiator SCSI 32-byte command
*/
struct fcpio_icmnd_32 {
u32 lunmap_id; /* index into lunmap table */
u8 special_req_flags; /* special exchange request flags */
u8 _resvd0[3]; /* reserved */
u32 sgl_cnt; /* scatter-gather list count */
u32 sense_len; /* sense buffer length */
u64 sgl_addr; /* scatter-gather list addr */
u64 sense_addr; /* sense buffer address */
u8 crn; /* SCSI Command Reference No. */
u8 pri_ta; /* SCSI Priority and Task attribute */
u8 _resvd1; /* reserved: should be 0 */
u8 flags; /* command flags */
u8 scsi_cdb[CDB_32]; /* SCSI Cmnd Descriptor Block */
u32 data_len; /* length of data expected */
u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */
u8 _resvd2; /* reserved */
u8 d_id[3]; /* FC vNIC only: Target D_ID */
u16 mss; /* FC vNIC only: max burst */
u16 _resvd3; /* reserved */
u32 r_a_tov; /* FC vNIC only: Res. Alloc Timeout */
u32 e_d_tov; /* FC vNIC only: Error Detect Timeout */
};
/*
* fcpio_itmf: host -> firmware request
*
* used for requesting the firmware to abort a request and/or send out
* a task management function
*
* The t_tag field is only needed when the request type is ABT_TASK.
*/
struct fcpio_itmf {
u32 lunmap_id; /* index into lunmap table */
u32 tm_req; /* SCSI Task Management request */
u32 t_tag; /* header tag of fcpio to be aborted */
u32 _resvd; /* _reserved */
u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */
u8 _resvd1; /* reserved */
u8 d_id[3]; /* FC vNIC only: Target D_ID */
u32 r_a_tov; /* FC vNIC only: R_A_TOV in msec */
u32 e_d_tov; /* FC vNIC only: E_D_TOV in msec */
};
/*
* Task Management request
*/
enum fcpio_itmf_tm_req_type {
FCPIO_ITMF_ABT_TASK_TERM = 0x01, /* abort task and terminate */
FCPIO_ITMF_ABT_TASK, /* abort task and issue abts */
FCPIO_ITMF_ABT_TASK_SET, /* abort task set */
FCPIO_ITMF_CLR_TASK_SET, /* clear task set */
FCPIO_ITMF_LUN_RESET, /* logical unit reset task mgmt */
FCPIO_ITMF_CLR_ACA, /* Clear ACA condition */
};
/*
* fcpio_tdata: host -> firmware request
*
* used for requesting the firmware to send out a read data transfer for a
* target command
*/
struct fcpio_tdata {
u16 rx_id; /* FC rx_id of target command */
u16 flags; /* command flags */
u32 rel_offset; /* data sequence relative offset */
u32 sgl_cnt; /* scatter-gather list count */
u32 data_len; /* length of data expected to send */
u64 sgl_addr; /* scatter-gather list address */
};
/*
* Command flags
*/
#define FCPIO_TDATA_SCSI_RSP 0x01 /* send a scsi resp. after last frame */
/*
* fcpio_txrdy: host -> firmware request
*
* used for requesting the firmware to send out a write data transfer for a
* target command
*/
struct fcpio_txrdy {
u16 rx_id; /* FC rx_id of target command */
u16 _resvd0; /* reserved */
u32 rel_offset; /* data sequence relative offset */
u32 sgl_cnt; /* scatter-gather list count */
u32 data_len; /* length of data expected to send */
u64 sgl_addr; /* scatter-gather list address */
};
/*
* fcpio_trsp: host -> firmware request
*
* used for requesting the firmware to send out a response for a target
* command
*/
struct fcpio_trsp {
u16 rx_id; /* FC rx_id of target command */
u16 _resvd0; /* reserved */
u32 sense_len; /* sense data buffer length */
u64 sense_addr; /* sense data buffer address */
u16 _resvd1; /* reserved */
u8 flags; /* response request flags */
u8 scsi_status; /* SCSI status */
u32 residual; /* SCSI data residual value of I/O */
};
/*
* response request flags
*/
#define FCPIO_TRSP_RESID_UNDER 0x08 /* residual is valid and is underflow */
#define FCPIO_TRSP_RESID_OVER 0x04 /* residual is valid and is overflow */
/*
* fcpio_ttmf_ack: host -> firmware response
*
* used by the host to indicate to the firmware it has received and processed
* the target tmf request
*/
struct fcpio_ttmf_ack {
u16 rx_id; /* FC rx_id of target command */
u16 _resvd0; /* reserved */
u32 tmf_status; /* SCSI task management status */
};
/*
* fcpio_tabort: host -> firmware request
*
* used by the host to request the firmware to abort a target request that was
* received by the firmware
*/
struct fcpio_tabort {
u16 rx_id; /* rx_id of the target request */
};
/*
* fcpio_reset: host -> firmware request
*
* used by the host to signal a reset of the driver to the firmware
* and to request firmware to clean up all outstanding I/O
*/
struct fcpio_reset {
u32 _resvd;
};
enum fcpio_flogi_reg_format_type {
FCPIO_FLOGI_REG_DEF_DEST = 0, /* Use the oui | s_id mac format */
FCPIO_FLOGI_REG_GW_DEST, /* Use the fixed gateway mac */
};
/*
* fcpio_flogi_reg: host -> firmware request
*
* fc vnic only
* used by the host to notify the firmware of the lif's s_id
* and destination mac address format
*/
struct fcpio_flogi_reg {
u8 format;
u8 s_id[3]; /* FC vNIC only: Source S_ID */
u8 gateway_mac[ETH_ALEN]; /* Destination gateway mac */
u16 _resvd;
u32 r_a_tov; /* R_A_TOV in msec */
u32 e_d_tov; /* E_D_TOV in msec */
};
/*
* fcpio_echo: host -> firmware request
*
* sends a heartbeat echo request to the firmware
*/
struct fcpio_echo {
u32 _resvd;
};
/*
* fcpio_lunmap_req: host -> firmware request
*
* scsi vnic only
* sends a request to retrieve the lunmap table for scsi vnics
*/
struct fcpio_lunmap_req {
u64 addr; /* address of the buffer */
u32 len; /* len of the buffer */
};
/*
* fcpio_flogi_fip_reg: host -> firmware request
*
* fc vnic only
* used by the host to notify the firmware of the lif's s_id
* and destination mac address format
*/
struct fcpio_flogi_fip_reg {
u8 _resvd0;
u8 s_id[3]; /* FC vNIC only: Source S_ID */
u8 fcf_mac[ETH_ALEN]; /* FCF Target destination mac */
u16 _resvd1;
u32 r_a_tov; /* R_A_TOV in msec */
u32 e_d_tov; /* E_D_TOV in msec */
u8 ha_mac[ETH_ALEN]; /* Host adapter source mac */
u16 _resvd2;
};
/*
* Basic structure for all fcpio structures that are sent from the host to the
* firmware. They are 128 bytes per structure.
*/
#define FCPIO_HOST_REQ_LEN 128 /* expected length of host requests */
struct fcpio_host_req {
struct fcpio_header hdr;
union {
/*
* Defines space needed for request
*/
u8 buf[FCPIO_HOST_REQ_LEN - sizeof(struct fcpio_header)];
/*
* Initiator host requests
*/
struct fcpio_icmnd_16 icmnd_16;
struct fcpio_icmnd_32 icmnd_32;
struct fcpio_itmf itmf;
/*
* Target host requests
*/
struct fcpio_tdata tdata;
struct fcpio_txrdy txrdy;
struct fcpio_trsp trsp;
struct fcpio_ttmf_ack ttmf_ack;
struct fcpio_tabort tabort;
/*
* Misc requests
*/
struct fcpio_reset reset;
struct fcpio_flogi_reg flogi_reg;
struct fcpio_echo echo;
struct fcpio_lunmap_req lunmap_req;
struct fcpio_flogi_fip_reg flogi_fip_reg;
} u;
};
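
To illustrate how one of these 128-byte host request slots might be filled in (a hedged sketch, not the driver's actual queuing code): the common header is encoded first with the request type and tag, then the matching member of the union is populated. The function name and parameters below (sgl_pa, sense_pa, and so on) are invented for the example, the memset/memcpy assume <linux/string.h>, and the FC vNIC-only fields (lun, d_id, mss, timeouts) are left out for brevity.

/*
 * Illustrative sketch only (not part of this patch): populate a
 * 128-byte host request entry with a 16-byte initiator SCSI command.
 */
static inline void example_fill_icmnd_16(struct fcpio_host_req *req,
					 u32 tag_id, u64 sgl_pa, u32 sgl_cnt,
					 u64 sense_pa, u32 sense_len,
					 const u8 *cdb, u32 data_len)
{
	struct fcpio_tag tag;

	memset(req, 0, sizeof(*req));
	fcpio_tag_id_enc(&tag, tag_id);
	fcpio_header_enc(&req->hdr, FCPIO_ICMND_16, 0, tag);

	req->u.icmnd_16.sgl_cnt = sgl_cnt;
	req->u.icmnd_16.sgl_addr = sgl_pa;
	req->u.icmnd_16.sense_len = sense_len;
	req->u.icmnd_16.sense_addr = sense_pa;
	req->u.icmnd_16.pri_ta = FCPIO_ICMND_PTA_SIMPLE;
	req->u.icmnd_16.flags = FCPIO_ICMND_RDDATA;	/* e.g. a read */
	req->u.icmnd_16.data_len = data_len;
	memcpy(req->u.icmnd_16.scsi_cdb, cdb, CDB_16);
	/* FC vNIC-only fields (lun, d_id, mss, r_a_tov, e_d_tov) omitted */
}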
/*
* fcpio_icmnd_cmpl: firmware -> host response
*
* used for sending the host a response to an initiator command
*/
struct fcpio_icmnd_cmpl {
u8 _resvd0[6]; /* reserved */
u8 flags; /* response flags */
u8 scsi_status; /* SCSI status */
u32 residual; /* SCSI data residual length */
u32 sense_len; /* SCSI sense length */
};
/*
* response flags
*/
#define FCPIO_ICMND_CMPL_RESID_UNDER 0x08 /* resid under and valid */
#define FCPIO_ICMND_CMPL_RESID_OVER 0x04 /* resid over and valid */
/*
* fcpio_itmf_cmpl: firmware -> host response
*
* used for sending the host a response for a itmf request
*/
struct fcpio_itmf_cmpl {
u32 _resvd; /* reserved */
};
/*
* fcpio_tcmnd_16: firmware -> host request
*
* used by the firmware to notify the host of an incoming target SCSI 16-Byte
* request
*/
struct fcpio_tcmnd_16 {
u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */
u8 crn; /* SCSI Command Reference No. */
u8 pri_ta; /* SCSI Priority and Task attribute */
u8 _resvd2; /* reserved: should be 0 */
u8 flags; /* command flags */
u8 scsi_cdb[CDB_16]; /* SCSI Cmnd Descriptor Block */
u32 data_len; /* length of data expected */
u8 _resvd1; /* reserved */
u8 s_id[3]; /* FC vNIC only: Source S_ID */
};
/*
* Priority/Task Attribute settings
*/
#define FCPIO_TCMND_PTA_SIMPLE 0 /* simple task attribute */
#define FCPIO_TCMND_PTA_HEADQ 1 /* head of queue task attribute */
#define FCPIO_TCMND_PTA_ORDERED 2 /* ordered task attribute */
#define FCPIO_TCMND_PTA_ACA 4 /* auto contingent allegiance */
#define FCPIO_TCMND_PRI_SHIFT 3 /* priority field starts in bit 3 */
/*
* Command flags
*/
#define FCPIO_TCMND_RDDATA 0x02 /* read data */
#define FCPIO_TCMND_WRDATA 0x01 /* write data */
/*
* fcpio_tcmnd_32: firmware -> host request
*
* used by the firmware to notify the host of an incoming target SCSI 32-Byte
* request
*/
struct fcpio_tcmnd_32 {
u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */
u8 crn; /* SCSI Command Reference No. */
u8 pri_ta; /* SCSI Priority and Task attribute */
u8 _resvd2; /* reserved: should be 0 */
u8 flags; /* command flags */
u8 scsi_cdb[CDB_32]; /* SCSI Cmnd Descriptor Block */
u32 data_len; /* length of data expected */
u8 _resvd0; /* reserved */
u8 s_id[3]; /* FC vNIC only: Source S_ID */
};
/*
* fcpio_tdrsp_cmpl: firmware -> host response
*
* used by the firmware to notify the host of a response to a host target
* command
*/
struct fcpio_tdrsp_cmpl {
u16 rx_id; /* rx_id of the target request */
u16 _resvd0; /* reserved */
};
/*
* fcpio_ttmf: firmware -> host request
*
* used by the firmware to notify the host of an incoming task management
* function request
*/
struct fcpio_ttmf {
u8 _resvd0; /* reserved */
u8 s_id[3]; /* FC vNIC only: Source S_ID */
u8 lun[LUN_ADDRESS]; /* FC vNIC only: LUN address */
u8 crn; /* SCSI Command Reference No. */
u8 _resvd2[3]; /* reserved */
u32 tmf_type; /* task management request type */
};
/*
* Task Management request
*/
#define FCPIO_TTMF_CLR_ACA 0x40 /* Clear ACA condition */
#define FCPIO_TTMF_LUN_RESET 0x10 /* logical unit reset task mgmt */
#define FCPIO_TTMF_CLR_TASK_SET 0x04 /* clear task set */
#define FCPIO_TTMF_ABT_TASK_SET 0x02 /* abort task set */
#define FCPIO_TTMF_ABT_TASK 0x01 /* abort task */
/*
* fcpio_tabort_cmpl: firmware -> host response
*
* used by the firmware to respond to a host's tabort request
*/
struct fcpio_tabort_cmpl {
u16 rx_id; /* rx_id of the target request */
u16 _resvd0; /* reserved */
};
/*
* fcpio_ack: firmware -> host response
*
* used by firmware to notify the host of the last work request received
*/
struct fcpio_ack {
u16 request_out; /* last host entry received */
u16 _resvd;
};
/*
* fcpio_reset_cmpl: firmware -> host response
*
* used by firmware to respond to the host's reset request
*/
struct fcpio_reset_cmpl {
u16 vnic_id;
};
/*
* fcpio_flogi_reg_cmpl: firmware -> host response
*
* fc vnic only
* response to the fcpio_flogi_reg request
*/
struct fcpio_flogi_reg_cmpl {
u32 _resvd;
};
/*
* fcpio_echo_cmpl: firmware -> host response
*
* response to the fcpio_echo request
*/
struct fcpio_echo_cmpl {
u32 _resvd;
};
/*
* fcpio_lunmap_chng: firmware -> host notification
*
* scsi vnic only
* notifies the host that the lunmap tables have changed
*/
struct fcpio_lunmap_chng {
u32 _resvd;
};
/*
* fcpio_lunmap_req_cmpl: firmware -> host response
*
* scsi vnic only
* response for lunmap table request from the host
*/
struct fcpio_lunmap_req_cmpl {
u32 _resvd;
};
/*
* Basic structure for all fcpio structures that are sent from the firmware to
* the host. They are 64 bytes per structure.
*/
#define FCPIO_FW_REQ_LEN 64 /* expected length of fw requests */
struct fcpio_fw_req {
struct fcpio_header hdr;
union {
/*
* Defines space needed for request
*/
u8 buf[FCPIO_FW_REQ_LEN - sizeof(struct fcpio_header)];
/*
* Initiator firmware responses
*/
struct fcpio_icmnd_cmpl icmnd_cmpl;
struct fcpio_itmf_cmpl itmf_cmpl;
/*
* Target firmware new requests
*/
struct fcpio_tcmnd_16 tcmnd_16;
struct fcpio_tcmnd_32 tcmnd_32;
/*
* Target firmware responses
*/
struct fcpio_tdrsp_cmpl tdrsp_cmpl;
struct fcpio_ttmf ttmf;
struct fcpio_tabort_cmpl tabort_cmpl;
/*
* Firmware response to work received
*/
struct fcpio_ack ack;
/*
* Misc requests
*/
struct fcpio_reset_cmpl reset_cmpl;
struct fcpio_flogi_reg_cmpl flogi_reg_cmpl;
struct fcpio_echo_cmpl echo_cmpl;
struct fcpio_lunmap_chng lunmap_chng;
struct fcpio_lunmap_req_cmpl lunmap_req_cmpl;
} u;
};
/*
* Access routines to encode and decode the color bit, which is the most
* significant bit of the last byte of the structure
*/
static inline void fcpio_color_enc(struct fcpio_fw_req *fw_req, u8 color)
{
u8 *c = ((u8 *) fw_req) + sizeof(struct fcpio_fw_req) - 1;
if (color)
*c |= 0x80;
else
*c &= ~0x80;
}
static inline void fcpio_color_dec(struct fcpio_fw_req *fw_req, u8 *color)
{
u8 *c = ((u8 *) fw_req) + sizeof(struct fcpio_fw_req) - 1;
*color = *c >> 7;
/*
* Make sure color bit is read from desc *before* other fields
* are read from desc. Hardware guarantees color bit is last
* bit (byte) written. Adding the rmb() prevents the compiler
* and/or CPU from reordering the reads which would potentially
* result in reading stale values.
*/
rmb();
}
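
A small illustrative use of the color helpers (again, not code from this patch): the host can test whether the next firmware-to-host ring entry has actually been written by comparing its color against the color expected for the current pass, and flip the expectation each time its consumer index wraps.

/*
 * Illustrative sketch only (not part of this patch): returns non-zero
 * when the descriptor has been written by firmware for the current
 * pass over the ring.
 */
static inline int example_fw_req_ready(struct fcpio_fw_req *desc,
				       u8 expected_color)
{
	u8 color;

	fcpio_color_dec(desc, &color);	/* includes the rmb() barrier */
	return color == expected_color;
}

A consumer would keep servicing entries while this returns true and toggle expected_color on every wrap of its index.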
/*
* Lunmap table entry for scsi vnics
*/
#define FCPIO_LUNMAP_TABLE_SIZE 256
#define FCPIO_FLAGS_LUNMAP_VALID 0x80
#define FCPIO_FLAGS_BOOT 0x01
struct fcpio_lunmap_entry {
u8 bus;
u8 target;
u8 lun;
u8 path_cnt;
u16 flags;
u16 update_cnt;
};
struct fcpio_lunmap_tbl {
u32 update_cnt;
struct fcpio_lunmap_entry lunmaps[FCPIO_LUNMAP_TABLE_SIZE];
};
#endif /* _FCPIO_H_ */

drivers/scsi/fnic/fnic.h (new file)
@@ -0,0 +1,265 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _FNIC_H_
#define _FNIC_H_
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>
#include <scsi/libfc.h>
#include "fnic_io.h"
#include "fnic_res.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_cq.h"
#include "vnic_wq_copy.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_scsi.h"
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
#define DRV_VERSION "1.0.0.1121"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
#define DESC_CLEAN_LOW_WATERMARK 8
#define FNIC_MAX_IO_REQ 2048 /* scsi_cmnd tag map entries */
#define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */
#define FNIC_DFLT_QUEUE_DEPTH 32
#define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */
/*
* Tag bits used for special requests.
*/
#define BIT(nr) (1UL << (nr))
#define FNIC_TAG_ABORT BIT(30) /* tag bit indicating abort */
#define FNIC_TAG_DEV_RST BIT(29) /* indicates device reset */
#define FNIC_TAG_MASK (BIT(24) - 1) /* mask for lookup */
#define FNIC_NO_TAG -1
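
To illustrate the intent of these bits (a hedged example, not code from this patch): when a special request such as an abort is issued for an I/O, the I/O's own tag can be reused with one of the high bits set, so the resulting completion is distinguishable from a normal I/O completion while FNIC_TAG_MASK still recovers the index used to look the command up.

/*
 * Illustrative sketch only (not part of this patch).
 */
static inline u32 example_make_abort_tag(u32 io_tag)
{
	return io_tag | FNIC_TAG_ABORT;		/* mark as an abort request */
}

static inline u32 example_tag_to_index(u32 cmpl_tag)
{
	return cmpl_tag & FNIC_TAG_MASK;	/* original tag-map index */
}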
/*
* Usage of the scsi_cmnd scratchpad.
* These fields are locked by the hashed io_req_lock.
*/
#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
#define CMD_STATE(Cmnd) ((Cmnd)->SCp.phase)
#define CMD_ABTS_STATUS(Cmnd) ((Cmnd)->SCp.Message)
#define CMD_LR_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in)
#define CMD_TAG(Cmnd) ((Cmnd)->SCp.sent_command)
#define FCPIO_INVALID_CODE 0x100 /* hdr_status value unused by firmware */
#define FNIC_LUN_RESET_TIMEOUT 10000 /* mSec */
#define FNIC_HOST_RESET_TIMEOUT 10000 /* mSec */
#define FNIC_RMDEVICE_TIMEOUT 1000 /* mSec */
#define FNIC_HOST_RESET_SETTLE_TIME 30 /* Sec */
#define FNIC_MAX_FCP_TARGET 256
extern unsigned int fnic_log_level;
#define FNIC_MAIN_LOGGING 0x01
#define FNIC_FCS_LOGGING 0x02
#define FNIC_SCSI_LOGGING 0x04
#define FNIC_ISR_LOGGING 0x08
#define FNIC_CHECK_LOGGING(LEVEL, CMD) \
do { \
if (unlikely(fnic_log_level & LEVEL)) \
do { \
CMD; \
} while (0); \
} while (0)
#define FNIC_MAIN_DBG(kern_level, host, fmt, args...) \
FNIC_CHECK_LOGGING(FNIC_MAIN_LOGGING, \
shost_printk(kern_level, host, fmt, ##args);)
#define FNIC_FCS_DBG(kern_level, host, fmt, args...) \
FNIC_CHECK_LOGGING(FNIC_FCS_LOGGING, \
shost_printk(kern_level, host, fmt, ##args);)
#define FNIC_SCSI_DBG(kern_level, host, fmt, args...) \
FNIC_CHECK_LOGGING(FNIC_SCSI_LOGGING, \
shost_printk(kern_level, host, fmt, ##args);)
#define FNIC_ISR_DBG(kern_level, host, fmt, args...) \
FNIC_CHECK_LOGGING(FNIC_ISR_LOGGING, \
shost_printk(kern_level, host, fmt, ##args);)
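
For illustration (not part of this patch), a caller would use these macros as in the sketch below; the message reaches shost_printk() only when the matching bit is set in the fnic_log_level mask declared above. The function and its arguments are invented for the example.

static inline void example_log_usage(struct Scsi_Host *host, int tag, u32 hdr_status)
{
	FNIC_SCSI_DBG(KERN_DEBUG, host,
		      "tag %d completed, hdr status 0x%x\n",
		      tag, hdr_status);
}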
extern const char *fnic_state_str[];
enum fnic_intx_intr_index {
FNIC_INTX_WQ_RQ_COPYWQ,
FNIC_INTX_ERR,
FNIC_INTX_NOTIFY,
FNIC_INTX_INTR_MAX,
};
enum fnic_msix_intr_index {
FNIC_MSIX_RQ,
FNIC_MSIX_WQ,
FNIC_MSIX_WQ_COPY,
FNIC_MSIX_ERR_NOTIFY,
FNIC_MSIX_INTR_MAX,
};
struct fnic_msix_entry {
int requested;
char devname[IFNAMSIZ];
irqreturn_t (*isr)(int, void *);
void *devid;
};
enum fnic_state {
FNIC_IN_FC_MODE = 0,
FNIC_IN_FC_TRANS_ETH_MODE,
FNIC_IN_ETH_MODE,
FNIC_IN_ETH_TRANS_FC_MODE,
};
#define FNIC_WQ_COPY_MAX 1
#define FNIC_WQ_MAX 1
#define FNIC_RQ_MAX 1
#define FNIC_CQ_MAX (FNIC_WQ_COPY_MAX + FNIC_WQ_MAX + FNIC_RQ_MAX)
struct mempool;
/* Per-instance private data structure */
struct fnic {
struct fc_lport *lport;
struct vnic_dev_bar bar0;
struct msix_entry msix_entry[FNIC_MSIX_INTR_MAX];
struct fnic_msix_entry msix[FNIC_MSIX_INTR_MAX];
struct vnic_stats *stats;
unsigned long stats_time; /* time of stats update */
struct vnic_nic_cfg *nic_cfg;
char name[IFNAMSIZ];
struct timer_list notify_timer; /* used for MSI interrupts */
unsigned int err_intr_offset;
unsigned int link_intr_offset;
unsigned int wq_count;
unsigned int cq_count;
u32 fcoui_mode:1; /* use fcoui address*/
u32 vlan_hw_insert:1; /* let hw insert the tag */
u32 in_remove:1; /* fnic device in removal */
u32 stop_rx_link_events:1; /* stop proc. rx frames, link events */
struct completion *remove_wait; /* device remove thread blocks */
struct fc_frame *flogi;
struct fc_frame *flogi_resp;
u16 flogi_oxid;
unsigned long s_id;
enum fnic_state state;
spinlock_t fnic_lock;
u16 vlan_id; /* VLAN tag including priority */
u8 mac_addr[ETH_ALEN];
u8 dest_addr[ETH_ALEN];
u8 data_src_addr[ETH_ALEN];
u64 fcp_input_bytes; /* internal statistic */
u64 fcp_output_bytes; /* internal statistic */
u32 link_down_cnt;
int link_status;
struct list_head list;
struct pci_dev *pdev;
struct vnic_fc_config config;
struct vnic_dev *vdev;
unsigned int raw_wq_count;
unsigned int wq_copy_count;
unsigned int rq_count;
int fw_ack_index[FNIC_WQ_COPY_MAX];
unsigned short fw_ack_recd[FNIC_WQ_COPY_MAX];
unsigned short wq_copy_desc_low[FNIC_WQ_COPY_MAX];
unsigned int intr_count;
u32 __iomem *legacy_pba;
struct fnic_host_tag *tags;
mempool_t *io_req_pool;
mempool_t *io_sgl_pool[FNIC_SGL_NUM_CACHES];
spinlock_t io_req_lock[FNIC_IO_LOCKS]; /* locks for scsi cmnds */
struct work_struct link_work;
struct work_struct frame_work;
struct sk_buff_head frame_queue;
/* copy work queue cache line section */
____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX];
/* completion queue cache line section */
____cacheline_aligned struct vnic_cq cq[FNIC_CQ_MAX];
spinlock_t wq_copy_lock[FNIC_WQ_COPY_MAX];
/* work queue cache line section */
____cacheline_aligned struct vnic_wq wq[FNIC_WQ_MAX];
spinlock_t wq_lock[FNIC_WQ_MAX];
/* receive queue cache line section */
____cacheline_aligned struct vnic_rq rq[FNIC_RQ_MAX];
/* interrupt resource cache line section */
____cacheline_aligned struct vnic_intr intr[FNIC_MSIX_INTR_MAX];
};
extern struct workqueue_struct *fnic_event_queue;
extern struct device_attribute *fnic_attrs[];
void fnic_clear_intr_mode(struct fnic *fnic);
int fnic_set_intr_mode(struct fnic *fnic);
void fnic_free_intr(struct fnic *fnic);
int fnic_request_intr(struct fnic *fnic);
int fnic_send(struct fc_lport *, struct fc_frame *);
void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf);
void fnic_handle_frame(struct work_struct *work);
void fnic_handle_link(struct work_struct *work);
int fnic_rq_cmpl_handler(struct fnic *fnic, int);
int fnic_alloc_rq_frame(struct vnic_rq *rq);
void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp);
int fnic_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *));
int fnic_abort_cmd(struct scsi_cmnd *);
int fnic_device_reset(struct scsi_cmnd *);
int fnic_host_reset(struct scsi_cmnd *);
int fnic_reset(struct Scsi_Host *);
void fnic_scsi_cleanup(struct fc_lport *);
void fnic_scsi_abort_io(struct fc_lport *);
void fnic_empty_scsi_cleanup(struct fc_lport *);
void fnic_exch_mgr_reset(struct fc_lport *, u32, u32);
int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int);
int fnic_wq_cmpl_handler(struct fnic *fnic, int);
int fnic_flogi_reg_handler(struct fnic *fnic);
void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
struct fcpio_host_req *desc);
int fnic_fw_reset_handler(struct fnic *fnic);
void fnic_terminate_rport_io(struct fc_rport *);
const char *fnic_state_to_str(unsigned int state);
void fnic_log_q_error(struct fnic *fnic);
void fnic_handle_link_event(struct fnic *fnic);
#endif /* _FNIC_H_ */

drivers/scsi/fnic/fnic_attrs.c (new file)
@@ -0,0 +1,56 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/string.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include "fnic.h"
static ssize_t fnic_show_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fc_lport *lp = shost_priv(class_to_shost(dev));
struct fnic *fnic = lport_priv(lp);
return snprintf(buf, PAGE_SIZE, "%s\n", fnic_state_str[fnic->state]);
}
static ssize_t fnic_show_drv_version(struct device *dev,
struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION);
}
static ssize_t fnic_show_link_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fc_lport *lp = shost_priv(class_to_shost(dev));
return snprintf(buf, PAGE_SIZE, "%s\n", (lp->link_up)
? "Link Up" : "Link Down");
}
static DEVICE_ATTR(fnic_state, S_IRUGO, fnic_show_state, NULL);
static DEVICE_ATTR(drv_version, S_IRUGO, fnic_show_drv_version, NULL);
static DEVICE_ATTR(link_state, S_IRUGO, fnic_show_link_state, NULL);
struct device_attribute *fnic_attrs[] = {
&dev_attr_fnic_state,
&dev_attr_drv_version,
&dev_attr_link_state,
NULL,
};

drivers/scsi/fnic/fnic_fcs.c (new file)
@@ -0,0 +1,742 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/fc_frame.h>
#include <scsi/libfc.h>
#include "fnic_io.h"
#include "fnic.h"
#include "cq_enet_desc.h"
#include "cq_exch_desc.h"
struct workqueue_struct *fnic_event_queue;
void fnic_handle_link(struct work_struct *work)
{
struct fnic *fnic = container_of(work, struct fnic, link_work);
unsigned long flags;
int old_link_status;
u32 old_link_down_cnt;
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (fnic->stop_rx_link_events) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return;
}
old_link_down_cnt = fnic->link_down_cnt;
old_link_status = fnic->link_status;
fnic->link_status = vnic_dev_link_status(fnic->vdev);
fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);
if (old_link_status == fnic->link_status) {
if (!fnic->link_status)
/* DOWN -> DOWN */
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
else {
if (old_link_down_cnt != fnic->link_down_cnt) {
/* UP -> DOWN -> UP */
fnic->lport->host_stats.link_failure_count++;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"link down\n");
fc_linkdown(fnic->lport);
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"link up\n");
fc_linkup(fnic->lport);
} else
/* UP -> UP */
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
} else if (fnic->link_status) {
/* DOWN -> UP */
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
fc_linkup(fnic->lport);
} else {
/* UP -> DOWN */
fnic->lport->host_stats.link_failure_count++;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
fc_linkdown(fnic->lport);
}
}
/*
* This function passes incoming fabric frames to libFC
*/
void fnic_handle_frame(struct work_struct *work)
{
struct fnic *fnic = container_of(work, struct fnic, frame_work);
struct fc_lport *lp = fnic->lport;
unsigned long flags;
struct sk_buff *skb;
struct fc_frame *fp;
while ((skb = skb_dequeue(&fnic->frame_queue))) {
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (fnic->stop_rx_link_events) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
dev_kfree_skb(skb);
return;
}
fp = (struct fc_frame *)skb;
/* if Flogi resp frame, register the address */
if (fr_flags(fp)) {
vnic_dev_add_addr(fnic->vdev,
fnic->data_src_addr);
fr_flags(fp) = 0;
}
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
fc_exch_recv(lp, lp->emp, fp);
}
}
static inline void fnic_import_rq_fc_frame(struct sk_buff *skb,
u32 len, u8 sof, u8 eof)
{
struct fc_frame *fp = (struct fc_frame *)skb;
skb_trim(skb, len);
fr_eof(fp) = eof;
fr_sof(fp) = sof;
}
static inline int fnic_import_rq_eth_pkt(struct sk_buff *skb, u32 len)
{
struct fc_frame *fp;
struct ethhdr *eh;
struct vlan_ethhdr *vh;
struct fcoe_hdr *fcoe_hdr;
struct fcoe_crc_eof *ft;
u32 transport_len = 0;
eh = (struct ethhdr *)skb->data;
vh = (struct vlan_ethhdr *)skb->data;
if (vh->h_vlan_proto == htons(ETH_P_8021Q) &&
vh->h_vlan_encapsulated_proto == htons(ETH_P_FCOE)) {
skb_pull(skb, sizeof(struct vlan_ethhdr));
transport_len += sizeof(struct vlan_ethhdr);
} else if (eh->h_proto == htons(ETH_P_FCOE)) {
transport_len += sizeof(struct ethhdr);
skb_pull(skb, sizeof(struct ethhdr));
} else
return -1;
fcoe_hdr = (struct fcoe_hdr *)skb->data;
if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
return -1;
fp = (struct fc_frame *)skb;
fc_frame_init(fp);
fr_sof(fp) = fcoe_hdr->fcoe_sof;
skb_pull(skb, sizeof(struct fcoe_hdr));
transport_len += sizeof(struct fcoe_hdr);
ft = (struct fcoe_crc_eof *)(skb->data + len -
transport_len - sizeof(*ft));
fr_eof(fp) = ft->fcoe_eof;
skb_trim(skb, len - transport_len - sizeof(*ft));
return 0;
}
static inline int fnic_handle_flogi_resp(struct fnic *fnic,
struct fc_frame *fp)
{
u8 mac[ETH_ALEN] = FC_FCOE_FLOGI_MAC;
struct ethhdr *eth_hdr;
struct fc_frame_header *fh;
int ret = 0;
unsigned long flags;
struct fc_frame *old_flogi_resp = NULL;
fh = (struct fc_frame_header *)fr_hdr(fp);
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (fnic->state == FNIC_IN_ETH_MODE) {
/*
* Check if oxid matches on taking the lock. A new Flogi
* issued by libFC might have changed the fnic cached oxid
*/
if (fnic->flogi_oxid != ntohs(fh->fh_ox_id)) {
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"Flogi response oxid not"
" matching cached oxid, dropping frame"
"\n");
ret = -1;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
dev_kfree_skb_irq(fp_skb(fp));
goto handle_flogi_resp_end;
}
/* Drop older cached flogi response frame, cache this frame */
old_flogi_resp = fnic->flogi_resp;
fnic->flogi_resp = fp;
fnic->flogi_oxid = FC_XID_UNKNOWN;
/*
* This frame is part of the FLOGI exchange: get the source MAC
* address from it. If the source MAC is FC-OUI based, set the
* address mode flag to use the FC-OUI base for the destination
* MAC address; otherwise store the FCoE gateway address.
*/
eth_hdr = (struct ethhdr *)skb_mac_header(fp_skb(fp));
memcpy(mac, eth_hdr->h_source, ETH_ALEN);
if (ntoh24(mac) == FC_FCOE_OUI)
fnic->fcoui_mode = 1;
else {
fnic->fcoui_mode = 0;
memcpy(fnic->dest_addr, mac, ETH_ALEN);
}
/*
* Except for the FLOGI frame, all outbound frames from us have the
* Eth source address FC_FCOE_OUI | our_sid. The FLOGI frame uses
* the vNIC MAC address as the Eth source address.
*/
fc_fcoe_set_mac(fnic->data_src_addr, fh->fh_d_id);
/* We get our s_id from the d_id of the flogi resp frame */
fnic->s_id = ntoh24(fh->fh_d_id);
/* Change state to reflect transition from Eth to FC mode */
fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
} else {
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"Unexpected fnic state %s while"
" processing flogi resp\n",
fnic_state_to_str(fnic->state));
ret = -1;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
dev_kfree_skb_irq(fp_skb(fp));
goto handle_flogi_resp_end;
}
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
/* Drop older cached frame */
if (old_flogi_resp)
dev_kfree_skb_irq(fp_skb(old_flogi_resp));
/*
* Send the FLOGI registration request to the firmware; this will
* put the fnic in FC mode.
*/
ret = fnic_flogi_reg_handler(fnic);
if (ret < 0) {
int free_fp = 1;
spin_lock_irqsave(&fnic->fnic_lock, flags);
/*
* Free the frame if no other thread is pointing to it.
*/
if (fnic->flogi_resp != fp)
free_fp = 0;
else
fnic->flogi_resp = NULL;
if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
fnic->state = FNIC_IN_ETH_MODE;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
if (free_fp)
dev_kfree_skb_irq(fp_skb(fp));
}
handle_flogi_resp_end:
return ret;
}
/* Returns 1 for a response that matches cached flogi oxid */
static inline int is_matching_flogi_resp_frame(struct fnic *fnic,
struct fc_frame *fp)
{
struct fc_frame_header *fh;
int ret = 0;
u32 f_ctl;
fh = fc_frame_header_get(fp);
f_ctl = ntoh24(fh->fh_f_ctl);
if (fnic->flogi_oxid == ntohs(fh->fh_ox_id) &&
fh->fh_r_ctl == FC_RCTL_ELS_REP &&
(f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) == FC_FC_EX_CTX &&
fh->fh_type == FC_TYPE_ELS)
ret = 1;
return ret;
}
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
*cq_desc, struct vnic_rq_buf *buf,
int skipped __attribute__((unused)),
void *opaque)
{
struct fnic *fnic = vnic_dev_priv(rq->vdev);
struct sk_buff *skb;
struct fc_frame *fp;
unsigned int eth_hdrs_stripped;
u8 type, color, eop, sop, ingress_port, vlan_stripped;
u8 fcoe = 0, fcoe_sof, fcoe_eof;
u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
u8 fcs_ok = 1, packet_error = 0;
u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
u32 rss_hash;
u16 exchange_id, tmpl;
u8 sof = 0;
u8 eof = 0;
u32 fcp_bytes_written = 0;
unsigned long flags;
pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
PCI_DMA_FROMDEVICE);
skb = buf->os_buf;
buf->os_buf = NULL;
cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
if (type == CQ_DESC_TYPE_RQ_FCP) {
cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
&type, &color, &q_number, &completed_index,
&eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
&tmpl, &fcp_bytes_written, &sof, &eof,
&ingress_port, &packet_error,
&fcoe_enc_error, &fcs_ok, &vlan_stripped,
&vlan);
eth_hdrs_stripped = 1;
} else if (type == CQ_DESC_TYPE_RQ_ENET) {
cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
&type, &color, &q_number, &completed_index,
&ingress_port, &fcoe, &eop, &sop,
&rss_type, &csum_not_calc, &rss_hash,
&bytes_written, &packet_error,
&vlan_stripped, &vlan, &checksum,
&fcoe_sof, &fcoe_fc_crc_ok,
&fcoe_enc_error, &fcoe_eof,
&tcp_udp_csum_ok, &udp, &tcp,
&ipv4_csum_ok, &ipv6, &ipv4,
&ipv4_fragment, &fcs_ok);
eth_hdrs_stripped = 0;
} else {
/* wrong CQ type*/
shost_printk(KERN_ERR, fnic->lport->host,
"fnic rq_cmpl wrong cq type x%x\n", type);
goto drop;
}
if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"fnic rq_cmpl fcoe x%x fcsok x%x"
" pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
" x%x\n",
fcoe, fcs_ok, packet_error,
fcoe_fc_crc_ok, fcoe_enc_error);
goto drop;
}
if (eth_hdrs_stripped)
fnic_import_rq_fc_frame(skb, fcp_bytes_written, sof, eof);
else if (fnic_import_rq_eth_pkt(skb, bytes_written))
goto drop;
fp = (struct fc_frame *)skb;
/*
* If frame is an ELS response that matches the cached FLOGI OX_ID,
* and is accept, issue flogi_reg_request copy wq request to firmware
* to register the S_ID and determine whether FC_OUI mode or GW mode.
*/
if (is_matching_flogi_resp_frame(fnic, fp)) {
if (!eth_hdrs_stripped) {
if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
fnic_handle_flogi_resp(fnic, fp);
return;
}
/*
* Recd. Flogi reject. No point registering
* with fw, but forward to libFC
*/
goto forward;
}
goto drop;
}
if (!eth_hdrs_stripped)
goto drop;
forward:
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (fnic->stop_rx_link_events) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
goto drop;
}
/* Use fr_flags to indicate whether this is a successful flogi resp or not */
fr_flags(fp) = 0;
fr_dev(fp) = fnic->lport;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
skb_queue_tail(&fnic->frame_queue, skb);
queue_work(fnic_event_queue, &fnic->frame_work);
return;
drop:
dev_kfree_skb_irq(skb);
}
static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
struct cq_desc *cq_desc, u8 type,
u16 q_number, u16 completed_index,
void *opaque)
{
struct fnic *fnic = vnic_dev_priv(vdev);
vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
NULL);
return 0;
}
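/*
 * fnic_rq_cmpl_handler
 * Services up to rq_work_to_do completions on each RQ completion queue
 * and replenishes the RQ buffers that were consumed
 */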
int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
{
unsigned int tot_rq_work_done = 0, cur_work_done;
unsigned int i;
int err;
for (i = 0; i < fnic->rq_count; i++) {
cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
fnic_rq_cmpl_handler_cont,
NULL);
if (cur_work_done) {
err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
if (err)
shost_printk(KERN_ERR, fnic->lport->host,
"fnic_alloc_rq_frame cant alloc"
" frame\n");
}
tot_rq_work_done += cur_work_done;
}
return tot_rq_work_done;
}
/*
* This function is called once at init time to allocate and fill RQ
* buffers. Subsequently, it is called in the interrupt context after RQ
* buffer processing to replenish the buffers in the RQ
*/
int fnic_alloc_rq_frame(struct vnic_rq *rq)
{
struct fnic *fnic = vnic_dev_priv(rq->vdev);
struct sk_buff *skb;
u16 len;
dma_addr_t pa;
len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
skb = dev_alloc_skb(len);
if (!skb) {
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"Unable to allocate RQ sk_buff\n");
return -ENOMEM;
}
skb_reset_mac_header(skb);
skb_reset_transport_header(skb);
skb_reset_network_header(skb);
skb_put(skb, len);
pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
fnic_queue_rq_desc(rq, skb, pa, len);
return 0;
}
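/*
 * fnic_free_rq_buf
 * Unmaps and frees a posted RQ buffer; used when cleaning up the RQ
 */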
void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
struct fc_frame *fp = buf->os_buf;
struct fnic *fnic = vnic_dev_priv(rq->vdev);
pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
PCI_DMA_FROMDEVICE);
dev_kfree_skb(fp_skb(fp));
buf->os_buf = NULL;
}
static inline int is_flogi_frame(struct fc_frame_header *fh)
{
return fh->fh_r_ctl == FC_RCTL_ELS_REQ && *(u8 *)(fh + 1) == ELS_FLOGI;
}
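/*
 * fnic_send_frame
 * Prepends the Ethernet/VLAN and FCoE headers, fills in the source and
 * destination MAC addresses, maps the frame for DMA and posts it on the
 * raw WQ; returns -1 if no WQ descriptor is available
 */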
int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
{
struct vnic_wq *wq = &fnic->wq[0];
struct sk_buff *skb;
dma_addr_t pa;
struct ethhdr *eth_hdr;
struct vlan_ethhdr *vlan_hdr;
struct fcoe_hdr *fcoe_hdr;
struct fc_frame_header *fh;
u32 tot_len, eth_hdr_len;
int ret = 0;
unsigned long flags;
fh = fc_frame_header_get(fp);
skb = fp_skb(fp);
if (!fnic->vlan_hw_insert) {
eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len);
eth_hdr = (struct ethhdr *)vlan_hdr;
vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
} else {
eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
eth_hdr = (struct ethhdr *)skb_push(skb, eth_hdr_len);
eth_hdr->h_proto = htons(ETH_P_FCOE);
fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
}
if (is_flogi_frame(fh)) {
fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
memcpy(eth_hdr->h_source, fnic->mac_addr, ETH_ALEN);
} else {
if (fnic->fcoui_mode)
fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
else
memcpy(eth_hdr->h_dest, fnic->dest_addr, ETH_ALEN);
memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);
}
tot_len = skb->len;
BUG_ON(tot_len % 4);
memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
fcoe_hdr->fcoe_sof = fr_sof(fp);
if (FC_FCOE_VER)
FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);
pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);
spin_lock_irqsave(&fnic->wq_lock[0], flags);
if (!vnic_wq_desc_avail(wq)) {
pci_unmap_single(fnic->pdev, pa,
tot_len, PCI_DMA_TODEVICE);
ret = -1;
goto fnic_send_frame_end;
}
fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
fnic->vlan_hw_insert, fnic->vlan_id, 1, 1, 1);
fnic_send_frame_end:
spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
if (ret)
dev_kfree_skb_any(fp_skb(fp));
return ret;
}
/*
* fnic_send
* Routine to send a raw frame
*/
int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
{
struct fnic *fnic = lport_priv(lp);
struct fc_frame_header *fh;
int ret = 0;
enum fnic_state old_state;
unsigned long flags;
struct fc_frame *old_flogi = NULL;
struct fc_frame *old_flogi_resp = NULL;
if (fnic->in_remove) {
dev_kfree_skb(fp_skb(fp));
ret = -1;
goto fnic_send_end;
}
fh = fc_frame_header_get(fp);
/* if not a FLOGI frame, send it out; this is the common case */
if (!is_flogi_frame(fh))
return fnic_send_frame(fnic, fp);
/* Flogi frame, now enter the state machine */
spin_lock_irqsave(&fnic->fnic_lock, flags);
again:
/* Get any old cached frames, free them after dropping lock */
old_flogi = fnic->flogi;
fnic->flogi = NULL;
old_flogi_resp = fnic->flogi_resp;
fnic->flogi_resp = NULL;
fnic->flogi_oxid = FC_XID_UNKNOWN;
old_state = fnic->state;
switch (old_state) {
case FNIC_IN_FC_MODE:
case FNIC_IN_ETH_TRANS_FC_MODE:
default:
fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr);
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
if (old_flogi) {
dev_kfree_skb(fp_skb(old_flogi));
old_flogi = NULL;
}
if (old_flogi_resp) {
dev_kfree_skb(fp_skb(old_flogi_resp));
old_flogi_resp = NULL;
}
ret = fnic_fw_reset_handler(fnic);
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
goto again;
if (ret) {
fnic->state = old_state;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
dev_kfree_skb(fp_skb(fp));
goto fnic_send_end;
}
old_flogi = fnic->flogi;
fnic->flogi = fp;
fnic->flogi_oxid = ntohs(fh->fh_ox_id);
old_flogi_resp = fnic->flogi_resp;
fnic->flogi_resp = NULL;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
break;
case FNIC_IN_FC_TRANS_ETH_MODE:
/*
 * A reset is pending with the firmware. Store the flogi
 * and its oxid. The transition out of this state happens
 * only when the firmware completes the reset, either with
 * success or failure. On success, transition to
 * FNIC_IN_ETH_MODE; on failure, transition to
 * FNIC_IN_FC_MODE
 */
fnic->flogi = fp;
fnic->flogi_oxid = ntohs(fh->fh_ox_id);
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
break;
case FNIC_IN_ETH_MODE:
/*
 * The fw/hw is already in eth mode. Store the oxid,
 * and send the flogi frame out. The transition out of this
 * state happens only when we receive a flogi response from
 * the network, and its oxid matches the oxid cached when the
 * flogi frame was sent out. If they match, then we issue
 * a flogi_reg request and transition to state
 * FNIC_IN_ETH_TRANS_FC_MODE
 */
fnic->flogi_oxid = ntohs(fh->fh_ox_id);
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
ret = fnic_send_frame(fnic, fp);
break;
}
fnic_send_end:
if (old_flogi)
dev_kfree_skb(fp_skb(old_flogi));
if (old_flogi_resp)
dev_kfree_skb(fp_skb(old_flogi_resp));
return ret;
}
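/*
 * fnic_wq_complete_frame_send
 * WQ completion callback: unmaps and frees the transmitted frame
 */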
static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
struct cq_desc *cq_desc,
struct vnic_wq_buf *buf, void *opaque)
{
struct sk_buff *skb = buf->os_buf;
struct fc_frame *fp = (struct fc_frame *)skb;
struct fnic *fnic = vnic_dev_priv(wq->vdev);
pci_unmap_single(fnic->pdev, buf->dma_addr,
buf->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq(fp_skb(fp));
buf->os_buf = NULL;
}
static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
struct cq_desc *cq_desc, u8 type,
u16 q_number, u16 completed_index,
void *opaque)
{
struct fnic *fnic = vnic_dev_priv(vdev);
unsigned long flags;
spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
fnic_wq_complete_frame_send, NULL);
spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);
return 0;
}
int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
{
unsigned int wq_work_done = 0;
unsigned int i;
for (i = 0; i < fnic->raw_wq_count; i++) {
wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
work_to_do,
fnic_wq_cmpl_handler_cont,
NULL);
}
return wq_work_done;
}
void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
struct fc_frame *fp = buf->os_buf;
struct fnic *fnic = vnic_dev_priv(wq->vdev);
pci_unmap_single(fnic->pdev, buf->dma_addr,
buf->len, PCI_DMA_TODEVICE);
dev_kfree_skb(fp_skb(fp));
buf->os_buf = NULL;
}

View file

@ -0,0 +1,67 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _FNIC_IO_H_
#define _FNIC_IO_H_
#include <scsi/fc/fc_fcp.h>
#define FNIC_DFLT_SG_DESC_CNT 32
#define FNIC_MAX_SG_DESC_CNT 1024 /* Maximum descriptors per sgl */
#define FNIC_SG_DESC_ALIGN 16 /* Descriptor address alignment */
struct host_sg_desc {
__le64 addr;
__le32 len;
u32 _resvd;
};
struct fnic_dflt_sgl_list {
struct host_sg_desc sg_desc[FNIC_DFLT_SG_DESC_CNT];
};
struct fnic_sgl_list {
struct host_sg_desc sg_desc[FNIC_MAX_SG_DESC_CNT];
};
enum fnic_sgl_list_type {
FNIC_SGL_CACHE_DFLT = 0, /* cache with default size sgl */
FNIC_SGL_CACHE_MAX, /* cache with max size sgl */
FNIC_SGL_NUM_CACHES /* number of sgl caches */
};
enum fnic_ioreq_state {
FNIC_IOREQ_CMD_PENDING = 0,
FNIC_IOREQ_ABTS_PENDING,
FNIC_IOREQ_ABTS_COMPLETE,
FNIC_IOREQ_CMD_COMPLETE,
};
struct fnic_io_req {
struct host_sg_desc *sgl_list; /* sgl list */
void *sgl_list_alloc; /* sgl list address used for free */
dma_addr_t sense_buf_pa; /* dma address for sense buffer*/
dma_addr_t sgl_list_pa; /* dma address for sgl list */
u16 sgl_cnt;
u8 sgl_type; /* device DMA descriptor list type */
u8 io_completed:1; /* set to 1 when fw completes IO */
u32 port_id; /* remote port DID */
struct completion *abts_done; /* completion for abts */
struct completion *dr_done; /* completion for device reset */
};
#endif /* _FNIC_IO_H_ */

View file

@ -0,0 +1,332 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "fnic_io.h"
#include "fnic.h"
static irqreturn_t fnic_isr_legacy(int irq, void *data)
{
struct fnic *fnic = data;
u32 pba;
unsigned long work_done = 0;
pba = vnic_intr_legacy_pba(fnic->legacy_pba);
if (!pba)
return IRQ_NONE;
if (pba & (1 << FNIC_INTX_NOTIFY)) {
vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_NOTIFY]);
fnic_handle_link_event(fnic);
}
if (pba & (1 << FNIC_INTX_ERR)) {
vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_ERR]);
fnic_log_q_error(fnic);
}
if (pba & (1 << FNIC_INTX_WQ_RQ_COPYWQ)) {
work_done += fnic_wq_copy_cmpl_handler(fnic, 8);
work_done += fnic_wq_cmpl_handler(fnic, 4);
work_done += fnic_rq_cmpl_handler(fnic, 4);
vnic_intr_return_credits(&fnic->intr[FNIC_INTX_WQ_RQ_COPYWQ],
work_done,
1 /* unmask intr */,
1 /* reset intr timer */);
}
return IRQ_HANDLED;
}
static irqreturn_t fnic_isr_msi(int irq, void *data)
{
struct fnic *fnic = data;
unsigned long work_done = 0;
work_done += fnic_wq_copy_cmpl_handler(fnic, 8);
work_done += fnic_wq_cmpl_handler(fnic, 4);
work_done += fnic_rq_cmpl_handler(fnic, 4);
vnic_intr_return_credits(&fnic->intr[0],
work_done,
1 /* unmask intr */,
1 /* reset intr timer */);
return IRQ_HANDLED;
}
static irqreturn_t fnic_isr_msix_rq(int irq, void *data)
{
struct fnic *fnic = data;
unsigned long rq_work_done = 0;
rq_work_done = fnic_rq_cmpl_handler(fnic, 4);
vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_RQ],
rq_work_done,
1 /* unmask intr */,
1 /* reset intr timer */);
return IRQ_HANDLED;
}
static irqreturn_t fnic_isr_msix_wq(int irq, void *data)
{
struct fnic *fnic = data;
unsigned long wq_work_done = 0;
wq_work_done = fnic_wq_cmpl_handler(fnic, 4);
vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ],
wq_work_done,
1 /* unmask intr */,
1 /* reset intr timer */);
return IRQ_HANDLED;
}
static irqreturn_t fnic_isr_msix_wq_copy(int irq, void *data)
{
struct fnic *fnic = data;
unsigned long wq_copy_work_done = 0;
wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, 8);
vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY],
wq_copy_work_done,
1 /* unmask intr */,
1 /* reset intr timer */);
return IRQ_HANDLED;
}
static irqreturn_t fnic_isr_msix_err_notify(int irq, void *data)
{
struct fnic *fnic = data;
vnic_intr_return_all_credits(&fnic->intr[FNIC_MSIX_ERR_NOTIFY]);
fnic_log_q_error(fnic);
fnic_handle_link_event(fnic);
return IRQ_HANDLED;
}
void fnic_free_intr(struct fnic *fnic)
{
int i;
switch (vnic_dev_get_intr_mode(fnic->vdev)) {
case VNIC_DEV_INTR_MODE_INTX:
case VNIC_DEV_INTR_MODE_MSI:
free_irq(fnic->pdev->irq, fnic);
break;
case VNIC_DEV_INTR_MODE_MSIX:
for (i = 0; i < ARRAY_SIZE(fnic->msix); i++)
if (fnic->msix[i].requested)
free_irq(fnic->msix_entry[i].vector,
fnic->msix[i].devid);
break;
default:
break;
}
}
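/*
 * fnic_request_intr
 * Registers interrupt handlers for the current interrupt mode: a shared
 * handler for INTx, a single handler for MSI, or one handler per vector
 * (RQ, WQ, copy WQ, error/notify) for MSI-X
 */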
int fnic_request_intr(struct fnic *fnic)
{
int err = 0;
int i;
switch (vnic_dev_get_intr_mode(fnic->vdev)) {
case VNIC_DEV_INTR_MODE_INTX:
err = request_irq(fnic->pdev->irq, &fnic_isr_legacy,
IRQF_SHARED, DRV_NAME, fnic);
break;
case VNIC_DEV_INTR_MODE_MSI:
err = request_irq(fnic->pdev->irq, &fnic_isr_msi,
0, fnic->name, fnic);
break;
case VNIC_DEV_INTR_MODE_MSIX:
sprintf(fnic->msix[FNIC_MSIX_RQ].devname,
"%.11s-fcs-rq", fnic->name);
fnic->msix[FNIC_MSIX_RQ].isr = fnic_isr_msix_rq;
fnic->msix[FNIC_MSIX_RQ].devid = fnic;
sprintf(fnic->msix[FNIC_MSIX_WQ].devname,
"%.11s-fcs-wq", fnic->name);
fnic->msix[FNIC_MSIX_WQ].isr = fnic_isr_msix_wq;
fnic->msix[FNIC_MSIX_WQ].devid = fnic;
sprintf(fnic->msix[FNIC_MSIX_WQ_COPY].devname,
"%.11s-scsi-wq", fnic->name);
fnic->msix[FNIC_MSIX_WQ_COPY].isr = fnic_isr_msix_wq_copy;
fnic->msix[FNIC_MSIX_WQ_COPY].devid = fnic;
sprintf(fnic->msix[FNIC_MSIX_ERR_NOTIFY].devname,
"%.11s-err-notify", fnic->name);
fnic->msix[FNIC_MSIX_ERR_NOTIFY].isr =
fnic_isr_msix_err_notify;
fnic->msix[FNIC_MSIX_ERR_NOTIFY].devid = fnic;
for (i = 0; i < ARRAY_SIZE(fnic->msix); i++) {
err = request_irq(fnic->msix_entry[i].vector,
fnic->msix[i].isr, 0,
fnic->msix[i].devname,
fnic->msix[i].devid);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"MSIX: request_irq"
" failed %d\n", err);
fnic_free_intr(fnic);
break;
}
fnic->msix[i].requested = 1;
}
break;
default:
break;
}
return err;
}
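/*
 * fnic_set_intr_mode
 * Selects the interrupt mode, trying MSI-X, then MSI, then INTx, based
 * on the available device resources, and sets the queue and interrupt
 * counts to match the chosen mode
 */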
int fnic_set_intr_mode(struct fnic *fnic)
{
unsigned int n = ARRAY_SIZE(fnic->rq);
unsigned int m = ARRAY_SIZE(fnic->wq);
unsigned int o = ARRAY_SIZE(fnic->wq_copy);
unsigned int i;
/*
* Set interrupt mode (INTx, MSI, MSI-X) depending on
* system capabilities.
*
* Try MSI-X first
*
* We need n RQs, m WQs, o Copy WQs, n+m+o CQs, and n+m+o+1 INTRs
* (last INTR is used for WQ/RQ errors and notification area)
*/
BUG_ON(ARRAY_SIZE(fnic->msix_entry) < n + m + o + 1);
for (i = 0; i < n + m + o + 1; i++)
fnic->msix_entry[i].entry = i;
if (fnic->rq_count >= n &&
fnic->raw_wq_count >= m &&
fnic->wq_copy_count >= o &&
fnic->cq_count >= n + m + o) {
if (!pci_enable_msix(fnic->pdev, fnic->msix_entry,
n + m + o + 1)) {
fnic->rq_count = n;
fnic->raw_wq_count = m;
fnic->wq_copy_count = o;
fnic->wq_count = m + o;
fnic->cq_count = n + m + o;
fnic->intr_count = n + m + o + 1;
fnic->err_intr_offset = FNIC_MSIX_ERR_NOTIFY;
FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
"Using MSI-X Interrupts\n");
vnic_dev_set_intr_mode(fnic->vdev,
VNIC_DEV_INTR_MODE_MSIX);
return 0;
}
}
/*
* Next try MSI
* We need 1 RQ, 1 WQ, 1 WQ_COPY, 3 CQs, and 1 INTR
*/
if (fnic->rq_count >= 1 &&
fnic->raw_wq_count >= 1 &&
fnic->wq_copy_count >= 1 &&
fnic->cq_count >= 3 &&
fnic->intr_count >= 1 &&
!pci_enable_msi(fnic->pdev)) {
fnic->rq_count = 1;
fnic->raw_wq_count = 1;
fnic->wq_copy_count = 1;
fnic->wq_count = 2;
fnic->cq_count = 3;
fnic->intr_count = 1;
fnic->err_intr_offset = 0;
FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
"Using MSI Interrupts\n");
vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSI);
return 0;
}
/*
* Next try INTx
* We need 1 RQ, 1 WQ, 1 WQ_COPY, 3 CQs, and 3 INTRs
* 1 INTR is used for all 3 queues, 1 INTR for queue errors
* 1 INTR for notification area
*/
if (fnic->rq_count >= 1 &&
fnic->raw_wq_count >= 1 &&
fnic->wq_copy_count >= 1 &&
fnic->cq_count >= 3 &&
fnic->intr_count >= 3) {
fnic->rq_count = 1;
fnic->raw_wq_count = 1;
fnic->wq_copy_count = 1;
fnic->cq_count = 3;
fnic->intr_count = 3;
FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
"Using Legacy Interrupts\n");
vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX);
return 0;
}
vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
return -EINVAL;
}
void fnic_clear_intr_mode(struct fnic *fnic)
{
switch (vnic_dev_get_intr_mode(fnic->vdev)) {
case VNIC_DEV_INTR_MODE_MSIX:
pci_disable_msix(fnic->pdev);
break;
case VNIC_DEV_INTR_MODE_MSI:
pci_disable_msi(fnic->pdev);
break;
default:
break;
}
vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX);
}

View file

@ -0,0 +1,942 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "fnic_io.h"
#include "fnic.h"
#define PCI_DEVICE_ID_CISCO_FNIC 0x0045
/* Timer to poll notification area for events. Used for MSI interrupts */
#define FNIC_NOTIFY_TIMER_PERIOD (2 * HZ)
static struct kmem_cache *fnic_sgl_cache[FNIC_SGL_NUM_CACHES];
static struct kmem_cache *fnic_io_req_cache;
LIST_HEAD(fnic_list);
DEFINE_SPINLOCK(fnic_list_lock);
/* Supported devices by fnic module */
static struct pci_device_id fnic_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_FNIC) },
{ 0, }
};
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Abhijeet Joglekar <abjoglek@cisco.com>, "
"Joseph R. Eykholt <jeykholt@cisco.com>");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, fnic_id_table);
unsigned int fnic_log_level;
module_param(fnic_log_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels");
static struct libfc_function_template fnic_transport_template = {
.frame_send = fnic_send,
.fcp_abort_io = fnic_empty_scsi_cleanup,
.fcp_cleanup = fnic_empty_scsi_cleanup,
.exch_mgr_reset = fnic_exch_mgr_reset
};
static int fnic_slave_alloc(struct scsi_device *sdev)
{
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
struct fc_lport *lp = shost_priv(sdev->host);
struct fnic *fnic = lport_priv(lp);
sdev->tagged_supported = 1;
if (!rport || fc_remote_port_chkready(rport))
return -ENXIO;
scsi_activate_tcq(sdev, FNIC_DFLT_QUEUE_DEPTH);
rport->dev_loss_tmo = fnic->config.port_down_timeout / 1000;
return 0;
}
static struct scsi_host_template fnic_host_template = {
.module = THIS_MODULE,
.name = DRV_NAME,
.queuecommand = fnic_queuecommand,
.eh_abort_handler = fnic_abort_cmd,
.eh_device_reset_handler = fnic_device_reset,
.eh_host_reset_handler = fnic_host_reset,
.slave_alloc = fnic_slave_alloc,
.change_queue_depth = fc_change_queue_depth,
.change_queue_type = fc_change_queue_type,
.this_id = -1,
.cmd_per_lun = 3,
.can_queue = FNIC_MAX_IO_REQ,
.use_clustering = ENABLE_CLUSTERING,
.sg_tablesize = FNIC_MAX_SG_DESC_CNT,
.max_sectors = 0xffff,
.shost_attrs = fnic_attrs,
};
static void fnic_get_host_speed(struct Scsi_Host *shost);
static struct scsi_transport_template *fnic_fc_transport;
static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *);
static struct fc_function_template fnic_fc_functions = {
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_supported_classes = 1,
.show_host_supported_fc4s = 1,
.show_host_active_fc4s = 1,
.show_host_maxframe_size = 1,
.show_host_port_id = 1,
.show_host_supported_speeds = 1,
.get_host_speed = fnic_get_host_speed,
.show_host_speed = 1,
.show_host_port_type = 1,
.get_host_port_state = fc_get_host_port_state,
.show_host_port_state = 1,
.show_host_symbolic_name = 1,
.show_rport_maxframe_size = 1,
.show_rport_supported_classes = 1,
.show_host_fabric_name = 1,
.show_starget_node_name = 1,
.show_starget_port_name = 1,
.show_starget_port_id = 1,
.show_rport_dev_loss_tmo = 1,
.issue_fc_host_lip = fnic_reset,
.get_fc_host_stats = fnic_get_stats,
.dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
.terminate_rport_io = fnic_terminate_rport_io,
};
static void fnic_get_host_speed(struct Scsi_Host *shost)
{
struct fc_lport *lp = shost_priv(shost);
struct fnic *fnic = lport_priv(lp);
u32 port_speed = vnic_dev_port_speed(fnic->vdev);
/* Add in other values as they get defined in fw */
switch (port_speed) {
case 10000:
fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
break;
default:
fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
break;
}
}
static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host)
{
int ret;
struct fc_lport *lp = shost_priv(host);
struct fnic *fnic = lport_priv(lp);
struct fc_host_statistics *stats = &lp->host_stats;
struct vnic_stats *vs;
unsigned long flags;
if (time_before(jiffies, fnic->stats_time + HZ / FNIC_STATS_RATE_LIMIT))
return stats;
fnic->stats_time = jiffies;
spin_lock_irqsave(&fnic->fnic_lock, flags);
ret = vnic_dev_stats_dump(fnic->vdev, &fnic->stats);
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
if (ret) {
FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host,
"fnic: Get vnic stats failed"
" 0x%x", ret);
return stats;
}
vs = fnic->stats;
stats->tx_frames = vs->tx.tx_unicast_frames_ok;
stats->tx_words = vs->tx.tx_unicast_bytes_ok / 4;
stats->rx_frames = vs->rx.rx_unicast_frames_ok;
stats->rx_words = vs->rx.rx_unicast_bytes_ok / 4;
stats->error_frames = vs->tx.tx_errors + vs->rx.rx_errors;
stats->dumped_frames = vs->tx.tx_drops + vs->rx.rx_drop;
stats->invalid_crc_count = vs->rx.rx_crc_errors;
stats->seconds_since_last_reset = (jiffies - lp->boot_time) / HZ;
stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000);
stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000);
return stats;
}
void fnic_log_q_error(struct fnic *fnic)
{
unsigned int i;
u32 error_status;
for (i = 0; i < fnic->raw_wq_count; i++) {
error_status = ioread32(&fnic->wq[i].ctrl->error_status);
if (error_status)
shost_printk(KERN_ERR, fnic->lport->host,
"WQ[%d] error_status"
" %d\n", i, error_status);
}
for (i = 0; i < fnic->rq_count; i++) {
error_status = ioread32(&fnic->rq[i].ctrl->error_status);
if (error_status)
shost_printk(KERN_ERR, fnic->lport->host,
"RQ[%d] error_status"
" %d\n", i, error_status);
}
for (i = 0; i < fnic->wq_copy_count; i++) {
error_status = ioread32(&fnic->wq_copy[i].ctrl->error_status);
if (error_status)
shost_printk(KERN_ERR, fnic->lport->host,
"CWQ[%d] error_status"
" %d\n", i, error_status);
}
}
void fnic_handle_link_event(struct fnic *fnic)
{
unsigned long flags;
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (fnic->stop_rx_link_events) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return;
}
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
queue_work(fnic_event_queue, &fnic->link_work);
}
static int fnic_notify_set(struct fnic *fnic)
{
int err;
switch (vnic_dev_get_intr_mode(fnic->vdev)) {
case VNIC_DEV_INTR_MODE_INTX:
err = vnic_dev_notify_set(fnic->vdev, FNIC_INTX_NOTIFY);
break;
case VNIC_DEV_INTR_MODE_MSI:
err = vnic_dev_notify_set(fnic->vdev, -1);
break;
case VNIC_DEV_INTR_MODE_MSIX:
err = vnic_dev_notify_set(fnic->vdev, FNIC_MSIX_ERR_NOTIFY);
break;
default:
shost_printk(KERN_ERR, fnic->lport->host,
"Interrupt mode should be set up"
" before devcmd notify set %d\n",
vnic_dev_get_intr_mode(fnic->vdev));
err = -1;
break;
}
return err;
}
static void fnic_notify_timer(unsigned long data)
{
struct fnic *fnic = (struct fnic *)data;
fnic_handle_link_event(fnic);
mod_timer(&fnic->notify_timer,
round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD));
}
static void fnic_notify_timer_start(struct fnic *fnic)
{
switch (vnic_dev_get_intr_mode(fnic->vdev)) {
case VNIC_DEV_INTR_MODE_MSI:
/*
* Schedule first timeout immediately. The driver is
* initialized and ready to look for a link-up notification
*/
mod_timer(&fnic->notify_timer, jiffies);
break;
default:
/* Using intr for notification for INTx/MSI-X */
break;
};
}
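/*
 * fnic_dev_wait
 * Issues a device command via the start callback, then polls the
 * finished callback every 100ms for up to two seconds
 */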
static int fnic_dev_wait(struct vnic_dev *vdev,
int (*start)(struct vnic_dev *, int),
int (*finished)(struct vnic_dev *, int *),
int arg)
{
unsigned long time;
int done;
int err;
err = start(vdev, arg);
if (err)
return err;
/* Wait for func to complete...2 seconds max */
time = jiffies + (HZ * 2);
do {
err = finished(vdev, &done);
if (err)
return err;
if (done)
return 0;
schedule_timeout_uninterruptible(HZ / 10);
} while (time_after(time, jiffies));
return -ETIMEDOUT;
}
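/*
 * fnic_cleanup
 * Quiesces the device: disables the vNIC, interrupts and all queues,
 * drains completed CQ entries, cleans up posted descriptors, frees any
 * cached FLOGI frames and destroys the I/O mempools
 */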
static int fnic_cleanup(struct fnic *fnic)
{
unsigned int i;
int err;
unsigned long flags;
struct fc_frame *flogi = NULL;
struct fc_frame *flogi_resp = NULL;
vnic_dev_disable(fnic->vdev);
for (i = 0; i < fnic->intr_count; i++)
vnic_intr_mask(&fnic->intr[i]);
for (i = 0; i < fnic->rq_count; i++) {
err = vnic_rq_disable(&fnic->rq[i]);
if (err)
return err;
}
for (i = 0; i < fnic->raw_wq_count; i++) {
err = vnic_wq_disable(&fnic->wq[i]);
if (err)
return err;
}
for (i = 0; i < fnic->wq_copy_count; i++) {
err = vnic_wq_copy_disable(&fnic->wq_copy[i]);
if (err)
return err;
}
/* Clean up completed IOs and FCS frames */
fnic_wq_copy_cmpl_handler(fnic, -1);
fnic_wq_cmpl_handler(fnic, -1);
fnic_rq_cmpl_handler(fnic, -1);
/* Clean up the IOs and FCS frames that have not completed */
for (i = 0; i < fnic->raw_wq_count; i++)
vnic_wq_clean(&fnic->wq[i], fnic_free_wq_buf);
for (i = 0; i < fnic->rq_count; i++)
vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
for (i = 0; i < fnic->wq_copy_count; i++)
vnic_wq_copy_clean(&fnic->wq_copy[i],
fnic_wq_copy_cleanup_handler);
for (i = 0; i < fnic->cq_count; i++)
vnic_cq_clean(&fnic->cq[i]);
for (i = 0; i < fnic->intr_count; i++)
vnic_intr_clean(&fnic->intr[i]);
/*
* Remove cached flogi and flogi resp frames if any
* These frames are not in any queue, and therefore queue
* cleanup does not clean them. So clean them explicitly
*/
spin_lock_irqsave(&fnic->fnic_lock, flags);
flogi = fnic->flogi;
fnic->flogi = NULL;
flogi_resp = fnic->flogi_resp;
fnic->flogi_resp = NULL;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
if (flogi)
dev_kfree_skb(fp_skb(flogi));
if (flogi_resp)
dev_kfree_skb(fp_skb(flogi_resp));
mempool_destroy(fnic->io_req_pool);
for (i = 0; i < FNIC_SGL_NUM_CACHES; i++)
mempool_destroy(fnic->io_sgl_pool[i]);
return 0;
}
static void fnic_iounmap(struct fnic *fnic)
{
if (fnic->bar0.vaddr)
iounmap(fnic->bar0.vaddr);
}
/*
* Allocate element for mempools requiring GFP_DMA flag.
* Otherwise, checks in kmem_flagcheck() hit BUG_ON().
*/
static void *fnic_alloc_slab_dma(gfp_t gfp_mask, void *pool_data)
{
struct kmem_cache *mem = pool_data;
return kmem_cache_alloc(mem, gfp_mask | GFP_ATOMIC | GFP_DMA);
}
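/*
 * fnic_probe
 * PCI probe routine: allocates the SCSI host and local port, sets up
 * PCI/DMA resources, registers with the vNIC device, allocates queues
 * and interrupts, initializes libFC and starts fabric login
 */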
static int __devinit fnic_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct Scsi_Host *host;
struct fc_lport *lp;
struct fnic *fnic;
mempool_t *pool;
int err;
int i;
unsigned long flags;
/*
* Allocate SCSI Host and set up association between host,
* local port, and fnic
*/
host = scsi_host_alloc(&fnic_host_template,
sizeof(struct fc_lport) + sizeof(struct fnic));
if (!host) {
printk(KERN_ERR PFX "Unable to alloc SCSI host\n");
err = -ENOMEM;
goto err_out;
}
lp = shost_priv(host);
lp->host = host;
fnic = lport_priv(lp);
fnic->lport = lp;
snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
host->host_no);
host->transportt = fnic_fc_transport;
err = scsi_init_shared_tag_map(host, FNIC_MAX_IO_REQ);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"Unable to alloc shared tag map\n");
goto err_out_free_hba;
}
/* Setup PCI resources */
pci_set_drvdata(pdev, fnic);
fnic->pdev = pdev;
err = pci_enable_device(pdev);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"Cannot enable PCI device, aborting.\n");
goto err_out_free_hba;
}
err = pci_request_regions(pdev, DRV_NAME);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"Cannot enable PCI resources, aborting\n");
goto err_out_disable_device;
}
pci_set_master(pdev);
/* Query PCI controller on system for DMA addressing
* limitation for the device. Try 40-bit first, and
* fall back to 32-bit.
*/
err = pci_set_dma_mask(pdev, DMA_40BIT_MASK);
if (err) {
err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"No usable DMA configuration "
"aborting\n");
goto err_out_release_regions;
}
err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"Unable to obtain 32-bit DMA "
"for consistent allocations, aborting.\n");
goto err_out_release_regions;
}
} else {
err = pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"Unable to obtain 40-bit DMA "
"for consistent allocations, aborting.\n");
goto err_out_release_regions;
}
}
/* Map vNIC resources from BAR0 */
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
shost_printk(KERN_ERR, fnic->lport->host,
"BAR0 not memory-map'able, aborting.\n");
err = -ENODEV;
goto err_out_release_regions;
}
fnic->bar0.vaddr = pci_iomap(pdev, 0, 0);
fnic->bar0.bus_addr = pci_resource_start(pdev, 0);
fnic->bar0.len = pci_resource_len(pdev, 0);
if (!fnic->bar0.vaddr) {
shost_printk(KERN_ERR, fnic->lport->host,
"Cannot memory-map BAR0 res hdr, "
"aborting.\n");
err = -ENODEV;
goto err_out_release_regions;
}
fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0);
if (!fnic->vdev) {
shost_printk(KERN_ERR, fnic->lport->host,
"vNIC registration failed, "
"aborting.\n");
err = -ENODEV;
goto err_out_iounmap;
}
err = fnic_dev_wait(fnic->vdev, vnic_dev_open,
vnic_dev_open_done, 0);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"vNIC dev open failed, aborting.\n");
goto err_out_vnic_unregister;
}
err = vnic_dev_init(fnic->vdev, 0);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"vNIC dev init failed, aborting.\n");
goto err_out_dev_close;
}
err = vnic_dev_mac_addr(fnic->vdev, fnic->mac_addr);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"vNIC get MAC addr failed \n");
goto err_out_dev_close;
}
/* Get vNIC configuration */
err = fnic_get_vnic_config(fnic);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"Get vNIC configuration failed, "
"aborting.\n");
goto err_out_dev_close;
}
host->max_lun = fnic->config.luns_per_tgt;
host->max_id = FNIC_MAX_FCP_TARGET;
fnic_get_res_counts(fnic);
err = fnic_set_intr_mode(fnic);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"Failed to set intr mode, "
"aborting.\n");
goto err_out_dev_close;
}
err = fnic_request_intr(fnic);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"Unable to request irq.\n");
goto err_out_clear_intr;
}
err = fnic_alloc_vnic_resources(fnic);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"Failed to alloc vNIC resources, "
"aborting.\n");
goto err_out_free_intr;
}
/* initialize all fnic locks */
spin_lock_init(&fnic->fnic_lock);
for (i = 0; i < FNIC_WQ_MAX; i++)
spin_lock_init(&fnic->wq_lock[i]);
for (i = 0; i < FNIC_WQ_COPY_MAX; i++) {
spin_lock_init(&fnic->wq_copy_lock[i]);
fnic->wq_copy_desc_low[i] = DESC_CLEAN_LOW_WATERMARK;
fnic->fw_ack_recd[i] = 0;
fnic->fw_ack_index[i] = -1;
}
for (i = 0; i < FNIC_IO_LOCKS; i++)
spin_lock_init(&fnic->io_req_lock[i]);
fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
if (!fnic->io_req_pool)
goto err_out_free_resources;
pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab,
fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
if (!pool)
goto err_out_free_ioreq_pool;
fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool;
pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab,
fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
if (!pool)
goto err_out_free_dflt_pool;
fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool;
/* setup vlan config, hw inserts vlan header */
fnic->vlan_hw_insert = 1;
fnic->vlan_id = 0;
fnic->flogi_oxid = FC_XID_UNKNOWN;
fnic->flogi = NULL;
fnic->flogi_resp = NULL;
fnic->state = FNIC_IN_FC_MODE;
/* Enable hardware stripping of vlan header on ingress */
fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1);
/* Setup notification buffer area */
err = fnic_notify_set(fnic);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"Failed to alloc notify buffer, aborting.\n");
goto err_out_free_max_pool;
}
/* Setup notify timer when using MSI interrupts */
if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
setup_timer(&fnic->notify_timer,
fnic_notify_timer, (unsigned long)fnic);
/* allocate RQ buffers and post them to RQ */
for (i = 0; i < fnic->rq_count; i++) {
err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"fnic_alloc_rq_frame can't alloc "
"frame\n");
goto err_out_free_rq_buf;
}
}
/*
* Initialization done with PCI system, hardware, firmware.
* Add host to SCSI
*/
err = scsi_add_host(lp->host, &pdev->dev);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"fnic: scsi_add_host failed...exiting\n");
goto err_out_free_rq_buf;
}
/* Start local port initialization */
lp->link_up = 0;
lp->tt = fnic_transport_template;
lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3,
FCPIO_HOST_EXCH_RANGE_START,
FCPIO_HOST_EXCH_RANGE_END);
if (!lp->emp) {
err = -ENOMEM;
goto err_out_remove_scsi_host;
}
lp->max_retry_count = fnic->config.flogi_retries;
lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
FCP_SPPF_CONF_COMPL);
if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)
lp->service_params |= FCP_SPPF_RETRY;
lp->boot_time = jiffies;
lp->e_d_tov = fnic->config.ed_tov;
lp->r_a_tov = fnic->config.ra_tov;
lp->link_supported_speeds = FC_PORTSPEED_10GBIT;
fc_set_wwnn(lp, fnic->config.node_wwn);
fc_set_wwpn(lp, fnic->config.port_wwn);
fc_exch_init(lp);
fc_lport_init(lp);
fc_elsct_init(lp);
fc_rport_init(lp);
fc_disc_init(lp);
fc_lport_config(lp);
if (fc_set_mfs(lp, fnic->config.maxdatafieldsize +
sizeof(struct fc_frame_header))) {
err = -EINVAL;
goto err_out_free_exch_mgr;
}
fc_host_maxframe_size(lp->host) = lp->mfs;
sprintf(fc_host_symbolic_name(lp->host),
DRV_NAME " v" DRV_VERSION " over %s", fnic->name);
spin_lock_irqsave(&fnic_list_lock, flags);
list_add_tail(&fnic->list, &fnic_list);
spin_unlock_irqrestore(&fnic_list_lock, flags);
INIT_WORK(&fnic->link_work, fnic_handle_link);
INIT_WORK(&fnic->frame_work, fnic_handle_frame);
skb_queue_head_init(&fnic->frame_queue);
/* Enable all queues */
for (i = 0; i < fnic->raw_wq_count; i++)
vnic_wq_enable(&fnic->wq[i]);
for (i = 0; i < fnic->rq_count; i++)
vnic_rq_enable(&fnic->rq[i]);
for (i = 0; i < fnic->wq_copy_count; i++)
vnic_wq_copy_enable(&fnic->wq_copy[i]);
fc_fabric_login(lp);
vnic_dev_enable(fnic->vdev);
for (i = 0; i < fnic->intr_count; i++)
vnic_intr_unmask(&fnic->intr[i]);
fnic_notify_timer_start(fnic);
return 0;
err_out_free_exch_mgr:
fc_exch_mgr_free(lp->emp);
err_out_remove_scsi_host:
fc_remove_host(fnic->lport->host);
scsi_remove_host(fnic->lport->host);
err_out_free_rq_buf:
for (i = 0; i < fnic->rq_count; i++)
vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
vnic_dev_notify_unset(fnic->vdev);
err_out_free_max_pool:
mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]);
err_out_free_dflt_pool:
mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]);
err_out_free_ioreq_pool:
mempool_destroy(fnic->io_req_pool);
err_out_free_resources:
fnic_free_vnic_resources(fnic);
err_out_free_intr:
fnic_free_intr(fnic);
err_out_clear_intr:
fnic_clear_intr_mode(fnic);
err_out_dev_close:
vnic_dev_close(fnic->vdev);
err_out_vnic_unregister:
vnic_dev_unregister(fnic->vdev);
err_out_iounmap:
fnic_iounmap(fnic);
err_out_release_regions:
pci_release_regions(pdev);
err_out_disable_device:
pci_disable_device(pdev);
err_out_free_hba:
scsi_host_put(lp->host);
err_out:
return err;
}
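/*
 * fnic_remove
 * PCI remove routine: stops incoming frames and link events, logs off
 * the fabric, quiesces the hardware and releases all driver resources
 */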
static void __devexit fnic_remove(struct pci_dev *pdev)
{
struct fnic *fnic = pci_get_drvdata(pdev);
unsigned long flags;
/*
* Mark state so that the workqueue thread stops forwarding
* received frames and link events to the local port. ISR and
* other threads that can queue work items will also stop
* creating work items on the fnic workqueue
*/
spin_lock_irqsave(&fnic->fnic_lock, flags);
fnic->stop_rx_link_events = 1;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
del_timer_sync(&fnic->notify_timer);
/*
* Flush the fnic event queue. After this call, there should
* be no event queued for this fnic device in the workqueue
*/
flush_workqueue(fnic_event_queue);
skb_queue_purge(&fnic->frame_queue);
/*
* Log off the fabric. This stops all remote ports and the dns port,
* and logs off the fabric. It flushes all rport, disc and lport work
* before returning
*/
fc_fabric_logoff(fnic->lport);
spin_lock_irqsave(&fnic->fnic_lock, flags);
fnic->in_remove = 1;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
fc_lport_destroy(fnic->lport);
/*
* This stops the fnic device, masks all interrupts. Completed
* CQ entries are drained. Posted WQ/RQ/Copy-WQ entries are
* cleaned up
*/
fnic_cleanup(fnic);
BUG_ON(!skb_queue_empty(&fnic->frame_queue));
spin_lock_irqsave(&fnic_list_lock, flags);
list_del(&fnic->list);
spin_unlock_irqrestore(&fnic_list_lock, flags);
fc_remove_host(fnic->lport->host);
scsi_remove_host(fnic->lport->host);
fc_exch_mgr_free(fnic->lport->emp);
vnic_dev_notify_unset(fnic->vdev);
fnic_free_vnic_resources(fnic);
fnic_free_intr(fnic);
fnic_clear_intr_mode(fnic);
vnic_dev_close(fnic->vdev);
vnic_dev_unregister(fnic->vdev);
fnic_iounmap(fnic);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
scsi_host_put(fnic->lport->host);
}
static struct pci_driver fnic_driver = {
.name = DRV_NAME,
.id_table = fnic_id_table,
.probe = fnic_probe,
.remove = __devexit_p(fnic_remove),
};
static int __init fnic_init_module(void)
{
size_t len;
int err = 0;
printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
/* Create a cache for allocation of default size sgls */
len = sizeof(struct fnic_dflt_sgl_list);
fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create
("fnic_sgl_dflt", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA,
NULL);
if (!fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]) {
printk(KERN_ERR PFX "failed to create fnic dflt sgl slab\n");
err = -ENOMEM;
goto err_create_fnic_sgl_slab_dflt;
}
/* Create a cache for allocation of max size sgls*/
len = sizeof(struct fnic_sgl_list);
fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create
("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA,
NULL);
if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) {
printk(KERN_ERR PFX "failed to create fnic max sgl slab\n");
err = -ENOMEM;
goto err_create_fnic_sgl_slab_max;
}
/* Create a cache of io_req structs for use via mempool */
fnic_io_req_cache = kmem_cache_create("fnic_io_req",
sizeof(struct fnic_io_req),
0, SLAB_HWCACHE_ALIGN, NULL);
if (!fnic_io_req_cache) {
printk(KERN_ERR PFX "failed to create fnic io_req slab\n");
err = -ENOMEM;
goto err_create_fnic_ioreq_slab;
}
fnic_event_queue = create_singlethread_workqueue("fnic_event_wq");
if (!fnic_event_queue) {
printk(KERN_ERR PFX "fnic work queue create failed\n");
err = -ENOMEM;
goto err_create_fnic_workq;
}
spin_lock_init(&fnic_list_lock);
INIT_LIST_HEAD(&fnic_list);
fnic_fc_transport = fc_attach_transport(&fnic_fc_functions);
if (!fnic_fc_transport) {
printk(KERN_ERR PFX "fc_attach_transport error\n");
err = -ENOMEM;
goto err_fc_transport;
}
/* register the driver with PCI system */
err = pci_register_driver(&fnic_driver);
if (err < 0) {
printk(KERN_ERR PFX "pci register error\n");
goto err_pci_register;
}
return err;
err_pci_register:
fc_release_transport(fnic_fc_transport);
err_fc_transport:
destroy_workqueue(fnic_event_queue);
err_create_fnic_workq:
kmem_cache_destroy(fnic_io_req_cache);
err_create_fnic_ioreq_slab:
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
err_create_fnic_sgl_slab_max:
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
err_create_fnic_sgl_slab_dflt:
return err;
}
static void __exit fnic_cleanup_module(void)
{
pci_unregister_driver(&fnic_driver);
destroy_workqueue(fnic_event_queue);
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
kmem_cache_destroy(fnic_io_req_cache);
fc_release_transport(fnic_fc_transport);
}
module_init(fnic_init_module);
module_exit(fnic_cleanup_module);

View file

@ -0,0 +1,444 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include "wq_enet_desc.h"
#include "rq_enet_desc.h"
#include "cq_enet_desc.h"
#include "vnic_resource.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_nic.h"
#include "fnic.h"
int fnic_get_vnic_config(struct fnic *fnic)
{
struct vnic_fc_config *c = &fnic->config;
int err;
#define GET_CONFIG(m) \
do { \
err = vnic_dev_spec(fnic->vdev, \
offsetof(struct vnic_fc_config, m), \
sizeof(c->m), &c->m); \
if (err) { \
shost_printk(KERN_ERR, fnic->lport->host, \
"Error getting %s, %d\n", #m, \
err); \
return err; \
} \
} while (0);
GET_CONFIG(node_wwn);
GET_CONFIG(port_wwn);
GET_CONFIG(wq_enet_desc_count);
GET_CONFIG(wq_copy_desc_count);
GET_CONFIG(rq_desc_count);
GET_CONFIG(maxdatafieldsize);
GET_CONFIG(ed_tov);
GET_CONFIG(ra_tov);
GET_CONFIG(intr_timer);
GET_CONFIG(intr_timer_type);
GET_CONFIG(flags);
GET_CONFIG(flogi_retries);
GET_CONFIG(flogi_timeout);
GET_CONFIG(plogi_retries);
GET_CONFIG(plogi_timeout);
GET_CONFIG(io_throttle_count);
GET_CONFIG(link_down_timeout);
GET_CONFIG(port_down_timeout);
GET_CONFIG(port_down_io_retries);
GET_CONFIG(luns_per_tgt);
c->wq_enet_desc_count =
min_t(u32, VNIC_FNIC_WQ_DESCS_MAX,
max_t(u32, VNIC_FNIC_WQ_DESCS_MIN,
c->wq_enet_desc_count));
c->wq_enet_desc_count = ALIGN(c->wq_enet_desc_count, 16);
c->wq_copy_desc_count =
min_t(u32, VNIC_FNIC_WQ_COPY_DESCS_MAX,
max_t(u32, VNIC_FNIC_WQ_COPY_DESCS_MIN,
c->wq_copy_desc_count));
c->wq_copy_desc_count = ALIGN(c->wq_copy_desc_count, 16);
c->rq_desc_count =
min_t(u32, VNIC_FNIC_RQ_DESCS_MAX,
max_t(u32, VNIC_FNIC_RQ_DESCS_MIN,
c->rq_desc_count));
c->rq_desc_count = ALIGN(c->rq_desc_count, 16);
c->maxdatafieldsize =
min_t(u16, VNIC_FNIC_MAXDATAFIELDSIZE_MAX,
max_t(u16, VNIC_FNIC_MAXDATAFIELDSIZE_MIN,
c->maxdatafieldsize));
c->ed_tov =
min_t(u32, VNIC_FNIC_EDTOV_MAX,
max_t(u32, VNIC_FNIC_EDTOV_MIN,
c->ed_tov));
c->ra_tov =
min_t(u32, VNIC_FNIC_RATOV_MAX,
max_t(u32, VNIC_FNIC_RATOV_MIN,
c->ra_tov));
c->flogi_retries =
min_t(u32, VNIC_FNIC_FLOGI_RETRIES_MAX, c->flogi_retries);
c->flogi_timeout =
min_t(u32, VNIC_FNIC_FLOGI_TIMEOUT_MAX,
max_t(u32, VNIC_FNIC_FLOGI_TIMEOUT_MIN,
c->flogi_timeout));
c->plogi_retries =
min_t(u32, VNIC_FNIC_PLOGI_RETRIES_MAX, c->plogi_retries);
c->plogi_timeout =
min_t(u32, VNIC_FNIC_PLOGI_TIMEOUT_MAX,
max_t(u32, VNIC_FNIC_PLOGI_TIMEOUT_MIN,
c->plogi_timeout));
c->io_throttle_count =
min_t(u32, VNIC_FNIC_IO_THROTTLE_COUNT_MAX,
max_t(u32, VNIC_FNIC_IO_THROTTLE_COUNT_MIN,
c->io_throttle_count));
c->link_down_timeout =
min_t(u32, VNIC_FNIC_LINK_DOWN_TIMEOUT_MAX,
c->link_down_timeout);
c->port_down_timeout =
min_t(u32, VNIC_FNIC_PORT_DOWN_TIMEOUT_MAX,
c->port_down_timeout);
c->port_down_io_retries =
min_t(u32, VNIC_FNIC_PORT_DOWN_IO_RETRIES_MAX,
c->port_down_io_retries);
c->luns_per_tgt =
min_t(u32, VNIC_FNIC_LUNS_PER_TARGET_MAX,
max_t(u32, VNIC_FNIC_LUNS_PER_TARGET_MIN,
c->luns_per_tgt));
c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer);
c->intr_timer_type = c->intr_timer_type;
shost_printk(KERN_INFO, fnic->lport->host,
"vNIC MAC addr %02x:%02x:%02x:%02x:%02x:%02x "
"wq/wq_copy/rq %d/%d/%d\n",
fnic->mac_addr[0], fnic->mac_addr[1], fnic->mac_addr[2],
fnic->mac_addr[3], fnic->mac_addr[4], fnic->mac_addr[5],
c->wq_enet_desc_count, c->wq_copy_desc_count,
c->rq_desc_count);
shost_printk(KERN_INFO, fnic->lport->host,
"vNIC node wwn %llx port wwn %llx\n",
c->node_wwn, c->port_wwn);
shost_printk(KERN_INFO, fnic->lport->host,
"vNIC ed_tov %d ra_tov %d\n",
c->ed_tov, c->ra_tov);
shost_printk(KERN_INFO, fnic->lport->host,
"vNIC mtu %d intr timer %d\n",
c->maxdatafieldsize, c->intr_timer);
shost_printk(KERN_INFO, fnic->lport->host,
"vNIC flags 0x%x luns per tgt %d\n",
c->flags, c->luns_per_tgt);
shost_printk(KERN_INFO, fnic->lport->host,
"vNIC flogi_retries %d flogi timeout %d\n",
c->flogi_retries, c->flogi_timeout);
shost_printk(KERN_INFO, fnic->lport->host,
"vNIC plogi retries %d plogi timeout %d\n",
c->plogi_retries, c->plogi_timeout);
shost_printk(KERN_INFO, fnic->lport->host,
"vNIC io throttle count %d link dn timeout %d\n",
c->io_throttle_count, c->link_down_timeout);
shost_printk(KERN_INFO, fnic->lport->host,
"vNIC port dn io retries %d port dn timeout %d\n",
c->port_down_io_retries, c->port_down_timeout);
return 0;
}
int fnic_set_nic_config(struct fnic *fnic, u8 rss_default_cpu,
u8 rss_hash_type,
u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable,
u8 tso_ipid_split_en, u8 ig_vlan_strip_en)
{
u64 a0, a1;
u32 nic_cfg;
int wait = 1000;
vnic_set_nic_cfg(&nic_cfg, rss_default_cpu,
rss_hash_type, rss_hash_bits, rss_base_cpu,
rss_enable, tso_ipid_split_en, ig_vlan_strip_en);
a0 = nic_cfg;
a1 = 0;
return vnic_dev_cmd(fnic->vdev, CMD_NIC_CFG, &a0, &a1, wait);
}
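/*
 * fnic_get_res_counts
 * Queries the device for the number of WQ, RQ, CQ and interrupt
 * resources; the last WQ is reserved as the copy WQ for SCSI I/Os
 */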
void fnic_get_res_counts(struct fnic *fnic)
{
fnic->wq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_WQ);
fnic->raw_wq_count = fnic->wq_count - 1;
fnic->wq_copy_count = fnic->wq_count - fnic->raw_wq_count;
fnic->rq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_RQ);
fnic->cq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_CQ);
fnic->intr_count = vnic_dev_get_res_count(fnic->vdev,
RES_TYPE_INTR_CTRL);
}
void fnic_free_vnic_resources(struct fnic *fnic)
{
unsigned int i;
for (i = 0; i < fnic->raw_wq_count; i++)
vnic_wq_free(&fnic->wq[i]);
for (i = 0; i < fnic->wq_copy_count; i++)
vnic_wq_copy_free(&fnic->wq_copy[i]);
for (i = 0; i < fnic->rq_count; i++)
vnic_rq_free(&fnic->rq[i]);
for (i = 0; i < fnic->cq_count; i++)
vnic_cq_free(&fnic->cq[i]);
for (i = 0; i < fnic->intr_count; i++)
vnic_intr_free(&fnic->intr[i]);
}
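/*
 * fnic_alloc_vnic_resources
 * Allocates and initializes the raw WQs, copy WQs, RQs, their completion
 * queues and the interrupt resources, then primes the stats area
 */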
int fnic_alloc_vnic_resources(struct fnic *fnic)
{
enum vnic_dev_intr_mode intr_mode;
unsigned int mask_on_assertion;
unsigned int interrupt_offset;
unsigned int error_interrupt_enable;
unsigned int error_interrupt_offset;
unsigned int i, cq_index;
unsigned int wq_copy_cq_desc_count;
int err;
intr_mode = vnic_dev_get_intr_mode(fnic->vdev);
shost_printk(KERN_INFO, fnic->lport->host, "vNIC interrupt mode: %s\n",
intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" :
intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" :
intr_mode == VNIC_DEV_INTR_MODE_MSIX ?
"MSI-X" : "unknown");
shost_printk(KERN_INFO, fnic->lport->host, "vNIC resources avail: "
"wq %d cp_wq %d raw_wq %d rq %d cq %d intr %d\n",
fnic->wq_count, fnic->wq_copy_count, fnic->raw_wq_count,
fnic->rq_count, fnic->cq_count, fnic->intr_count);
/* Allocate Raw WQ used for FCS frames */
for (i = 0; i < fnic->raw_wq_count; i++) {
err = vnic_wq_alloc(fnic->vdev, &fnic->wq[i], i,
fnic->config.wq_enet_desc_count,
sizeof(struct wq_enet_desc));
if (err)
goto err_out_cleanup;
}
/* Allocate Copy WQs used for SCSI IOs */
for (i = 0; i < fnic->wq_copy_count; i++) {
err = vnic_wq_copy_alloc(fnic->vdev, &fnic->wq_copy[i],
(fnic->raw_wq_count + i),
fnic->config.wq_copy_desc_count,
sizeof(struct fcpio_host_req));
if (err)
goto err_out_cleanup;
}
/* RQ for receiving FCS frames */
for (i = 0; i < fnic->rq_count; i++) {
err = vnic_rq_alloc(fnic->vdev, &fnic->rq[i], i,
fnic->config.rq_desc_count,
sizeof(struct rq_enet_desc));
if (err)
goto err_out_cleanup;
}
/* CQ for each RQ */
for (i = 0; i < fnic->rq_count; i++) {
cq_index = i;
err = vnic_cq_alloc(fnic->vdev,
&fnic->cq[cq_index], cq_index,
fnic->config.rq_desc_count,
sizeof(struct cq_enet_rq_desc));
if (err)
goto err_out_cleanup;
}
/* CQ for each WQ */
for (i = 0; i < fnic->raw_wq_count; i++) {
cq_index = fnic->rq_count + i;
err = vnic_cq_alloc(fnic->vdev, &fnic->cq[cq_index], cq_index,
fnic->config.wq_enet_desc_count,
sizeof(struct cq_enet_wq_desc));
if (err)
goto err_out_cleanup;
}
/* CQ for each COPY WQ */
wq_copy_cq_desc_count = (fnic->config.wq_copy_desc_count * 3);
for (i = 0; i < fnic->wq_copy_count; i++) {
cq_index = fnic->raw_wq_count + fnic->rq_count + i;
err = vnic_cq_alloc(fnic->vdev, &fnic->cq[cq_index],
cq_index,
wq_copy_cq_desc_count,
sizeof(struct fcpio_fw_req));
if (err)
goto err_out_cleanup;
}
for (i = 0; i < fnic->intr_count; i++) {
err = vnic_intr_alloc(fnic->vdev, &fnic->intr[i], i);
if (err)
goto err_out_cleanup;
}
fnic->legacy_pba = vnic_dev_get_res(fnic->vdev,
RES_TYPE_INTR_PBA_LEGACY, 0);
if (!fnic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) {
shost_printk(KERN_ERR, fnic->lport->host,
"Failed to hook legacy pba resource\n");
err = -ENODEV;
goto err_out_cleanup;
}
/*
* Init RQ/WQ resources.
*
* RQ[0 to n-1] point to CQ[0 to n-1]
* WQ[0 to m-1] point to CQ[n to n+m-1]
* WQ_COPY[0 to k-1] points to CQ[n+m to n+m+k-1]
*
* Note for copy wq we always initialize with cq_index = 0
*
* Error interrupt is not enabled for MSI.
*/
switch (intr_mode) {
case VNIC_DEV_INTR_MODE_INTX:
case VNIC_DEV_INTR_MODE_MSIX:
error_interrupt_enable = 1;
error_interrupt_offset = fnic->err_intr_offset;
break;
default:
error_interrupt_enable = 0;
error_interrupt_offset = 0;
break;
}
for (i = 0; i < fnic->rq_count; i++) {
cq_index = i;
vnic_rq_init(&fnic->rq[i],
cq_index,
error_interrupt_enable,
error_interrupt_offset);
}
for (i = 0; i < fnic->raw_wq_count; i++) {
cq_index = i + fnic->rq_count;
vnic_wq_init(&fnic->wq[i],
cq_index,
error_interrupt_enable,
error_interrupt_offset);
}
for (i = 0; i < fnic->wq_copy_count; i++) {
vnic_wq_copy_init(&fnic->wq_copy[i],
0 /* cq_index 0 - always */,
error_interrupt_enable,
error_interrupt_offset);
}
for (i = 0; i < fnic->cq_count; i++) {
switch (intr_mode) {
case VNIC_DEV_INTR_MODE_MSIX:
interrupt_offset = i;
break;
default:
interrupt_offset = 0;
break;
}
vnic_cq_init(&fnic->cq[i],
0 /* flow_control_enable */,
1 /* color_enable */,
0 /* cq_head */,
0 /* cq_tail */,
1 /* cq_tail_color */,
1 /* interrupt_enable */,
1 /* cq_entry_enable */,
0 /* cq_message_enable */,
interrupt_offset,
0 /* cq_message_addr */);
}
/*
* Init INTR resources
*
* mask_on_assertion is not used for INTx due to the level-
* triggered nature of INTx
*/
switch (intr_mode) {
case VNIC_DEV_INTR_MODE_MSI:
case VNIC_DEV_INTR_MODE_MSIX:
mask_on_assertion = 1;
break;
default:
mask_on_assertion = 0;
break;
}
for (i = 0; i < fnic->intr_count; i++) {
vnic_intr_init(&fnic->intr[i],
fnic->config.intr_timer,
fnic->config.intr_timer_type,
mask_on_assertion);
}
/* init the stats memory by making the first call here */
err = vnic_dev_stats_dump(fnic->vdev, &fnic->stats);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"vnic_dev_stats_dump failed - x%x\n", err);
goto err_out_cleanup;
}
/* Clear LIF stats */
vnic_dev_stats_clear(fnic->vdev);
return 0;
err_out_cleanup:
fnic_free_vnic_resources(fnic);
return err;
}

View file

@ -0,0 +1,197 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _FNIC_RES_H_
#define _FNIC_RES_H_
#include "wq_enet_desc.h"
#include "rq_enet_desc.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "fnic_io.h"
#include "fcpio.h"
#include "vnic_wq_copy.h"
#include "vnic_cq_copy.h"
static inline void fnic_queue_wq_desc(struct vnic_wq *wq,
void *os_buf, dma_addr_t dma_addr,
unsigned int len, unsigned int fc_eof,
int vlan_tag_insert,
unsigned int vlan_tag,
int cq_entry, int sop, int eop)
{
struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
wq_enet_desc_enc(desc,
(u64)dma_addr | VNIC_PADDR_TARGET,
(u16)len,
0, /* mss_or_csum_offset */
(u16)fc_eof,
0, /* offload_mode */
(u8)eop, (u8)cq_entry,
1, /* fcoe_encap */
(u8)vlan_tag_insert,
(u16)vlan_tag,
0 /* loopback */);
vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop);
}
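/*
 * Build an FCPIO_ICMND_16 SCSI initiator command descriptor on the copy
 * WQ and post it to the firmware
 */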
static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq,
u32 req_id,
u32 lunmap_id, u8 spl_flags,
u32 sgl_cnt, u32 sense_len,
u64 sgl_addr, u64 sns_addr,
u8 crn, u8 pri_ta,
u8 flags, u8 *scsi_cdb,
u32 data_len, u8 *lun,
u32 d_id, u16 mss,
u32 ratov, u32 edtov)
{
struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
desc->hdr.type = FCPIO_ICMND_16; /* enum fcpio_type */
desc->hdr.status = 0; /* header status entry */
desc->hdr._resvd = 0; /* reserved */
desc->hdr.tag.u.req_id = req_id; /* id for this request */
desc->u.icmnd_16.lunmap_id = lunmap_id; /* index into lunmap table */
desc->u.icmnd_16.special_req_flags = spl_flags; /* exch req flags */
desc->u.icmnd_16._resvd0[0] = 0; /* reserved */
desc->u.icmnd_16._resvd0[1] = 0; /* reserved */
desc->u.icmnd_16._resvd0[2] = 0; /* reserved */
desc->u.icmnd_16.sgl_cnt = sgl_cnt; /* scatter-gather list count */
desc->u.icmnd_16.sense_len = sense_len; /* sense buffer length */
desc->u.icmnd_16.sgl_addr = sgl_addr; /* scatter-gather list addr */
desc->u.icmnd_16.sense_addr = sns_addr; /* sense buffer address */
desc->u.icmnd_16.crn = crn; /* SCSI Command Reference No.*/
desc->u.icmnd_16.pri_ta = pri_ta; /* SCSI Pri & Task attribute */
desc->u.icmnd_16._resvd1 = 0; /* reserved: should be 0 */
desc->u.icmnd_16.flags = flags; /* command flags */
memcpy(desc->u.icmnd_16.scsi_cdb, scsi_cdb, CDB_16); /* SCSI CDB */
desc->u.icmnd_16.data_len = data_len; /* length of data expected */
memcpy(desc->u.icmnd_16.lun, lun, LUN_ADDRESS); /* LUN address */
desc->u.icmnd_16._resvd2 = 0; /* reserved */
hton24(desc->u.icmnd_16.d_id, d_id); /* FC vNIC only: Target D_ID */
desc->u.icmnd_16.mss = mss; /* FC vNIC only: max burst */
desc->u.icmnd_16.r_a_tov = ratov; /*FC vNIC only: Res. Alloc Timeout */
desc->u.icmnd_16.e_d_tov = edtov; /*FC vNIC only: Err Detect Timeout */
vnic_wq_copy_post(wq);
}
static inline void fnic_queue_wq_copy_desc_itmf(struct vnic_wq_copy *wq,
u32 req_id, u32 lunmap_id,
u32 tm_req, u32 tm_id, u8 *lun,
u32 d_id, u32 r_a_tov,
u32 e_d_tov)
{
struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
desc->hdr.type = FCPIO_ITMF; /* enum fcpio_type */
desc->hdr.status = 0; /* header status entry */
desc->hdr._resvd = 0; /* reserved */
desc->hdr.tag.u.req_id = req_id; /* id for this request */
desc->u.itmf.lunmap_id = lunmap_id; /* index into lunmap table */
desc->u.itmf.tm_req = tm_req; /* SCSI Task Management request */
desc->u.itmf.t_tag = tm_id; /* tag of fcpio to be aborted */
desc->u.itmf._resvd = 0;
memcpy(desc->u.itmf.lun, lun, LUN_ADDRESS); /* LUN address */
desc->u.itmf._resvd1 = 0;
hton24(desc->u.itmf.d_id, d_id); /* FC vNIC only: Target D_ID */
desc->u.itmf.r_a_tov = r_a_tov; /* FC vNIC only: R_A_TOV in msec */
desc->u.itmf.e_d_tov = e_d_tov; /* FC vNIC only: E_D_TOV in msec */
vnic_wq_copy_post(wq);
}
static inline void fnic_queue_wq_copy_desc_flogi_reg(struct vnic_wq_copy *wq,
u32 req_id, u8 format,
u32 s_id, u8 *gw_mac)
{
struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
desc->hdr.type = FCPIO_FLOGI_REG; /* enum fcpio_type */
desc->hdr.status = 0; /* header status entry */
desc->hdr._resvd = 0; /* reserved */
desc->hdr.tag.u.req_id = req_id; /* id for this request */
desc->u.flogi_reg.format = format;
hton24(desc->u.flogi_reg.s_id, s_id);
memcpy(desc->u.flogi_reg.gateway_mac, gw_mac, ETH_ALEN);
vnic_wq_copy_post(wq);
}
static inline void fnic_queue_wq_copy_desc_fw_reset(struct vnic_wq_copy *wq,
u32 req_id)
{
struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
desc->hdr.type = FCPIO_RESET; /* enum fcpio_type */
desc->hdr.status = 0; /* header status entry */
desc->hdr._resvd = 0; /* reserved */
desc->hdr.tag.u.req_id = req_id; /* id for this request */
vnic_wq_copy_post(wq);
}
static inline void fnic_queue_wq_copy_desc_lunmap(struct vnic_wq_copy *wq,
u32 req_id, u64 lunmap_addr,
u32 lunmap_len)
{
struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
desc->hdr.type = FCPIO_LUNMAP_REQ; /* enum fcpio_type */
desc->hdr.status = 0; /* header status entry */
desc->hdr._resvd = 0; /* reserved */
desc->hdr.tag.u.req_id = req_id; /* id for this request */
desc->u.lunmap_req.addr = lunmap_addr; /* address of the buffer */
desc->u.lunmap_req.len = lunmap_len; /* len of the buffer */
vnic_wq_copy_post(wq);
}
static inline void fnic_queue_rq_desc(struct vnic_rq *rq,
void *os_buf, dma_addr_t dma_addr,
u16 len)
{
struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
rq_enet_desc_enc(desc,
(u64)dma_addr | VNIC_PADDR_TARGET,
RQ_ENET_TYPE_ONLY_SOP,
(u16)len);
vnic_rq_post(rq, os_buf, 0, dma_addr, len);
}
struct fnic;
int fnic_get_vnic_config(struct fnic *);
int fnic_alloc_vnic_resources(struct fnic *);
void fnic_free_vnic_resources(struct fnic *);
void fnic_get_res_counts(struct fnic *);
int fnic_set_nic_config(struct fnic *fnic, u8 rss_default_cpu,
u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu,
u8 rss_enable, u8 tso_ipid_split_en,
u8 ig_vlan_strip_en);
#endif /* _FNIC_RES_H_ */

File diff suppressed because it is too large

View file

@ -0,0 +1,58 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _RQ_ENET_DESC_H_
#define _RQ_ENET_DESC_H_
/* Ethernet receive queue descriptor: 16B */
struct rq_enet_desc {
__le64 address;
__le16 length_type;
u8 reserved[6];
};
enum rq_enet_type_types {
RQ_ENET_TYPE_ONLY_SOP = 0,
RQ_ENET_TYPE_NOT_SOP = 1,
RQ_ENET_TYPE_RESV2 = 2,
RQ_ENET_TYPE_RESV3 = 3,
};
#define RQ_ENET_ADDR_BITS 64
#define RQ_ENET_LEN_BITS 14
#define RQ_ENET_LEN_MASK ((1 << RQ_ENET_LEN_BITS) - 1)
#define RQ_ENET_TYPE_BITS 2
#define RQ_ENET_TYPE_MASK ((1 << RQ_ENET_TYPE_BITS) - 1)
static inline void rq_enet_desc_enc(struct rq_enet_desc *desc,
u64 address, u8 type, u16 length)
{
desc->address = cpu_to_le64(address);
desc->length_type = cpu_to_le16((length & RQ_ENET_LEN_MASK) |
((type & RQ_ENET_TYPE_MASK) << RQ_ENET_LEN_BITS));
}
static inline void rq_enet_desc_dec(struct rq_enet_desc *desc,
u64 *address, u8 *type, u16 *length)
{
*address = le64_to_cpu(desc->address);
*length = le16_to_cpu(desc->length_type) & RQ_ENET_LEN_MASK;
*type = (u8)((le16_to_cpu(desc->length_type) >> RQ_ENET_LEN_BITS) &
RQ_ENET_TYPE_MASK);
}
#endif /* _RQ_ENET_DESC_H_ */

View file

@ -0,0 +1,85 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include "vnic_dev.h"
#include "vnic_cq.h"
void vnic_cq_free(struct vnic_cq *cq)
{
vnic_dev_free_desc_ring(cq->vdev, &cq->ring);
cq->ctrl = NULL;
}
int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
unsigned int desc_count, unsigned int desc_size)
{
int err;
cq->index = index;
cq->vdev = vdev;
cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
if (!cq->ctrl) {
printk(KERN_ERR "Failed to hook CQ[%d] resource\n", index);
return -EINVAL;
}
err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
if (err)
return err;
return 0;
}
void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
unsigned int cq_tail_color, unsigned int interrupt_enable,
unsigned int cq_entry_enable, unsigned int cq_message_enable,
unsigned int interrupt_offset, u64 cq_message_addr)
{
u64 paddr;
paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET;
writeq(paddr, &cq->ctrl->ring_base);
iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size);
iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable);
iowrite32(color_enable, &cq->ctrl->color_enable);
iowrite32(cq_head, &cq->ctrl->cq_head);
iowrite32(cq_tail, &cq->ctrl->cq_tail);
iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color);
iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable);
iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable);
iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable);
iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset);
writeq(cq_message_addr, &cq->ctrl->cq_message_addr);
}
void vnic_cq_clean(struct vnic_cq *cq)
{
cq->to_clean = 0;
cq->last_color = 0;
iowrite32(0, &cq->ctrl->cq_head);
iowrite32(0, &cq->ctrl->cq_tail);
iowrite32(1, &cq->ctrl->cq_tail_color);
vnic_dev_clear_desc_ring(&cq->ring);
}

drivers/scsi/fnic/vnic_cq.h Normal file
View file

@ -0,0 +1,121 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _VNIC_CQ_H_
#define _VNIC_CQ_H_
#include "cq_desc.h"
#include "vnic_dev.h"
/*
* These defines avoid symbol clash between fnic and enic (Cisco 10G Eth
* Driver) when both are built with CONFIG options =y
*/
#define vnic_cq_service fnic_cq_service
#define vnic_cq_free fnic_cq_free
#define vnic_cq_alloc fnic_cq_alloc
#define vnic_cq_init fnic_cq_init
#define vnic_cq_clean fnic_cq_clean
/* Completion queue control */
struct vnic_cq_ctrl {
u64 ring_base; /* 0x00 */
u32 ring_size; /* 0x08 */
u32 pad0;
u32 flow_control_enable; /* 0x10 */
u32 pad1;
u32 color_enable; /* 0x18 */
u32 pad2;
u32 cq_head; /* 0x20 */
u32 pad3;
u32 cq_tail; /* 0x28 */
u32 pad4;
u32 cq_tail_color; /* 0x30 */
u32 pad5;
u32 interrupt_enable; /* 0x38 */
u32 pad6;
u32 cq_entry_enable; /* 0x40 */
u32 pad7;
u32 cq_message_enable; /* 0x48 */
u32 pad8;
u32 interrupt_offset; /* 0x50 */
u32 pad9;
u64 cq_message_addr; /* 0x58 */
u32 pad10;
};
struct vnic_cq {
unsigned int index;
struct vnic_dev *vdev;
struct vnic_cq_ctrl __iomem *ctrl; /* memory-mapped */
struct vnic_dev_ring ring;
unsigned int to_clean;
unsigned int last_color;
};
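/*
 * Service completions on a CQ. Hardware toggles the descriptor color bit
 * on every ring wrap, so a descriptor is new only while its color differs
 * from cq->last_color. Each new descriptor is handed to q_service() until
 * that callback returns non-zero or work_to_do completions are processed.
 */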
static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
unsigned int work_to_do,
int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
u8 type, u16 q_number, u16 completed_index, void *opaque),
void *opaque)
{
struct cq_desc *cq_desc;
unsigned int work_done = 0;
u16 q_number, completed_index;
u8 type, color;
cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
cq->ring.desc_size * cq->to_clean);
cq_desc_dec(cq_desc, &type, &color,
&q_number, &completed_index);
while (color != cq->last_color) {
if ((*q_service)(cq->vdev, cq_desc, type,
q_number, completed_index, opaque))
break;
cq->to_clean++;
if (cq->to_clean == cq->ring.desc_count) {
cq->to_clean = 0;
cq->last_color = cq->last_color ? 0 : 1;
}
cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
cq->ring.desc_size * cq->to_clean);
cq_desc_dec(cq_desc, &type, &color,
&q_number, &completed_index);
work_done++;
if (work_done >= work_to_do)
break;
}
return work_done;
}
void vnic_cq_free(struct vnic_cq *cq);
int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
unsigned int desc_count, unsigned int desc_size);
void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
unsigned int cq_tail_color, unsigned int interrupt_enable,
unsigned int cq_entry_enable, unsigned int cq_message_enable,
unsigned int interrupt_offset, u64 cq_message_addr);
void vnic_cq_clean(struct vnic_cq *cq);
#endif /* _VNIC_CQ_H_ */

View file

@ -0,0 +1,62 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _VNIC_CQ_COPY_H_
#define _VNIC_CQ_COPY_H_
#include "fcpio.h"
static inline unsigned int vnic_cq_copy_service(
struct vnic_cq *cq,
int (*q_service)(struct vnic_dev *vdev,
unsigned int index,
struct fcpio_fw_req *desc),
unsigned int work_to_do)
{
struct fcpio_fw_req *desc;
unsigned int work_done = 0;
u8 color;
desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs +
cq->ring.desc_size * cq->to_clean);
fcpio_color_dec(desc, &color);
while (color != cq->last_color) {
if ((*q_service)(cq->vdev, cq->index, desc))
break;
cq->to_clean++;
if (cq->to_clean == cq->ring.desc_count) {
cq->to_clean = 0;
cq->last_color = cq->last_color ? 0 : 1;
}
desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs +
cq->ring.desc_size * cq->to_clean);
fcpio_color_dec(desc, &color);
work_done++;
if (work_done >= work_to_do)
break;
}
return work_done;
}
#endif /* _VNIC_CQ_COPY_H_ */

View file

@ -0,0 +1,690 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/if_ether.h>
#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_dev.h"
#include "vnic_stats.h"
struct vnic_res {
void __iomem *vaddr;
unsigned int count;
};
struct vnic_dev {
void *priv;
struct pci_dev *pdev;
struct vnic_res res[RES_TYPE_MAX];
enum vnic_dev_intr_mode intr_mode;
struct vnic_devcmd __iomem *devcmd;
struct vnic_devcmd_notify *notify;
struct vnic_devcmd_notify notify_copy;
dma_addr_t notify_pa;
u32 *linkstatus;
dma_addr_t linkstatus_pa;
struct vnic_stats *stats;
dma_addr_t stats_pa;
struct vnic_devcmd_fw_info *fw_info;
dma_addr_t fw_info_pa;
};
#define VNIC_MAX_RES_HDR_SIZE \
(sizeof(struct vnic_resource_header) + \
sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE 128
void *vnic_dev_priv(struct vnic_dev *vdev)
{
return vdev->priv;
}
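/*
 * Walk the resource table in BAR0: validate the header magic/version,
 * then record the mapped address and count of each BAR0 resource type so
 * that later vnic_dev_get_res() lookups are simple table indexing.
 */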
static int vnic_dev_discover_res(struct vnic_dev *vdev,
struct vnic_dev_bar *bar)
{
struct vnic_resource_header __iomem *rh;
struct vnic_resource __iomem *r;
u8 type;
if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
printk(KERN_ERR "vNIC BAR0 res hdr length error\n");
return -EINVAL;
}
rh = bar->vaddr;
if (!rh) {
printk(KERN_ERR "vNIC BAR0 res hdr not mem-mapped\n");
return -EINVAL;
}
if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
ioread32(&rh->version) != VNIC_RES_VERSION) {
printk(KERN_ERR "vNIC BAR0 res magic/version error "
"exp (%lx/%lx) curr (%x/%x)\n",
VNIC_RES_MAGIC, VNIC_RES_VERSION,
ioread32(&rh->magic), ioread32(&rh->version));
return -EINVAL;
}
r = (struct vnic_resource __iomem *)(rh + 1);
while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
u8 bar_num = ioread8(&r->bar);
u32 bar_offset = ioread32(&r->bar_offset);
u32 count = ioread32(&r->count);
u32 len;
r++;
if (bar_num != 0) /* only mapping in BAR0 resources */
continue;
switch (type) {
case RES_TYPE_WQ:
case RES_TYPE_RQ:
case RES_TYPE_CQ:
case RES_TYPE_INTR_CTRL:
/* each count is stride bytes long */
len = count * VNIC_RES_STRIDE;
if (len + bar_offset > bar->len) {
printk(KERN_ERR "vNIC BAR0 resource %d "
"out-of-bounds, offset 0x%x + "
"size 0x%x > bar len 0x%lx\n",
type, bar_offset,
len,
bar->len);
return -EINVAL;
}
break;
case RES_TYPE_INTR_PBA_LEGACY:
case RES_TYPE_DEVCMD:
len = count;
break;
default:
continue;
}
vdev->res[type].count = count;
vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
}
return 0;
}
unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
enum vnic_res_type type)
{
return vdev->res[type].count;
}
void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
unsigned int index)
{
if (!vdev->res[type].vaddr)
return NULL;
switch (type) {
case RES_TYPE_WQ:
case RES_TYPE_RQ:
case RES_TYPE_CQ:
case RES_TYPE_INTR_CTRL:
return (char __iomem *)vdev->res[type].vaddr +
index * VNIC_RES_STRIDE;
default:
return (char __iomem *)vdev->res[type].vaddr;
}
}
unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
unsigned int desc_count,
unsigned int desc_size)
{
/* The base address of the desc rings must be 512 byte aligned.
* Descriptor count is aligned to groups of 32 descriptors. A
* count of 0 means the maximum 4096 descriptors. Descriptor
* size is aligned to 16 bytes.
*/
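/* Worked example (illustrative): desc_count 100 and desc_size 11 round
 * up to 128 descriptors of 16 bytes each, giving size 2048 and
 * size_unaligned 2048 + 512 = 2560 bytes.
 */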
unsigned int count_align = 32;
unsigned int desc_align = 16;
ring->base_align = 512;
if (desc_count == 0)
desc_count = 4096;
ring->desc_count = ALIGN(desc_count, count_align);
ring->desc_size = ALIGN(desc_size, desc_align);
ring->size = ring->desc_count * ring->desc_size;
ring->size_unaligned = ring->size + ring->base_align;
return ring->size_unaligned;
}
void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
memset(ring->descs, 0, ring->size);
}
int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
unsigned int desc_count, unsigned int desc_size)
{
vnic_dev_desc_ring_size(ring, desc_count, desc_size);
ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
ring->size_unaligned,
&ring->base_addr_unaligned);
if (!ring->descs_unaligned) {
printk(KERN_ERR
"Failed to allocate ring (size=%d), aborting\n",
(int)ring->size);
return -ENOMEM;
}
ring->base_addr = ALIGN(ring->base_addr_unaligned,
ring->base_align);
ring->descs = (u8 *)ring->descs_unaligned +
(ring->base_addr - ring->base_addr_unaligned);
vnic_dev_clear_desc_ring(ring);
ring->desc_avail = ring->desc_count - 1;
return 0;
}
void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
if (ring->descs) {
pci_free_consistent(vdev->pdev,
ring->size_unaligned,
ring->descs_unaligned,
ring->base_addr_unaligned);
ring->descs = NULL;
}
}
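/*
 * Issue a device command through the memory-mapped devcmd region: write
 * the args (for write-direction commands), write the command number, then
 * poll the status register every 100us for up to 'wait' iterations until
 * STAT_BUSY clears. No-wait commands return right after the command write.
 * Firmware error codes are translated to negative errno values, and
 * read-direction commands return their results in a0/a1.
 */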
int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
u64 *a0, u64 *a1, int wait)
{
struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
int delay;
u32 status;
int dev_cmd_err[] = {
/* convert from fw's version of error.h to host's version */
0, /* ERR_SUCCESS */
EINVAL, /* ERR_EINVAL */
EFAULT, /* ERR_EFAULT */
EPERM, /* ERR_EPERM */
EBUSY, /* ERR_EBUSY */
};
int err;
status = ioread32(&devcmd->status);
if (status & STAT_BUSY) {
printk(KERN_ERR "Busy devcmd %d\n", _CMD_N(cmd));
return -EBUSY;
}
if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
writeq(*a0, &devcmd->args[0]);
writeq(*a1, &devcmd->args[1]);
wmb();
}
iowrite32(cmd, &devcmd->cmd);
if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
return 0;
for (delay = 0; delay < wait; delay++) {
udelay(100);
status = ioread32(&devcmd->status);
if (!(status & STAT_BUSY)) {
if (status & STAT_ERROR) {
err = dev_cmd_err[(int)readq(&devcmd->args[0])];
printk(KERN_ERR "Error %d devcmd %d\n",
err, _CMD_N(cmd));
return -err;
}
if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
rmb();
*a0 = readq(&devcmd->args[0]);
*a1 = readq(&devcmd->args[1]);
}
return 0;
}
}
printk(KERN_ERR "Timedout devcmd %d\n", _CMD_N(cmd));
return -ETIMEDOUT;
}
int vnic_dev_fw_info(struct vnic_dev *vdev,
struct vnic_devcmd_fw_info **fw_info)
{
u64 a0, a1 = 0;
int wait = 1000;
int err = 0;
if (!vdev->fw_info) {
vdev->fw_info = pci_alloc_consistent(vdev->pdev,
sizeof(struct vnic_devcmd_fw_info),
&vdev->fw_info_pa);
if (!vdev->fw_info)
return -ENOMEM;
a0 = vdev->fw_info_pa;
/* only get fw_info once and cache it */
err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
}
*fw_info = vdev->fw_info;
return err;
}
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
void *value)
{
u64 a0, a1;
int wait = 1000;
int err;
a0 = offset;
a1 = size;
err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);
switch (size) {
case 1:
*(u8 *)value = (u8)a0;
break;
case 2:
*(u16 *)value = (u16)a0;
break;
case 4:
*(u32 *)value = (u32)a0;
break;
case 8:
*(u64 *)value = a0;
break;
default:
BUG();
break;
}
return err;
}
int vnic_dev_stats_clear(struct vnic_dev *vdev)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
}
int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
u64 a0, a1;
int wait = 1000;
if (!vdev->stats) {
vdev->stats = pci_alloc_consistent(vdev->pdev,
sizeof(struct vnic_stats), &vdev->stats_pa);
if (!vdev->stats)
return -ENOMEM;
}
*stats = vdev->stats;
a0 = vdev->stats_pa;
a1 = sizeof(struct vnic_stats);
return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}
int vnic_dev_close(struct vnic_dev *vdev)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}
int vnic_dev_enable(struct vnic_dev *vdev)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
}
int vnic_dev_disable(struct vnic_dev *vdev)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}
int vnic_dev_open(struct vnic_dev *vdev, int arg)
{
u64 a0 = (u32)arg, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}
int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
int err;
*done = 0;
err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
if (err)
return err;
*done = (a0 == 0);
return 0;
}
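/*
 * Illustrative caller-side pattern (the actual sequencing lives in the
 * fnic probe/reset paths, assumed here rather than shown):
 *
 *	err = vnic_dev_open(vdev, 0);
 *	do {
 *		err = vnic_dev_open_done(vdev, &done);
 *	} while (!err && !done);
 */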
int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
{
u64 a0 = (u32)arg, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
}
int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
int err;
*done = 0;
err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
if (err)
return err;
*done = (a0 == 0);
return 0;
}
int vnic_dev_hang_notify(struct vnic_dev *vdev)
{
u64 a0, a1;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
}
int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
u64 a0, a1;
int wait = 1000;
int err, i;
for (i = 0; i < ETH_ALEN; i++)
mac_addr[i] = 0;
err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
if (err)
return err;
for (i = 0; i < ETH_ALEN; i++)
mac_addr[i] = ((u8 *)&a0)[i];
return 0;
}
void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
int broadcast, int promisc, int allmulti)
{
u64 a0, a1 = 0;
int wait = 1000;
int err;
a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
(multicast ? CMD_PFILTER_MULTICAST : 0) |
(broadcast ? CMD_PFILTER_BROADCAST : 0) |
(promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
(allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);
err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
if (err)
printk(KERN_ERR "Can't set packet filter\n");
}
void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
int err;
int i;
for (i = 0; i < ETH_ALEN; i++)
((u8 *)&a0)[i] = addr[i];
err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
if (err)
printk(KERN_ERR
"Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
err);
}
void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
int err;
int i;
for (i = 0; i < ETH_ALEN; i++)
((u8 *)&a0)[i] = addr[i];
err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
if (err)
printk(KERN_ERR
"Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
err);
}
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
u64 a0, a1;
int wait = 1000;
if (!vdev->notify) {
vdev->notify = pci_alloc_consistent(vdev->pdev,
sizeof(struct vnic_devcmd_notify),
&vdev->notify_pa);
if (!vdev->notify)
return -ENOMEM;
}
a0 = vdev->notify_pa;
a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
a1 += sizeof(struct vnic_devcmd_notify);
return vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}
void vnic_dev_notify_unset(struct vnic_dev *vdev)
{
u64 a0, a1;
int wait = 1000;
a0 = 0; /* paddr = 0 to unset notify buffer */
a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
a1 += sizeof(struct vnic_devcmd_notify);
vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}
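/*
 * Snapshot the firmware notify area into notify_copy, re-reading until the
 * checksum word (the sum of all following words) matches, so that a torn
 * update is never consumed.
 */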
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
u32 *words;
unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4;
unsigned int i;
u32 csum;
if (!vdev->notify)
return 0;
do {
csum = 0;
memcpy(&vdev->notify_copy, vdev->notify,
sizeof(struct vnic_devcmd_notify));
words = (u32 *)&vdev->notify_copy;
for (i = 1; i < nwords; i++)
csum += words[i];
} while (csum != words[0]);
return 1;
}
int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
u64 a0 = (u32)arg, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
}
int vnic_dev_link_status(struct vnic_dev *vdev)
{
if (vdev->linkstatus)
return *vdev->linkstatus;
if (!vnic_dev_notify_ready(vdev))
return 0;
return vdev->notify_copy.link_state;
}
u32 vnic_dev_port_speed(struct vnic_dev *vdev)
{
if (!vnic_dev_notify_ready(vdev))
return 0;
return vdev->notify_copy.port_speed;
}
u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
{
if (!vnic_dev_notify_ready(vdev))
return 0;
return vdev->notify_copy.msglvl;
}
u32 vnic_dev_mtu(struct vnic_dev *vdev)
{
if (!vnic_dev_notify_ready(vdev))
return 0;
return vdev->notify_copy.mtu;
}
u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev)
{
if (!vnic_dev_notify_ready(vdev))
return 0;
return vdev->notify_copy.link_down_cnt;
}
void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
enum vnic_dev_intr_mode intr_mode)
{
vdev->intr_mode = intr_mode;
}
enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
struct vnic_dev *vdev)
{
return vdev->intr_mode;
}
void vnic_dev_unregister(struct vnic_dev *vdev)
{
if (vdev) {
if (vdev->notify)
pci_free_consistent(vdev->pdev,
sizeof(struct vnic_devcmd_notify),
vdev->notify,
vdev->notify_pa);
if (vdev->linkstatus)
pci_free_consistent(vdev->pdev,
sizeof(u32),
vdev->linkstatus,
vdev->linkstatus_pa);
if (vdev->stats)
pci_free_consistent(vdev->pdev,
sizeof(struct vnic_stats),
vdev->stats, vdev->stats_pa);
if (vdev->fw_info)
pci_free_consistent(vdev->pdev,
sizeof(struct vnic_devcmd_fw_info),
vdev->fw_info, vdev->fw_info_pa);
kfree(vdev);
}
}
struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar)
{
if (!vdev) {
vdev = kzalloc(sizeof(struct vnic_dev), GFP_KERNEL);
if (!vdev)
return NULL;
}
vdev->priv = priv;
vdev->pdev = pdev;
if (vnic_dev_discover_res(vdev, bar))
goto err_out;
vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
if (!vdev->devcmd)
goto err_out;
return vdev;
err_out:
vnic_dev_unregister(vdev);
return NULL;
}

View file

@ -0,0 +1,161 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _VNIC_DEV_H_
#define _VNIC_DEV_H_
#include "vnic_resource.h"
#include "vnic_devcmd.h"
/*
* These defines avoid symbol clash between fnic and enic (Cisco 10G Eth
* Driver) when both are built with CONFIG options =y
*/
#define vnic_dev_priv fnic_dev_priv
#define vnic_dev_get_res_count fnic_dev_get_res_count
#define vnic_dev_get_res fnic_dev_get_res
#define vnic_dev_desc_ring_size fnic_dev_desc_ring_size
#define vnic_dev_clear_desc_ring fnic_dev_clear_desc_ring
#define vnic_dev_alloc_desc_ring fnic_dev_alloc_desc_ring
#define vnic_dev_free_desc_ring fnic_dev_free_desc_ring
#define vnic_dev_cmd fnic_dev_cmd
#define vnic_dev_fw_info fnic_dev_fw_info
#define vnic_dev_spec fnic_dev_spec
#define vnic_dev_stats_clear fnic_dev_stats_clear
#define vnic_dev_stats_dump fnic_dev_stats_dump
#define vnic_dev_hang_notify fnic_dev_hang_notify
#define vnic_dev_packet_filter fnic_dev_packet_filter
#define vnic_dev_add_addr fnic_dev_add_addr
#define vnic_dev_del_addr fnic_dev_del_addr
#define vnic_dev_mac_addr fnic_dev_mac_addr
#define vnic_dev_notify_set fnic_dev_notify_set
#define vnic_dev_notify_unset fnic_dev_notify_unset
#define vnic_dev_link_status fnic_dev_link_status
#define vnic_dev_port_speed fnic_dev_port_speed
#define vnic_dev_msg_lvl fnic_dev_msg_lvl
#define vnic_dev_mtu fnic_dev_mtu
#define vnic_dev_link_down_cnt fnic_dev_link_down_cnt
#define vnic_dev_close fnic_dev_close
#define vnic_dev_enable fnic_dev_enable
#define vnic_dev_disable fnic_dev_disable
#define vnic_dev_open fnic_dev_open
#define vnic_dev_open_done fnic_dev_open_done
#define vnic_dev_init fnic_dev_init
#define vnic_dev_soft_reset fnic_dev_soft_reset
#define vnic_dev_soft_reset_done fnic_dev_soft_reset_done
#define vnic_dev_set_intr_mode fnic_dev_set_intr_mode
#define vnic_dev_get_intr_mode fnic_dev_get_intr_mode
#define vnic_dev_unregister fnic_dev_unregister
#define vnic_dev_register fnic_dev_register
#ifndef VNIC_PADDR_TARGET
#define VNIC_PADDR_TARGET 0x0000000000000000ULL
#endif
#ifndef readq
static inline u64 readq(void __iomem *reg)
{
return ((u64)readl(reg + 0x4UL) << 32) | (u64)readl(reg);
}
static inline void writeq(u64 val, void __iomem *reg)
{
writel(val & 0xffffffff, reg);
writel(val >> 32, reg + 0x4UL);
}
#endif
enum vnic_dev_intr_mode {
VNIC_DEV_INTR_MODE_UNKNOWN,
VNIC_DEV_INTR_MODE_INTX,
VNIC_DEV_INTR_MODE_MSI,
VNIC_DEV_INTR_MODE_MSIX,
};
struct vnic_dev_bar {
void __iomem *vaddr;
dma_addr_t bus_addr;
unsigned long len;
};
struct vnic_dev_ring {
void *descs;
size_t size;
dma_addr_t base_addr;
size_t base_align;
void *descs_unaligned;
size_t size_unaligned;
dma_addr_t base_addr_unaligned;
unsigned int desc_size;
unsigned int desc_count;
unsigned int desc_avail;
};
struct vnic_dev;
struct vnic_stats;
void *vnic_dev_priv(struct vnic_dev *vdev);
unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
enum vnic_res_type type);
void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
unsigned int index);
unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
unsigned int desc_count,
unsigned int desc_size);
void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring);
int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
unsigned int desc_count, unsigned int desc_size);
void vnic_dev_free_desc_ring(struct vnic_dev *vdev,
struct vnic_dev_ring *ring);
int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
u64 *a0, u64 *a1, int wait);
int vnic_dev_fw_info(struct vnic_dev *vdev,
struct vnic_devcmd_fw_info **fw_info);
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset,
unsigned int size, void *value);
int vnic_dev_stats_clear(struct vnic_dev *vdev);
int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
int vnic_dev_hang_notify(struct vnic_dev *vdev);
void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
int broadcast, int promisc, int allmulti);
void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
void vnic_dev_notify_unset(struct vnic_dev *vdev);
int vnic_dev_link_status(struct vnic_dev *vdev);
u32 vnic_dev_port_speed(struct vnic_dev *vdev);
u32 vnic_dev_msg_lvl(struct vnic_dev *vdev);
u32 vnic_dev_mtu(struct vnic_dev *vdev);
u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev);
int vnic_dev_close(struct vnic_dev *vdev);
int vnic_dev_enable(struct vnic_dev *vdev);
int vnic_dev_disable(struct vnic_dev *vdev);
int vnic_dev_open(struct vnic_dev *vdev, int arg);
int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
int vnic_dev_init(struct vnic_dev *vdev, int arg);
int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
enum vnic_dev_intr_mode intr_mode);
enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev);
void vnic_dev_unregister(struct vnic_dev *vdev);
struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
void *priv, struct pci_dev *pdev,
struct vnic_dev_bar *bar);
#endif /* _VNIC_DEV_H_ */

View file

@ -0,0 +1,281 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _VNIC_DEVCMD_H_
#define _VNIC_DEVCMD_H_
#define _CMD_NBITS 14
#define _CMD_VTYPEBITS 10
#define _CMD_FLAGSBITS 6
#define _CMD_DIRBITS 2
#define _CMD_NMASK ((1 << _CMD_NBITS)-1)
#define _CMD_VTYPEMASK ((1 << _CMD_VTYPEBITS)-1)
#define _CMD_FLAGSMASK ((1 << _CMD_FLAGSBITS)-1)
#define _CMD_DIRMASK ((1 << _CMD_DIRBITS)-1)
#define _CMD_NSHIFT 0
#define _CMD_VTYPESHIFT (_CMD_NSHIFT+_CMD_NBITS)
#define _CMD_FLAGSSHIFT (_CMD_VTYPESHIFT+_CMD_VTYPEBITS)
#define _CMD_DIRSHIFT (_CMD_FLAGSSHIFT+_CMD_FLAGSBITS)
/*
* Direction bits (from host perspective).
*/
#define _CMD_DIR_NONE 0U
#define _CMD_DIR_WRITE 1U
#define _CMD_DIR_READ 2U
#define _CMD_DIR_RW (_CMD_DIR_WRITE | _CMD_DIR_READ)
/*
* Flag bits.
*/
#define _CMD_FLAGS_NONE 0U
#define _CMD_FLAGS_NOWAIT 1U
/*
* vNIC type bits.
*/
#define _CMD_VTYPE_NONE 0U
#define _CMD_VTYPE_ENET 1U
#define _CMD_VTYPE_FC 2U
#define _CMD_VTYPE_SCSI 4U
#define _CMD_VTYPE_ALL (_CMD_VTYPE_ENET | _CMD_VTYPE_FC | _CMD_VTYPE_SCSI)
/*
* Used to create cmds..
*/
#define _CMDCF(dir, flags, vtype, nr) \
(((dir) << _CMD_DIRSHIFT) | \
((flags) << _CMD_FLAGSSHIFT) | \
((vtype) << _CMD_VTYPESHIFT) | \
((nr) << _CMD_NSHIFT))
#define _CMDC(dir, vtype, nr) _CMDCF(dir, 0, vtype, nr)
#define _CMDCNW(dir, vtype, nr) _CMDCF(dir, _CMD_FLAGS_NOWAIT, vtype, nr)
/*
* Used to decode cmds..
*/
#define _CMD_DIR(cmd) (((cmd) >> _CMD_DIRSHIFT) & _CMD_DIRMASK)
#define _CMD_FLAGS(cmd) (((cmd) >> _CMD_FLAGSSHIFT) & _CMD_FLAGSMASK)
#define _CMD_VTYPE(cmd) (((cmd) >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK)
#define _CMD_N(cmd) (((cmd) >> _CMD_NSHIFT) & _CMD_NMASK)
enum vnic_devcmd_cmd {
CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0),
/* mcpu fw info in mem: (u64)a0=paddr to struct vnic_devcmd_fw_info */
CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1),
/* dev-specific block member:
* in: (u16)a0=offset,(u8)a1=size
* out: a0=value */
CMD_DEV_SPEC = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2),
/* stats clear */
CMD_STATS_CLEAR = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 3),
/* stats dump in mem: (u64)a0=paddr to stats area,
* (u16)a1=sizeof stats area */
CMD_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4),
/* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */
CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 7),
/* hang detection notification */
CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8),
/* MAC address in (u48)a0 */
CMD_MAC_ADDR = _CMDC(_CMD_DIR_READ,
_CMD_VTYPE_ENET | _CMD_VTYPE_FC, 9),
/* disable/enable promisc mode: (u8)a0=0/1 */
/***** XXX DEPRECATED *****/
CMD_PROMISC_MODE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 10),
/* disable/enable all-multi mode: (u8)a0=0/1 */
/***** XXX DEPRECATED *****/
CMD_ALLMULTI_MODE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 11),
/* add addr from (u48)a0 */
CMD_ADDR_ADD = _CMDCNW(_CMD_DIR_WRITE,
_CMD_VTYPE_ENET | _CMD_VTYPE_FC, 12),
/* del addr from (u48)a0 */
CMD_ADDR_DEL = _CMDCNW(_CMD_DIR_WRITE,
_CMD_VTYPE_ENET | _CMD_VTYPE_FC, 13),
/* add VLAN id in (u16)a0 */
CMD_VLAN_ADD = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 14),
/* del VLAN id in (u16)a0 */
CMD_VLAN_DEL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 15),
/* nic_cfg in (u32)a0 */
CMD_NIC_CFG = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),
/* union vnic_rss_key in mem: (u64)a0=paddr, (u16)a1=len */
CMD_RSS_KEY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 17),
/* union vnic_rss_cpu in mem: (u64)a0=paddr, (u16)a1=len */
CMD_RSS_CPU = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 18),
/* initiate softreset */
CMD_SOFT_RESET = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 19),
/* softreset status:
* out: a0=0 reset complete, a0=1 reset in progress */
CMD_SOFT_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 20),
/* set struct vnic_devcmd_notify buffer in mem:
* in:
* (u64)a0=paddr to notify (set paddr=0 to unset)
* (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify)
* (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr)
* out:
* (u32)a1 = effective size
*/
CMD_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 21),
/* UNDI API: (u64)a0=paddr to s_PXENV_UNDI_ struct,
* (u8)a1=PXENV_UNDI_xxx */
CMD_UNDI = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 22),
/* initiate open sequence (u32)a0=flags (see CMD_OPENF_*) */
CMD_OPEN = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 23),
/* open status:
* out: a0=0 open complete, a0=1 open in progress */
CMD_OPEN_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 24),
/* close vnic */
CMD_CLOSE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 25),
/* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */
CMD_INIT = _CMDCNW(_CMD_DIR_READ, _CMD_VTYPE_ALL, 26),
/* variant of CMD_INIT, with provisioning info
* (u64)a0=paddr of vnic_devcmd_provinfo
* (u32)a1=sizeof provision info */
CMD_INIT_PROV_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 27),
/* enable virtual link */
CMD_ENABLE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),
/* disable virtual link */
CMD_DISABLE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29),
/* stats dump all vnics on uplink in mem: (u64)a0=paddr (u32)a1=uif */
CMD_STATS_DUMP_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 30),
/* init status:
* out: a0=0 init complete, a0=1 init in progress
* if a0=0, a1=errno */
CMD_INIT_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 31),
/* INT13 API: (u64)a0=paddr to vnic_int13_params struct
* (u8)a1=INT13_CMD_xxx */
CMD_INT13 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_FC, 32),
/* logical uplink enable/disable: (u64)a0: 0/1=disable/enable */
CMD_LOGICAL_UPLINK = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 33),
/* undo initialize of virtual link */
CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34),
};
/* flags for CMD_OPEN */
#define CMD_OPENF_OPROM 0x1 /* open coming from option rom */
/* flags for CMD_INIT */
#define CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */
/* flags for CMD_PACKET_FILTER */
#define CMD_PFILTER_DIRECTED 0x01
#define CMD_PFILTER_MULTICAST 0x02
#define CMD_PFILTER_BROADCAST 0x04
#define CMD_PFILTER_PROMISCUOUS 0x08
#define CMD_PFILTER_ALL_MULTICAST 0x10
enum vnic_devcmd_status {
STAT_NONE = 0,
STAT_BUSY = 1 << 0, /* cmd in progress */
STAT_ERROR = 1 << 1, /* last cmd caused error (code in a0) */
};
enum vnic_devcmd_error {
ERR_SUCCESS = 0,
ERR_EINVAL = 1,
ERR_EFAULT = 2,
ERR_EPERM = 3,
ERR_EBUSY = 4,
ERR_ECMDUNKNOWN = 5,
ERR_EBADSTATE = 6,
ERR_ENOMEM = 7,
ERR_ETIMEDOUT = 8,
ERR_ELINKDOWN = 9,
};
struct vnic_devcmd_fw_info {
char fw_version[32];
char fw_build[32];
char hw_version[32];
char hw_serial_number[32];
};
struct vnic_devcmd_notify {
u32 csum; /* checksum over following words */
u32 link_state; /* link up == 1 */
u32 port_speed; /* effective port speed (rate limit) */
u32 mtu; /* MTU */
u32 msglvl; /* requested driver msg lvl */
u32 uif; /* uplink interface */
u32 status; /* status bits (see VNIC_STF_*) */
u32 error; /* error code (see ERR_*) for first ERR */
u32 link_down_cnt; /* running count of link down transitions */
};
#define VNIC_STF_FATAL_ERR 0x0001 /* fatal fw error */
struct vnic_devcmd_provinfo {
u8 oui[3];
u8 type;
u8 data[0];
};
/*
* Writing cmd register causes STAT_BUSY to get set in status register.
* When cmd completes, STAT_BUSY will be cleared.
*
* If cmd completed successfully STAT_ERROR will be clear
* and args registers contain cmd-specific results.
*
* If cmd error, STAT_ERROR will be set and args[0] contains error code.
*
* status register is read-only. While STAT_BUSY is set,
* all other register contents are read-only.
*/
/* Make sizeof(vnic_devcmd) a power-of-2 for I/O BAR. */
#define VNIC_DEVCMD_NARGS 15
struct vnic_devcmd {
u32 status; /* RO */
u32 cmd; /* RW */
u64 args[VNIC_DEVCMD_NARGS]; /* RW cmd args (little-endian) */
};
#endif /* _VNIC_DEVCMD_H_ */

View file

@ -0,0 +1,60 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include "vnic_dev.h"
#include "vnic_intr.h"
void vnic_intr_free(struct vnic_intr *intr)
{
intr->ctrl = NULL;
}
int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
unsigned int index)
{
intr->index = index;
intr->vdev = vdev;
intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
if (!intr->ctrl) {
printk(KERN_ERR "Failed to hook INTR[%d].ctrl resource\n",
index);
return -EINVAL;
}
return 0;
}
void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
unsigned int coalescing_type, unsigned int mask_on_assertion)
{
iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer);
iowrite32(coalescing_type, &intr->ctrl->coalescing_type);
iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion);
iowrite32(0, &intr->ctrl->int_credits);
}
void vnic_intr_clean(struct vnic_intr *intr)
{
iowrite32(0, &intr->ctrl->int_credits);
}

View file

@ -0,0 +1,118 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _VNIC_INTR_H_
#define _VNIC_INTR_H_
#include <linux/pci.h>
#include "vnic_dev.h"
/*
* These defines avoid symbol clash between fnic and enic (Cisco 10G Eth
* Driver) when both are built with CONFIG options =y
*/
#define vnic_intr_unmask fnic_intr_unmask
#define vnic_intr_mask fnic_intr_mask
#define vnic_intr_return_credits fnic_intr_return_credits
#define vnic_intr_credits fnic_intr_credits
#define vnic_intr_return_all_credits fnic_intr_return_all_credits
#define vnic_intr_legacy_pba fnic_intr_legacy_pba
#define vnic_intr_free fnic_intr_free
#define vnic_intr_alloc fnic_intr_alloc
#define vnic_intr_init fnic_intr_init
#define vnic_intr_clean fnic_intr_clean
#define VNIC_INTR_TIMER_MAX 0xffff
#define VNIC_INTR_TIMER_TYPE_ABS 0
#define VNIC_INTR_TIMER_TYPE_QUIET 1
/* Interrupt control */
struct vnic_intr_ctrl {
u32 coalescing_timer; /* 0x00 */
u32 pad0;
u32 coalescing_value; /* 0x08 */
u32 pad1;
u32 coalescing_type; /* 0x10 */
u32 pad2;
u32 mask_on_assertion; /* 0x18 */
u32 pad3;
u32 mask; /* 0x20 */
u32 pad4;
u32 int_credits; /* 0x28 */
u32 pad5;
u32 int_credit_return; /* 0x30 */
u32 pad6;
};
struct vnic_intr {
unsigned int index;
struct vnic_dev *vdev;
struct vnic_intr_ctrl __iomem *ctrl; /* memory-mapped */
};
static inline void vnic_intr_unmask(struct vnic_intr *intr)
{
iowrite32(0, &intr->ctrl->mask);
}
static inline void vnic_intr_mask(struct vnic_intr *intr)
{
iowrite32(1, &intr->ctrl->mask);
}
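/*
 * Return interrupt credits to hardware with a single register write:
 * credit count in the low 16 bits, plus optional unmask (bit 16) and
 * coalescing-timer reset (bit 17) flags.
 */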
static inline void vnic_intr_return_credits(struct vnic_intr *intr,
unsigned int credits, int unmask, int reset_timer)
{
#define VNIC_INTR_UNMASK_SHIFT 16
#define VNIC_INTR_RESET_TIMER_SHIFT 17
u32 int_credit_return = (credits & 0xffff) |
(unmask ? (1 << VNIC_INTR_UNMASK_SHIFT) : 0) |
(reset_timer ? (1 << VNIC_INTR_RESET_TIMER_SHIFT) : 0);
iowrite32(int_credit_return, &intr->ctrl->int_credit_return);
}
static inline unsigned int vnic_intr_credits(struct vnic_intr *intr)
{
return ioread32(&intr->ctrl->int_credits);
}
static inline void vnic_intr_return_all_credits(struct vnic_intr *intr)
{
unsigned int credits = vnic_intr_credits(intr);
int unmask = 1;
int reset_timer = 1;
vnic_intr_return_credits(intr, credits, unmask, reset_timer);
}
static inline u32 vnic_intr_legacy_pba(u32 __iomem *legacy_pba)
{
/* read PBA without clearing */
return ioread32(legacy_pba);
}
void vnic_intr_free(struct vnic_intr *intr);
int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
unsigned int index);
void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
unsigned int coalescing_type, unsigned int mask_on_assertion);
void vnic_intr_clean(struct vnic_intr *intr);
#endif /* _VNIC_INTR_H_ */

View file

@ -0,0 +1,69 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _VNIC_NIC_H_
#define _VNIC_NIC_H_
/*
* These defines avoid symbol clash between fnic and enic (Cisco 10G Eth
* Driver) when both are built with CONFIG options =y
*/
#define vnic_set_nic_cfg fnic_set_nic_cfg
#define NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD 0xffUL
#define NIC_CFG_RSS_DEFAULT_CPU_SHIFT 0
#define NIC_CFG_RSS_HASH_TYPE (0xffUL << 8)
#define NIC_CFG_RSS_HASH_TYPE_MASK_FIELD 0xffUL
#define NIC_CFG_RSS_HASH_TYPE_SHIFT 8
#define NIC_CFG_RSS_HASH_BITS (7UL << 16)
#define NIC_CFG_RSS_HASH_BITS_MASK_FIELD 7UL
#define NIC_CFG_RSS_HASH_BITS_SHIFT 16
#define NIC_CFG_RSS_BASE_CPU (7UL << 19)
#define NIC_CFG_RSS_BASE_CPU_MASK_FIELD 7UL
#define NIC_CFG_RSS_BASE_CPU_SHIFT 19
#define NIC_CFG_RSS_ENABLE (1UL << 22)
#define NIC_CFG_RSS_ENABLE_MASK_FIELD 1UL
#define NIC_CFG_RSS_ENABLE_SHIFT 22
#define NIC_CFG_TSO_IPID_SPLIT_EN (1UL << 23)
#define NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD 1UL
#define NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT 23
#define NIC_CFG_IG_VLAN_STRIP_EN (1UL << 24)
#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL
#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24
static inline void vnic_set_nic_cfg(u32 *nic_cfg,
u8 rss_default_cpu, u8 rss_hash_type,
u8 rss_hash_bits, u8 rss_base_cpu,
u8 rss_enable, u8 tso_ipid_split_en,
u8 ig_vlan_strip_en)
{
*nic_cfg = (rss_default_cpu & NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD) |
((rss_hash_type & NIC_CFG_RSS_HASH_TYPE_MASK_FIELD)
<< NIC_CFG_RSS_HASH_TYPE_SHIFT) |
((rss_hash_bits & NIC_CFG_RSS_HASH_BITS_MASK_FIELD)
<< NIC_CFG_RSS_HASH_BITS_SHIFT) |
((rss_base_cpu & NIC_CFG_RSS_BASE_CPU_MASK_FIELD)
<< NIC_CFG_RSS_BASE_CPU_SHIFT) |
((rss_enable & NIC_CFG_RSS_ENABLE_MASK_FIELD)
<< NIC_CFG_RSS_ENABLE_SHIFT) |
((tso_ipid_split_en & NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD)
<< NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT) |
((ig_vlan_strip_en & NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD)
<< NIC_CFG_IG_VLAN_STRIP_EN_SHIFT);
}
#endif /* _VNIC_NIC_H_ */

View file

@ -0,0 +1,61 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _VNIC_RESOURCE_H_
#define _VNIC_RESOURCE_H_
#define VNIC_RES_MAGIC 0x766E6963L /* 'vnic' */
#define VNIC_RES_VERSION 0x00000000L
/* vNIC resource types */
enum vnic_res_type {
RES_TYPE_EOL, /* End-of-list */
RES_TYPE_WQ, /* Work queues */
RES_TYPE_RQ, /* Receive queues */
RES_TYPE_CQ, /* Completion queues */
RES_TYPE_RSVD1,
RES_TYPE_NIC_CFG, /* Enet NIC config registers */
RES_TYPE_RSVD2,
RES_TYPE_RSVD3,
RES_TYPE_RSVD4,
RES_TYPE_RSVD5,
RES_TYPE_INTR_CTRL, /* Interrupt ctrl table */
RES_TYPE_INTR_TABLE, /* MSI/MSI-X Interrupt table */
RES_TYPE_INTR_PBA, /* MSI/MSI-X PBA table */
RES_TYPE_INTR_PBA_LEGACY, /* Legacy intr status */
RES_TYPE_RSVD6,
RES_TYPE_RSVD7,
RES_TYPE_DEVCMD, /* Device command region */
RES_TYPE_PASS_THRU_PAGE, /* Pass-thru page */
RES_TYPE_MAX, /* Count of resource types */
};
struct vnic_resource_header {
u32 magic;
u32 version;
};
struct vnic_resource {
u8 type;
u8 bar;
u8 pad[2];
u32 bar_offset;
u32 count;
};
#endif /* _VNIC_RESOURCE_H_ */

drivers/scsi/fnic/vnic_rq.c Normal file
View file

@ -0,0 +1,196 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include "vnic_dev.h"
#include "vnic_rq.h"
static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
{
struct vnic_rq_buf *buf;
struct vnic_dev *vdev;
unsigned int i, j, count = rq->ring.desc_count;
unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);
vdev = rq->vdev;
for (i = 0; i < blks; i++) {
rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC);
if (!rq->bufs[i]) {
printk(KERN_ERR "Failed to alloc rq_bufs\n");
return -ENOMEM;
}
}
for (i = 0; i < blks; i++) {
buf = rq->bufs[i];
for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES; j++) {
buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES + j;
buf->desc = (u8 *)rq->ring.descs +
rq->ring.desc_size * buf->index;
if (buf->index + 1 == count) {
buf->next = rq->bufs[0];
break;
} else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES) {
buf->next = rq->bufs[i + 1];
} else {
buf->next = buf + 1;
buf++;
}
}
}
rq->to_use = rq->to_clean = rq->bufs[0];
rq->buf_index = 0;
return 0;
}
void vnic_rq_free(struct vnic_rq *rq)
{
struct vnic_dev *vdev;
unsigned int i;
vdev = rq->vdev;
vnic_dev_free_desc_ring(vdev, &rq->ring);
for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) {
kfree(rq->bufs[i]);
rq->bufs[i] = NULL;
}
rq->ctrl = NULL;
}
int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
unsigned int desc_count, unsigned int desc_size)
{
int err;
rq->index = index;
rq->vdev = vdev;
rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
if (!rq->ctrl) {
printk(KERN_ERR "Failed to hook RQ[%d] resource\n", index);
return -EINVAL;
}
vnic_rq_disable(rq);
err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size);
if (err)
return err;
err = vnic_rq_alloc_bufs(rq);
if (err) {
vnic_rq_free(rq);
return err;
}
return 0;
}
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset)
{
u64 paddr;
u32 fetch_index;
paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
writeq(paddr, &rq->ctrl->ring_base);
iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size);
iowrite32(cq_index, &rq->ctrl->cq_index);
iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
iowrite32(0, &rq->ctrl->dropped_packet_count);
iowrite32(0, &rq->ctrl->error_status);
/* Use current fetch_index as the ring starting point */
fetch_index = ioread32(&rq->ctrl->fetch_index);
rq->to_use = rq->to_clean =
&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
iowrite32(fetch_index, &rq->ctrl->posted_index);
rq->buf_index = 0;
}
unsigned int vnic_rq_error_status(struct vnic_rq *rq)
{
return ioread32(&rq->ctrl->error_status);
}
void vnic_rq_enable(struct vnic_rq *rq)
{
iowrite32(1, &rq->ctrl->enable);
}
int vnic_rq_disable(struct vnic_rq *rq)
{
unsigned int wait;
iowrite32(0, &rq->ctrl->enable);
/* Wait for HW to ACK disable request */
for (wait = 0; wait < 100; wait++) {
if (!(ioread32(&rq->ctrl->running)))
return 0;
udelay(1);
}
printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index);
return -ETIMEDOUT;
}
void vnic_rq_clean(struct vnic_rq *rq,
void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
{
struct vnic_rq_buf *buf;
u32 fetch_index;
BUG_ON(ioread32(&rq->ctrl->enable));
buf = rq->to_clean;
while (vnic_rq_desc_used(rq) > 0) {
(*buf_clean)(rq, buf);
buf = rq->to_clean = buf->next;
rq->ring.desc_avail++;
}
/* Use current fetch_index as the ring starting point */
fetch_index = ioread32(&rq->ctrl->fetch_index);
rq->to_use = rq->to_clean =
&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
iowrite32(fetch_index, &rq->ctrl->posted_index);
rq->buf_index = 0;
vnic_dev_clear_desc_ring(&rq->ring);
}

drivers/scsi/fnic/vnic_rq.h Normal file
View file

@ -0,0 +1,235 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _VNIC_RQ_H_
#define _VNIC_RQ_H_
#include <linux/pci.h>
#include "vnic_dev.h"
#include "vnic_cq.h"
/*
* These defines avoid symbol clash between fnic and enic (Cisco 10G Eth
* Driver) when both are built with CONFIG options =y
*/
#define vnic_rq_desc_avail fnic_rq_desc_avail
#define vnic_rq_desc_used fnic_rq_desc_used
#define vnic_rq_next_desc fnic_rq_next_desc
#define vnic_rq_next_index fnic_rq_next_index
#define vnic_rq_next_buf_index fnic_rq_next_buf_index
#define vnic_rq_post fnic_rq_post
#define vnic_rq_posting_soon fnic_rq_posting_soon
#define vnic_rq_return_descs fnic_rq_return_descs
#define vnic_rq_service fnic_rq_service
#define vnic_rq_fill fnic_rq_fill
#define vnic_rq_free fnic_rq_free
#define vnic_rq_alloc fnic_rq_alloc
#define vnic_rq_init fnic_rq_init
#define vnic_rq_error_status fnic_rq_error_status
#define vnic_rq_enable fnic_rq_enable
#define vnic_rq_disable fnic_rq_disable
#define vnic_rq_clean fnic_rq_clean
/* Receive queue control */
struct vnic_rq_ctrl {
u64 ring_base; /* 0x00 */
u32 ring_size; /* 0x08 */
u32 pad0;
u32 posted_index; /* 0x10 */
u32 pad1;
u32 cq_index; /* 0x18 */
u32 pad2;
u32 enable; /* 0x20 */
u32 pad3;
u32 running; /* 0x28 */
u32 pad4;
u32 fetch_index; /* 0x30 */
u32 pad5;
u32 error_interrupt_enable; /* 0x38 */
u32 pad6;
u32 error_interrupt_offset; /* 0x40 */
u32 pad7;
u32 error_status; /* 0x48 */
u32 pad8;
u32 dropped_packet_count; /* 0x50 */
u32 pad9;
u32 dropped_packet_count_rc; /* 0x58 */
u32 pad10;
};
/* Break the vnic_rq_buf allocations into blocks of 64 entries */
#define VNIC_RQ_BUF_BLK_ENTRIES 64
#define VNIC_RQ_BUF_BLK_SZ \
(VNIC_RQ_BUF_BLK_ENTRIES * sizeof(struct vnic_rq_buf))
#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES)
#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
struct vnic_rq_buf {
struct vnic_rq_buf *next;
dma_addr_t dma_addr;
void *os_buf;
unsigned int os_buf_index;
unsigned int len;
unsigned int index;
void *desc;
};
struct vnic_rq {
unsigned int index;
struct vnic_dev *vdev;
struct vnic_rq_ctrl __iomem *ctrl; /* memory-mapped */
struct vnic_dev_ring ring;
struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX];
struct vnic_rq_buf *to_use;
struct vnic_rq_buf *to_clean;
void *os_buf_head;
unsigned int buf_index;
unsigned int pkts_outstanding;
};
static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
{
/* how many does SW own? */
return rq->ring.desc_avail;
}
static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
{
/* how many does HW own? */
return rq->ring.desc_count - rq->ring.desc_avail - 1;
}
static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
{
return rq->to_use->desc;
}
static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
{
return rq->to_use->index;
}
static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq)
{
return rq->buf_index++;
}
static inline void vnic_rq_post(struct vnic_rq *rq,
void *os_buf, unsigned int os_buf_index,
dma_addr_t dma_addr, unsigned int len)
{
struct vnic_rq_buf *buf = rq->to_use;
buf->os_buf = os_buf;
buf->os_buf_index = os_buf_index;
buf->dma_addr = dma_addr;
buf->len = len;
buf = buf->next;
rq->to_use = buf;
rq->ring.desc_avail--;
/* Move the posted_index every nth descriptor
*/
#ifndef VNIC_RQ_RETURN_RATE
#define VNIC_RQ_RETURN_RATE 0xf /* keep 2^n - 1 */
#endif
if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
/* Adding write memory barrier prevents compiler and/or CPU
* reordering, thus avoiding descriptor posting before
* descriptor is initialized. Otherwise, hardware can read
* stale descriptor fields.
*/
wmb();
iowrite32(buf->index, &rq->ctrl->posted_index);
}
}
static inline int vnic_rq_posting_soon(struct vnic_rq *rq)
{
return (rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0;
}
static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
{
rq->ring.desc_avail += count;
}
enum desc_return_options {
VNIC_RQ_RETURN_DESC,
VNIC_RQ_DEFER_RETURN_DESC,
};
static inline void vnic_rq_service(struct vnic_rq *rq,
struct cq_desc *cq_desc, u16 completed_index,
int desc_return, void (*buf_service)(struct vnic_rq *rq,
struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
int skipped, void *opaque), void *opaque)
{
struct vnic_rq_buf *buf;
int skipped;
buf = rq->to_clean;
while (1) {
skipped = (buf->index != completed_index);
(*buf_service)(rq, cq_desc, buf, skipped, opaque);
if (desc_return == VNIC_RQ_RETURN_DESC)
rq->ring.desc_avail++;
rq->to_clean = buf->next;
if (!skipped)
break;
buf = rq->to_clean;
}
}
static inline int vnic_rq_fill(struct vnic_rq *rq,
int (*buf_fill)(struct vnic_rq *rq))
{
int err;
while (vnic_rq_desc_avail(rq) > 1) {
err = (*buf_fill)(rq);
if (err)
return err;
}
return 0;
}
void vnic_rq_free(struct vnic_rq *rq);
int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
unsigned int desc_count, unsigned int desc_size);
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset);
unsigned int vnic_rq_error_status(struct vnic_rq *rq);
void vnic_rq_enable(struct vnic_rq *rq);
int vnic_rq_disable(struct vnic_rq *rq);
void vnic_rq_clean(struct vnic_rq *rq,
void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));
#endif /* _VNIC_RQ_H_ */
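
The inline helpers above are meant to be driven in pairs: vnic_rq_fill()/vnic_rq_post() keep the hardware stocked with receive buffers, batching posted_index doorbells to roughly one in every VNIC_RQ_RETURN_RATE + 1 posts, while vnic_rq_service() walks completed descriptors back to software. The sketch below is an illustration only; the example_ callbacks are hypothetical and deliberately stub out buffer allocation and DMA mapping.

/* Illustration only; a real buf_fill would allocate and DMA-map a
 * receive buffer and write the HW descriptor obtained from
 * vnic_rq_next_desc() before posting. */
static int example_buf_fill(struct vnic_rq *rq)
{
	void *os_buf = NULL;		/* stand-in for an skb or page */
	dma_addr_t dma_addr = 0;	/* stand-in for a DMA mapping */
	unsigned int len = 2048;	/* example buffer length */

	vnic_rq_post(rq, os_buf, 0, dma_addr, len);
	return 0;
}

static void example_buf_service(struct vnic_rq *rq, struct cq_desc *cq_desc,
				struct vnic_rq_buf *buf, int skipped,
				void *opaque)
{
	/* Unmap buf->dma_addr and consume buf->os_buf; 'skipped' flags
	 * descriptors the HW passed over before the completed one. */
}

static void example_rq_usage(struct vnic_rq *rq, struct cq_desc *cq_desc,
			     u16 completed_index)
{
	/* Return the completed descriptors to software ... */
	vnic_rq_service(rq, cq_desc, completed_index, VNIC_RQ_RETURN_DESC,
			example_buf_service, NULL);

	/* ... then refill; one descriptor is always left unposted */
	vnic_rq_fill(rq, example_buf_fill);
}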

99
drivers/scsi/fnic/vnic_scsi.h Normal file
View file

@ -0,0 +1,99 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _VNIC_SCSI_H_
#define _VNIC_SCSI_H_
#define VNIC_FNIC_WQ_COPY_COUNT_MIN 1
#define VNIC_FNIC_WQ_COPY_COUNT_MAX 1
#define VNIC_FNIC_WQ_DESCS_MIN 64
#define VNIC_FNIC_WQ_DESCS_MAX 128
#define VNIC_FNIC_WQ_COPY_DESCS_MIN 64
#define VNIC_FNIC_WQ_COPY_DESCS_MAX 512
#define VNIC_FNIC_RQ_DESCS_MIN 64
#define VNIC_FNIC_RQ_DESCS_MAX 128
#define VNIC_FNIC_EDTOV_MIN 1000
#define VNIC_FNIC_EDTOV_MAX 255000
#define VNIC_FNIC_EDTOV_DEF 2000
#define VNIC_FNIC_RATOV_MIN 1000
#define VNIC_FNIC_RATOV_MAX 255000
#define VNIC_FNIC_MAXDATAFIELDSIZE_MIN 256
#define VNIC_FNIC_MAXDATAFIELDSIZE_MAX 2112
#define VNIC_FNIC_FLOGI_RETRIES_MIN 0
#define VNIC_FNIC_FLOGI_RETRIES_MAX 0xffffffff
#define VNIC_FNIC_FLOGI_RETRIES_DEF 0xffffffff
#define VNIC_FNIC_FLOGI_TIMEOUT_MIN 1000
#define VNIC_FNIC_FLOGI_TIMEOUT_MAX 255000
#define VNIC_FNIC_PLOGI_RETRIES_MIN 0
#define VNIC_FNIC_PLOGI_RETRIES_MAX 255
#define VNIC_FNIC_PLOGI_RETRIES_DEF 8
#define VNIC_FNIC_PLOGI_TIMEOUT_MIN 1000
#define VNIC_FNIC_PLOGI_TIMEOUT_MAX 255000
#define VNIC_FNIC_IO_THROTTLE_COUNT_MIN 256
#define VNIC_FNIC_IO_THROTTLE_COUNT_MAX 4096
#define VNIC_FNIC_LINK_DOWN_TIMEOUT_MIN 0
#define VNIC_FNIC_LINK_DOWN_TIMEOUT_MAX 240000
#define VNIC_FNIC_PORT_DOWN_TIMEOUT_MIN 0
#define VNIC_FNIC_PORT_DOWN_TIMEOUT_MAX 240000
#define VNIC_FNIC_PORT_DOWN_IO_RETRIES_MIN 0
#define VNIC_FNIC_PORT_DOWN_IO_RETRIES_MAX 255
#define VNIC_FNIC_LUNS_PER_TARGET_MIN 1
#define VNIC_FNIC_LUNS_PER_TARGET_MAX 1024
/* Device-specific region: scsi configuration */
struct vnic_fc_config {
u64 node_wwn;
u64 port_wwn;
u32 flags;
u32 wq_enet_desc_count;
u32 wq_copy_desc_count;
u32 rq_desc_count;
u32 flogi_retries;
u32 flogi_timeout;
u32 plogi_retries;
u32 plogi_timeout;
u32 io_throttle_count;
u32 link_down_timeout;
u32 port_down_timeout;
u32 port_down_io_retries;
u32 luns_per_tgt;
u16 maxdatafieldsize;
u16 ed_tov;
u16 ra_tov;
u16 intr_timer;
u8 intr_timer_type;
};
#define VFCF_FCP_SEQ_LVL_ERR 0x1 /* Enable FCP-2 Error Recovery */
#define VFCF_PERBI 0x2 /* persistent binding info available */
#endif /* _VNIC_SCSI_H_ */
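
The MIN/MAX/DEF limits above describe the legal range of each field in struct vnic_fc_config. A small hedged sketch of how a consumer might sanity-check a few of the values it reads from the device; the example_ helpers are illustrative and not part of the driver.

/* Illustration only: clamp a value into its documented range. */
static inline u32 example_clamp_u32(u32 val, u32 min_val, u32 max_val)
{
	if (val < min_val)
		return min_val;
	if (val > max_val)
		return max_val;
	return val;
}

/* Hypothetical post-read fixup of a few vnic_fc_config fields. */
static void example_fixup_fc_config(struct vnic_fc_config *c)
{
	c->plogi_retries = example_clamp_u32(c->plogi_retries,
					     VNIC_FNIC_PLOGI_RETRIES_MIN,
					     VNIC_FNIC_PLOGI_RETRIES_MAX);
	c->flogi_timeout = example_clamp_u32(c->flogi_timeout,
					     VNIC_FNIC_FLOGI_TIMEOUT_MIN,
					     VNIC_FNIC_FLOGI_TIMEOUT_MAX);
	c->io_throttle_count = example_clamp_u32(c->io_throttle_count,
					VNIC_FNIC_IO_THROTTLE_COUNT_MIN,
					VNIC_FNIC_IO_THROTTLE_COUNT_MAX);
	c->luns_per_tgt = example_clamp_u32(c->luns_per_tgt,
					    VNIC_FNIC_LUNS_PER_TARGET_MIN,
					    VNIC_FNIC_LUNS_PER_TARGET_MAX);
}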

68
drivers/scsi/fnic/vnic_stats.h Normal file
View file

@ -0,0 +1,68 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _VNIC_STATS_H_
#define _VNIC_STATS_H_
/* Tx statistics */
struct vnic_tx_stats {
u64 tx_frames_ok;
u64 tx_unicast_frames_ok;
u64 tx_multicast_frames_ok;
u64 tx_broadcast_frames_ok;
u64 tx_bytes_ok;
u64 tx_unicast_bytes_ok;
u64 tx_multicast_bytes_ok;
u64 tx_broadcast_bytes_ok;
u64 tx_drops;
u64 tx_errors;
u64 tx_tso;
u64 rsvd[16];
};
/* Rx statistics */
struct vnic_rx_stats {
u64 rx_frames_ok;
u64 rx_frames_total;
u64 rx_unicast_frames_ok;
u64 rx_multicast_frames_ok;
u64 rx_broadcast_frames_ok;
u64 rx_bytes_ok;
u64 rx_unicast_bytes_ok;
u64 rx_multicast_bytes_ok;
u64 rx_broadcast_bytes_ok;
u64 rx_drop;
u64 rx_no_bufs;
u64 rx_errors;
u64 rx_rss;
u64 rx_crc_errors;
u64 rx_frames_64;
u64 rx_frames_127;
u64 rx_frames_255;
u64 rx_frames_511;
u64 rx_frames_1023;
u64 rx_frames_1518;
u64 rx_frames_to_max;
u64 rsvd[16];
};
struct vnic_stats {
struct vnic_tx_stats tx;
struct vnic_rx_stats rx;
};
#endif /* _VNIC_STATS_H_ */
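
The Tx/Rx blocks are plain 64-bit accumulators; a caller simply reads the fields of whatever struct vnic_stats snapshot it has obtained from the adapter. A tiny hedged example of deriving summary numbers from such a snapshot (helper names are hypothetical):

/* Illustration only: summarize a previously obtained stats snapshot. */
static u64 example_total_good_frames(const struct vnic_stats *s)
{
	return s->tx.tx_frames_ok + s->rx.rx_frames_ok;
}

static u64 example_total_errors(const struct vnic_stats *s)
{
	return s->tx.tx_errors + s->tx.tx_drops +
	       s->rx.rx_errors + s->rx.rx_drop + s->rx.rx_no_bufs;
}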

182
drivers/scsi/fnic/vnic_wq.c Normal file
View file

@ -0,0 +1,182 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include "vnic_dev.h"
#include "vnic_wq.h"
static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
{
struct vnic_wq_buf *buf;
struct vnic_dev *vdev;
unsigned int i, j, count = wq->ring.desc_count;
unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
vdev = wq->vdev;
for (i = 0; i < blks; i++) {
wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);
if (!wq->bufs[i]) {
printk(KERN_ERR "Failed to alloc wq_bufs\n");
return -ENOMEM;
}
}
for (i = 0; i < blks; i++) {
buf = wq->bufs[i];
for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES; j++) {
buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES + j;
buf->desc = (u8 *)wq->ring.descs +
wq->ring.desc_size * buf->index;
if (buf->index + 1 == count) {
buf->next = wq->bufs[0];
break;
} else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES) {
buf->next = wq->bufs[i + 1];
} else {
buf->next = buf + 1;
buf++;
}
}
}
wq->to_use = wq->to_clean = wq->bufs[0];
return 0;
}
void vnic_wq_free(struct vnic_wq *wq)
{
struct vnic_dev *vdev;
unsigned int i;
vdev = wq->vdev;
vnic_dev_free_desc_ring(vdev, &wq->ring);
for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
kfree(wq->bufs[i]);
wq->bufs[i] = NULL;
}
wq->ctrl = NULL;
}
int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
unsigned int desc_count, unsigned int desc_size)
{
int err;
wq->index = index;
wq->vdev = vdev;
wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
if (!wq->ctrl) {
printk(KERN_ERR "Failed to hook WQ[%d] resource\n", index);
return -EINVAL;
}
vnic_wq_disable(wq);
err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
if (err)
return err;
err = vnic_wq_alloc_bufs(wq);
if (err) {
vnic_wq_free(wq);
return err;
}
return 0;
}
void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset)
{
u64 paddr;
paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
writeq(paddr, &wq->ctrl->ring_base);
iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
iowrite32(0, &wq->ctrl->fetch_index);
iowrite32(0, &wq->ctrl->posted_index);
iowrite32(cq_index, &wq->ctrl->cq_index);
iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
iowrite32(0, &wq->ctrl->error_status);
}
unsigned int vnic_wq_error_status(struct vnic_wq *wq)
{
return ioread32(&wq->ctrl->error_status);
}
void vnic_wq_enable(struct vnic_wq *wq)
{
iowrite32(1, &wq->ctrl->enable);
}
int vnic_wq_disable(struct vnic_wq *wq)
{
unsigned int wait;
iowrite32(0, &wq->ctrl->enable);
/* Wait for HW to ACK disable request */
for (wait = 0; wait < 100; wait++) {
if (!(ioread32(&wq->ctrl->running)))
return 0;
udelay(1);
}
printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index);
return -ETIMEDOUT;
}
void vnic_wq_clean(struct vnic_wq *wq,
void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
{
struct vnic_wq_buf *buf;
BUG_ON(ioread32(&wq->ctrl->enable));
buf = wq->to_clean;
while (vnic_wq_desc_used(wq) > 0) {
(*buf_clean)(wq, buf);
buf = wq->to_clean = buf->next;
wq->ring.desc_avail++;
}
wq->to_use = wq->to_clean = wq->bufs[0];
iowrite32(0, &wq->ctrl->fetch_index);
iowrite32(0, &wq->ctrl->posted_index);
iowrite32(0, &wq->ctrl->error_status);
vnic_dev_clear_desc_ring(&wq->ring);
}
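
vnic_wq_alloc_bufs() above stitches the per-descriptor bookkeeping into 64-entry blocks linked into a ring, so a flat descriptor index maps onto a two-level bufs[][] layout, the same scheme vnic_rq_init() uses for receive queues. A one-function sketch of that mapping (helper name hypothetical):

/* Illustration only: locate the vnic_wq_buf for a flat descriptor index. */
static struct vnic_wq_buf *example_wq_buf(struct vnic_wq *wq,
					  unsigned int index)
{
	return &wq->bufs[index / VNIC_WQ_BUF_BLK_ENTRIES]
			[index % VNIC_WQ_BUF_BLK_ENTRIES];
}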

175
drivers/scsi/fnic/vnic_wq.h Normal file
View file

@ -0,0 +1,175 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _VNIC_WQ_H_
#define _VNIC_WQ_H_
#include <linux/pci.h>
#include "vnic_dev.h"
#include "vnic_cq.h"
/*
* These defines avoid symbol clash between fnic and enic (Cisco 10G Eth
* Driver) when both are built with CONFIG options =y
*/
#define vnic_wq_desc_avail fnic_wq_desc_avail
#define vnic_wq_desc_used fnic_wq_desc_used
#define vnic_wq_next_desc fnic_wq_next_desc
#define vnic_wq_post fnic_wq_post
#define vnic_wq_service fnic_wq_service
#define vnic_wq_free fnic_wq_free
#define vnic_wq_alloc fnic_wq_alloc
#define vnic_wq_init fnic_wq_init
#define vnic_wq_error_status fnic_wq_error_status
#define vnic_wq_enable fnic_wq_enable
#define vnic_wq_disable fnic_wq_disable
#define vnic_wq_clean fnic_wq_clean
/* Work queue control */
struct vnic_wq_ctrl {
u64 ring_base; /* 0x00 */
u32 ring_size; /* 0x08 */
u32 pad0;
u32 posted_index; /* 0x10 */
u32 pad1;
u32 cq_index; /* 0x18 */
u32 pad2;
u32 enable; /* 0x20 */
u32 pad3;
u32 running; /* 0x28 */
u32 pad4;
u32 fetch_index; /* 0x30 */
u32 pad5;
u32 dca_value; /* 0x38 */
u32 pad6;
u32 error_interrupt_enable; /* 0x40 */
u32 pad7;
u32 error_interrupt_offset; /* 0x48 */
u32 pad8;
u32 error_status; /* 0x50 */
u32 pad9;
};
struct vnic_wq_buf {
struct vnic_wq_buf *next;
dma_addr_t dma_addr;
void *os_buf;
unsigned int len;
unsigned int index;
int sop;
void *desc;
};
/* Break the vnic_wq_buf allocations into blocks of 64 entries */
#define VNIC_WQ_BUF_BLK_ENTRIES 64
#define VNIC_WQ_BUF_BLK_SZ \
(VNIC_WQ_BUF_BLK_ENTRIES * sizeof(struct vnic_wq_buf))
#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES)
#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
struct vnic_wq {
unsigned int index;
struct vnic_dev *vdev;
struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */
struct vnic_dev_ring ring;
struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX];
struct vnic_wq_buf *to_use;
struct vnic_wq_buf *to_clean;
unsigned int pkts_outstanding;
};
static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
{
/* how many does SW own? */
return wq->ring.desc_avail;
}
static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)
{
/* how many does HW own? */
return wq->ring.desc_count - wq->ring.desc_avail - 1;
}
static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
{
return wq->to_use->desc;
}
static inline void vnic_wq_post(struct vnic_wq *wq,
void *os_buf, dma_addr_t dma_addr,
unsigned int len, int sop, int eop)
{
struct vnic_wq_buf *buf = wq->to_use;
buf->sop = sop;
buf->os_buf = eop ? os_buf : NULL;
buf->dma_addr = dma_addr;
buf->len = len;
buf = buf->next;
if (eop) {
/* Adding write memory barrier prevents compiler and/or CPU
* reordering, thus avoiding descriptor posting before
* descriptor is initialized. Otherwise, hardware can read
* stale descriptor fields.
*/
wmb();
iowrite32(buf->index, &wq->ctrl->posted_index);
}
wq->to_use = buf;
wq->ring.desc_avail--;
}
static inline void vnic_wq_service(struct vnic_wq *wq,
struct cq_desc *cq_desc, u16 completed_index,
void (*buf_service)(struct vnic_wq *wq,
struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque),
void *opaque)
{
struct vnic_wq_buf *buf;
buf = wq->to_clean;
while (1) {
(*buf_service)(wq, cq_desc, buf, opaque);
wq->ring.desc_avail++;
wq->to_clean = buf->next;
if (buf->index == completed_index)
break;
buf = wq->to_clean;
}
}
void vnic_wq_free(struct vnic_wq *wq);
int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
unsigned int desc_count, unsigned int desc_size);
void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset);
unsigned int vnic_wq_error_status(struct vnic_wq *wq);
void vnic_wq_enable(struct vnic_wq *wq);
int vnic_wq_disable(struct vnic_wq *wq);
void vnic_wq_clean(struct vnic_wq *wq,
void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
#endif /* _VNIC_WQ_H_ */
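
vnic_wq_post() tags each fragment with sop/eop, records os_buf only on the final (eop) fragment, and rings the posted_index doorbell only then; vnic_wq_service() later walks buffers up to the completed index. A hedged sketch of posting a two-fragment frame and reaping its completion; the example_ functions are hypothetical and the per-fragment HW descriptor fill is stubbed out.

/* Illustration only; a real caller would also fill the HW descriptor
 * returned by vnic_wq_next_desc() for every fragment it posts. */
static void example_wq_post_two_frags(struct vnic_wq *wq, void *os_buf,
				      dma_addr_t pa0, unsigned int len0,
				      dma_addr_t pa1, unsigned int len1)
{
	/* First fragment: start of packet, not end of packet */
	vnic_wq_post(wq, os_buf, pa0, len0, 1 /* sop */, 0 /* eop */);

	/* Last fragment: eop stores os_buf and rings the doorbell */
	vnic_wq_post(wq, os_buf, pa1, len1, 0 /* sop */, 1 /* eop */);
}

static void example_wq_buf_service(struct vnic_wq *wq, struct cq_desc *cq_desc,
				   struct vnic_wq_buf *buf, void *opaque)
{
	/* Unmap buf->dma_addr; buf->os_buf is only set on the eop buffer */
}

static void example_wq_complete(struct vnic_wq *wq, struct cq_desc *cq_desc,
				u16 completed_index)
{
	vnic_wq_service(wq, cq_desc, completed_index,
			example_wq_buf_service, NULL);
}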

117
drivers/scsi/fnic/vnic_wq_copy.c Normal file
View file

@ -0,0 +1,117 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include "vnic_wq_copy.h"
void vnic_wq_copy_enable(struct vnic_wq_copy *wq)
{
iowrite32(1, &wq->ctrl->enable);
}
int vnic_wq_copy_disable(struct vnic_wq_copy *wq)
{
unsigned int wait;
iowrite32(0, &wq->ctrl->enable);
/* Wait for HW to ACK disable request */
for (wait = 0; wait < 100; wait++) {
if (!(ioread32(&wq->ctrl->running)))
return 0;
udelay(1);
}
printk(KERN_ERR "Failed to disable Copy WQ[%d],"
" fetch index=%d, posted_index=%d\n",
wq->index, ioread32(&wq->ctrl->fetch_index),
ioread32(&wq->ctrl->posted_index));
return -ENODEV;
}
void vnic_wq_copy_clean(struct vnic_wq_copy *wq,
void (*q_clean)(struct vnic_wq_copy *wq,
struct fcpio_host_req *wq_desc))
{
BUG_ON(ioread32(&wq->ctrl->enable));
if (vnic_wq_copy_desc_in_use(wq))
vnic_wq_copy_service(wq, -1, q_clean);
wq->to_use_index = wq->to_clean_index = 0;
iowrite32(0, &wq->ctrl->fetch_index);
iowrite32(0, &wq->ctrl->posted_index);
iowrite32(0, &wq->ctrl->error_status);
vnic_dev_clear_desc_ring(&wq->ring);
}
void vnic_wq_copy_free(struct vnic_wq_copy *wq)
{
struct vnic_dev *vdev;
vdev = wq->vdev;
vnic_dev_free_desc_ring(vdev, &wq->ring);
wq->ctrl = NULL;
}
int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq,
unsigned int index, unsigned int desc_count,
unsigned int desc_size)
{
int err;
wq->index = index;
wq->vdev = vdev;
wq->to_use_index = wq->to_clean_index = 0;
wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
if (!wq->ctrl) {
printk(KERN_ERR "Failed to hook COPY WQ[%d] resource\n", index);
return -EINVAL;
}
vnic_wq_copy_disable(wq);
err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
if (err)
return err;
return 0;
}
void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset)
{
u64 paddr;
paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
writeq(paddr, &wq->ctrl->ring_base);
iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
iowrite32(0, &wq->ctrl->fetch_index);
iowrite32(0, &wq->ctrl->posted_index);
iowrite32(cq_index, &wq->ctrl->cq_index);
iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
}

128
drivers/scsi/fnic/vnic_wq_copy.h Normal file
View file

@ -0,0 +1,128 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _VNIC_WQ_COPY_H_
#define _VNIC_WQ_COPY_H_
#include <linux/pci.h>
#include "vnic_wq.h"
#include "fcpio.h"
#define VNIC_WQ_COPY_MAX 1
struct vnic_wq_copy {
unsigned int index;
struct vnic_dev *vdev;
struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */
struct vnic_dev_ring ring;
unsigned to_use_index;
unsigned to_clean_index;
};
static inline unsigned int vnic_wq_copy_desc_avail(struct vnic_wq_copy *wq)
{
return wq->ring.desc_avail;
}
static inline unsigned int vnic_wq_copy_desc_in_use(struct vnic_wq_copy *wq)
{
return wq->ring.desc_count - 1 - wq->ring.desc_avail;
}
static inline void *vnic_wq_copy_next_desc(struct vnic_wq_copy *wq)
{
struct fcpio_host_req *desc = wq->ring.descs;
return &desc[wq->to_use_index];
}
static inline void vnic_wq_copy_post(struct vnic_wq_copy *wq)
{
((wq->to_use_index + 1) == wq->ring.desc_count) ?
(wq->to_use_index = 0) : (wq->to_use_index++);
wq->ring.desc_avail--;
/* Adding write memory barrier prevents compiler and/or CPU
* reordering, thus avoiding descriptor posting before
* descriptor is initialized. Otherwise, hardware can read
* stale descriptor fields.
*/
wmb();
iowrite32(wq->to_use_index, &wq->ctrl->posted_index);
}
static inline void vnic_wq_copy_desc_process(struct vnic_wq_copy *wq, u16 index)
{
unsigned int cnt;
if (wq->to_clean_index <= index)
cnt = (index - wq->to_clean_index) + 1;
else
cnt = wq->ring.desc_count - wq->to_clean_index + index + 1;
wq->to_clean_index = ((index + 1) % wq->ring.desc_count);
wq->ring.desc_avail += cnt;
}
static inline void vnic_wq_copy_service(struct vnic_wq_copy *wq,
u16 completed_index,
void (*q_service)(struct vnic_wq_copy *wq,
struct fcpio_host_req *wq_desc))
{
struct fcpio_host_req *wq_desc = wq->ring.descs;
unsigned int curr_index;
while (1) {
if (q_service)
(*q_service)(wq, &wq_desc[wq->to_clean_index]);
wq->ring.desc_avail++;
curr_index = wq->to_clean_index;
/* increment the to-clean index so that we start
* with an unprocessed index next time we enter the loop
*/
((wq->to_clean_index + 1) == wq->ring.desc_count) ?
(wq->to_clean_index = 0) : (wq->to_clean_index++);
if (curr_index == completed_index)
break;
/* we have cleaned all the entries */
if ((completed_index == (u16)-1) &&
(wq->to_clean_index == wq->to_use_index))
break;
}
}
void vnic_wq_copy_enable(struct vnic_wq_copy *wq);
int vnic_wq_copy_disable(struct vnic_wq_copy *wq);
void vnic_wq_copy_free(struct vnic_wq_copy *wq);
int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq,
unsigned int index, unsigned int desc_count, unsigned int desc_size);
void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset);
void vnic_wq_copy_clean(struct vnic_wq_copy *wq,
void (*q_clean)(struct vnic_wq_copy *wq,
struct fcpio_host_req *wq_desc));
#endif /* _VNIC_WQ_COPY_H_ */
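
Unlike the regular WQ, the copy queue carries struct fcpio_host_req descriptors directly and tracks only to_use/to_clean indices, so posting is a two-step get-descriptor-then-post sequence and every post rings the doorbell. A hedged sketch of that pattern; the example_ functions are hypothetical and the request fields to fill are driver-specific and omitted.

/* Illustration only: reserve, fill and post one copy-WQ request. */
static int example_copy_wq_post(struct vnic_wq_copy *wq)
{
	struct fcpio_host_req *req;

	if (!vnic_wq_copy_desc_avail(wq))
		return -ENOMEM;

	req = vnic_wq_copy_next_desc(wq);
	/* ... fill *req (firmware command, tag, SGL, etc.) ... */

	vnic_wq_copy_post(wq);		/* advances to_use, rings HW */
	return 0;
}

/* Illustration only: per-descriptor callback used while reaping. */
static void example_copy_wq_reap(struct vnic_wq_copy *wq,
				 struct fcpio_host_req *wq_desc)
{
	/* undo any per-request bookkeeping here */
}

static void example_copy_wq_complete(struct vnic_wq_copy *wq,
				     u16 completed_index)
{
	vnic_wq_copy_service(wq, completed_index, example_copy_wq_reap);
}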

96
drivers/scsi/fnic/wq_enet_desc.h Normal file
View file

@ -0,0 +1,96 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _WQ_ENET_DESC_H_
#define _WQ_ENET_DESC_H_
/* Ethernet work queue descriptor: 16B */
struct wq_enet_desc {
__le64 address;
__le16 length;
__le16 mss_loopback;
__le16 header_length_flags;
__le16 vlan_tag;
};
#define WQ_ENET_ADDR_BITS 64
#define WQ_ENET_LEN_BITS 14
#define WQ_ENET_LEN_MASK ((1 << WQ_ENET_LEN_BITS) - 1)
#define WQ_ENET_MSS_BITS 14
#define WQ_ENET_MSS_MASK ((1 << WQ_ENET_MSS_BITS) - 1)
#define WQ_ENET_MSS_SHIFT 2
#define WQ_ENET_LOOPBACK_SHIFT 1
#define WQ_ENET_HDRLEN_BITS 10
#define WQ_ENET_HDRLEN_MASK ((1 << WQ_ENET_HDRLEN_BITS) - 1)
#define WQ_ENET_FLAGS_OM_BITS 2
#define WQ_ENET_FLAGS_OM_MASK ((1 << WQ_ENET_FLAGS_OM_BITS) - 1)
#define WQ_ENET_FLAGS_EOP_SHIFT 12
#define WQ_ENET_FLAGS_CQ_ENTRY_SHIFT 13
#define WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT 14
#define WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT 15
#define WQ_ENET_OFFLOAD_MODE_CSUM 0
#define WQ_ENET_OFFLOAD_MODE_RESERVED 1
#define WQ_ENET_OFFLOAD_MODE_CSUM_L4 2
#define WQ_ENET_OFFLOAD_MODE_TSO 3
static inline void wq_enet_desc_enc(struct wq_enet_desc *desc,
u64 address, u16 length, u16 mss, u16 header_length,
u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap,
u8 vlan_tag_insert, u16 vlan_tag, u8 loopback)
{
desc->address = cpu_to_le64(address);
desc->length = cpu_to_le16(length & WQ_ENET_LEN_MASK);
desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) <<
WQ_ENET_MSS_SHIFT | (loopback & 1) << WQ_ENET_LOOPBACK_SHIFT);
desc->header_length_flags = cpu_to_le16(
(header_length & WQ_ENET_HDRLEN_MASK) |
(offload_mode & WQ_ENET_FLAGS_OM_MASK) << WQ_ENET_HDRLEN_BITS |
(eop & 1) << WQ_ENET_FLAGS_EOP_SHIFT |
(cq_entry & 1) << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT |
(fcoe_encap & 1) << WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT |
(vlan_tag_insert & 1) << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT);
desc->vlan_tag = cpu_to_le16(vlan_tag);
}
static inline void wq_enet_desc_dec(struct wq_enet_desc *desc,
u64 *address, u16 *length, u16 *mss, u16 *header_length,
u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap,
u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback)
{
*address = le64_to_cpu(desc->address);
*length = le16_to_cpu(desc->length) & WQ_ENET_LEN_MASK;
*mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) &
WQ_ENET_MSS_MASK;
*loopback = (u8)((le16_to_cpu(desc->mss_loopback) >>
WQ_ENET_LOOPBACK_SHIFT) & 1);
*header_length = le16_to_cpu(desc->header_length_flags) &
WQ_ENET_HDRLEN_MASK;
*offload_mode = (u8)((le16_to_cpu(desc->header_length_flags) >>
WQ_ENET_HDRLEN_BITS) & WQ_ENET_FLAGS_OM_MASK);
*eop = (u8)((le16_to_cpu(desc->header_length_flags) >>
WQ_ENET_FLAGS_EOP_SHIFT) & 1);
*cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >>
WQ_ENET_FLAGS_CQ_ENTRY_SHIFT) & 1);
*fcoe_encap = (u8)((le16_to_cpu(desc->header_length_flags) >>
WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT) & 1);
*vlan_tag_insert = (u8)((le16_to_cpu(desc->header_length_flags) >>
WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT) & 1);
*vlan_tag = le16_to_cpu(desc->vlan_tag);
}
#endif /* _WQ_ENET_DESC_H_ */
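
wq_enet_desc_enc()/wq_enet_desc_dec() pack and unpack the fields of the 16-byte Ethernet work-queue descriptor into the little-endian layout defined above. A small hedged round-trip sketch showing how the fields line up; the values are arbitrary examples and the function name is hypothetical.

/* Illustration only: encode a descriptor, then decode it back. */
static void example_wq_enet_desc_roundtrip(void)
{
	struct wq_enet_desc desc;
	u64 addr;
	u16 len, mss, hdrlen, vlan;
	u8 om, eop, cq_entry, fcoe, vlan_ins, loopback;

	wq_enet_desc_enc(&desc,
			 0x12345678ULL,			/* DMA address */
			 1500,				/* length */
			 0,				/* mss (no TSO) */
			 0,				/* header length */
			 WQ_ENET_OFFLOAD_MODE_CSUM,	/* offload mode */
			 1,				/* eop */
			 1,				/* cq_entry */
			 1,				/* fcoe_encap */
			 0,				/* vlan_tag_insert */
			 0,				/* vlan_tag */
			 0);				/* loopback */

	wq_enet_desc_dec(&desc, &addr, &len, &mss, &hdrlen, &om, &eop,
			 &cq_entry, &fcoe, &vlan_ins, &vlan, &loopback);
	/* addr == 0x12345678, len == 1500, om == WQ_ENET_OFFLOAD_MODE_CSUM,
	 * eop == cq_entry == fcoe == 1 after the round trip. */
}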