Mirror of https://github.com/adulau/aha.git (synced 2024-12-27 19:26:25 +00:00)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (40 commits)
  [SCSI] 3w-9xxx fix bug in sgl loading
  [SCSI] fcoe, libfc: adds enable/disable for fcoe interface
  [SCSI] libfc: reduce hold time on SCSI host lock
  [SCSI] libfc: remote port gets stuck in restart state without really restarting
  [SCSI] pm8001: misc code cleanup
  [SCSI] pm8001: enable read HBA SAS address from VPD
  [SCSI] pm8001: do not reset local sata as it will not be found if reset
  [SCSI] pm8001: bit set pm8001_ha->flags
  [SCSI] pm8001:fix potential NULL pointer dereference
  [SCSI] pm8001: set SSC down-spreading only to get less errors on some 6G device.
  [SCSI] pm8001: fix endian issues with SAS address
  [SCSI] pm8001: enhance error handle for IO patch
  [SCSI] pm8001: Fix for sata io circular lock dependency.
  [SCSI] hpsa: add driver for HP Smart Array controllers.
  [SCSI] cxgb3i: always use negative errno in case of error
  [SCSI] bnx2i: minor code cleanup and update driver version
  [SCSI] bnx2i: Task management ABORT TASK fixes
  [SCSI] bnx2i: update CQ arming algorith for 5771x chipsets
  [SCSI] bnx2i: Adjust sq_size module parametr to power of 2 only if a non-zero value is specified
  [SCSI] bnx2i: Add 5771E device support to bnx2i driver
  ...
commit fc6f0700d5
49 changed files with 4914 additions and 360 deletions
@@ -1587,7 +1587,7 @@ mpt_mapresources(MPT_ADAPTER *ioc)
 {
 	u8 __iomem *mem;
 	int ii;
-	unsigned long mem_phys;
+	resource_size_t mem_phys;
 	unsigned long port;
 	u32 msize;
 	u32 psize;
@@ -1677,8 +1677,8 @@ mpt_mapresources(MPT_ADAPTER *ioc)
 		return -EINVAL;
 	}
 	ioc->memmap = mem;
-	dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "mem = %p, mem_phys = %lx\n",
-	    ioc->name, mem, mem_phys));
+	dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "mem = %p, mem_phys = %llx\n",
+	    ioc->name, mem, (unsigned long long)mem_phys));
 
 	ioc->mem_phys = mem_phys;
 	ioc->chip = (SYSIF_REGS __iomem *)mem;
@@ -391,6 +391,7 @@ static const char *const enclosure_status [] = {
 	[ENCLOSURE_STATUS_NOT_INSTALLED] = "not installed",
 	[ENCLOSURE_STATUS_UNKNOWN] = "unknown",
 	[ENCLOSURE_STATUS_UNAVAILABLE] = "unavailable",
+	[ENCLOSURE_STATUS_MAX] = NULL,
 };
 
 static const char *const enclosure_type [] = {
@@ -76,6 +76,7 @@
                  Fix bug in twa_get_param() on 4GB+.
                  Use pci_resource_len() for ioremap().
    2.26.02.012 - Add power management support.
+   2.26.02.013 - Fix bug in twa_load_sgl().
 */
 
 #include <linux/module.h>
@@ -100,7 +101,7 @@
 #include "3w-9xxx.h"
 
 /* Globals */
-#define TW_DRIVER_VERSION "2.26.02.012"
+#define TW_DRIVER_VERSION "2.26.02.013"
 static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
 static unsigned int twa_device_extension_count;
 static int twa_major = -1;
@@ -1382,10 +1383,12 @@ static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm
 		newcommand = &full_command_packet->command.newcommand;
 		newcommand->request_id__lunl =
 			cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
-		newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
-		newcommand->sg_list[0].length = cpu_to_le32(length);
+		if (length) {
+			newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
+			newcommand->sg_list[0].length = cpu_to_le32(length);
+		}
 		newcommand->sgl_entries__lunh =
-			cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), 1));
+			cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
 	} else {
 		oldcommand = &full_command_packet->command.oldcommand;
 		oldcommand->request_id = request_id;
@@ -388,6 +388,16 @@ config BLK_DEV_3W_XXXX_RAID
 	  Please read the comments at the top of
 	  <file:drivers/scsi/3w-xxxx.c>.
 
+config SCSI_HPSA
+	tristate "HP Smart Array SCSI driver"
+	depends on PCI && SCSI
+	help
+	  This driver supports HP Smart Array Controllers (circa 2009).
+	  It is a SCSI alternative to the cciss driver, which is a block
+	  driver.  Anyone wishing to use HP Smart Array controllers who
+	  would prefer the devices be presented to linux as SCSI devices,
+	  rather than as generic block devices should say Y here.
+
 config SCSI_3W_9XXX
 	tristate "3ware 9xxx SATA-RAID support"
 	depends on PCI && SCSI
@@ -91,6 +91,7 @@ obj-$(CONFIG_SCSI_BFA_FC) += bfa/
 obj-$(CONFIG_SCSI_PAS16)	+= pas16.o
 obj-$(CONFIG_SCSI_T128)		+= t128.o
 obj-$(CONFIG_SCSI_DMX3191D)	+= dmx3191d.o
+obj-$(CONFIG_SCSI_HPSA)		+= hpsa.o
 obj-$(CONFIG_SCSI_DTC3280)	+= dtc.o
 obj-$(CONFIG_SCSI_SYM53C8XX_2)	+= sym53c8xx_2/
 obj-$(CONFIG_SCSI_ZALON)	+= zalon7xx.o
@@ -135,11 +135,15 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba)
 	while ((compl = be_mcc_compl_get(phba))) {
 		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
 			/* Interpret flags as an async trailer */
-			BUG_ON(!is_link_state_evt(compl->flags));
-
-			/* Interpret compl as a async link evt */
-			beiscsi_async_link_state_process(phba,
-				(struct be_async_event_link_state *) compl);
+			if (is_link_state_evt(compl->flags))
+				/* Interpret compl as a async link evt */
+				beiscsi_async_link_state_process(phba,
+					(struct be_async_event_link_state *) compl);
+			else
+				SE_DEBUG(DBG_LVL_1,
+					" Unsupported Async Event, flags"
+					" = 0x%08x \n", compl->flags);
 		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
 			status = be_mcc_compl_process(ctrl, compl);
 			atomic_dec(&phba->ctrl.mcc_obj.q.used);
@@ -684,6 +684,7 @@ extern unsigned int error_mask1, error_mask2;
 extern u64 iscsi_error_mask;
 extern unsigned int en_tcp_dack;
 extern unsigned int event_coal_div;
+extern unsigned int event_coal_min;
 
 extern struct scsi_transport_template *bnx2i_scsi_xport_template;
 extern struct iscsi_transport bnx2i_iscsi_transport;
@@ -133,20 +133,38 @@ void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
 {
 	struct bnx2i_5771x_cq_db *cq_db;
 	u16 cq_index;
+	u16 next_index;
+	u32 num_active_cmds;
 
+	/* Coalesce CQ entries only on 10G devices */
 	if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
 		return;
 
+	/* Do not update CQ DB multiple times before firmware writes
+	 * '0xFFFF' to CQDB->SQN field. Deviation may cause spurious
+	 * interrupts and other unwanted results
+	 */
+	cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
+	if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF)
+		return;
+
 	if (action == CNIC_ARM_CQE) {
-		cq_index = ep->qp.cqe_exp_seq_sn +
-			   ep->num_active_cmds / event_coal_div;
-		cq_index %= (ep->qp.cqe_size * 2 + 1);
-		if (!cq_index) {
+		num_active_cmds = ep->num_active_cmds;
+		if (num_active_cmds <= event_coal_min)
+			next_index = 1;
+		else
+			next_index = event_coal_min +
+				(num_active_cmds - event_coal_min) / event_coal_div;
+		if (!next_index)
+			next_index = 1;
+		cq_index = ep->qp.cqe_exp_seq_sn + next_index - 1;
+		if (cq_index > ep->qp.cqe_size * 2)
+			cq_index -= ep->qp.cqe_size * 2;
+		if (!cq_index)
 			cq_index = 1;
-			cq_db = (struct bnx2i_5771x_cq_db *)
-					ep->qp.cq_pgtbl_virt;
-			cq_db->sqn[0] = cq_index;
-		}
+
+		cq_db->sqn[0] = cq_index;
 	}
 }
@@ -366,6 +384,7 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
 	struct bnx2i_cmd *bnx2i_cmd;
 	struct bnx2i_tmf_request *tmfabort_wqe;
 	u32 dword;
+	u32 scsi_lun[2];
 
 	bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
 	tmfabort_hdr = (struct iscsi_tm *)mtask->hdr;
@@ -376,27 +395,35 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
 	tmfabort_wqe->op_attr = 0;
 	tmfabort_wqe->op_attr =
 		ISCSI_TMF_REQUEST_ALWAYS_ONE | ISCSI_TM_FUNC_ABORT_TASK;
-	tmfabort_wqe->lun[0] = be32_to_cpu(tmfabort_hdr->lun[0]);
-	tmfabort_wqe->lun[1] = be32_to_cpu(tmfabort_hdr->lun[1]);
 
 	tmfabort_wqe->itt = (mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14));
 	tmfabort_wqe->reserved2 = 0;
 	tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn);
 
 	ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt);
-	if (!ctask || ctask->sc)
+	if (!ctask || !ctask->sc)
 		/*
 		 * the iscsi layer must have completed the cmd while this
 		 * was starting up.
+		 *
+		 * Note: In the case of a SCSI cmd timeout, the task's sc
+		 *       is still active; hence ctask->sc != 0
+		 *       In this case, the task must be aborted
 		 */
 		return 0;
 
 	ref_sc = ctask->sc;
 
+	/* Retrieve LUN directly from the ref_sc */
+	int_to_scsilun(ref_sc->device->lun, (struct scsi_lun *) scsi_lun);
+	tmfabort_wqe->lun[0] = be32_to_cpu(scsi_lun[0]);
+	tmfabort_wqe->lun[1] = be32_to_cpu(scsi_lun[1]);
+
 	if (ref_sc->sc_data_direction == DMA_TO_DEVICE)
 		dword = (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
 	else
 		dword = (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
-	tmfabort_wqe->ref_itt = (dword | tmfabort_hdr->rtt);
+	tmfabort_wqe->ref_itt = (dword | (tmfabort_hdr->rtt & ISCSI_ITT_MASK));
 	tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn);
 
 	tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
@@ -17,8 +17,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
 static u32 adapter_count;
 
 #define DRV_MODULE_NAME		"bnx2i"
-#define DRV_MODULE_VERSION	"2.0.1e"
-#define DRV_MODULE_RELDATE	"June 22, 2009"
+#define DRV_MODULE_VERSION	"2.1.0"
+#define DRV_MODULE_RELDATE	"Dec 06, 2009"
 
 static char version[] __devinitdata =
 		"Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
@@ -32,6 +32,10 @@ MODULE_VERSION(DRV_MODULE_VERSION);
 
 static DEFINE_MUTEX(bnx2i_dev_lock);
 
+unsigned int event_coal_min = 24;
+module_param(event_coal_min, int, 0664);
+MODULE_PARM_DESC(event_coal_min, "Event Coalescing Minimum Commands");
+
 unsigned int event_coal_div = 1;
 module_param(event_coal_div, int, 0664);
 MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor");
@@ -83,8 +87,12 @@ void bnx2i_identify_device(struct bnx2i_hba *hba)
 		set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
 		hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
 	} else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
-		   hba->pci_did == PCI_DEVICE_ID_NX2_57711)
+		   hba->pci_did == PCI_DEVICE_ID_NX2_57711 ||
+		   hba->pci_did == PCI_DEVICE_ID_NX2_57711E)
 		set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type);
+	else
+		printk(KERN_ALERT "bnx2i: unknown device, 0x%x\n",
+				  hba->pci_did);
 }
 
 
@@ -363,7 +371,7 @@ static int __init bnx2i_mod_init(void)
 
 	printk(KERN_INFO "%s", version);
 
-	if (!is_power_of_2(sq_size))
+	if (sq_size && !is_power_of_2(sq_size))
 		sq_size = roundup_pow_of_two(sq_size);
 
 	mutex_init(&bnx2i_dev_lock);
@@ -485,7 +485,6 @@ static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba,
 		struct iscsi_task *task = session->cmds[i];
 		struct bnx2i_cmd *cmd = task->dd_data;
 
-		/* Anil */
 		task->hdr = &cmd->hdr;
 		task->hdr_max = sizeof(struct iscsi_hdr);
 
@@ -765,7 +764,6 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
 	hba->pci_svid = hba->pcidev->subsystem_vendor;
 	hba->pci_func = PCI_FUNC(hba->pcidev->devfn);
 	hba->pci_devno = PCI_SLOT(hba->pcidev->devfn);
-	bnx2i_identify_device(hba);
 
 	bnx2i_identify_device(hba);
 	bnx2i_setup_host_queue_size(hba, shost);
@@ -291,7 +291,7 @@ static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
 	c3cn_hold(c3cn);
 	spin_lock_bh(&c3cn->lock);
 	if (c3cn->state == C3CN_STATE_CONNECTING)
-		fail_act_open(c3cn, EHOSTUNREACH);
+		fail_act_open(c3cn, -EHOSTUNREACH);
 	spin_unlock_bh(&c3cn->lock);
 	c3cn_put(c3cn);
 	__kfree_skb(skb);
@@ -792,18 +792,18 @@ static int act_open_rpl_status_to_errno(int status)
 {
 	switch (status) {
 	case CPL_ERR_CONN_RESET:
-		return ECONNREFUSED;
+		return -ECONNREFUSED;
 	case CPL_ERR_ARP_MISS:
-		return EHOSTUNREACH;
+		return -EHOSTUNREACH;
 	case CPL_ERR_CONN_TIMEDOUT:
-		return ETIMEDOUT;
+		return -ETIMEDOUT;
 	case CPL_ERR_TCAM_FULL:
-		return ENOMEM;
+		return -ENOMEM;
 	case CPL_ERR_CONN_EXIST:
 		cxgb3i_log_error("ACTIVE_OPEN_RPL: 4-tuple in use\n");
-		return EADDRINUSE;
+		return -EADDRINUSE;
 	default:
-		return EIO;
+		return -EIO;
 	}
 }
 
@@ -817,7 +817,7 @@ static void act_open_retry_timer(unsigned long data)
 	spin_lock_bh(&c3cn->lock);
 	skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_ATOMIC);
 	if (!skb)
-		fail_act_open(c3cn, ENOMEM);
+		fail_act_open(c3cn, -ENOMEM);
 	else {
 		skb->sk = (struct sock *)c3cn;
 		set_arp_failure_handler(skb, act_open_req_arp_failure);
@@ -966,14 +966,14 @@ static int abort_status_to_errno(struct s3_conn *c3cn, int abort_reason,
 	case CPL_ERR_BAD_SYN: /* fall through */
 	case CPL_ERR_CONN_RESET:
 		return c3cn->state > C3CN_STATE_ESTABLISHED ?
-			EPIPE : ECONNRESET;
+			-EPIPE : -ECONNRESET;
 	case CPL_ERR_XMIT_TIMEDOUT:
 	case CPL_ERR_PERSIST_TIMEDOUT:
 	case CPL_ERR_FINWAIT2_TIMEDOUT:
 	case CPL_ERR_KEEPALIVE_TIMEDOUT:
-		return ETIMEDOUT;
+		return -ETIMEDOUT;
 	default:
-		return EIO;
+		return -EIO;
 	}
 }
 
@@ -1563,7 +1563,7 @@ free_tid:
 	s3_free_atid(cdev, c3cn->tid);
 	c3cn->tid = 0;
 out_err:
-	return -1;
+	return -EINVAL;
 }
 
@@ -388,8 +388,8 @@ int cxgb3i_conn_xmit_pdu(struct iscsi_task *task)
 	if (err > 0) {
 		int pdulen = err;
 
 		cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n",
 				task, skb, skb->len, skb->data_len, err);
 
 		if (task->conn->hdrdgst_en)
 			pdulen += ISCSI_DIGEST_SIZE;
@@ -748,6 +748,8 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
 	{"IBM", "1724"},
 	{"IBM", "1726"},
 	{"IBM", "1742"},
+	{"IBM", "1745"},
+	{"IBM", "1746"},
 	{"IBM", "1814"},
 	{"IBM", "1815"},
 	{"IBM", "1818"},
@@ -101,6 +101,8 @@ static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
 
 static int fcoe_create(const char *, struct kernel_param *);
 static int fcoe_destroy(const char *, struct kernel_param *);
+static int fcoe_enable(const char *, struct kernel_param *);
+static int fcoe_disable(const char *, struct kernel_param *);
 
 static struct fc_seq *fcoe_elsct_send(struct fc_lport *,
 				      u32 did, struct fc_frame *,
@@ -115,10 +117,16 @@ static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);
 
 module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
 __MODULE_PARM_TYPE(create, "string");
-MODULE_PARM_DESC(create, "Create fcoe fcoe using net device passed in.");
+MODULE_PARM_DESC(create, " Creates fcoe instance on a ethernet interface");
 module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
 __MODULE_PARM_TYPE(destroy, "string");
-MODULE_PARM_DESC(destroy, "Destroy fcoe fcoe");
+MODULE_PARM_DESC(destroy, " Destroys fcoe instance on a ethernet interface");
+module_param_call(enable, fcoe_enable, NULL, NULL, S_IWUSR);
+__MODULE_PARM_TYPE(enable, "string");
+MODULE_PARM_DESC(enable, " Enables fcoe on a ethernet interface.");
+module_param_call(disable, fcoe_disable, NULL, NULL, S_IWUSR);
+__MODULE_PARM_TYPE(disable, "string");
+MODULE_PARM_DESC(disable, " Disables fcoe on a ethernet interface.");
 
 /* notification function for packets from net device */
 static struct notifier_block fcoe_notifier = {
@@ -544,6 +552,23 @@ static void fcoe_queue_timer(ulong lport)
 	fcoe_check_wait_queue((struct fc_lport *)lport, NULL);
 }
 
+/**
+ * fcoe_get_wwn() - Get the world wide name from LLD if it supports it
+ * @netdev: the associated net device
+ * @wwn: the output WWN
+ * @type: the type of WWN (WWPN or WWNN)
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
+{
+	const struct net_device_ops *ops = netdev->netdev_ops;
+
+	if (ops->ndo_fcoe_get_wwn)
+		return ops->ndo_fcoe_get_wwn(netdev, wwn, type);
+	return -EINVAL;
+}
+
 /**
  * fcoe_netdev_config() - Set up net devive for SW FCoE
  * @lport:  The local port that is associated with the net device
@@ -611,9 +636,13 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
 	 */
 	if (netdev->priv_flags & IFF_802_1Q_VLAN)
 		vid = vlan_dev_vlan_id(netdev);
-	wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0);
+
+	if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
+		wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0);
 	fc_set_wwnn(lport, wwnn);
-	wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 2, vid);
+	if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
+		wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr,
+					 2, vid);
 	fc_set_wwpn(lport, wwpn);
 }
 
@@ -1837,6 +1866,104 @@ static struct net_device *fcoe_if_to_netdev(const char *buffer)
 	return NULL;
 }
 
+/**
+ * fcoe_disable() - Disables a FCoE interface
+ * @buffer: The name of the Ethernet interface to be disabled
+ * @kp:     The associated kernel parameter
+ *
+ * Called from sysfs.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_disable(const char *buffer, struct kernel_param *kp)
+{
+	struct fcoe_interface *fcoe;
+	struct net_device *netdev;
+	int rc = 0;
+
+	mutex_lock(&fcoe_config_mutex);
+#ifdef CONFIG_FCOE_MODULE
+	/*
+	 * Make sure the module has been initialized, and is not about to be
+	 * removed.  Module paramter sysfs files are writable before the
+	 * module_init function is called and after module_exit.
+	 */
+	if (THIS_MODULE->state != MODULE_STATE_LIVE) {
+		rc = -ENODEV;
+		goto out_nodev;
+	}
+#endif
+
+	netdev = fcoe_if_to_netdev(buffer);
+	if (!netdev) {
+		rc = -ENODEV;
+		goto out_nodev;
+	}
+
+	rtnl_lock();
+	fcoe = fcoe_hostlist_lookup_port(netdev);
+	rtnl_unlock();
+
+	if (fcoe)
+		fc_fabric_logoff(fcoe->ctlr.lp);
+	else
+		rc = -ENODEV;
+
+	dev_put(netdev);
+out_nodev:
+	mutex_unlock(&fcoe_config_mutex);
+	return rc;
+}
+
+/**
+ * fcoe_enable() - Enables a FCoE interface
+ * @buffer: The name of the Ethernet interface to be enabled
+ * @kp:     The associated kernel parameter
+ *
+ * Called from sysfs.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_enable(const char *buffer, struct kernel_param *kp)
+{
+	struct fcoe_interface *fcoe;
+	struct net_device *netdev;
+	int rc = 0;
+
+	mutex_lock(&fcoe_config_mutex);
+#ifdef CONFIG_FCOE_MODULE
+	/*
+	 * Make sure the module has been initialized, and is not about to be
+	 * removed.  Module paramter sysfs files are writable before the
+	 * module_init function is called and after module_exit.
+	 */
+	if (THIS_MODULE->state != MODULE_STATE_LIVE) {
+		rc = -ENODEV;
+		goto out_nodev;
+	}
+#endif
+
+	netdev = fcoe_if_to_netdev(buffer);
+	if (!netdev) {
+		rc = -ENODEV;
+		goto out_nodev;
+	}
+
+	rtnl_lock();
+	fcoe = fcoe_hostlist_lookup_port(netdev);
+	rtnl_unlock();
+
+	if (fcoe)
+		rc = fc_fabric_login(fcoe->ctlr.lp);
+	else
+		rc = -ENODEV;
+
+	dev_put(netdev);
+out_nodev:
+	mutex_unlock(&fcoe_config_mutex);
+	return rc;
+}
+
 /**
  * fcoe_destroy() - Destroy a FCoE interface
  * @buffer: The name of the Ethernet interface to be destroyed
drivers/scsi/hpsa.c (new file, 3531 lines): diff suppressed because it is too large.
drivers/scsi/hpsa.h (new file, 273 lines)
@@ -0,0 +1,273 @@
+/*
+ *    Disk Array driver for HP Smart Array SAS controllers
+ *    Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; version 2 of the License.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *    NON INFRINGEMENT.  See the GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
+ *
+ */
+#ifndef HPSA_H
+#define HPSA_H
+
+#include <scsi/scsicam.h>
+
+#define IO_OK		0
+#define IO_ERROR	1
+
+struct ctlr_info;
+
+struct access_method {
+	void (*submit_command)(struct ctlr_info *h,
+		struct CommandList *c);
+	void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
+	unsigned long (*fifo_full)(struct ctlr_info *h);
+	unsigned long (*intr_pending)(struct ctlr_info *h);
+	unsigned long (*command_completed)(struct ctlr_info *h);
+};
+
+struct hpsa_scsi_dev_t {
+	int devtype;
+	int bus, target, lun;		/* as presented to the OS */
+	unsigned char scsi3addr[8];	/* as presented to the HW */
+#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
+	unsigned char device_id[16];	/* from inquiry pg. 0x83 */
+	unsigned char vendor[8];	/* bytes 8-15 of inquiry data */
+	unsigned char model[16];	/* bytes 16-31 of inquiry data */
+	unsigned char revision[4];	/* bytes 32-35 of inquiry data */
+	unsigned char raid_level;	/* from inquiry page 0xC1 */
+};
+
+struct ctlr_info {
+	int	ctlr;
+	char	devname[8];
+	char	*product_name;
+	char	firm_ver[4];	/* Firmware version */
+	struct pci_dev *pdev;
+	__u32	board_id;
+	void __iomem *vaddr;
+	unsigned long paddr;
+	int	nr_cmds;	/* Number of commands allowed on this controller */
+	struct CfgTable __iomem *cfgtable;
+	int	interrupts_enabled;
+	int	major;
+	int	max_commands;
+	int	commands_outstanding;
+	int	max_outstanding;	/* Debug */
+	int	usage_count;	/* number of opens all all minor devices */
+#	define DOORBELL_INT	0
+#	define PERF_MODE_INT	1
+#	define SIMPLE_MODE_INT	2
+#	define MEMQ_MODE_INT	3
+	unsigned int intr[4];
+	unsigned int msix_vector;
+	unsigned int msi_vector;
+	struct access_method access;
+
+	/* queue and queue Info */
+	struct hlist_head reqQ;
+	struct hlist_head cmpQ;
+	unsigned int Qdepth;
+	unsigned int maxQsinceinit;
+	unsigned int maxSG;
+	spinlock_t lock;
+
+	/* pointers to command and error info pool */
+	struct CommandList	*cmd_pool;
+	dma_addr_t		cmd_pool_dhandle;
+	struct ErrorInfo	*errinfo_pool;
+	dma_addr_t		errinfo_pool_dhandle;
+	unsigned long		*cmd_pool_bits;
+	int			nr_allocs;
+	int			nr_frees;
+	int			busy_initializing;
+	int			busy_scanning;
+	struct mutex		busy_shutting_down;
+	struct list_head	scan_list;
+	struct completion	scan_wait;
+
+	struct Scsi_Host *scsi_host;
+	spinlock_t devlock;	/* to protect hba[ctlr]->dev[]; */
+	int ndevices;	/* number of used elements in .dev[] array. */
+#define HPSA_MAX_SCSI_DEVS_PER_HBA 256
+	struct hpsa_scsi_dev_t *dev[HPSA_MAX_SCSI_DEVS_PER_HBA];
+};
+#define HPSA_ABORT_MSG 0
+#define HPSA_DEVICE_RESET_MSG 1
+#define HPSA_BUS_RESET_MSG 2
+#define HPSA_HOST_RESET_MSG 3
+#define HPSA_MSG_SEND_RETRY_LIMIT 10
+#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS 1000
+
+/* Maximum time in seconds driver will wait for command completions
+ * when polling before giving up.
+ */
+#define HPSA_MAX_POLL_TIME_SECS (20)
+
+/* During SCSI error recovery, HPSA_TUR_RETRY_LIMIT defines
+ * how many times to retry TEST UNIT READY on a device
+ * while waiting for it to become ready before giving up.
+ * HPSA_MAX_WAIT_INTERVAL_SECS is the max wait interval
+ * between sending TURs while waiting for a device
+ * to become ready.
+ */
+#define HPSA_TUR_RETRY_LIMIT (20)
+#define HPSA_MAX_WAIT_INTERVAL_SECS (30)
+
+/* HPSA_BOARD_READY_WAIT_SECS is how long to wait for a board
+ * to become ready, in seconds, before giving up on it.
+ * HPSA_BOARD_READY_POLL_INTERVAL_MSECS * is how long to wait
+ * between polling the board to see if it is ready, in
+ * milliseconds.  HPSA_BOARD_READY_POLL_INTERVAL and
+ * HPSA_BOARD_READY_ITERATIONS are derived from those.
+ */
+#define HPSA_BOARD_READY_WAIT_SECS (120)
+#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
+#define HPSA_BOARD_READY_POLL_INTERVAL \
+	((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
+#define HPSA_BOARD_READY_ITERATIONS \
+	((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
+		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
+#define HPSA_POST_RESET_PAUSE_MSECS (3000)
+#define HPSA_POST_RESET_NOOP_RETRIES (12)
+
+/*  Defining the diffent access_menthods */
+/*
+ * Memory mapped FIFO interface (SMART 53xx cards)
+ */
+#define SA5_DOORBELL	0x20
+#define SA5_REQUEST_PORT_OFFSET	0x40
+#define SA5_REPLY_INTR_MASK_OFFSET	0x34
+#define SA5_REPLY_PORT_OFFSET		0x44
+#define SA5_INTR_STATUS		0x30
+#define SA5_SCRATCHPAD_OFFSET	0xB0
+
+#define SA5_CTCFG_OFFSET	0xB4
+#define SA5_CTMEM_OFFSET	0xB8
+
+#define SA5_INTR_OFF		0x08
+#define SA5B_INTR_OFF		0x04
+#define SA5_INTR_PENDING	0x08
+#define SA5B_INTR_PENDING	0x04
+#define FIFO_EMPTY		0xffffffff
+#define HPSA_FIRMWARE_READY	0xffff0000 /* value in scratchpad register */
+
+#define HPSA_ERROR_BIT		0x02
+#define HPSA_TAG_CONTAINS_INDEX(tag) ((tag) & 0x04)
+#define HPSA_TAG_TO_INDEX(tag) ((tag) >> 3)
+#define HPSA_TAG_DISCARD_ERROR_BITS(tag) ((tag) & ~3)
+
+#define HPSA_INTR_ON	1
+#define HPSA_INTR_OFF	0
+/*
+	Send the command to the hardware
+*/
+static void SA5_submit_command(struct ctlr_info *h,
+	struct CommandList *c)
+{
+#ifdef HPSA_DEBUG
+	printk(KERN_WARNING "hpsa: Sending %x - down to controller\n",
+		c->busaddr);
+#endif /* HPSA_DEBUG */
+	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
+	h->commands_outstanding++;
+	if (h->commands_outstanding > h->max_outstanding)
+		h->max_outstanding = h->commands_outstanding;
+}
+
+/*
+ *  This card is the opposite of the other cards.
+ *   0 turns interrupts on...
+ *   0x08 turns them off...
+ */
+static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
+{
+	if (val) { /* Turn interrupts on */
+		h->interrupts_enabled = 1;
+		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+	} else { /* Turn them off */
+		h->interrupts_enabled = 0;
+		writel(SA5_INTR_OFF,
+			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+	}
+}
+/*
+ *  Returns true if fifo is full.
+ *
+ */
+static unsigned long SA5_fifo_full(struct ctlr_info *h)
+{
+	if (h->commands_outstanding >= h->max_commands)
+		return 1;
+	else
+		return 0;
+
+}
+/*
+ *   returns value read from hardware.
+ *     returns FIFO_EMPTY if there is nothing to read
+ */
+static unsigned long SA5_completed(struct ctlr_info *h)
+{
+	unsigned long register_value
+		= readl(h->vaddr + SA5_REPLY_PORT_OFFSET);
+
+	if (register_value != FIFO_EMPTY)
+		h->commands_outstanding--;
+
+#ifdef HPSA_DEBUG
+	if (register_value != FIFO_EMPTY)
+		printk(KERN_INFO "hpsa: Read %lx back from board\n",
+			register_value);
+	else
+		printk(KERN_INFO "hpsa: FIFO Empty read\n");
+#endif
+
+	return register_value;
+}
+/*
+ *	Returns true if an interrupt is pending..
+ */
+static unsigned long SA5_intr_pending(struct ctlr_info *h)
+{
+	unsigned long register_value =
+		readl(h->vaddr + SA5_INTR_STATUS);
+#ifdef HPSA_DEBUG
+	printk(KERN_INFO "hpsa: intr_pending %lx\n", register_value);
+#endif  /* HPSA_DEBUG */
+	if (register_value & SA5_INTR_PENDING)
+		return 1;
+	return 0 ;
+}
+
+
+static struct access_method SA5_access = {
+	SA5_submit_command,
+	SA5_intr_mask,
+	SA5_fifo_full,
+	SA5_intr_pending,
+	SA5_completed,
+};
+
+struct board_type {
+	__u32	board_id;
+	char	*product_name;
+	struct access_method *access;
+};
+
+
+/* end of old hpsa_scsi.h file */
+
+#endif /* HPSA_H */
drivers/scsi/hpsa_cmd.h (new file, 326 lines)
@@ -0,0 +1,326 @@
+/*
+ *    Disk Array driver for HP Smart Array SAS controllers
+ *    Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; version 2 of the License.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *    NON INFRINGEMENT.  See the GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
+ *
+ */
+#ifndef HPSA_CMD_H
+#define HPSA_CMD_H
+
+/* general boundary defintions */
+#define SENSEINFOBYTES	32 /* may vary between hbas */
+#define MAXSGENTRIES	31
+#define MAXREPLYQS	256
+
+/* Command Status value */
+#define CMD_SUCCESS		0x0000
+#define CMD_TARGET_STATUS	0x0001
+#define CMD_DATA_UNDERRUN	0x0002
+#define CMD_DATA_OVERRUN	0x0003
+#define CMD_INVALID		0x0004
+#define CMD_PROTOCOL_ERR	0x0005
+#define CMD_HARDWARE_ERR	0x0006
+#define CMD_CONNECTION_LOST	0x0007
+#define CMD_ABORTED		0x0008
+#define CMD_ABORT_FAILED	0x0009
+#define CMD_UNSOLICITED_ABORT	0x000A
+#define CMD_TIMEOUT		0x000B
+#define CMD_UNABORTABLE		0x000C
+
+/* Unit Attentions ASC's as defined for the MSA2012sa */
+#define POWER_OR_RESET			0x29
+#define STATE_CHANGED			0x2a
+#define UNIT_ATTENTION_CLEARED		0x2f
+#define LUN_FAILED			0x3e
+#define REPORT_LUNS_CHANGED		0x3f
+
+/* Unit Attentions ASCQ's as defined for the MSA2012sa */
+
+/* These ASCQ's defined for ASC = POWER_OR_RESET */
+#define POWER_ON_RESET			0x00
+#define POWER_ON_REBOOT			0x01
+#define SCSI_BUS_RESET			0x02
+#define MSA_TARGET_RESET		0x03
+#define CONTROLLER_FAILOVER		0x04
+#define TRANSCEIVER_SE			0x05
+#define TRANSCEIVER_LVD			0x06
+
+/* These ASCQ's defined for ASC = STATE_CHANGED */
+#define RESERVATION_PREEMPTED		0x03
+#define ASYM_ACCESS_CHANGED		0x06
+#define LUN_CAPACITY_CHANGED		0x09
+
+/* transfer direction */
+#define XFER_NONE	0x00
+#define XFER_WRITE	0x01
+#define XFER_READ	0x02
+#define XFER_RSVD	0x03
+
+/* task attribute */
+#define ATTR_UNTAGGED		0x00
+#define ATTR_SIMPLE		0x04
+#define ATTR_HEADOFQUEUE	0x05
+#define ATTR_ORDERED		0x06
+#define ATTR_ACA		0x07
+
+/* cdb type */
+#define TYPE_CMD	0x00
+#define TYPE_MSG	0x01
+
+/* config space register offsets */
+#define CFG_VENDORID	0x00
+#define CFG_DEVICEID	0x02
+#define CFG_I2OBAR	0x10
+#define CFG_MEM1BAR	0x14
+
+/* i2o space register offsets */
+#define I2O_IBDB_SET	0x20
+#define I2O_IBDB_CLEAR	0x70
+#define I2O_INT_STATUS	0x30
+#define I2O_INT_MASK	0x34
+#define I2O_IBPOST_Q	0x40
+#define I2O_OBPOST_Q	0x44
+#define I2O_DMA1_CFG	0x214
+
+/* Configuration Table */
+#define CFGTBL_ChangeReq	0x00000001l
+#define CFGTBL_AccCmds		0x00000001l
+
+#define CFGTBL_Trans_Simple	0x00000002l
+
+#define CFGTBL_BusType_Ultra2	0x00000001l
+#define CFGTBL_BusType_Ultra3	0x00000002l
+#define CFGTBL_BusType_Fibre1G	0x00000100l
+#define CFGTBL_BusType_Fibre2G	0x00000200l
+struct vals32 {
+	__u32 lower;
+	__u32 upper;
+};
+
+union u64bit {
+	struct vals32 val32;
+	__u64 val;
+};
+
+/* FIXME this is a per controller value (barf!) */
+#define HPSA_MAX_TARGETS_PER_CTLR 16
+#define HPSA_MAX_LUN 256
+#define HPSA_MAX_PHYS_LUN 1024
+
+/* SCSI-3 Commands */
+#pragma pack(1)
+
+#define HPSA_INQUIRY 0x12
+struct InquiryData {
+	__u8 data_byte[36];
+};
+
+#define HPSA_REPORT_LOG 0xc2    /* Report Logical LUNs */
+#define HPSA_REPORT_PHYS 0xc3   /* Report Physical LUNs */
+struct ReportLUNdata {
+	__u8 LUNListLength[4];
+	__u32 reserved;
+	__u8 LUN[HPSA_MAX_LUN][8];
+};
+
+struct ReportExtendedLUNdata {
+	__u8 LUNListLength[4];
+	__u8 extended_response_flag;
+	__u8 reserved[3];
+	__u8 LUN[HPSA_MAX_LUN][24];
+};
+
+struct SenseSubsystem_info {
+	__u8 reserved[36];
+	__u8 portname[8];
+	__u8 reserved1[1108];
+};
+
+#define HPSA_READ_CAPACITY 0x25 /* Read Capacity */
+struct ReadCapdata {
+	__u8 total_size[4];	/* Total size in blocks */
+	__u8 block_size[4];	/* Size of blocks in bytes */
+};
+
+#if 0
+/* 12 byte commands not implemented in firmware yet. */
+#define HPSA_READ	0xa8
+#define HPSA_WRITE	0xaa
+#endif
+
+#define HPSA_READ	0x28	/* Read(10) */
+#define HPSA_WRITE	0x2a	/* Write(10) */
+
+/* BMIC commands */
+#define BMIC_READ 0x26
+#define BMIC_WRITE 0x27
+#define BMIC_CACHE_FLUSH 0xc2
+#define HPSA_CACHE_FLUSH 0x01	/* C2 was already being used by HPSA */
+
+/* Command List Structure */
+union SCSI3Addr {
+	struct {
+		__u8 Dev;
+		__u8 Bus:6;
+		__u8 Mode:2;        /* b00 */
+	} PeripDev;
+	struct {
+		__u8 DevLSB;
+		__u8 DevMSB:6;
+		__u8 Mode:2;        /* b01 */
+	} LogDev;
+	struct {
+		__u8 Dev:5;
+		__u8 Bus:3;
+		__u8 Targ:6;
+		__u8 Mode:2;        /* b10 */
+	} LogUnit;
+};
+
+struct PhysDevAddr {
+	__u32 TargetId:24;
+	__u32 Bus:6;
+	__u32 Mode:2;
+	/* 2 level target device addr */
+	union SCSI3Addr Target[2];
+};
+
+struct LogDevAddr {
+	__u32 VolId:30;
+	__u32 Mode:2;
+	__u8 reserved[4];
+};
+
+union LUNAddr {
+	__u8 LunAddrBytes[8];
+	union SCSI3Addr SCSI3Lun[4];
+	struct PhysDevAddr PhysDev;
+	struct LogDevAddr LogDev;
+};
+
+struct CommandListHeader {
+	__u8 ReplyQueue;
+	__u8 SGList;
+	__u16 SGTotal;
+	struct vals32 Tag;
+	union LUNAddr LUN;
+};
+
+struct RequestBlock {
+	__u8 CDBLen;
+	struct {
+		__u8 Type:3;
+		__u8 Attribute:3;
+		__u8 Direction:2;
+	} Type;
+	__u16 Timeout;
+	__u8 CDB[16];
+};
+
+struct ErrDescriptor {
+	struct vals32 Addr;
+	__u32 Len;
+};
+
+struct SGDescriptor {
+	struct vals32 Addr;
+	__u32 Len;
+	__u32 Ext;
+};
+
+union MoreErrInfo {
+	struct {
+		__u8 Reserved[3];
+		__u8 Type;
+		__u32 ErrorInfo;
+	} Common_Info;
+	struct {
+		__u8 Reserved[2];
+		__u8 offense_size; /* size of offending entry */
+		__u8 offense_num;  /* byte # of offense 0-base */
+		__u32 offense_value;
+	} Invalid_Cmd;
+};
+struct ErrorInfo {
+	__u8 ScsiStatus;
+	__u8 SenseLen;
+	__u16 CommandStatus;
+	__u32 ResidualCnt;
+	union MoreErrInfo MoreErrInfo;
+	__u8 SenseInfo[SENSEINFOBYTES];
+};
+/* Command types */
+#define CMD_IOCTL_PEND  0x01
+#define CMD_SCSI	0x03
+
+struct ctlr_info; /* defined in hpsa.h */
+/* The size of this structure needs to be divisible by 8
+ * od on all architectures, because the controller uses 2
+ * lower bits of the address, and the driver uses 1 lower
+ * bit (3 bits total.)
+ */
+struct CommandList {
+	struct CommandListHeader Header;
+	struct RequestBlock Request;
+	struct ErrDescriptor ErrDesc;
+	struct SGDescriptor SG[MAXSGENTRIES];
+	/* information associated with the command */
+	__u32 busaddr; /* physical addr of this record */
+	struct ErrorInfo *err_info; /* pointer to the allocated mem */
+	struct ctlr_info *h;
+	int cmd_type;
+	long cmdindex;
+	struct hlist_node list;
+	struct CommandList *prev;
+	struct CommandList *next;
+	struct request *rq;
+	struct completion *waiting;
+	int retry_count;
+	void *scsi_cmd;
+};
+
+/* Configuration Table Structure */
+struct HostWrite {
+	__u32 TransportRequest;
+	__u32 Reserved;
+	__u32 CoalIntDelay;
+	__u32 CoalIntCount;
+};
+
+struct CfgTable {
+	__u8 Signature[4];
+	__u32 SpecValence;
+	__u32 TransportSupport;
+	__u32 TransportActive;
+	struct HostWrite HostWrite;
+	__u32 CmdsOutMax;
+	__u32 BusTypes;
+	__u32 Reserved;
+	__u8 ServerName[16];
+	__u32 HeartBeat;
+	__u32 SCSI_Prefetch;
+};
+
+struct hpsa_pci_info {
+	unsigned char bus;
+	unsigned char dev_fn;
+	unsigned short domain;
+	__u32 board_id;
+};
+
+#pragma pack()
+#endif /* HPSA_CMD_H */
@@ -6521,6 +6521,7 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
 	int rc;
 
 	ENTER;
+	ioa_cfg->pdev->state_saved = true;
 	rc = pci_restore_state(ioa_cfg->pdev);
 
 	if (rc != PCIBIOS_SUCCESSFUL) {
@@ -68,18 +68,20 @@ struct kmem_cache *scsi_pkt_cachep;
 
 /**
  * struct fc_fcp_internal - FCP layer internal data
  * @scsi_pkt_pool: Memory pool to draw FCP packets from
+ * @scsi_queue_lock: Protects the scsi_pkt_queue
  * @scsi_pkt_queue: Current FCP packets
  * @last_can_queue_ramp_down_time: ramp down time
  * @last_can_queue_ramp_up_time: ramp up time
  * @max_can_queue: max can_queue size
  */
 struct fc_fcp_internal {
 	mempool_t		*scsi_pkt_pool;
+	spinlock_t		scsi_queue_lock;
 	struct list_head	scsi_pkt_queue;
 	unsigned long		last_can_queue_ramp_down_time;
 	unsigned long		last_can_queue_ramp_up_time;
 	int			max_can_queue;
 };
 
 #define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv)
@@ -410,12 +412,14 @@ static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport,
 	unsigned long flags;
 
 	fp = fc_frame_alloc(lport, len);
-	if (!fp) {
-		spin_lock_irqsave(lport->host->host_lock, flags);
-		fc_fcp_can_queue_ramp_down(lport);
-		spin_unlock_irqrestore(lport->host->host_lock, flags);
-	}
-	return fp;
+	if (likely(fp))
+		return fp;
+
+	/* error case */
+	spin_lock_irqsave(lport->host->host_lock, flags);
+	fc_fcp_can_queue_ramp_down(lport);
+	spin_unlock_irqrestore(lport->host->host_lock, flags);
+	return NULL;
 }
 
 /**
|
||||||
struct scsi_cmnd *sc_cmd;
|
struct scsi_cmnd *sc_cmd;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
spin_lock_irqsave(lport->host->host_lock, flags);
|
spin_lock_irqsave(&si->scsi_queue_lock, flags);
|
||||||
restart:
|
restart:
|
||||||
list_for_each_entry(fsp, &si->scsi_pkt_queue, list) {
|
list_for_each_entry(fsp, &si->scsi_pkt_queue, list) {
|
||||||
sc_cmd = fsp->cmd;
|
sc_cmd = fsp->cmd;
|
||||||
|
@@ -1001,7 +1005,7 @@ restart:
 			continue;
 
 		fc_fcp_pkt_hold(fsp);
-		spin_unlock_irqrestore(lport->host->host_lock, flags);
+		spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
 
 		if (!fc_fcp_lock_pkt(fsp)) {
 			fc_fcp_cleanup_cmd(fsp, error);
@@ -1010,14 +1014,14 @@ restart:
 		}
 
 		fc_fcp_pkt_release(fsp);
-		spin_lock_irqsave(lport->host->host_lock, flags);
+		spin_lock_irqsave(&si->scsi_queue_lock, flags);
 		/*
 		 * while we dropped the lock multiple pkts could
 		 * have been released, so we have to start over.
 		 */
 		goto restart;
 	}
-	spin_unlock_irqrestore(lport->host->host_lock, flags);
+	spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
 }
 
 /**
@@ -1035,11 +1039,12 @@ static void fc_fcp_abort_io(struct fc_lport *lport)
 * @fsp:   The FCP packet to send
 *
 * Return: Zero for success and -1 for failure
- * Locks:  Called with the host lock and irqs disabled.
+ * Locks:  Called without locks held
 */
 static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
 {
 	struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
+	unsigned long flags;
 	int rc;
 
 	fsp->cmd->SCp.ptr = (char *)fsp;
@@ -1049,13 +1054,16 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
 	int_to_scsilun(fsp->cmd->device->lun,
 		       (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
 	memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len);
-	list_add_tail(&fsp->list, &si->scsi_pkt_queue);
 
-	spin_unlock_irq(lport->host->host_lock);
+	spin_lock_irqsave(&si->scsi_queue_lock, flags);
+	list_add_tail(&fsp->list, &si->scsi_pkt_queue);
+	spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
 	rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv);
-	spin_lock_irq(lport->host->host_lock);
-	if (rc)
+	if (unlikely(rc)) {
+		spin_lock_irqsave(&si->scsi_queue_lock, flags);
 		list_del(&fsp->list);
+		spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
+	}
 
 	return rc;
 }
@ -1752,6 +1760,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
|
||||||
struct fcoe_dev_stats *stats;
|
struct fcoe_dev_stats *stats;
|
||||||
|
|
||||||
lport = shost_priv(sc_cmd->device->host);
|
lport = shost_priv(sc_cmd->device->host);
|
||||||
|
spin_unlock_irq(lport->host->host_lock);
|
||||||
|
|
||||||
rval = fc_remote_port_chkready(rport);
|
rval = fc_remote_port_chkready(rport);
|
||||||
if (rval) {
|
if (rval) {
|
||||||
|
@ -1834,6 +1843,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
|
||||||
rc = SCSI_MLQUEUE_HOST_BUSY;
|
rc = SCSI_MLQUEUE_HOST_BUSY;
|
||||||
}
|
}
|
||||||
out:
|
out:
|
||||||
|
spin_lock_irq(lport->host->host_lock);
|
||||||
return rc;
|
return rc;
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(fc_queuecommand);
|
EXPORT_SYMBOL(fc_queuecommand);
|
||||||
|
@ -1864,11 +1874,8 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
|
||||||
|
|
||||||
lport = fsp->lp;
|
lport = fsp->lp;
|
||||||
si = fc_get_scsi_internal(lport);
|
si = fc_get_scsi_internal(lport);
|
||||||
spin_lock_irqsave(lport->host->host_lock, flags);
|
if (!fsp->cmd)
|
||||||
if (!fsp->cmd) {
|
|
||||||
spin_unlock_irqrestore(lport->host->host_lock, flags);
|
|
||||||
return;
|
return;
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* if can_queue ramp down is done then try can_queue ramp up
|
* if can_queue ramp down is done then try can_queue ramp up
|
||||||
|
@ -1880,10 +1887,8 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
|
||||||
sc_cmd = fsp->cmd;
|
sc_cmd = fsp->cmd;
|
||||||
fsp->cmd = NULL;
|
fsp->cmd = NULL;
|
||||||
|
|
||||||
if (!sc_cmd->SCp.ptr) {
|
if (!sc_cmd->SCp.ptr)
|
||||||
spin_unlock_irqrestore(lport->host->host_lock, flags);
|
|
||||||
return;
|
return;
|
||||||
}
|
|
||||||
|
|
||||||
CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status;
|
CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status;
|
||||||
switch (fsp->status_code) {
|
switch (fsp->status_code) {
|
||||||
|
@ -1945,10 +1950,11 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
spin_lock_irqsave(&si->scsi_queue_lock, flags);
|
||||||
list_del(&fsp->list);
|
list_del(&fsp->list);
|
||||||
|
spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
|
||||||
sc_cmd->SCp.ptr = NULL;
|
sc_cmd->SCp.ptr = NULL;
|
||||||
sc_cmd->scsi_done(sc_cmd);
|
sc_cmd->scsi_done(sc_cmd);
|
||||||
spin_unlock_irqrestore(lport->host->host_lock, flags);
|
|
||||||
|
|
||||||
/* release ref from initial allocation in queue command */
|
/* release ref from initial allocation in queue command */
|
||||||
fc_fcp_pkt_release(fsp);
|
fc_fcp_pkt_release(fsp);
|
||||||
|
@ -2216,6 +2222,7 @@ int fc_fcp_init(struct fc_lport *lport)
|
||||||
lport->scsi_priv = si;
|
lport->scsi_priv = si;
|
||||||
si->max_can_queue = lport->host->can_queue;
|
si->max_can_queue = lport->host->can_queue;
|
||||||
INIT_LIST_HEAD(&si->scsi_pkt_queue);
|
INIT_LIST_HEAD(&si->scsi_pkt_queue);
|
||||||
|
spin_lock_init(&si->scsi_queue_lock);
|
||||||
|
|
||||||
si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep);
|
si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep);
|
||||||
if (!si->scsi_pkt_pool) {
|
if (!si->scsi_pkt_pool) {
|
||||||
|
|
|
@@ -537,7 +537,9 @@ int fc_fabric_login(struct fc_lport *lport)
 	int rc = -1;
 
 	mutex_lock(&lport->lp_mutex);
-	if (lport->state == LPORT_ST_DISABLED) {
+	if (lport->state == LPORT_ST_DISABLED ||
+	    lport->state == LPORT_ST_LOGO) {
+		fc_lport_state_enter(lport, LPORT_ST_RESET);
 		fc_lport_enter_reset(lport);
 		rc = 0;
 	}
@@ -967,6 +969,9 @@ static void fc_lport_enter_reset(struct fc_lport *lport)
 	FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
 		     fc_lport_state(lport));
 
+	if (lport->state == LPORT_ST_DISABLED || lport->state == LPORT_ST_LOGO)
+		return;
+
 	if (lport->vport) {
 		if (lport->link_up)
 			fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING);

@@ -310,6 +310,7 @@ static void fc_rport_work(struct work_struct *work)
 				restart = 1;
 			else
 				list_del(&rdata->peers);
+			rdata->event = RPORT_EV_NONE;
 			mutex_unlock(&rdata->rp_mutex);
 			mutex_unlock(&lport->disc.disc_mutex);
 		}

@@ -4506,9 +4506,13 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
 	pdev = phba->pcidev;
 
 	/* Set the device DMA mask size */
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
-		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
+	 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
+		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
+		 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
 			return error;
+		}
+	}
 
 	/* Get the bus address of Bar0 and Bar2 and the number of bytes
 	 * required by each mapping.
@@ -6021,9 +6025,13 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
 	pdev = phba->pcidev;
 
 	/* Set the device DMA mask size */
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
-		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
+	 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
+		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
+		 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
 			return error;
+		}
+	}
 
 	/* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
 	 * number of bytes required by each mapping. They are actually

@@ -2501,7 +2501,9 @@ static int megasas_init_mfi(struct megasas_instance *instance)
 		instance->base_addr = pci_resource_start(instance->pdev, 0);
 	}
 
-	if (pci_request_regions(instance->pdev, "megasas: LSI")) {
+	if (pci_request_selected_regions(instance->pdev,
+		pci_select_bars(instance->pdev, IORESOURCE_MEM),
+		"megasas: LSI")) {
 		printk(KERN_DEBUG "megasas: IO memory region busy!\n");
 		return -EBUSY;
 	}
@@ -2642,7 +2644,8 @@ static int megasas_init_mfi(struct megasas_instance *instance)
 	iounmap(instance->reg_set);
 
 fail_ioremap:
-	pci_release_regions(instance->pdev);
+	pci_release_selected_regions(instance->pdev,
+		pci_select_bars(instance->pdev, IORESOURCE_MEM));
 
 	return -EINVAL;
 }
@@ -2662,7 +2665,8 @@ static void megasas_release_mfi(struct megasas_instance *instance)
 
 	iounmap(instance->reg_set);
 
-	pci_release_regions(instance->pdev);
+	pci_release_selected_regions(instance->pdev,
+		pci_select_bars(instance->pdev, IORESOURCE_MEM));
 }
 
 /**
@@ -2971,7 +2975,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	/*
 	 * PCI prepping: enable device set bus mastering and dma mask
 	 */
-	rval = pci_enable_device(pdev);
+	rval = pci_enable_device_mem(pdev);
 
 	if (rval) {
 		return rval;
@@ -3276,7 +3280,7 @@ megasas_resume(struct pci_dev *pdev)
 	/*
 	 * PCI prepping: enable device set bus mastering and dma mask
 	 */
-	rval = pci_enable_device(pdev);
+	rval = pci_enable_device_mem(pdev);
 
 	if (rval) {
 		printk(KERN_ERR "megasas: Enable device failed\n");

@@ -3583,6 +3583,11 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
 	ioc->transport_cmds.status = MPT2_CMD_NOT_USED;
 	mutex_init(&ioc->transport_cmds.mutex);
 
+	/* scsih internal command bits */
+	ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+	ioc->scsih_cmds.status = MPT2_CMD_NOT_USED;
+	mutex_init(&ioc->scsih_cmds.mutex);
+
 	/* task management internal command bits */
 	ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
 	ioc->tm_cmds.status = MPT2_CMD_NOT_USED;

@@ -657,6 +657,7 @@ static struct pci_device_id __devinitdata mvs_pci_table[] = {
 	{ PCI_VDEVICE(MARVELL, 0x9180), chip_9180 },
 	{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 },
 	{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 },
+	{ PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 },
 
 	{ } /* terminate list */
 };

@@ -432,30 +432,23 @@ static void _osd_free_seg(struct osd_request *or __unused,
 	seg->alloc_size = 0;
 }
 
-static void _put_request(struct request *rq , bool is_async)
+static void _put_request(struct request *rq)
 {
-	if (is_async) {
-		WARN_ON(rq->bio);
-		__blk_put_request(rq->q, rq);
-	} else {
-		/*
-		 * If osd_finalize_request() was called but the request was not
-		 * executed through the block layer, then we must release BIOs.
-		 * TODO: Keep error code in or->async_error. Need to audit all
-		 * code paths.
-		 */
-		if (unlikely(rq->bio))
-			blk_end_request(rq, -ENOMEM, blk_rq_bytes(rq));
-		else
-			blk_put_request(rq);
-	}
+	/*
+	 * If osd_finalize_request() was called but the request was not
+	 * executed through the block layer, then we must release BIOs.
+	 * TODO: Keep error code in or->async_error. Need to audit all
+	 * code paths.
+	 */
+	if (unlikely(rq->bio))
+		blk_end_request(rq, -ENOMEM, blk_rq_bytes(rq));
+	else
+		blk_put_request(rq);
 }
 
 void osd_end_request(struct osd_request *or)
 {
 	struct request *rq = or->request;
-	/* IMPORTANT: make sure this agrees with osd_execute_request_async */
-	bool is_async = (or->request->end_io_data == or);
 
 	_osd_free_seg(or, &or->set_attr);
 	_osd_free_seg(or, &or->enc_get_attr);
@@ -463,20 +456,34 @@ void osd_end_request(struct osd_request *or)
 
 	if (rq) {
 		if (rq->next_rq) {
-			_put_request(rq->next_rq, is_async);
+			_put_request(rq->next_rq);
 			rq->next_rq = NULL;
 		}
 
-		_put_request(rq, is_async);
+		_put_request(rq);
 	}
 	_osd_request_free(or);
 }
 EXPORT_SYMBOL(osd_end_request);
 
+static void _set_error_resid(struct osd_request *or, struct request *req,
+			     int error)
+{
+	or->async_error = error;
+	or->req_errors = req->errors ? : error;
+	or->sense_len = req->sense_len;
+	if (or->out.req)
+		or->out.residual = or->out.req->resid_len;
+	if (or->in.req)
+		or->in.residual = or->in.req->resid_len;
+}
+
 int osd_execute_request(struct osd_request *or)
 {
-	return or->async_error =
-		blk_execute_rq(or->request->q, NULL, or->request, 0);
+	int error = blk_execute_rq(or->request->q, NULL, or->request, 0);
+
+	_set_error_resid(or, or->request, error);
+	return error;
 }
 EXPORT_SYMBOL(osd_execute_request);
 
@@ -484,15 +491,17 @@ static void osd_request_async_done(struct request *req, int error)
 {
 	struct osd_request *or = req->end_io_data;
 
-	or->async_error = error;
-
-	if (unlikely(error)) {
-		OSD_DEBUG("osd_request_async_done error recieved %d "
-			  "errors 0x%x\n", error, req->errors);
-		if (!req->errors) /* don't miss out on this one */
-			req->errors = error;
+	_set_error_resid(or, req, error);
+	if (req->next_rq) {
+		__blk_put_request(req->q, req->next_rq);
+		req->next_rq = NULL;
 	}
 
+	__blk_put_request(req->q, req);
+	or->request = NULL;
+	or->in.req = NULL;
+	or->out.req = NULL;
+
 	if (or->async_done)
 		or->async_done(or, or->async_private);
 	else
@@ -1489,21 +1498,18 @@ int osd_req_decode_sense_full(struct osd_request *or,
 #endif
 	int ret;
 
-	if (likely(!or->request->errors)) {
-		osi->out_resid = 0;
-		osi->in_resid = 0;
+	if (likely(!or->req_errors))
 		return 0;
-	}
 
 	osi = osi ? : &local_osi;
 	memset(osi, 0, sizeof(*osi));
 
-	ssdb = or->request->sense;
-	sense_len = or->request->sense_len;
+	ssdb = (typeof(ssdb))or->sense;
+	sense_len = or->sense_len;
 	if ((sense_len < (int)sizeof(*ssdb) || !ssdb->sense_key)) {
 		OSD_ERR("Block-layer returned error(0x%x) but "
 			"sense_len(%u) || key(%d) is empty\n",
-			or->request->errors, sense_len, ssdb->sense_key);
+			or->req_errors, sense_len, ssdb->sense_key);
 		goto analyze;
 	}
 
@@ -1525,7 +1531,7 @@ int osd_req_decode_sense_full(struct osd_request *or,
 		"additional_code=0x%x async_error=%d errors=0x%x\n",
 		osi->key, original_sense_len, sense_len,
 		osi->additional_code, or->async_error,
-		or->request->errors);
+		or->req_errors);
 
 	if (original_sense_len < sense_len)
 		sense_len = original_sense_len;
@@ -1695,10 +1701,10 @@ analyze:
 		ret = -EIO;
 	}
 
-	if (or->out.req)
-		osi->out_resid = or->out.req->resid_len ?: or->out.total_bytes;
-	if (or->in.req)
-		osi->in_resid = or->in.req->resid_len ?: or->in.total_bytes;
+	if (!or->out.residual)
+		or->out.residual = or->out.total_bytes;
+	if (!or->in.residual)
+		or->in.residual = or->in.total_bytes;
 
 	return ret;
 }

@@ -45,16 +45,6 @@
 #define HEADER_LEN 28
 #define SIZE_OFFSET 16
 
-struct pm8001_ioctl_payload {
-	u32 signature;
-	u16 major_function;
-	u16 minor_function;
-	u16 length;
-	u16 status;
-	u16 offset;
-	u16 id;
-	u8 func_specific[1];
-};
 
 #define FLASH_OK 0x000000
 #define FAIL_OPEN_BIOS_FILE 0x000100

@@ -373,10 +373,7 @@ static int bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue)
 static void __devinit
 mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
 {
-	u32 offset;
-	u32 value;
-	u32 i, j;
-	u32 bit_cnt;
+	u32 value, offset, i;
 
 #define SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR 0x00030000
 #define SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR 0x00040000
@@ -392,55 +389,35 @@ mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
 	*/
 	if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR))
 		return;
-	/* set SSC bit of PHY 0 - 3 */
 	for (i = 0; i < 4; i++) {
 		offset = SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET + 0x4000 * i;
-		value = pm8001_cr32(pm8001_ha, 2, offset);
-		if (SSCbit) {
-			value |= 0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT;
-			value &= ~(0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT);
-		} else {
-			value |= 0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT;
-			value &= ~(0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT);
-		}
-		bit_cnt = 0;
-		for (j = 0; j < 31; j++)
-			if ((value >> j) & 0x00000001)
-				bit_cnt++;
-		if (bit_cnt % 2)
-			value &= ~(0x00000001 << SNW3_PHY_CAPABILITIES_PARITY);
-		else
-			value |= 0x00000001 << SNW3_PHY_CAPABILITIES_PARITY;
-
-		pm8001_cw32(pm8001_ha, 2, offset, value);
+		pm8001_cw32(pm8001_ha, 2, offset, 0x80001501);
 	}
 
 	/* shift membase 3 for SAS2_SETTINGS_LOCAL_PHY 4 - 7 */
 	if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR))
 		return;
 
-	/* set SSC bit of PHY 4 - 7 */
 	for (i = 4; i < 8; i++) {
 		offset = SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET + 0x4000 * (i-4);
-		value = pm8001_cr32(pm8001_ha, 2, offset);
-		if (SSCbit) {
-			value |= 0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT;
-			value &= ~(0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT);
-		} else {
-			value |= 0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT;
-			value &= ~(0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT);
-		}
-		bit_cnt = 0;
-		for (j = 0; j < 31; j++)
-			if ((value >> j) & 0x00000001)
-				bit_cnt++;
-		if (bit_cnt % 2)
-			value &= ~(0x00000001 << SNW3_PHY_CAPABILITIES_PARITY);
-		else
-			value |= 0x00000001 << SNW3_PHY_CAPABILITIES_PARITY;
-
-		pm8001_cw32(pm8001_ha, 2, offset, value);
+		pm8001_cw32(pm8001_ha, 2, offset, 0x80001501);
 	}
-
+	/*************************************************************
+	Change the SSC upspreading value to 0x0 so that upspreading is disabled.
+	Device MABC SMOD0 Controls
+	Address: (via MEMBASE-III):
+	Using shifted destination address 0x0_0000: with Offset 0xD8
+
+	31:28 R/W Reserved Do not change
+	27:24 R/W SAS_SMOD_SPRDUP 0000
+	23:20 R/W SAS_SMOD_SPRDDN 0000
+	19:0 R/W Reserved Do not change
+	Upon power-up this register will read as 0x8990c016,
+	and I would like you to change the SAS_SMOD_SPRDUP bits to 0b0000
+	so that the written value will be 0x8090c016.
+	This will ensure only down-spreading SSC is enabled on the SPC.
+	*************************************************************/
+	value = pm8001_cr32(pm8001_ha, 2, 0xd8);
+	pm8001_cw32(pm8001_ha, 2, 0xd8, 0x8000C016);
+
 	/*set the shifted destination address to 0x0 to avoid error operation */
 	bar4_shift(pm8001_ha, 0x0);
@@ -1901,7 +1878,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
 	struct sas_task *t;
 	struct pm8001_ccb_info *ccb;
-	unsigned long flags;
+	unsigned long flags = 0;
 	u32 param;
 	u32 status;
 	u32 tag;
@@ -2040,7 +2017,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 			ts->stat = SAS_QUEUE_FULL;
 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 			mb();/*in order to force CPU ordering*/
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 			t->task_done(t);
+			spin_lock_irqsave(&pm8001_ha->lock, flags);
 			return;
 		}
 		break;
@@ -2058,7 +2037,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 			ts->stat = SAS_QUEUE_FULL;
 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 			mb();/*ditto*/
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 			t->task_done(t);
+			spin_lock_irqsave(&pm8001_ha->lock, flags);
 			return;
 		}
 		break;
@@ -2084,7 +2065,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 			ts->stat = SAS_QUEUE_FULL;
 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 			mb();/* ditto*/
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 			t->task_done(t);
+			spin_lock_irqsave(&pm8001_ha->lock, flags);
 			return;
 		}
 		break;
@@ -2149,7 +2132,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 			ts->stat = SAS_QUEUE_FULL;
 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 			mb();/*ditto*/
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 			t->task_done(t);
+			spin_lock_irqsave(&pm8001_ha->lock, flags);
 			return;
 		}
 		break;
@@ -2171,7 +2156,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 			ts->stat = SAS_QUEUE_FULL;
 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 			mb();/*ditto*/
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 			t->task_done(t);
+			spin_lock_irqsave(&pm8001_ha->lock, flags);
 			return;
 		}
 		break;
@@ -2200,11 +2187,20 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 			" resp 0x%x stat 0x%x but aborted by upper layer!\n",
 			t, status, ts->resp, ts->stat));
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-	} else {
+	} else if (t->uldd_task) {
 		spin_unlock_irqrestore(&t->task_state_lock, flags);
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 		mb();/* ditto */
+		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 		t->task_done(t);
+		spin_lock_irqsave(&pm8001_ha->lock, flags);
+	} else if (!t->uldd_task) {
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+		mb();/*ditto*/
+		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+		t->task_done(t);
+		spin_lock_irqsave(&pm8001_ha->lock, flags);
 	}
 }
 
@@ -2212,7 +2208,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
 {
 	struct sas_task *t;
-	unsigned long flags;
+	unsigned long flags = 0;
 	struct task_status_struct *ts;
 	struct pm8001_ccb_info *ccb;
 	struct pm8001_device *pm8001_dev;
@@ -2292,7 +2288,9 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
 			ts->stat = SAS_QUEUE_FULL;
 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 			mb();/*ditto*/
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 			t->task_done(t);
+			spin_lock_irqsave(&pm8001_ha->lock, flags);
 			return;
 		}
 		break;
@@ -2401,11 +2399,20 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
 			" resp 0x%x stat 0x%x but aborted by upper layer!\n",
 			t, event, ts->resp, ts->stat));
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-	} else {
+	} else if (t->uldd_task) {
 		spin_unlock_irqrestore(&t->task_state_lock, flags);
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-		mb();/* in order to force CPU ordering */
+		mb();/* ditto */
+		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 		t->task_done(t);
+		spin_lock_irqsave(&pm8001_ha->lock, flags);
+	} else if (!t->uldd_task) {
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+		mb();/*ditto*/
+		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+		t->task_done(t);
+		spin_lock_irqsave(&pm8001_ha->lock, flags);
 	}
 }
 
@@ -2876,15 +2883,20 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
 		le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
 	u8 link_rate =
 		(u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28);
+	u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
 	u8 phy_id =
 		(u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
+	u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
+	u8 portstate = (u8)(npip_portstate & 0x0000000F);
+	struct pm8001_port *port = &pm8001_ha->port[port_id];
 	struct sas_ha_struct *sas_ha = pm8001_ha->sas;
 	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
 	unsigned long flags;
 	u8 deviceType = pPayload->sas_identify.dev_type;
+	port->port_state = portstate;
 	PM8001_MSG_DBG(pm8001_ha,
-		pm8001_printk("HW_EVENT_SAS_PHY_UP \n"));
+		pm8001_printk("HW_EVENT_SAS_PHY_UP port id = %d, phy id = %d\n",
+		port_id, phy_id));
 
 	switch (deviceType) {
 	case SAS_PHY_UNUSED:
@@ -2895,16 +2907,19 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
 		PM8001_MSG_DBG(pm8001_ha, pm8001_printk("end device.\n"));
 		pm8001_chip_phy_ctl_req(pm8001_ha, phy_id,
 			PHY_NOTIFY_ENABLE_SPINUP);
+		port->port_attached = 1;
 		get_lrate_mode(phy, link_rate);
 		break;
 	case SAS_EDGE_EXPANDER_DEVICE:
 		PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk("expander device.\n"));
+		port->port_attached = 1;
 		get_lrate_mode(phy, link_rate);
 		break;
 	case SAS_FANOUT_EXPANDER_DEVICE:
 		PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk("fanout expander device.\n"));
+		port->port_attached = 1;
 		get_lrate_mode(phy, link_rate);
 		break;
 	default:
@@ -2946,11 +2961,20 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
 		le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
 	u8 link_rate =
 		(u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28);
+	u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
 	u8 phy_id =
 		(u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
+	u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
+	u8 portstate = (u8)(npip_portstate & 0x0000000F);
+	struct pm8001_port *port = &pm8001_ha->port[port_id];
 	struct sas_ha_struct *sas_ha = pm8001_ha->sas;
 	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
 	unsigned long flags;
+	PM8001_MSG_DBG(pm8001_ha,
+		pm8001_printk("HW_EVENT_SATA_PHY_UP port id = %d,"
+			" phy id = %d\n", port_id, phy_id));
+	port->port_state = portstate;
+	port->port_attached = 1;
 	get_lrate_mode(phy, link_rate);
 	phy->phy_type |= PORT_TYPE_SATA;
 	phy->phy_attached = 1;
@@ -2984,7 +3008,13 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
 		(u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
 	u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
 	u8 portstate = (u8)(npip_portstate & 0x0000000F);
+	struct pm8001_port *port = &pm8001_ha->port[port_id];
+	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+	port->port_state = portstate;
+	phy->phy_type = 0;
+	phy->identify.device_type = 0;
+	phy->phy_attached = 0;
+	memset(&phy->dev_sas_addr, 0, SAS_ADDR_SIZE);
 	switch (portstate) {
 	case PORT_VALID:
 		break;
@@ -2993,26 +3023,30 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
 			pm8001_printk(" PortInvalid portID %d \n", port_id));
 		PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk(" Last phy Down and port invalid\n"));
+		port->port_attached = 0;
 		pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
 			port_id, phy_id, 0, 0);
 		break;
 	case PORT_IN_RESET:
 		PM8001_MSG_DBG(pm8001_ha,
-			pm8001_printk(" PortInReset portID %d \n", port_id));
+			pm8001_printk(" Port In Reset portID %d \n", port_id));
 		break;
 	case PORT_NOT_ESTABLISHED:
 		PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n"));
+		port->port_attached = 0;
 		break;
 	case PORT_LOSTCOMM:
 		PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk(" phy Down and PORT_LOSTCOMM\n"));
 		PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk(" Last phy Down and port invalid\n"));
+		port->port_attached = 0;
 		pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
 			port_id, phy_id, 0, 0);
 		break;
 	default:
+		port->port_attached = 0;
 		PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk(" phy Down and(default) = %x\n",
 			portstate));
@@ -3770,7 +3804,8 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
 	u32 opc = OPC_INB_SSPINIIOSTART;
 	memset(&ssp_cmd, 0, sizeof(ssp_cmd));
 	memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
-	ssp_cmd.dir_m_tlr = data_dir_flags[task->data_dir] << 8 | 0x0;/*0 for
+	ssp_cmd.dir_m_tlr =
+		cpu_to_le32(data_dir_flags[task->data_dir] << 8 | 0x0);/*0 for
 	SAS 1.1 compatible TLR*/
 	ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len);
 	ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id);
@@ -3841,7 +3876,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
 		}
 	}
 	if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag))
-		ncg_tag = cpu_to_le32(hdr_tag);
+		ncg_tag = hdr_tag;
 	dir = data_dir_flags[task->data_dir] << 8;
 	sata_cmd.tag = cpu_to_le32(tag);
 	sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
@@ -3986,7 +4021,7 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
 		((stp_sspsmp_sata & 0x03) * 0x10000000));
 	payload.firstburstsize_ITNexustimeout =
 		cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
-	memcpy(&payload.sas_addr_hi, pm8001_dev->sas_device->sas_addr,
+	memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
 		SAS_ADDR_SIZE);
 	rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
 	return rc;
@@ -4027,7 +4062,7 @@ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
 	struct inbound_queue_table *circularQ;
 	int ret;
 	u32 opc = OPC_INB_LOCAL_PHY_CONTROL;
-	memset((u8 *)&payload, 0, sizeof(payload));
+	memset(&payload, 0, sizeof(payload));
 	circularQ = &pm8001_ha->inbnd_q_tbl[0];
 	payload.tag = 1;
 	payload.phyop_phyid =

@@ -242,8 +242,7 @@ struct reg_dev_req {
 	__le32 phyid_portid;
 	__le32 dtype_dlr_retry;
 	__le32 firstburstsize_ITNexustimeout;
-	u32 sas_addr_hi;
-	u32 sas_addr_low;
+	u8 sas_addr[SAS_ADDR_SIZE];
 	__le32 upper_device_id;
 	u32 reserved[8];
 } __attribute__((packed, aligned(4)));

@@ -200,8 +200,13 @@ static int __devinit pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
 {
 	int i;
 	spin_lock_init(&pm8001_ha->lock);
-	for (i = 0; i < pm8001_ha->chip->n_phy; i++)
+	for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
 		pm8001_phy_init(pm8001_ha, i);
+		pm8001_ha->port[i].wide_port_phymap = 0;
+		pm8001_ha->port[i].port_attached = 0;
+		pm8001_ha->port[i].port_state = 0;
+		INIT_LIST_HEAD(&pm8001_ha->port[i].list);
+	}
 
 	pm8001_ha->tags = kzalloc(PM8001_MAX_CCB, GFP_KERNEL);
 	if (!pm8001_ha->tags)
@@ -511,19 +516,23 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
 	u8 i;
 #ifdef PM8001_READ_VPD
 	DECLARE_COMPLETION_ONSTACK(completion);
+	struct pm8001_ioctl_payload payload;
 	pm8001_ha->nvmd_completion = &completion;
-	PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, 0, 0);
+	payload.minor_function = 0;
+	payload.length = 128;
+	payload.func_specific = kzalloc(128, GFP_KERNEL);
+	PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
 	wait_for_completion(&completion);
 	for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
 		memcpy(&pm8001_ha->phy[i].dev_sas_addr, pm8001_ha->sas_addr,
 			SAS_ADDR_SIZE);
 		PM8001_INIT_DBG(pm8001_ha,
-			pm8001_printk("phy %d sas_addr = %x \n", i,
-			(u64)pm8001_ha->phy[i].dev_sas_addr));
+			pm8001_printk("phy %d sas_addr = %016llx \n", i,
+			pm8001_ha->phy[i].dev_sas_addr));
 	}
 #else
 	for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
-		pm8001_ha->phy[i].dev_sas_addr = 0x500e004010000004ULL;
+		pm8001_ha->phy[i].dev_sas_addr = 0x50010c600047f9d0ULL;
 		pm8001_ha->phy[i].dev_sas_addr =
 			cpu_to_be64((u64)
 				(*(u64 *)&pm8001_ha->phy[i].dev_sas_addr));

@@ -329,6 +329,23 @@ int pm8001_slave_configure(struct scsi_device *sdev)
 	}
 	return 0;
 }
+/* Find the local port id that's attached to this device */
+static int sas_find_local_port_id(struct domain_device *dev)
+{
+	struct domain_device *pdev = dev->parent;
+
+	/* Directly attached device */
+	if (!pdev)
+		return dev->port->id;
+	while (pdev) {
+		struct domain_device *pdev_p = pdev->parent;
+		if (!pdev_p)
+			return pdev->port->id;
+		pdev = pdev->parent;
+	}
+	return 0;
+}
+
 /**
  * pm8001_task_exec - queue the task(ssp, smp && ata) to the hardware.
  * @task: the task to be execute.
@@ -346,11 +363,12 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
 	struct domain_device *dev = task->dev;
 	struct pm8001_hba_info *pm8001_ha;
 	struct pm8001_device *pm8001_dev;
+	struct pm8001_port *port = NULL;
 	struct sas_task *t = task;
 	struct pm8001_ccb_info *ccb;
 	u32 tag = 0xdeadbeef, rc, n_elem = 0;
 	u32 n = num;
-	unsigned long flags = 0;
+	unsigned long flags = 0, flags_libsas = 0;
 
 	if (!dev->port) {
 		struct task_status_struct *tsm = &t->task_status;
@@ -379,6 +397,35 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
 			rc = SAS_PHY_DOWN;
 			goto out_done;
 		}
+		port = &pm8001_ha->port[sas_find_local_port_id(dev)];
+		if (!port->port_attached) {
+			if (sas_protocol_ata(t->task_proto)) {
+				struct task_status_struct *ts = &t->task_status;
+				ts->resp = SAS_TASK_UNDELIVERED;
+				ts->stat = SAS_PHY_DOWN;
+
+				spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+				spin_unlock_irqrestore(dev->sata_dev.ap->lock,
+						flags_libsas);
+				t->task_done(t);
+				spin_lock_irqsave(dev->sata_dev.ap->lock,
+						flags_libsas);
+				spin_lock_irqsave(&pm8001_ha->lock, flags);
+				if (n > 1)
+					t = list_entry(t->list.next,
+						struct sas_task, list);
+				continue;
+			} else {
+				struct task_status_struct *ts = &t->task_status;
+				ts->resp = SAS_TASK_UNDELIVERED;
+				ts->stat = SAS_PHY_DOWN;
+				t->task_done(t);
+				if (n > 1)
+					t = list_entry(t->list.next,
+						struct sas_task, list);
+				continue;
+			}
+		}
 		rc = pm8001_tag_alloc(pm8001_ha, &tag);
 		if (rc)
 			goto err_out;
@@ -569,11 +616,11 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
 	spin_lock_irqsave(&pm8001_ha->lock, flags);
 
 	pm8001_device = pm8001_alloc_dev(pm8001_ha);
-	pm8001_device->sas_device = dev;
 	if (!pm8001_device) {
 		res = -1;
 		goto found_out;
 	}
+	pm8001_device->sas_device = dev;
 	dev->lldd_dev = pm8001_device;
 	pm8001_device->dev_type = dev->dev_type;
 	pm8001_device->dcompletion = &completion;
@@ -609,7 +656,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
 	wait_for_completion(&completion);
 	if (dev->dev_type == SAS_END_DEV)
 		msleep(50);
-	pm8001_ha->flags = PM8001F_RUN_TIME ;
+	pm8001_ha->flags |= PM8001F_RUN_TIME ;
 	return 0;
 found_out:
 	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
@@ -772,7 +819,7 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
 	task->task_done = pm8001_task_done;
 	task->timer.data = (unsigned long)task;
 	task->timer.function = pm8001_tmf_timedout;
-	task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
+	task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ;
 	add_timer(&task->timer);
 
 	res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
@@ -897,6 +944,8 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev)
 
 	if (dev_is_sata(dev)) {
 		DECLARE_COMPLETION_ONSTACK(completion_setstate);
+		if (scsi_is_sas_phy_local(phy))
+			return 0;
 		rc = sas_phy_reset(phy, 1);
 		msleep(2000);
 		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,

@@ -59,11 +59,11 @@
 
 #define DRV_NAME "pm8001"
 #define DRV_VERSION "0.1.36"
-#define PM8001_FAIL_LOGGING 0x01 /* libsas EH function logging */
+#define PM8001_FAIL_LOGGING 0x01 /* Error message logging */
 #define PM8001_INIT_LOGGING 0x02 /* driver init logging */
 #define PM8001_DISC_LOGGING 0x04 /* discovery layer logging */
 #define PM8001_IO_LOGGING 0x08 /* I/O path logging */
-#define PM8001_EH_LOGGING 0x10 /* Error message logging */
+#define PM8001_EH_LOGGING 0x10 /* libsas EH function logging*/
 #define PM8001_IOCTL_LOGGING 0x20 /* IOCTL message logging */
 #define PM8001_MSG_LOGGING 0x40 /* misc message logging */
 #define pm8001_printk(format, arg...) printk(KERN_INFO "%s %d:" format,\
@@ -100,6 +100,7 @@ do { \
 
 #define PM8001_USE_TASKLET
 #define PM8001_USE_MSIX
+#define PM8001_READ_VPD
 
 
 #define DEV_IS_EXPANDER(type) ((type == EDGE_DEV) || (type == FANOUT_DEV))
@@ -111,7 +112,22 @@ extern const struct pm8001_dispatch pm8001_8001_dispatch;
 struct pm8001_hba_info;
 struct pm8001_ccb_info;
 struct pm8001_device;
-struct pm8001_tmf_task;
+/* define task management IU */
+struct pm8001_tmf_task {
+	u8 tmf;
+	u32 tag_of_task_to_be_managed;
+};
+struct pm8001_ioctl_payload {
+	u32 signature;
+	u16 major_function;
+	u16 minor_function;
+	u16 length;
+	u16 status;
+	u16 offset;
+	u16 id;
+	u8 *func_specific;
+};
 
 struct pm8001_dispatch {
 	char *name;
 	int (*chip_init)(struct pm8001_hba_info *pm8001_ha);
@@ -164,6 +180,10 @@ struct pm8001_chip_info {
 
 struct pm8001_port {
 	struct asd_sas_port sas_port;
+	u8 port_attached;
+	u8 wide_port_phymap;
+	u8 port_state;
+	struct list_head list;
 };
 
 struct pm8001_phy {
@@ -386,11 +406,7 @@ struct pm8001_fw_image_header {
 	__be32 startup_entry;
 } __attribute__((packed, aligned(4)));
 
-/* define task management IU */
-struct pm8001_tmf_task {
-	u8 tmf;
-	u32 tag_of_task_to_be_managed;
-};
 /**
  * FW Flash Update status values
  */

@@ -1,7 +1,8 @@
 /*
  * pmcraid.c -- driver for PMC Sierra MaxRAID controller adapters
  *
- * Written By: PMC Sierra Corporation
+ * Written By: Anil Ravindranath<anil_ravindranath@pmc-sierra.com>
+ *             PMC-Sierra Inc
  *
  * Copyright (C) 2008, 2009 PMC Sierra Inc
  *
@@ -79,7 +80,7 @@ DECLARE_BITMAP(pmcraid_minor, PMCRAID_MAX_ADAPTERS);
 /*
  * Module parameters
  */
-MODULE_AUTHOR("PMC Sierra Corporation, anil_ravindranath@pmc-sierra.com");
+MODULE_AUTHOR("Anil Ravindranath<anil_ravindranath@pmc-sierra.com>");
 MODULE_DESCRIPTION("PMC Sierra MaxRAID Controller Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(PMCRAID_DRIVER_VERSION);
@@ -162,10 +163,10 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
 	spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
 	list_for_each_entry(temp, &pinstance->used_res_q, queue) {
 
-		/* do not expose VSETs with order-ids >= 240 */
+		/* do not expose VSETs with order-ids > MAX_VSET_TARGETS */
 		if (RES_IS_VSET(temp->cfg_entry)) {
 			target = temp->cfg_entry.unique_flags1;
-			if (target >= PMCRAID_MAX_VSET_TARGETS)
+			if (target > PMCRAID_MAX_VSET_TARGETS)
 				continue;
 			bus = PMCRAID_VSET_BUS_ID;
 			lun = 0;
@@ -1210,7 +1211,7 @@ static int pmcraid_expose_resource(struct pmcraid_config_table_entry *cfgte)
 	int retval = 0;
 
 	if (cfgte->resource_type == RES_TYPE_VSET)
-		retval = ((cfgte->unique_flags1 & 0xFF) < 0xFE);
+		retval = ((cfgte->unique_flags1 & 0x80) == 0);
 	else if (cfgte->resource_type == RES_TYPE_GSCSI)
 		retval = (RES_BUS(cfgte->resource_address) !=
 				PMCRAID_VIRTUAL_ENCL_BUS_ID);
@@ -1361,6 +1362,7 @@ static int pmcraid_notify_aen(struct pmcraid_instance *pinstance, u8 type)
  * Return value:
  *  none
  */
+
 static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
 {
 	struct pmcraid_config_table_entry *cfg_entry;
@@ -1368,9 +1370,10 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
 	struct pmcraid_cmd *cmd;
 	struct pmcraid_cmd *cfgcmd;
 	struct pmcraid_resource_entry *res = NULL;
-	u32 new_entry = 1;
 	unsigned long lock_flags;
 	unsigned long host_lock_flags;
+	u32 new_entry = 1;
+	u32 hidden_entry = 0;
 	int rc;
 
 	ccn_hcam = (struct pmcraid_hcam_ccn *)pinstance->ccn.hcam;
@@ -1406,9 +1409,15 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
 	}
 
 	/* If this resource is not going to be added to mid-layer, just notify
-	 * applications and return
+	 * applications and return. If this notification is about hiding a VSET
+	 * resource, check if it was exposed already.
 	 */
-	if (!pmcraid_expose_resource(cfg_entry))
+	if (pinstance->ccn.hcam->notification_type ==
+	    NOTIFICATION_TYPE_ENTRY_CHANGED &&
+	    cfg_entry->resource_type == RES_TYPE_VSET &&
+	    cfg_entry->unique_flags1 & 0x80) {
+		hidden_entry = 1;
+	} else if (!pmcraid_expose_resource(cfg_entry))
 		goto out_notify_apps;
 
 	spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
@@ -1424,6 +1433,12 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
 
 	if (new_entry) {
 
+		if (hidden_entry) {
+			spin_unlock_irqrestore(&pinstance->resource_lock,
+						lock_flags);
+			goto out_notify_apps;
+		}
+
 		/* If there are more number of resources than what driver can
 		 * manage, do not notify the applications about the CCN. Just
 		 * ignore this notifications and re-register the same HCAM
@@ -1454,8 +1469,9 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
 		sizeof(struct pmcraid_config_table_entry));
 
 	if (pinstance->ccn.hcam->notification_type ==
-	    NOTIFICATION_TYPE_ENTRY_DELETED) {
+	    NOTIFICATION_TYPE_ENTRY_DELETED || hidden_entry) {
 		if (res->scsi_dev) {
+			res->cfg_entry.unique_flags1 &= 0x7F;
 			res->change_detected = RES_CHANGE_DEL;
 			res->cfg_entry.resource_handle =
 				PMCRAID_INVALID_RES_HANDLE;

@@ -1,6 +1,9 @@
 /*
  * pmcraid.h -- PMC Sierra MaxRAID controller driver header file
  *
+ * Written By: Anil Ravindranath<anil_ravindranath@pmc-sierra.com>
+ *             PMC-Sierra Inc
+ *
  * Copyright (C) 2008, 2009 PMC Sierra Inc.
  *
  * This program is free software; you can redistribute it and/or modify
@@ -106,7 +109,7 @@
 #define PMCRAID_VSET_LUN_ID 0x0
 #define PMCRAID_PHYS_BUS_ID 0x0
 #define PMCRAID_VIRTUAL_ENCL_BUS_ID 0x8
-#define PMCRAID_MAX_VSET_TARGETS 240
+#define PMCRAID_MAX_VSET_TARGETS 0x7F
 #define PMCRAID_MAX_VSET_LUNS_PER_TARGET 8
 
 #define PMCRAID_IOA_MAX_SECTORS 32767
@@ -1570,9 +1570,6 @@ typedef struct fc_port {
 	struct fc_rport *rport, *drport;
 	u32 supported_classes;
 
-	unsigned long last_queue_full;
-	unsigned long last_ramp_up;
-
 	uint16_t vp_idx;
 } fc_port_t;
 
@@ -2265,6 +2262,7 @@ struct qla_hw_data {
 		uint32_t port0 :1;
 		uint32_t running_gold_fw :1;
 		uint32_t cpu_affinity_enabled :1;
+		uint32_t disable_msix_handshake :1;
 	} flags;
 
 	/* This spinlock is used to protect "io transactions", you must
@@ -2387,6 +2385,7 @@ struct qla_hw_data {
 #define IS_QLA81XX(ha) (IS_QLA8001(ha))
 #define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
 		IS_QLA25XX(ha) || IS_QLA81XX(ha))
+#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha))
 #define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && \
 		(ha)->flags.msix_enabled)
 #define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha))
@@ -72,8 +72,6 @@ extern int ql2xloginretrycount;
 extern int ql2xfdmienable;
 extern int ql2xallocfwdump;
 extern int ql2xextended_error_logging;
-extern int ql2xqfullrampup;
-extern int ql2xqfulltracking;
 extern int ql2xiidmaenable;
 extern int ql2xmaxqueues;
 extern int ql2xmultique_tag;
@@ -1442,7 +1442,17 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
 			icb->firmware_options_2 |=
 				__constant_cpu_to_le32(BIT_18);
 
-		icb->firmware_options_2 &= __constant_cpu_to_le32(~BIT_22);
+		/* Use Disable MSIX Handshake mode for capable adapters */
+		if (IS_MSIX_NACK_CAPABLE(ha)) {
+			icb->firmware_options_2 &=
+				__constant_cpu_to_le32(~BIT_22);
+			ha->flags.disable_msix_handshake = 1;
+			qla_printk(KERN_INFO, ha,
+				"MSIX Handshake Disable Mode turned on\n");
+		} else {
+			icb->firmware_options_2 |=
+				__constant_cpu_to_le32(BIT_22);
+		}
 		icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23);
 
 		WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
@@ -811,78 +811,6 @@ skip_rio:
 		qla2x00_alert_all_vps(rsp, mb);
 }
 
-static void
-qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
-{
-	fc_port_t *fcport = data;
-	struct scsi_qla_host *vha = fcport->vha;
-	struct qla_hw_data *ha = vha->hw;
-	struct req_que *req = NULL;
-
-	if (!ql2xqfulltracking)
-		return;
-
-	req = vha->req;
-	if (!req)
-		return;
-	if (req->max_q_depth <= sdev->queue_depth)
-		return;
-
-	if (sdev->ordered_tags)
-		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
-		    sdev->queue_depth + 1);
-	else
-		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
-		    sdev->queue_depth + 1);
-
-	fcport->last_ramp_up = jiffies;
-
-	DEBUG2(qla_printk(KERN_INFO, ha,
-	    "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
-	    fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
-	    sdev->queue_depth));
-}
-
-static void
-qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
-{
-	fc_port_t *fcport = data;
-
-	if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
-		return;
-
-	DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
-	    "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
-	    fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
-	    sdev->queue_depth));
-}
-
-static inline void
-qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req,
-    srb_t *sp)
-{
-	fc_port_t *fcport;
-	struct scsi_device *sdev;
-
-	if (!ql2xqfulltracking)
-		return;
-
-	sdev = sp->cmd->device;
-	if (sdev->queue_depth >= req->max_q_depth)
-		return;
-
-	fcport = sp->fcport;
-	if (time_before(jiffies,
-	    fcport->last_ramp_up + ql2xqfullrampup * HZ))
-		return;
-	if (time_before(jiffies,
-	    fcport->last_queue_full + ql2xqfullrampup * HZ))
-		return;
-
-	starget_for_each_device(sdev->sdev_target, fcport,
-	    qla2x00_adjust_sdev_qdepth_up);
-}
-
 /**
  * qla2x00_process_completed_request() - Process a Fast Post response.
  * @ha: SCSI driver HA context
@@ -913,8 +841,6 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
 
 		/* Save ISP completion status */
 		sp->cmd->result = DID_OK << 16;
-
-		qla2x00_ramp_up_queue_depth(vha, req, sp);
 		qla2x00_sp_compl(ha, sp);
 	} else {
 		DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion"
@@ -1435,13 +1361,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 			    "scsi(%ld): QUEUE FULL status detected "
 			    "0x%x-0x%x.\n", vha->host_no, comp_status,
 			    scsi_status));
-
-			/* Adjust queue depth for all luns on the port. */
-			if (!ql2xqfulltracking)
-				break;
-			fcport->last_queue_full = jiffies;
-			starget_for_each_device(cp->device->sdev_target,
-			    fcport, qla2x00_adjust_sdev_qdepth_down);
 			break;
 		}
 		if (lscsi_status != SS_CHECK_CONDITION)
@@ -1516,17 +1435,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 				    "scsi(%ld): QUEUE FULL status detected "
 				    "0x%x-0x%x.\n", vha->host_no, comp_status,
 				    scsi_status));
-
-				/*
-				 * Adjust queue depth for all luns on the
-				 * port.
-				 */
-				if (!ql2xqfulltracking)
-					break;
-				fcport->last_queue_full = jiffies;
-				starget_for_each_device(
-				    cp->device->sdev_target, fcport,
-				    qla2x00_adjust_sdev_qdepth_down);
 				break;
 			}
 			if (lscsi_status != SS_CHECK_CONDITION)
@@ -2020,7 +1928,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
 
 	vha = qla25xx_get_host(rsp);
 	qla24xx_process_response_queue(vha, rsp);
-	if (!ha->mqenable) {
+	if (!ha->flags.disable_msix_handshake) {
 		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
 		RD_REG_DWORD_RELAXED(&reg->hccr);
 	}
@@ -2034,6 +1942,7 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
 {
 	struct qla_hw_data *ha;
 	struct rsp_que *rsp;
+	struct device_reg_24xx __iomem *reg;
 
 	rsp = (struct rsp_que *) dev_id;
 	if (!rsp) {
@@ -2043,6 +1952,14 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
 	}
 	ha = rsp->hw;
 
+	/* Clear the interrupt, if enabled, for this response queue */
+	if (rsp->options & ~BIT_6) {
+		reg = &ha->iobase->isp24;
+		spin_lock_irq(&ha->hardware_lock);
+		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+		RD_REG_DWORD_RELAXED(&reg->hccr);
+		spin_unlock_irq(&ha->hardware_lock);
+	}
 	queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
 
 	return IRQ_HANDLED;
@@ -696,6 +696,10 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
 	/* Use alternate PCI devfn */
 	if (LSB(rsp->rid))
 		options |= BIT_5;
+	/* Enable MSIX handshake mode on for uncapable adapters */
+	if (!IS_MSIX_NACK_CAPABLE(ha))
+		options |= BIT_6;
+
 	rsp->options = options;
 	rsp->id = que_id;
 	reg = ISP_QUE_REG(ha, que_id);
@@ -78,21 +78,6 @@ module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(ql2xmaxqdepth,
 		"Maximum queue depth to report for target devices.");
 
-int ql2xqfulltracking = 1;
-module_param(ql2xqfulltracking, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(ql2xqfulltracking,
-		"Controls whether the driver tracks queue full status "
-		"returns and dynamically adjusts a scsi device's queue "
-		"depth. Default is 1, perform tracking. Set to 0 to "
-		"disable dynamic tracking and adjustment of queue depth.");
-
-int ql2xqfullrampup = 120;
-module_param(ql2xqfullrampup, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(ql2xqfullrampup,
-		"Number of seconds to wait to begin to ramp-up the queue "
-		"depth for a device after a queue-full condition has been "
-		"detected. Default is 120 seconds.");
-
 int ql2xiidmaenable=1;
 module_param(ql2xiidmaenable, int, S_IRUGO|S_IRUSR);
 MODULE_PARM_DESC(ql2xiidmaenable,
@@ -1217,13 +1202,61 @@ qla2xxx_slave_destroy(struct scsi_device *sdev)
 	sdev->hostdata = NULL;
 }
 
+static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth)
+{
+	fc_port_t *fcport = (struct fc_port *) sdev->hostdata;
+
+	if (!scsi_track_queue_full(sdev, qdepth))
+		return;
+
+	DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
+		"scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
+		fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
+		sdev->queue_depth));
+}
+
+static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
+{
+	fc_port_t *fcport = sdev->hostdata;
+	struct scsi_qla_host *vha = fcport->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = NULL;
+
+	req = vha->req;
+	if (!req)
+		return;
+
+	if (req->max_q_depth <= sdev->queue_depth || req->max_q_depth < qdepth)
+		return;
+
+	if (sdev->ordered_tags)
+		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, qdepth);
+	else
+		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth);
+
+	DEBUG2(qla_printk(KERN_INFO, ha,
+		"scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
+		fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
+		sdev->queue_depth));
+}
+
 static int
 qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
 {
-	if (reason != SCSI_QDEPTH_DEFAULT)
-		return -EOPNOTSUPP;
+	switch (reason) {
+	case SCSI_QDEPTH_DEFAULT:
+		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+		break;
+	case SCSI_QDEPTH_QFULL:
+		qla2x00_handle_queue_full(sdev, qdepth);
+		break;
+	case SCSI_QDEPTH_RAMP_UP:
+		qla2x00_adjust_sdev_qdepth_up(sdev, qdepth);
+		break;
+	default:
+		return EOPNOTSUPP;
+	}
 
-	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
 	return sdev->queue_depth;
 }
 
@@ -2003,13 +2036,13 @@ skip_dpc:
 	DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
 	    base_vha->host_no, ha));
 
-	base_vha->flags.init_done = 1;
-	base_vha->flags.online = 1;
-
 	ret = scsi_add_host(host, &pdev->dev);
 	if (ret)
 		goto probe_failed;
 
+	base_vha->flags.init_done = 1;
+	base_vha->flags.online = 1;
+
 	ha->isp_ops->enable_intrs(ha);
 
 	scsi_scan_host(host);
@@ -7,7 +7,7 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION "8.03.01-k7"
+#define QLA2XXX_VERSION "8.03.01-k8"
 
 #define QLA_DRIVER_MAJOR_VER 8
 #define QLA_DRIVER_MINOR_VER 3
@@ -859,6 +859,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 			case 0x07: /* operation in progress */
 			case 0x08: /* Long write in progress */
 			case 0x09: /* self test in progress */
+			case 0x14: /* space allocation in progress */
 				action = ACTION_DELAYED_RETRY;
 				break;
 			default:
@@ -649,11 +649,22 @@ static __init int fc_transport_init(void)
 		return error;
 	error = transport_class_register(&fc_vport_class);
 	if (error)
-		return error;
+		goto unreg_host_class;
 	error = transport_class_register(&fc_rport_class);
 	if (error)
-		return error;
-	return transport_class_register(&fc_transport_class);
+		goto unreg_vport_class;
+	error = transport_class_register(&fc_transport_class);
+	if (error)
+		goto unreg_rport_class;
+	return 0;
+
+unreg_rport_class:
+	transport_class_unregister(&fc_rport_class);
+unreg_vport_class:
+	transport_class_unregister(&fc_vport_class);
+unreg_host_class:
+	transport_class_unregister(&fc_host_class);
+	return error;
 }
 
 static void __exit fc_transport_exit(void)
@@ -264,6 +264,15 @@ sd_show_app_tag_own(struct device *dev, struct device_attribute *attr,
 	return snprintf(buf, 20, "%u\n", sdkp->ATO);
 }
 
+static ssize_t
+sd_show_thin_provisioning(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+	return snprintf(buf, 20, "%u\n", sdkp->thin_provisioning);
+}
+
 static struct device_attribute sd_disk_attrs[] = {
 	__ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type,
 	       sd_store_cache_type),
@@ -274,6 +283,7 @@ static struct device_attribute sd_disk_attrs[] = {
 	       sd_store_manage_start_stop),
 	__ATTR(protection_type, S_IRUGO, sd_show_protection_type, NULL),
 	__ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL),
+	__ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL),
 	__ATTR_NULL,
 };
 
@@ -398,6 +408,57 @@ static void sd_prot_op(struct scsi_cmnd *scmd, unsigned int dif)
 	scsi_set_prot_type(scmd, dif);
 }
 
+/**
+ * sd_prepare_discard - unmap blocks on thinly provisioned device
+ * @rq: Request to prepare
+ *
+ * Will issue either UNMAP or WRITE SAME(16) depending on preference
+ * indicated by target device.
+ **/
+static int sd_prepare_discard(struct request *rq)
+{
+	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+	struct bio *bio = rq->bio;
+	sector_t sector = bio->bi_sector;
+	unsigned int num = bio_sectors(bio);
+
+	if (sdkp->device->sector_size == 4096) {
+		sector >>= 3;
+		num >>= 3;
+	}
+
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;
+	rq->timeout = SD_TIMEOUT;
+
+	memset(rq->cmd, 0, rq->cmd_len);
+
+	if (sdkp->unmap) {
+		char *buf = kmap_atomic(bio_page(bio), KM_USER0);
+
+		rq->cmd[0] = UNMAP;
+		rq->cmd[8] = 24;
+		rq->cmd_len = 10;
+
+		/* Ensure that data length matches payload */
+		rq->__data_len = bio->bi_size = bio->bi_io_vec->bv_len = 24;
+
+		put_unaligned_be16(6 + 16, &buf[0]);
+		put_unaligned_be16(16, &buf[2]);
+		put_unaligned_be64(sector, &buf[8]);
+		put_unaligned_be32(num, &buf[16]);
+
+		kunmap_atomic(buf, KM_USER0);
+	} else {
+		rq->cmd[0] = WRITE_SAME_16;
+		rq->cmd[1] = 0x8; /* UNMAP */
+		put_unaligned_be64(sector, &rq->cmd[2]);
+		put_unaligned_be32(num, &rq->cmd[10]);
+		rq->cmd_len = 16;
+	}
+
+	return BLKPREP_OK;
+}
+
 /**
  * sd_init_command - build a scsi (read or write) command from
  * information in the request structure.
@@ -418,6 +479,13 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
 	int ret, host_dif;
 	unsigned char protect;
 
+	/*
+	 * Discard request come in as REQ_TYPE_FS but we turn them into
+	 * block PC requests to make life easier.
+	 */
+	if (blk_discard_rq(rq))
+		ret = sd_prepare_discard(rq);
+
 	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
 		ret = scsi_setup_blk_pc_cmnd(sdp, rq);
 		goto out;
@@ -1432,6 +1500,19 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
 		sd_printk(KERN_NOTICE, sdkp,
 			  "physical block alignment offset: %u\n", alignment);
 
+	if (buffer[14] & 0x80) { /* TPE */
+		struct request_queue *q = sdp->request_queue;
+
+		sdkp->thin_provisioning = 1;
+		q->limits.discard_granularity = sdkp->hw_sector_size;
+		q->limits.max_discard_sectors = 0xffffffff;
+
+		if (buffer[14] & 0x40) /* TPRZ */
+			q->limits.discard_zeroes_data = 1;
+
+		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+	}
+
 	sdkp->capacity = lba + 1;
 	return sector_size;
 }
@@ -1863,6 +1944,7 @@ void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
  */
 static void sd_read_block_limits(struct scsi_disk *sdkp)
 {
+	struct request_queue *q = sdkp->disk->queue;
 	unsigned int sector_sz = sdkp->device->sector_size;
 	char *buffer;
 
@@ -1877,6 +1959,31 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
 	blk_queue_io_opt(sdkp->disk->queue,
 			 get_unaligned_be32(&buffer[12]) * sector_sz);
 
+	/* Thin provisioning enabled and page length indicates TP support */
+	if (sdkp->thin_provisioning && buffer[3] == 0x3c) {
+		unsigned int lba_count, desc_count, granularity;
+
+		lba_count = get_unaligned_be32(&buffer[20]);
+		desc_count = get_unaligned_be32(&buffer[24]);
+
+		if (lba_count) {
+			q->limits.max_discard_sectors =
+				lba_count * sector_sz >> 9;
+
+			if (desc_count)
+				sdkp->unmap = 1;
+		}
+
+		granularity = get_unaligned_be32(&buffer[28]);
+
+		if (granularity)
+			q->limits.discard_granularity = granularity * sector_sz;
+
+		if (buffer[32] & 0x80)
+			q->limits.discard_alignment =
+				get_unaligned_be32(&buffer[32]) & ~(1 << 31);
+	}
+
 	kfree(buffer);
 }
 
@@ -60,6 +60,8 @@ struct scsi_disk {
 	unsigned RCD : 1;	/* state of disk RCD bit, unused */
 	unsigned DPOFUA : 1;	/* state of disk DPOFUA bit */
 	unsigned first_scan : 1;
+	unsigned thin_provisioning : 1;
+	unsigned unmap : 1;
 };
 #define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
 
@@ -552,13 +552,15 @@ st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd
 	SRpnt->waiting = waiting;
 
 	if (STp->buffer->do_dio) {
+		mdata->page_order = 0;
 		mdata->nr_entries = STp->buffer->sg_segs;
 		mdata->pages = STp->buffer->mapped_pages;
 	} else {
+		mdata->page_order = STp->buffer->reserved_page_order;
 		mdata->nr_entries =
 			DIV_ROUND_UP(bytes, PAGE_SIZE << mdata->page_order);
-		STp->buffer->map_data.pages = STp->buffer->reserved_pages;
-		STp->buffer->map_data.offset = 0;
+		mdata->pages = STp->buffer->reserved_pages;
+		mdata->offset = 0;
 	}
 
 	memcpy(SRpnt->cmd, cmd, sizeof(SRpnt->cmd));
@@ -3719,7 +3721,7 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
 		priority |= __GFP_ZERO;
 
 	if (STbuffer->frp_segs) {
-		order = STbuffer->map_data.page_order;
+		order = STbuffer->reserved_page_order;
 		b_size = PAGE_SIZE << order;
 	} else {
 		for (b_size = PAGE_SIZE, order = 0;
@@ -3752,7 +3754,7 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
 		segs++;
 	}
 	STbuffer->b_data = page_address(STbuffer->reserved_pages[0]);
-	STbuffer->map_data.page_order = order;
+	STbuffer->reserved_page_order = order;
 
 	return 1;
 }
@@ -3765,7 +3767,7 @@ static void clear_buffer(struct st_buffer * st_bp)
 
 	for (i=0; i < st_bp->frp_segs; i++)
 		memset(page_address(st_bp->reserved_pages[i]), 0,
-		       PAGE_SIZE << st_bp->map_data.page_order);
+		       PAGE_SIZE << st_bp->reserved_page_order);
 	st_bp->cleared = 1;
 }
 
@@ -3773,7 +3775,7 @@ static void clear_buffer(struct st_buffer * st_bp)
 /* Release the extra buffer */
 static void normalize_buffer(struct st_buffer * STbuffer)
 {
-	int i, order = STbuffer->map_data.page_order;
+	int i, order = STbuffer->reserved_page_order;
 
 	for (i = 0; i < STbuffer->frp_segs; i++) {
 		__free_pages(STbuffer->reserved_pages[i], order);
@@ -3781,7 +3783,7 @@ static void normalize_buffer(struct st_buffer * STbuffer)
 	}
 	STbuffer->frp_segs = 0;
 	STbuffer->sg_segs = 0;
-	STbuffer->map_data.page_order = 0;
+	STbuffer->reserved_page_order = 0;
 	STbuffer->map_data.offset = 0;
 }
 
@@ -3791,7 +3793,7 @@ static void normalize_buffer(struct st_buffer * STbuffer)
 static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, int do_count)
 {
 	int i, cnt, res, offset;
-	int length = PAGE_SIZE << st_bp->map_data.page_order;
+	int length = PAGE_SIZE << st_bp->reserved_page_order;
 
 	for (i = 0, offset = st_bp->buffer_bytes;
 	     i < st_bp->frp_segs && offset >= length; i++)
@@ -3823,7 +3825,7 @@ static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, in
 static int from_buffer(struct st_buffer * st_bp, char __user *ubp, int do_count)
 {
 	int i, cnt, res, offset;
-	int length = PAGE_SIZE << st_bp->map_data.page_order;
+	int length = PAGE_SIZE << st_bp->reserved_page_order;
 
 	for (i = 0, offset = st_bp->read_pointer;
 	     i < st_bp->frp_segs && offset >= length; i++)
@@ -3856,7 +3858,7 @@ static void move_buffer_data(struct st_buffer * st_bp, int offset)
 {
 	int src_seg, dst_seg, src_offset = 0, dst_offset;
 	int count, total;
-	int length = PAGE_SIZE << st_bp->map_data.page_order;
+	int length = PAGE_SIZE << st_bp->reserved_page_order;
 
 	if (offset == 0)
 		return;
@@ -4578,7 +4580,6 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
 	}
 
 	mdata->offset = uaddr & ~PAGE_MASK;
-	mdata->page_order = 0;
 	STbp->mapped_pages = pages;
 
 	return nr_pages;
@@ -46,6 +46,7 @@ struct st_buffer {
 	struct st_request *last_SRpnt;
 	struct st_cmdstatus cmdstat;
 	struct page **reserved_pages;
+	int reserved_page_order;
 	struct page **mapped_pages;
 	struct rq_map_data map_data;
 	unsigned char *b_data;
@@ -42,6 +42,8 @@ enum enclosure_status {
 	ENCLOSURE_STATUS_NOT_INSTALLED,
 	ENCLOSURE_STATUS_UNKNOWN,
 	ENCLOSURE_STATUS_UNAVAILABLE,
+	/* last element for counting purposes */
+	ENCLOSURE_STATUS_MAX
 };
 
 /* SFF-8485 activity light settings */
@@ -142,6 +142,7 @@ struct osd_request {
 	struct _osd_io_info {
 		struct bio *bio;
 		u64 total_bytes;
+		u64 residual;
 		struct request *req;
 		struct _osd_req_data_segment *last_seg;
 		u8 *pad_buff;
@@ -150,12 +151,14 @@ struct osd_request {
 	gfp_t alloc_flags;
 	unsigned timeout;
 	unsigned retries;
+	unsigned sense_len;
 	u8 sense[OSD_MAX_SENSE_LEN];
 	enum osd_attributes_mode attributes_mode;
 
 	osd_req_done_fn *async_done;
 	void *async_private;
 	int async_error;
+	int req_errors;
 };
 
 static inline bool osd_req_is_ver1(struct osd_request *or)
@@ -297,8 +300,6 @@ enum osd_err_priority {
 };
 
 struct osd_sense_info {
-	u64 out_resid;	/* Zero on success otherwise out residual */
-	u64 in_resid;	/* Zero on success otherwise in residual */
 	enum osd_err_priority osd_err_pri;
 
 	int key;	/* one of enum scsi_sense_keys */