author    Stephen Rothwell <sfr@canb.auug.org.au>  2021-09-30 14:21:56 +1000
committer Stephen Rothwell <sfr@canb.auug.org.au>  2021-09-30 14:21:56 +1000
commit    027e6ded9178040c343fef61aeafb0db08d7853e (patch)
tree      95eb086d45668616a5ae077e7283c4129d17468d
parent    24923d6964288d548a09e8ee4bcab4b08e366f20 (diff)
parent    b198c36ab6054018d578aedd5a126cc47de24570 (diff)
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git
 Documentation/ABI/testing/sysfs-class-fc |  27
 drivers/scsi/dc395x.c                    |   1
 drivers/scsi/elx/efct/efct_scsi.c        |   3
 drivers/scsi/elx/libefc/efc.h            |   2
 drivers/scsi/elx/libefc/efc_cmds.c       |   7
 drivers/scsi/elx/libefc/efc_fabric.c     |   2
 drivers/scsi/elx/libefc/efclib.h         |   1
 drivers/scsi/hisi_sas/hisi_sas.h         |   2
 drivers/scsi/hisi_sas/hisi_sas_main.c    |  24
 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c   |   2
 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c   |   8
 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c   |  37
 drivers/scsi/libiscsi.c                  |   3
 drivers/scsi/libsas/sas_init.c           |   5
 drivers/scsi/libsas/sas_scsi_host.c      |  23
 drivers/scsi/lpfc/lpfc.h                 |   1
 drivers/scsi/lpfc/lpfc_els.c             |  41
 drivers/scsi/lpfc/lpfc_hbadisc.c         |  32
 drivers/scsi/lpfc/lpfc_init.c            |  49
 drivers/scsi/lpfc/lpfc_nvme.c            |  70
 drivers/scsi/lpfc/lpfc_nvmet.c           |  44
 drivers/scsi/lpfc/lpfc_scsi.c            |  92
 drivers/scsi/lpfc/lpfc_sli.c             | 154
 drivers/scsi/lpfc/lpfc_sli4.h            |   2
 drivers/scsi/lpfc/lpfc_version.h         |   2
 drivers/scsi/pm8001/pm8001_ctl.c         |   6
 drivers/scsi/pm8001/pm8001_hwi.c         |   7
 drivers/scsi/pm8001/pm8001_init.c        |  12
 drivers/scsi/pm8001/pm8001_sas.c         |  15
 drivers/scsi/pm8001/pm8001_sas.h         |   6
 drivers/scsi/pm8001/pm80xx_hwi.c         |  60
 drivers/scsi/qla2xxx/qla_attr.c          |  24
 drivers/scsi/qla2xxx/qla_bsg.c           |  48
 drivers/scsi/qla2xxx/qla_bsg.h           |   7
 drivers/scsi/qla2xxx/qla_def.h           |   4
 drivers/scsi/qla2xxx/qla_gbl.h           |   4
 drivers/scsi/qla2xxx/qla_gs.c            |   3
 drivers/scsi/qla2xxx/qla_init.c          |  17
 drivers/scsi/qla2xxx/qla_mbx.c           |  35
 drivers/scsi/qla2xxx/qla_nvme.c          |  20
 drivers/scsi/qla2xxx/qla_os.c            |  90
 drivers/scsi/qla2xxx/qla_version.h       |   6
 drivers/scsi/ufs/Kconfig                 |   9
 drivers/scsi/ufs/Makefile                |   1
 drivers/scsi/ufs/ufs-hwmon.c             | 210
 drivers/scsi/ufs/ufs-mediatek.c          |  21
 drivers/scsi/ufs/ufs-mediatek.h          |   6
 drivers/scsi/ufs/ufs-qcom.c              |   3
 drivers/scsi/ufs/ufs.h                   |   7
 drivers/scsi/ufs/ufshcd.c                |  47
 drivers/scsi/ufs/ufshcd.h                |  20
 drivers/scsi/ufs/ufshpb.c                |   8
 drivers/target/target_core_xcopy.c       |  14
 include/scsi/scsi_cmnd.h                 |   1
 54 files changed, 1067 insertions(+), 278 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-class-fc b/Documentation/ABI/testing/sysfs-class-fc
new file mode 100644
index 00000000000000..3057a6d3b8cfc2
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-fc
@@ -0,0 +1,27 @@
+What: /sys/class/fc/fc_udev_device/appid_store
+Date: Aug 2021
+Contact: Muneendra Kumar <muneendra.kumar@broadcom.com>
+Description:
+ This interface allows an admin to set an FC application
+ identifier in the blkcg associated with a cgroup id. The
+ identifier is typically a UUID that is associated with
+ an application or logical entity such as a virtual
+ machine or container group. The application or logical
+ entity utilizes a block device via the cgroup id.
+ FC adapter drivers may query the identifier and tag FC
+ traffic based on the identifier. FC host and FC fabric
+ entities can utilize the application id and FC traffic
+ tag to identify traffic sources.
+
+ The interface expects a string "<cgroupid>:<appid>" where:
+ <cgroupid> is the inode number of the cgroup, in hexadecimal
+ <appid> is a user-provided string of up to 128 characters
+ in length.
+
+ If an appid_store is done for a cgroup id that already
+ has an appid set, the new value will override the
+ previous value.
+
+ If an admin wants to remove an FC application identifier
+ from a cgroup, an appid_store should be done with the
+ following string: "<cgroupid>:"
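
For context on the interface added above, here is a minimal user-space sketch of how an admin tool might drive appid_store. The helper name and the idea of deriving the cgroup id from the cgroup directory's inode via stat() are illustrative assumptions; only the sysfs path and the "<cgroupid>:<appid>" string format come from the documentation itself.

/* Illustrative sketch only: set an FC appid for the cgroup at cgroup_path.
 * The "<cgroupid>:<appid>" format and hexadecimal cgroup id follow the ABI
 * description above; everything else is an assumption.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

static int set_fc_appid(const char *cgroup_path, const char *appid)
{
	struct stat st;
	char buf[256];
	int fd, len, ret = 0;

	if (stat(cgroup_path, &st))		/* cgroup id is the inode number */
		return -1;

	len = snprintf(buf, sizeof(buf), "%llx:%s",
		       (unsigned long long)st.st_ino, appid);
	if (len < 0 || len >= (int)sizeof(buf))
		return -1;

	fd = open("/sys/class/fc/fc_udev_device/appid_store", O_WRONLY);
	if (fd < 0)
		return -1;
	if (write(fd, buf, len) != len)
		ret = -1;
	close(fd);
	return ret;
}

A caller would pass the workload's cgroup directory and its identifier, for example set_fc_appid("/sys/fs/cgroup/vm1", "example-app-uuid") (both values hypothetical), which produces the "<cgroupid>:<appid>" form described above.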
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 24c7cefb0b78a8..1c79e6c2716300 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -4618,6 +4618,7 @@ static int dc395x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
/* initialise the adapter and everything we need */
if (adapter_init(acb, io_port_base, io_port_len, irq)) {
dprintkl(KERN_INFO, "adapter init failed\n");
+ acb = NULL;
goto fail;
}
diff --git a/drivers/scsi/elx/efct/efct_scsi.c b/drivers/scsi/elx/efct/efct_scsi.c
index 40fb3a724c76d0..8535bb7eabd8e1 100644
--- a/drivers/scsi/elx/efct/efct_scsi.c
+++ b/drivers/scsi/elx/efct/efct_scsi.c
@@ -38,8 +38,6 @@ efct_scsi_io_alloc(struct efct_node *node)
xport = efct->xport;
- spin_lock_irqsave(&node->active_ios_lock, flags);
-
io = efct_io_pool_io_alloc(efct->xport->io_pool);
if (!io) {
efc_log_err(efct, "IO alloc Failed\n");
@@ -66,6 +64,7 @@ efct_scsi_io_alloc(struct efct_node *node)
/* Add to node's active_ios list */
INIT_LIST_HEAD(&io->list_entry);
+ spin_lock_irqsave(&node->active_ios_lock, flags);
list_add(&io->list_entry, &node->active_ios);
spin_unlock_irqrestore(&node->active_ios_lock, flags);
diff --git a/drivers/scsi/elx/libefc/efc.h b/drivers/scsi/elx/libefc/efc.h
index 927016283f4132..468ff3cc9c00a3 100644
--- a/drivers/scsi/elx/libefc/efc.h
+++ b/drivers/scsi/elx/libefc/efc.h
@@ -47,6 +47,6 @@ enum efc_scsi_del_target_reason {
#define nport_sm_trace(nport) \
efc_log_debug(nport->efc, \
- "[%s] %-20s\n", nport->display_name, efc_sm_event_name(evt)) \
+ "[%s] %-20s %-20s\n", nport->display_name, __func__, efc_sm_event_name(evt)) \
#endif /* __EFC_H__ */
diff --git a/drivers/scsi/elx/libefc/efc_cmds.c b/drivers/scsi/elx/libefc/efc_cmds.c
index 37e6697d86b888..f8665d48904af4 100644
--- a/drivers/scsi/elx/libefc/efc_cmds.c
+++ b/drivers/scsi/elx/libefc/efc_cmds.c
@@ -249,6 +249,7 @@ efc_nport_attach_reg_vpi_cb(struct efc *efc, int status, u8 *mqe,
{
struct efc_nport *nport = arg;
+ nport->attaching = false;
if (efc_nport_get_mbox_status(nport, mqe, status)) {
efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, mqe);
return -EIO;
@@ -286,6 +287,8 @@ efc_cmd_nport_attach(struct efc *efc, struct efc_nport *nport, u32 fc_id)
if (rc) {
efc_log_err(efc, "REG_VPI command failure\n");
efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, buf);
+ } else {
+ nport->attaching = true;
}
return rc;
@@ -302,8 +305,10 @@ efc_cmd_nport_free(struct efc *efc, struct efc_nport *nport)
/* Issue the UNREG_VPI command to free the assigned VPI context */
if (nport->attached)
efc_nport_free_unreg_vpi(nport);
- else
+ else if (nport->attaching)
nport->free_req_pending = true;
+ else
+ efc_sm_post_event(&nport->sm, EFC_EVT_NPORT_FREE_OK, NULL);
return 0;
}
diff --git a/drivers/scsi/elx/libefc/efc_fabric.c b/drivers/scsi/elx/libefc/efc_fabric.c
index 3270ce40196c6f..9661eea93aa183 100644
--- a/drivers/scsi/elx/libefc/efc_fabric.c
+++ b/drivers/scsi/elx/libefc/efc_fabric.c
@@ -685,7 +685,7 @@ efc_process_gidpt_payload(struct efc_node *node,
}
/* Allocate a buffer for all nodes */
- active_nodes = kzalloc(port_count * sizeof(*active_nodes), GFP_ATOMIC);
+ active_nodes = kcalloc(port_count, sizeof(*active_nodes), GFP_ATOMIC);
if (!active_nodes) {
node_printf(node, "efc_malloc failed\n");
return -EIO;
diff --git a/drivers/scsi/elx/libefc/efclib.h b/drivers/scsi/elx/libefc/efclib.h
index ee291cabf7e056..dde20891c2dd71 100644
--- a/drivers/scsi/elx/libefc/efclib.h
+++ b/drivers/scsi/elx/libefc/efclib.h
@@ -142,6 +142,7 @@ struct efc_nport {
bool is_vport;
bool free_req_pending;
bool attached;
+ bool attaching;
bool p2p_winner;
struct efc_domain *domain;
u64 wwpn;
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 436d174f2194bc..57be32ba0109f4 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -35,7 +35,7 @@
#define HISI_SAS_QUEUE_SLOTS 4096
#define HISI_SAS_MAX_ITCT_ENTRIES 1024
#define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES
-#define HISI_SAS_RESET_BIT 0
+#define HISI_SAS_RESETTING_BIT 0
#define HISI_SAS_REJECT_CMD_BIT 1
#define HISI_SAS_PM_BIT 2
#define HISI_SAS_HW_FAULT_BIT 3
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 9515c45affa5ef..bb1c8ef3a76fe9 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -724,7 +724,7 @@ static int hisi_sas_init_device(struct domain_device *device)
*/
local_phy = sas_get_local_phy(device);
if (!scsi_is_sas_phy_local(local_phy) &&
- !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
+ !test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
unsigned long deadline = ata_deadline(jiffies, 20000);
struct sata_device *sata_dev = &device->sata_dev;
struct ata_host *ata_host = sata_dev->ata_host;
@@ -1072,7 +1072,7 @@ static void hisi_sas_dev_gone(struct domain_device *device)
sas_dev->device_id, sas_dev->dev_type);
down(&hisi_hba->sem);
- if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
+ if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_DEV, 0, true);
@@ -1171,7 +1171,7 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
static void hisi_sas_task_done(struct sas_task *task)
{
- del_timer(&task->slow_task->timer);
+ del_timer_sync(&task->slow_task->timer);
complete(&task->slow_task->completion);
}
@@ -1229,7 +1229,7 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);
if (res) {
- del_timer(&task->slow_task->timer);
+ del_timer_sync(&task->slow_task->timer);
dev_err(dev, "abort tmf: executing internal task failed: %d\n",
res);
goto ex_err;
@@ -1554,8 +1554,7 @@ void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
scsi_block_requests(shost);
hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);
- if (timer_pending(&hisi_hba->timer))
- del_timer_sync(&hisi_hba->timer);
+ del_timer_sync(&hisi_hba->timer);
set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
}
@@ -1576,7 +1575,7 @@ void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
hisi_sas_reset_init_all_devices(hisi_hba);
up(&hisi_hba->sem);
scsi_unblock_requests(shost);
- clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+ clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state);
}
@@ -1587,7 +1586,7 @@ static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba)
if (!hisi_hba->hw->soft_reset)
return -1;
- if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
+ if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
return -1;
if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
@@ -1611,7 +1610,7 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
up(&hisi_hba->sem);
scsi_unblock_requests(shost);
- clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+ clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
return rc;
}
@@ -2097,7 +2096,7 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
task, abort_flag, tag, dq);
if (res) {
- del_timer(&task->slow_task->timer);
+ del_timer_sync(&task->slow_task->timer);
dev_err(dev, "internal task abort: executing internal task failed: %d\n",
res);
goto exit;
@@ -2251,7 +2250,7 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy,
} else {
struct hisi_sas_port *port = phy->port;
- if (test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags) ||
+ if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) ||
phy->in_reset) {
dev_info(dev, "ignore flutter phy%d down\n", phy_no);
return;
@@ -2769,8 +2768,7 @@ int hisi_sas_remove(struct platform_device *pdev)
struct hisi_hba *hisi_hba = sha->lldd_ha;
struct Scsi_Host *shost = sha->core.shost;
- if (timer_pending(&hisi_hba->timer))
- del_timer(&hisi_hba->timer);
+ del_timer_sync(&hisi_hba->timer);
sas_unregister_ha(sha);
sas_remove_host(sha->core.shost);
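
A note on the timer changes in this file (and in v2_hw/v3_hw below): the open-coded timer_pending() check is dropped because del_timer_sync() already handles a non-pending timer, and unlike del_timer() it waits for a handler that may be running on another CPU. A minimal sketch of the two teardown patterns, using the hisi_hba timer from the hunks above:

/* Racy teardown: the timer may fire between the pending check and the
 * delete, and del_timer() returns without waiting for a running handler.
 */
if (timer_pending(&hisi_hba->timer))
	del_timer(&hisi_hba->timer);

/* Safe teardown: del_timer_sync() is harmless if the timer is not pending
 * and guarantees the handler is no longer executing when it returns.
 */
del_timer_sync(&hisi_hba->timer);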
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index afe639994f3dd9..862f4e8b7eb58e 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1422,7 +1422,7 @@ static irqreturn_t int_bcast_v1_hw(int irq, void *p)
goto end;
}
- if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
+ if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
GFP_ATOMIC);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index b0b2361e63fef5..236cf65c2f97da 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -2368,18 +2368,18 @@ static void slot_complete_v2_hw(struct hisi_hba *hisi_hba,
case STAT_IO_COMPLETE:
/* internal abort command complete */
ts->stat = TMF_RESP_FUNC_SUCC;
- del_timer(&slot->internal_abort_timer);
+ del_timer_sync(&slot->internal_abort_timer);
goto out;
case STAT_IO_NO_DEVICE:
ts->stat = TMF_RESP_FUNC_COMPLETE;
- del_timer(&slot->internal_abort_timer);
+ del_timer_sync(&slot->internal_abort_timer);
goto out;
case STAT_IO_NOT_VALID:
/* abort single io, controller don't find
* the io need to abort
*/
ts->stat = TMF_RESP_FUNC_FAILED;
- del_timer(&slot->internal_abort_timer);
+ del_timer_sync(&slot->internal_abort_timer);
goto out;
default:
break;
@@ -2824,7 +2824,7 @@ static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
if ((bcast_status & RX_BCAST_CHG_MSK) &&
- !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
+ !test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
GFP_ATOMIC);
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 27884f3106ab85..c83076461a97ca 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -519,6 +519,8 @@ struct hisi_sas_err_record_v3 {
#define CHNL_INT_STS_INT2_MSK BIT(3)
#define CHNL_WIDTH 4
+#define BAR_NO_V3_HW 5
+
enum {
DSM_FUNC_ERR_HANDLE_MSI = 0,
};
@@ -1616,7 +1618,7 @@ static irqreturn_t phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
if ((bcast_status & RX_BCAST_CHG_MSK) &&
- !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
+ !test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
GFP_ATOMIC);
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
@@ -3687,7 +3689,6 @@ static void debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba)
do_div(timestamp, NSEC_PER_MSEC);
hisi_hba->debugfs_timestamp[debugfs_dump_index] = timestamp;
- hisi_hba->debugfs_dump_index++;
debugfs_snapshot_prepare_v3_hw(hisi_hba);
@@ -3703,6 +3704,7 @@ static void debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba)
debugfs_create_files_v3_hw(hisi_hba);
debugfs_snapshot_restore_v3_hw(hisi_hba);
+ hisi_hba->debugfs_dump_index++;
}
static ssize_t debugfs_trigger_dump_v3_hw_write(struct file *file,
@@ -4677,15 +4679,15 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct sas_ha_struct *sha;
int rc, phy_nr, port_nr, i;
- rc = pci_enable_device(pdev);
+ rc = pcim_enable_device(pdev);
if (rc)
goto err_out;
pci_set_master(pdev);
- rc = pci_request_regions(pdev, DRV_NAME);
+ rc = pcim_iomap_regions(pdev, 1 << BAR_NO_V3_HW, DRV_NAME);
if (rc)
- goto err_out_disable_device;
+ goto err_out;
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc)
@@ -4693,20 +4695,20 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc) {
dev_err(dev, "No usable DMA addressing method\n");
rc = -ENODEV;
- goto err_out_regions;
+ goto err_out;
}
shost = hisi_sas_shost_alloc_pci(pdev);
if (!shost) {
rc = -ENOMEM;
- goto err_out_regions;
+ goto err_out;
}
sha = SHOST_TO_SAS_HA(shost);
hisi_hba = shost_priv(shost);
dev_set_drvdata(dev, sha);
- hisi_hba->regs = pcim_iomap(pdev, 5, 0);
+ hisi_hba->regs = pcim_iomap_table(pdev)[BAR_NO_V3_HW];
if (!hisi_hba->regs) {
dev_err(dev, "cannot map register\n");
rc = -ENOMEM;
@@ -4761,7 +4763,7 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rc = interrupt_preinit_v3_hw(hisi_hba);
if (rc)
goto err_out_debugfs;
- dev_err(dev, "%d hw queues\n", shost->nr_hw_queues);
+
rc = scsi_add_host(shost, dev);
if (rc)
goto err_out_free_irq_vectors;
@@ -4800,10 +4802,6 @@ err_out_debugfs:
err_out_ha:
hisi_sas_free(hisi_hba);
scsi_host_put(shost);
-err_out_regions:
- pci_release_regions(pdev);
-err_out_disable_device:
- pci_disable_device(pdev);
err_out:
return rc;
}
@@ -4833,16 +4831,13 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
struct Scsi_Host *shost = sha->core.shost;
pm_runtime_get_noresume(dev);
- if (timer_pending(&hisi_hba->timer))
- del_timer(&hisi_hba->timer);
+ del_timer_sync(&hisi_hba->timer);
sas_unregister_ha(sha);
flush_workqueue(hisi_hba->wq);
sas_remove_host(sha->core.shost);
hisi_sas_v3_destroy_irqs(pdev, hisi_hba);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
hisi_sas_free(hisi_hba);
debugfs_exit_v3_hw(hisi_hba);
scsi_host_put(shost);
@@ -4856,7 +4851,7 @@ static void hisi_sas_reset_prepare_v3_hw(struct pci_dev *pdev)
int rc;
dev_info(dev, "FLR prepare\n");
- set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+ set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
hisi_sas_controller_reset_prepare(hisi_hba);
rc = disable_host_v3_hw(hisi_hba);
@@ -4902,7 +4897,7 @@ static int _suspend_v3_hw(struct device *device)
return -ENODEV;
}
- if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
+ if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
return -1;
scsi_block_requests(shost);
@@ -4913,7 +4908,7 @@ static int _suspend_v3_hw(struct device *device)
if (rc) {
dev_err(dev, "PM suspend: disable host failed rc=%d\n", rc);
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
- clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+ clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
scsi_unblock_requests(shost);
return rc;
}
@@ -4952,7 +4947,7 @@ static int _resume_v3_hw(struct device *device)
}
phys_init_v3_hw(hisi_hba);
sas_resume_ha(sha);
- clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+ clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
return 0;
}
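
The probe/remove rework in this file relies on device-managed (devres) PCI helpers, which is why the explicit pci_release_regions()/pci_disable_device() error labels and remove-path calls go away. A rough sketch of the pattern, reusing BAR_NO_V3_HW and DRV_NAME from the hunks above; the surrounding probe skeleton is illustrative only:

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int rc;

	rc = pcim_enable_device(pdev);	/* undone automatically on detach */
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << BAR_NO_V3_HW, DRV_NAME);
	if (rc)
		return rc;		/* no manual region release needed */

	regs = pcim_iomap_table(pdev)[BAR_NO_V3_HW];
	if (!regs)
		return -ENOMEM;

	/* remaining controller setup elided */
	return 0;
}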
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 4683c183e9d411..712a453683859f 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -2947,6 +2947,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
session->tmf_state = TMF_INITIAL;
timer_setup(&session->tmf_timer, iscsi_tmf_timedout, 0);
mutex_init(&session->eh_mutex);
+ init_waitqueue_head(&session->ehwait);
spin_lock_init(&session->frwd_lock);
spin_lock_init(&session->back_lock);
@@ -3074,8 +3075,6 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
goto login_task_data_alloc_fail;
conn->login_task->data = conn->data = data;
- init_waitqueue_head(&session->ehwait);
-
return cls_conn;
login_task_data_alloc_fail:
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 80592f53017abf..37cc92837fdfd6 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -147,6 +147,7 @@ Undo_phys:
return error;
}
+EXPORT_SYMBOL_GPL(sas_register_ha);
static void sas_disable_events(struct sas_ha_struct *sas_ha)
{
@@ -176,6 +177,7 @@ int sas_unregister_ha(struct sas_ha_struct *sas_ha)
return 0;
}
+EXPORT_SYMBOL_GPL(sas_unregister_ha);
static int sas_get_linkerrors(struct sas_phy *phy)
{
@@ -313,6 +315,7 @@ int sas_phy_reset(struct sas_phy *phy, int hard_reset)
}
return ret;
}
+EXPORT_SYMBOL_GPL(sas_phy_reset);
int sas_set_phy_speed(struct sas_phy *phy,
struct sas_phy_linkrates *rates)
@@ -659,5 +662,3 @@ MODULE_LICENSE("GPL v2");
module_init(sas_class_init);
module_exit(sas_class_exit);
-EXPORT_SYMBOL_GPL(sas_register_ha);
-EXPORT_SYMBOL_GPL(sas_unregister_ha);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 08ffb87882904d..2bf37151623ec4 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -201,6 +201,7 @@ out_done:
cmd->scsi_done(cmd);
return 0;
}
+EXPORT_SYMBOL_GPL(sas_queuecommand);
static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
{
@@ -511,6 +512,7 @@ int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
return FAILED;
}
+EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler);
int sas_eh_target_reset_handler(struct scsi_cmnd *cmd)
{
@@ -532,6 +534,7 @@ int sas_eh_target_reset_handler(struct scsi_cmnd *cmd)
return FAILED;
}
+EXPORT_SYMBOL_GPL(sas_eh_target_reset_handler);
/* Try to reset a device */
static int try_to_reset_cmd_device(struct scsi_cmnd *cmd)
@@ -790,6 +793,7 @@ int sas_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg)
return -EINVAL;
}
+EXPORT_SYMBOL_GPL(sas_ioctl);
struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy)
{
@@ -832,6 +836,7 @@ int sas_target_alloc(struct scsi_target *starget)
starget->hostdata = found_dev;
return 0;
}
+EXPORT_SYMBOL_GPL(sas_target_alloc);
#define SAS_DEF_QD 256
@@ -860,6 +865,7 @@ int sas_slave_configure(struct scsi_device *scsi_dev)
return 0;
}
+EXPORT_SYMBOL_GPL(sas_slave_configure);
int sas_change_queue_depth(struct scsi_device *sdev, int depth)
{
@@ -872,6 +878,7 @@ int sas_change_queue_depth(struct scsi_device *sdev, int depth)
depth = 1;
return scsi_change_queue_depth(sdev, depth);
}
+EXPORT_SYMBOL_GPL(sas_change_queue_depth);
int sas_bios_param(struct scsi_device *scsi_dev,
struct block_device *bdev,
@@ -884,6 +891,7 @@ int sas_bios_param(struct scsi_device *scsi_dev,
return 0;
}
+EXPORT_SYMBOL_GPL(sas_bios_param);
/*
* Tell an upper layer that it needs to initiate an abort for a given task.
@@ -910,6 +918,7 @@ void sas_task_abort(struct sas_task *task)
else
blk_abort_request(scsi_cmd_to_rq(sc));
}
+EXPORT_SYMBOL_GPL(sas_task_abort);
int sas_slave_alloc(struct scsi_device *sdev)
{
@@ -918,6 +927,7 @@ int sas_slave_alloc(struct scsi_device *sdev)
return 0;
}
+EXPORT_SYMBOL_GPL(sas_slave_alloc);
void sas_target_destroy(struct scsi_target *starget)
{
@@ -929,6 +939,7 @@ void sas_target_destroy(struct scsi_target *starget)
starget->hostdata = NULL;
sas_put_device(found_dev);
}
+EXPORT_SYMBOL_GPL(sas_target_destroy);
#define SAS_STRING_ADDR_SIZE 16
@@ -956,15 +967,3 @@ out:
}
EXPORT_SYMBOL_GPL(sas_request_addr);
-EXPORT_SYMBOL_GPL(sas_queuecommand);
-EXPORT_SYMBOL_GPL(sas_target_alloc);
-EXPORT_SYMBOL_GPL(sas_slave_configure);
-EXPORT_SYMBOL_GPL(sas_change_queue_depth);
-EXPORT_SYMBOL_GPL(sas_bios_param);
-EXPORT_SYMBOL_GPL(sas_task_abort);
-EXPORT_SYMBOL_GPL(sas_phy_reset);
-EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler);
-EXPORT_SYMBOL_GPL(sas_eh_target_reset_handler);
-EXPORT_SYMBOL_GPL(sas_slave_alloc);
-EXPORT_SYMBOL_GPL(sas_target_destroy);
-EXPORT_SYMBOL_GPL(sas_ioctl);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 337e6ed2482181..2f8e6d0a926fea 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1029,6 +1029,7 @@ struct lpfc_hba {
* Firmware supports Forced Link Speed
* capability
*/
+#define HBA_PCI_ERR 0x80000 /* The PCI slot is offline */
#define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */
#define HBA_CGN_RSVD1 0x200000 /* Reserved CGN flag */
#define HBA_CGN_DAY_WRAP 0x400000 /* HBA Congestion info day wraps */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 052c0e5b11195f..de38f4b886ca98 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1059,9 +1059,10 @@ stop_rr_fcf_flogi:
lpfc_printf_vlog(vport, KERN_WARNING, LOG_TRACE_EVENT,
"0150 FLOGI failure Status:x%x/x%x "
- "xri x%x TMO:x%x\n",
+ "xri x%x TMO:x%x refcnt %d\n",
irsp->ulpStatus, irsp->un.ulpWord[4],
- cmdiocb->sli4_xritag, irsp->ulpTimeout);
+ cmdiocb->sli4_xritag, irsp->ulpTimeout,
+ kref_read(&ndlp->kref));
/* If this is not a loop open failure, bail out */
if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
@@ -1122,12 +1123,12 @@ stop_rr_fcf_flogi:
/* FLOGI completes successfully */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0101 FLOGI completes successfully, I/O tag:x%x, "
- "xri x%x Data: x%x x%x x%x x%x x%x x%x x%x\n",
+ "xri x%x Data: x%x x%x x%x x%x x%x x%x x%x %d\n",
cmdiocb->iotag, cmdiocb->sli4_xritag,
irsp->un.ulpWord[4], sp->cmn.e_d_tov,
sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
vport->port_state, vport->fc_flag,
- sp->cmn.priority_tagging);
+ sp->cmn.priority_tagging, kref_read(&ndlp->kref));
if (sp->cmn.priority_tagging)
vport->vmid_flag |= LPFC_VMID_ISSUE_QFPA;
@@ -1205,8 +1206,6 @@ flogifail:
phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
spin_unlock_irq(&phba->hbalock);
- if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)))
- lpfc_nlp_put(ndlp);
if (!lpfc_error_lost_link(irsp)) {
/* FLOGI failed, so just use loop map to make discovery list */
lpfc_disc_list_loopmap(vport);
@@ -2330,6 +2329,13 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PRLI);
+ /*
+ * For P2P topology, retain the node so that PLOGI can be
+ * attempted on it again.
+ */
+ if (vport->fc_flag & FC_PT2PT)
+ goto out;
+
/* As long as this node is not registered with the SCSI
* or NVMe transport and no other PRLIs are outstanding,
* it is no longer an active node. Otherwise devloss
@@ -5296,6 +5302,7 @@ out:
*/
if (phba->sli_rev == LPFC_SLI_REV4 &&
(vport && vport->port_type == LPFC_NPIV_PORT) &&
+ !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD) &&
ndlp->nlp_flag & NLP_RELEASE_RPI) {
lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
spin_lock_irq(&ndlp->lock);
@@ -5599,11 +5606,12 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
}
/* The NPIV instance is rejecting this unsolicited ELS. Make sure the
- * node's assigned RPI needs to be released as this node will get
- * freed.
+ * node's assigned RPI gets released provided this node is not already
+ * registered with the transport.
*/
if (phba->sli_rev == LPFC_SLI_REV4 &&
- vport->port_type == LPFC_NPIV_PORT) {
+ vport->port_type == LPFC_NPIV_PORT &&
+ !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) {
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_RELEASE_RPI;
spin_unlock_irq(&ndlp->lock);
@@ -6216,6 +6224,7 @@ lpfc_els_disc_adisc(struct lpfc_vport *vport)
* from backend
*/
lpfc_nlp_unreg_node(vport, ndlp);
+ lpfc_unreg_rpi(vport, ndlp);
continue;
}
@@ -11385,6 +11394,7 @@ lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
+ struct lpfc_nodelist *ndlp = NULL;
unsigned long iflag = 0;
spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag);
@@ -11392,7 +11402,20 @@ lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) {
lpfc_nlp_put(sglq_entry->ndlp);
+ ndlp = sglq_entry->ndlp;
sglq_entry->ndlp = NULL;
+
+ /* If the xri on the abts_els_sgl list is for the Fport
+ * node and the vport is unloading, the xri aborted wcqe
+ * likely isn't coming back. Just release the sgl.
+ */
+ if ((vport->load_flag & FC_UNLOADING) &&
+ ndlp->nlp_DID == Fabric_DID) {
+ list_del(&sglq_entry->list);
+ sglq_entry->state = SGL_FREED;
+ list_add_tail(&sglq_entry->list,
+ &phba->sli4_hba.lpfc_els_sgl_list);
+ }
}
}
spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 7195ca0275f93e..0b1e1cc00e0197 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -966,8 +966,20 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
struct lpfc_nodelist *ndlp, *next_ndlp;
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
+ /* It's possible the FLOGI to the fabric node never
+ * successfully completed and never registered with the
+ * transport. In this case there is no way to clean up
+ * the node.
+ */
+ if (ndlp->nlp_DID == Fabric_DID) {
+ if (ndlp->nlp_prev_state ==
+ NLP_STE_UNUSED_NODE &&
+ !ndlp->fc4_xpt_flags)
+ lpfc_nlp_put(ndlp);
+ }
continue;
+ }
if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
((vport->port_type == LPFC_NPIV_PORT) &&
@@ -4449,8 +4461,9 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
fc_remote_port_rolechg(rport, rport_ids.roles);
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "3183 %s rport x%px DID x%x, role x%x\n",
- __func__, rport, rport->port_id, rport->roles);
+ "3183 %s rport x%px DID x%x, role x%x refcnt %d\n",
+ __func__, rport, rport->port_id, rport->roles,
+ kref_read(&ndlp->kref));
if ((rport->scsi_target_id != -1) &&
(rport->scsi_target_id < LPFC_MAX_TARGET)) {
@@ -4475,8 +4488,9 @@ lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"3184 rport unregister x%06x, rport x%px "
- "xptflg x%x\n",
- ndlp->nlp_DID, rport, ndlp->fc4_xpt_flags);
+ "xptflg x%x refcnt %d\n",
+ ndlp->nlp_DID, rport, ndlp->fc4_xpt_flags,
+ kref_read(&ndlp->kref));
fc_remote_port_delete(rport);
lpfc_nlp_put(ndlp);
@@ -4679,8 +4693,11 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Reg/Unreg for FCP and NVME Transport interface */
if ((old_state == NLP_STE_MAPPED_NODE ||
old_state == NLP_STE_UNMAPPED_NODE)) {
- /* For nodes marked for ADISC, Handle unreg in ADISC cmpl */
- if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
+ /* For nodes marked for ADISC, Handle unreg in ADISC cmpl
+ * if linkup. In linkdown do unreg_node
+ */
+ if (!(ndlp->nlp_flag & NLP_NPR_ADISC) ||
+ !lpfc_is_link_up(vport->phba))
lpfc_nlp_unreg_node(vport, ndlp);
}
@@ -5233,6 +5250,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
+ ndlp->nlp_flag &= ~NLP_UNREG_INP;
mempool_free(mbox, phba->mbox_mem_pool);
acc_plogi = 1;
}
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 195169badb3721..8d5537ec0f30d5 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1606,6 +1606,11 @@ void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
spin_lock_irq(&phba->hbalock);
+ if (phba->link_state == LPFC_HBA_ERROR &&
+ phba->hba_flag & HBA_PCI_ERR) {
+ spin_unlock_irq(&phba->hbalock);
+ return;
+ }
phba->link_state = LPFC_HBA_ERROR;
spin_unlock_irq(&phba->hbalock);
@@ -1945,7 +1950,6 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
if (pci_channel_offline(phba->pcidev)) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3166 pci channel is offline\n");
- lpfc_sli4_offline_eratt(phba);
return;
}
@@ -3643,6 +3647,7 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
struct lpfc_vport **vports;
struct Scsi_Host *shost;
int i;
+ int offline = 0;
if (vport->fc_flag & FC_OFFLINE_MODE)
return;
@@ -3651,6 +3656,8 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
lpfc_linkdown(phba);
+ offline = pci_channel_offline(phba->pcidev);
+
/* Issue an unreg_login to all nodes on all vports */
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL) {
@@ -3673,7 +3680,14 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
spin_unlock_irq(&ndlp->lock);
- lpfc_unreg_rpi(vports[i], ndlp);
+ if (offline) {
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~(NLP_UNREG_INP |
+ NLP_RPI_REGISTERED);
+ spin_unlock_irq(&ndlp->lock);
+ } else {
+ lpfc_unreg_rpi(vports[i], ndlp);
+ }
/*
* Whenever an SLI4 port goes offline, free the
* RPI. Get a new RPI when the adapter port
@@ -5862,7 +5876,7 @@ lpfc_cmf_timer(struct hrtimer *timer)
uint32_t io_cnt;
uint32_t head, tail;
uint32_t busy, max_read;
- uint64_t total, rcv, lat, mbpi;
+ uint64_t total, rcv, lat, mbpi, extra;
int timer_interval = LPFC_CMF_INTERVAL;
uint32_t ms;
struct lpfc_cgn_stat *cgs;
@@ -5929,7 +5943,19 @@ lpfc_cmf_timer(struct hrtimer *timer)
phba->hba_flag & HBA_SETUP) {
mbpi = phba->cmf_last_sync_bw;
phba->cmf_last_sync_bw = 0;
- lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total);
+ extra = 0;
+
+ /* Calculate any extra bytes needed to account for the
+ * timer accuracy. If we are less than LPFC_CMF_INTERVAL
+ * add an extra 3% slop factor, equal to LPFC_CMF_INTERVAL
+ * add an extra 2%. The goal is to equalize total with a
+ * time > LPFC_CMF_INTERVAL or <= LPFC_CMF_INTERVAL + 1
+ */
+ if (ms == LPFC_CMF_INTERVAL)
+ extra = div_u64(total, 50);
+ else if (ms < LPFC_CMF_INTERVAL)
+ extra = div_u64(total, 33);
+ lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra);
} else {
/* For Monitor mode or link down we want mbpi
* to be the full link speed
@@ -13368,8 +13394,6 @@ lpfc_init_congestion_buf(struct lpfc_hba *phba)
atomic_set(&phba->cgn_sync_alarm_cnt, 0);
atomic_set(&phba->cgn_sync_warn_cnt, 0);
- atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
- atomic64_set(&phba->cgn_acqe_stat.warn, 0);
atomic_set(&phba->cgn_driver_evt_cnt, 0);
atomic_set(&phba->cgn_latency_evt_cnt, 0);
atomic64_set(&phba->cgn_latency_evt, 0);
@@ -14080,6 +14104,10 @@ lpfc_pci_resume_one_s3(struct device *dev_d)
return error;
}
+ /* Init cpu_map array */
+ lpfc_cpu_map_array_init(phba);
+ /* Init hba_eq_hdl array */
+ lpfc_hba_eq_hdl_array_init(phba);
/* Configure and enable interrupt */
intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
if (intr_mode == LPFC_INTR_ERROR) {
@@ -15033,14 +15061,17 @@ lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
lpfc_sli4_prep_dev_for_recover(phba);
return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_frozen:
+ phba->hba_flag |= HBA_PCI_ERR;
/* Fatal error, prepare for slot reset */
lpfc_sli4_prep_dev_for_reset(phba);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
+ phba->hba_flag |= HBA_PCI_ERR;
/* Permanent failure, prepare for device down */
lpfc_sli4_prep_dev_for_perm_failure(phba);
return PCI_ERS_RESULT_DISCONNECT;
default:
+ phba->hba_flag |= HBA_PCI_ERR;
/* Unknown state, prepare and request slot reset */
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2825 Unknown PCI error state: x%x\n", state);
@@ -15084,6 +15115,7 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev)
pci_restore_state(pdev);
+ phba->hba_flag &= ~HBA_PCI_ERR;
/*
* As the new kernel behavior of pci_restore_state() API call clears
* device saved_state flag, need to save the restored state again.
@@ -15106,6 +15138,7 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev)
return PCI_ERS_RESULT_DISCONNECT;
} else
phba->intr_mode = intr_mode;
+ lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
/* Log the current active interrupt mode */
lpfc_log_intr_mode(phba, phba->intr_mode);
@@ -15307,6 +15340,10 @@ lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
+ if (phba->link_state == LPFC_HBA_ERROR &&
+ phba->hba_flag & HBA_IOQ_FLUSH)
+ return PCI_ERS_RESULT_NEED_RESET;
+
switch (phba->pci_dev_grp) {
case LPFC_PCI_DEV_LP:
rc = lpfc_io_error_detected_s3(pdev, state);
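
As a quick sanity check on the lpfc_cmf_timer() slop factors introduced above: div_u64(total, 33) adds roughly 3% and div_u64(total, 50) adds exactly 2% of the interval's byte count. A standalone arithmetic sketch, assuming a hypothetical total of 1,000,000 bytes for one interval:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t total = 1000000;	/* assumed bytes seen in one interval */

	/* ms < LPFC_CMF_INTERVAL: ~3% slop */
	printf("extra (short interval) = %llu\n",
	       (unsigned long long)(total / 33));	/* 30303, ~3.03% */

	/* ms == LPFC_CMF_INTERVAL: 2% slop */
	printf("extra (exact interval) = %llu\n",
	       (unsigned long long)(total / 50));	/* 20000, 2.00% */
	return 0;
}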
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 479b3eed620857..9601edd838e105 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -209,8 +209,9 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
* calling state machine to remove the node.
*/
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
- "6146 remoteport delete of remoteport x%px\n",
- remoteport);
+ "6146 remoteport delete of remoteport x%px, ndlp x%px "
+ "DID x%x xflags x%x\n",
+ remoteport, ndlp, ndlp->nlp_DID, ndlp->fc4_xpt_flags);
spin_lock_irq(&ndlp->lock);
/* The register rebind might have occurred before the delete
@@ -936,6 +937,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
int cpu;
#endif
+ int offline = 0;
/* Sanity check on return of outstanding command */
if (!lpfc_ncmd) {
@@ -1097,11 +1099,12 @@ out_err:
nCmd->transferred_length = 0;
nCmd->rcv_rsplen = 0;
nCmd->status = NVME_SC_INTERNAL;
+ offline = pci_channel_offline(vport->phba->pcidev);
}
}
/* pick up SLI4 exhange busy condition */
- if (bf_get(lpfc_wcqe_c_xb, wcqe))
+ if (bf_get(lpfc_wcqe_c_xb, wcqe) && !offline)
lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
else
lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
@@ -1296,7 +1299,6 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
struct sli4_sge *first_data_sgl;
struct ulp_bde64 *bde;
dma_addr_t physaddr = 0;
- uint32_t num_bde = 0;
uint32_t dma_len = 0;
uint32_t dma_offset = 0;
int nseg, i, j;
@@ -1350,7 +1352,7 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
}
sgl->word2 = 0;
- if ((num_bde + 1) == nseg) {
+ if (nseg == 1) {
bf_set(lpfc_sli4_sge_last, sgl, 1);
bf_set(lpfc_sli4_sge_type, sgl,
LPFC_SGE_TYPE_DATA);
@@ -1419,8 +1421,9 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
j++;
}
- if (phba->cfg_enable_pbde) {
- /* Use PBDE support for first SGL only, offset == 0 */
+
+ /* PBDE support for first data SGE only */
+ if (nseg == 1 && phba->cfg_enable_pbde) {
/* Words 13-15 */
bde = (struct ulp_bde64 *)
&wqe->words[13];
@@ -1431,11 +1434,11 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
bde->tus.w = cpu_to_le32(bde->tus.w);
- /* Word 11 */
+ /* Word 11 - set PBDE bit */
bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);
} else {
memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
- bf_set(wqe_pbde, &wqe->generic.wqe_com, 0);
+ /* Word 11 - PBDE bit disabled by default template */
}
} else {
@@ -2166,6 +2169,10 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
abts_nvme = 0;
for (i = 0; i < phba->cfg_hdw_queue; i++) {
qp = &phba->sli4_hba.hdwq[i];
+ if (!vport || !vport->localport ||
+ !qp || !qp->io_wq)
+ return;
+
pring = qp->io_wq->pring;
if (!pring)
continue;
@@ -2173,6 +2180,10 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
abts_scsi += qp->abts_scsi_io_bufs;
abts_nvme += qp->abts_nvme_io_bufs;
}
+ if (!vport || !vport->localport ||
+ vport->phba->hba_flag & HBA_PCI_ERR)
+ return;
+
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6176 Lport x%px Localport x%px wait "
"timed out. Pending %d [%d:%d]. "
@@ -2212,6 +2223,8 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
return;
localport = vport->localport;
+ if (!localport)
+ return;
lport = (struct lpfc_nvme_lport *)localport->private;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
@@ -2528,7 +2541,8 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* return values is ignored. The upcall is a courtesy to the
* transport.
*/
- if (vport->load_flag & FC_UNLOADING)
+ if (vport->load_flag & FC_UNLOADING ||
+ unlikely(vport->phba->hba_flag & HBA_PCI_ERR))
(void)nvme_fc_set_remoteport_devloss(remoteport, 0);
ret = nvme_fc_unregister_remoteport(remoteport);
@@ -2557,6 +2571,42 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
}
/**
+ * lpfc_sli4_nvme_pci_offline_aborted - Fast-path process of NVME xri abort
+ * @phba: pointer to lpfc hba data structure.
+ * @lpfc_ncmd: The nvme job structure for the request being aborted.
+ *
+ * This routine is invoked by the worker thread to process a SLI4 fast-path
+ * NVME aborted xri. Aborted NVME IO commands are completed to the transport
+ * here.
+ **/
+void
+lpfc_sli4_nvme_pci_offline_aborted(struct lpfc_hba *phba,
+ struct lpfc_io_buf *lpfc_ncmd)
+{
+ struct nvmefc_fcp_req *nvme_cmd = NULL;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+ "6533 %s nvme_cmd %p tag x%x abort complete and "
+ "xri released\n", __func__,
+ lpfc_ncmd->nvmeCmd,
+ lpfc_ncmd->cur_iocbq.iotag);
+
+ /* Aborted NVME commands are required to not complete
+ * before the abort exchange command fully completes.
+ * Once completed, it is available via the put list.
+ */
+ if (lpfc_ncmd->nvmeCmd) {
+ nvme_cmd = lpfc_ncmd->nvmeCmd;
+ nvme_cmd->transferred_length = 0;
+ nvme_cmd->rcv_rsplen = 0;
+ nvme_cmd->status = NVME_SC_INTERNAL;
+ nvme_cmd->done(nvme_cmd);
+ lpfc_ncmd->nvmeCmd = NULL;
+ }
+ lpfc_release_nvme_buf(phba, lpfc_ncmd);
+}
+
+/**
* lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
* @phba: pointer to lpfc hba data structure.
* @axri: pointer to the fcp xri abort wcqe structure.
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 6e3dd0b9bcfa96..731802527b812b 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -2708,7 +2708,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
struct ulp_bde64 *bde;
dma_addr_t physaddr;
int i, cnt, nsegs;
- int do_pbde;
+ bool use_pbde = false;
int xc = 1;
if (!lpfc_is_link_up(phba)) {
@@ -2816,9 +2816,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
if (!xc)
bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);
- /* Word 11 - set sup, irsp, irsplen later */
- do_pbde = 0;
-
/* Word 12 */
wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
@@ -2896,12 +2893,13 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
if (!xc)
bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);
- /* Word 11 - set pbde later */
- if (phba->cfg_enable_pbde) {
- do_pbde = 1;
+ /* Word 11 - check for pbde */
+ if (nsegs == 1 && phba->cfg_enable_pbde) {
+ use_pbde = true;
+ /* Word 11 - PBDE bit already preset by template */
} else {
+ /* Overwrite default template setting */
bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
- do_pbde = 0;
}
/* Word 12 */
@@ -2972,7 +2970,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
((rsp->rsplen >> 2) - 1));
memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
}
- do_pbde = 0;
/* Word 12 */
wqe->fcp_trsp.rsvd_12_15[0] = 0;
@@ -3007,23 +3004,24 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
bf_set(lpfc_sli4_sge_last, sgl, 1);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len = cpu_to_le32(cnt);
- if (i == 0) {
- bde = (struct ulp_bde64 *)&wqe->words[13];
- if (do_pbde) {
- /* Words 13-15 (PBDE) */
- bde->addrLow = sgl->addr_lo;
- bde->addrHigh = sgl->addr_hi;
- bde->tus.f.bdeSize =
- le32_to_cpu(sgl->sge_len);
- bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
- bde->tus.w = cpu_to_le32(bde->tus.w);
- } else {
- memset(bde, 0, sizeof(struct ulp_bde64));
- }
- }
sgl++;
ctxp->offset += cnt;
}
+
+ bde = (struct ulp_bde64 *)&wqe->words[13];
+ if (use_pbde) {
+ /* decrement sgl ptr backwards once to first data sge */
+ sgl--;
+
+ /* Words 13-15 (PBDE) */
+ bde->addrLow = sgl->addr_lo;
+ bde->addrHigh = sgl->addr_hi;
+ bde->tus.f.bdeSize = le32_to_cpu(sgl->sge_len);
+ bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ bde->tus.w = cpu_to_le32(bde->tus.w);
+ } else {
+ memset(bde, 0, sizeof(struct ulp_bde64));
+ }
ctxp->state = LPFC_NVME_STE_DATA;
ctxp->entry_cnt++;
return nvmewqe;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index befdf864c43bdc..27ea8f552ede9b 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -493,8 +493,8 @@ void
lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
struct sli4_wcqe_xri_aborted *axri, int idx)
{
- uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
- uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
+ u16 xri = 0;
+ u16 rxid = 0;
struct lpfc_io_buf *psb, *next_psb;
struct lpfc_sli4_hdw_queue *qp;
unsigned long iflag = 0;
@@ -504,15 +504,22 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
int rrq_empty = 0;
struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;
struct scsi_cmnd *cmd;
+ int offline = 0;
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
return;
-
+ offline = pci_channel_offline(phba->pcidev);
+ if (!offline) {
+ xri = bf_get(lpfc_wcqe_xa_xri, axri);
+ rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
+ }
qp = &phba->sli4_hba.hdwq[idx];
spin_lock_irqsave(&phba->hbalock, iflag);
spin_lock(&qp->abts_io_buf_list_lock);
list_for_each_entry_safe(psb, next_psb,
&qp->lpfc_abts_io_buf_list, list) {
+ if (offline)
+ xri = psb->cur_iocbq.sli4_xritag;
if (psb->cur_iocbq.sli4_xritag == xri) {
list_del_init(&psb->list);
psb->flags &= ~LPFC_SBUF_XBUSY;
@@ -521,8 +528,15 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
qp->abts_nvme_io_bufs--;
spin_unlock(&qp->abts_io_buf_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
- lpfc_sli4_nvme_xri_aborted(phba, axri, psb);
- return;
+ if (!offline) {
+ lpfc_sli4_nvme_xri_aborted(phba, axri,
+ psb);
+ return;
+ }
+ lpfc_sli4_nvme_pci_offline_aborted(phba, psb);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ spin_lock(&qp->abts_io_buf_list_lock);
+ continue;
}
qp->abts_scsi_io_bufs--;
spin_unlock(&qp->abts_io_buf_list_lock);
@@ -534,13 +548,13 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
rrq_empty = list_empty(&phba->active_rrq_list);
spin_unlock_irqrestore(&phba->hbalock, iflag);
- if (ndlp) {
+ if (ndlp && !offline) {
lpfc_set_rrq_active(phba, ndlp,
psb->cur_iocbq.sli4_lxritag, rxid, 1);
lpfc_sli4_abts_err_handler(phba, ndlp, axri);
}
- if (phba->cfg_fcp_wait_abts_rsp) {
+ if (phba->cfg_fcp_wait_abts_rsp || offline) {
spin_lock_irqsave(&psb->buf_lock, iflag);
cmd = psb->pCmd;
psb->pCmd = NULL;
@@ -567,25 +581,30 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
lpfc_release_scsi_buf_s4(phba, psb);
if (rrq_empty)
lpfc_worker_wake_up(phba);
- return;
+ if (!offline)
+ return;
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ spin_lock(&qp->abts_io_buf_list_lock);
+ continue;
}
}
spin_unlock(&qp->abts_io_buf_list_lock);
- for (i = 1; i <= phba->sli.last_iotag; i++) {
- iocbq = phba->sli.iocbq_lookup[i];
-
- if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
- (iocbq->iocb_flag & LPFC_IO_LIBDFC))
- continue;
- if (iocbq->sli4_xritag != xri)
- continue;
- psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
- psb->flags &= ~LPFC_SBUF_XBUSY;
- spin_unlock_irqrestore(&phba->hbalock, iflag);
- if (!list_empty(&pring->txq))
- lpfc_worker_wake_up(phba);
- return;
+ if (!offline) {
+ for (i = 1; i <= phba->sli.last_iotag; i++) {
+ iocbq = phba->sli.iocbq_lookup[i];
+ if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
+ (iocbq->iocb_flag & LPFC_IO_LIBDFC))
+ continue;
+ if (iocbq->sli4_xritag != xri)
+ continue;
+ psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
+ psb->flags &= ~LPFC_SBUF_XBUSY;
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ if (!list_empty(&pring->txq))
+ lpfc_worker_wake_up(phba);
+ return;
+ }
}
spin_unlock_irqrestore(&phba->hbalock, iflag);
}
@@ -3215,7 +3234,6 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
struct lpfc_vport *vport = phba->pport;
union lpfc_wqe128 *wqe = &pwqeq->wqe;
dma_addr_t physaddr;
- uint32_t num_bde = 0;
uint32_t dma_len;
uint32_t dma_offset = 0;
int nseg, i, j;
@@ -3277,7 +3295,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
j = 2;
for (i = 0; i < nseg; i++) {
sgl->word2 = 0;
- if ((num_bde + 1) == nseg) {
+ if (nseg == 1) {
bf_set(lpfc_sli4_sge_last, sgl, 1);
bf_set(lpfc_sli4_sge_type, sgl,
LPFC_SGE_TYPE_DATA);
@@ -3346,13 +3364,15 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
j++;
}
- /*
- * Setup the first Payload BDE. For FCoE we just key off
- * Performance Hints, for FC we use lpfc_enable_pbde.
- * We populate words 13-15 of IOCB/WQE.
+
+ /* PBDE support for first data SGE only.
+ * For FCoE, we key off Performance Hints.
+ * For FC, we key off lpfc_enable_pbde.
*/
- if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
- phba->cfg_enable_pbde) {
+ if (nseg == 1 &&
+ ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
+ phba->cfg_enable_pbde)) {
+ /* Words 13-15 */
bde = (struct ulp_bde64 *)
&wqe->words[13];
bde->addrLow = first_data_sgl->addr_lo;
@@ -3362,12 +3382,15 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
bde->tus.w = cpu_to_le32(bde->tus.w);
+ /* Word 11 - set PBDE bit */
+ bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);
} else {
memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
+ /* Word 11 - PBDE bit disabled by default template */
}
} else {
sgl += 1;
- /* clear the last flag in the fcp_rsp map entry */
+ /* set the last flag in the fcp_rsp map entry */
sgl->word2 = le32_to_cpu(sgl->word2);
bf_set(lpfc_sli4_sge_last, sgl, 1);
sgl->word2 = cpu_to_le32(sgl->word2);
@@ -3380,10 +3403,6 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
}
}
- /* Word 11 */
- if (phba->cfg_enable_pbde)
- bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);
-
/*
* Finish initializing those IOCB fields that are dependent on the
* scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
@@ -3941,7 +3960,8 @@ lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t size)
int cpu;
/* At this point we are either LPFC_CFG_MANAGED or LPFC_CFG_MONITOR */
- if (phba->cmf_active_mode == LPFC_CFG_MANAGED) {
+ if (phba->cmf_active_mode == LPFC_CFG_MANAGED &&
+ phba->cmf_max_bytes_per_interval) {
total = 0;
for_each_present_cpu(cpu) {
cgs = per_cpu_ptr(phba->cmf_stat, cpu);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 78ce38d7251c59..134ad0f8779d3d 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1404,7 +1404,8 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
}
if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
- (sglq->state != SGL_XRI_ABORTED)) {
+ (!(unlikely(pci_channel_offline(phba->pcidev)))) &&
+ sglq->state != SGL_XRI_ABORTED) {
spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
iflag);
@@ -4583,10 +4584,12 @@ lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
lpfc_sli_cancel_iocbs(phba, &txq,
IOSTAT_LOCAL_REJECT,
IOERR_SLI_DOWN);
- /* Flush the txcmpq */
+ /* Flush the txcmplq */
lpfc_sli_cancel_iocbs(phba, &txcmplq,
IOSTAT_LOCAL_REJECT,
IOERR_SLI_DOWN);
+ if (unlikely(pci_channel_offline(phba->pcidev)))
+ lpfc_sli4_io_xri_aborted(phba, NULL, 0);
}
} else {
pring = &psli->sli3_ring[LPFC_FCP_RING];
@@ -7761,8 +7764,6 @@ lpfc_mbx_cmpl_cgn_set_ftrs(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
/* Zero out Congestion Signal ACQE counter */
phba->cgn_acqe_cnt = 0;
- atomic64_set(&phba->cgn_acqe_stat.warn, 0);
- atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
acqe = bf_get(lpfc_mbx_set_feature_CGN_acqe_freq,
&pmb->u.mqe.un.set_feature);
@@ -8014,6 +8015,10 @@ lpfc_cmf_setup(struct lpfc_hba *phba)
/* initialize congestion buffer info */
lpfc_init_congestion_buf(phba);
lpfc_init_congestion_stat(phba);
+
+ /* Zero out Congestion Signal counters */
+ atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
+ atomic64_set(&phba->cgn_acqe_stat.warn, 0);
}
rc = lpfc_sli4_cgn_params_read(phba);
@@ -8153,6 +8158,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
struct lpfc_vport *vport = phba->pport;
struct lpfc_dmabuf *mp;
struct lpfc_rqb *rqbp;
+ u32 flg;
/* Perform a PCI function reset to start from clean */
rc = lpfc_pci_function_reset(phba);
@@ -8166,7 +8172,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
else {
spin_lock_irq(&phba->hbalock);
phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
+ flg = phba->sli.sli_flag;
spin_unlock_irq(&phba->hbalock);
+ /* Allow a little time after setting SLI_ACTIVE for any polled
+ * MBX commands to complete via BSG.
+ */
+ for (i = 0; i < 50 && (flg & LPFC_SLI_MBOX_ACTIVE); i++) {
+ msleep(20);
+ spin_lock_irq(&phba->hbalock);
+ flg = phba->sli.sli_flag;
+ spin_unlock_irq(&phba->hbalock);
+ }
}
lpfc_sli4_dip(phba);
@@ -9750,7 +9766,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
"(%d):2541 Mailbox command x%x "
"(x%x/x%x) failure: "
"mqe_sta: x%x mcqe_sta: x%x/x%x "
- "Data: x%x x%x\n,",
+ "Data: x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
mboxq->u.mb.mbxCommand,
lpfc_sli_config_mbox_subsys_get(phba,
@@ -9784,7 +9800,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
"(%d):2597 Sync Mailbox command "
"x%x (x%x/x%x) failure: "
"mqe_sta: x%x mcqe_sta: x%x/x%x "
- "Data: x%x x%x\n,",
+ "Data: x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
mboxq->u.mb.mbxCommand,
lpfc_sli_config_mbox_subsys_get(phba,
@@ -12485,15 +12501,54 @@ lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
}
/**
- * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
+ * lpfc_sli_validate_fcp_iocb_for_abort - filter iocbs appropriate for FCP aborts
+ * @iocbq: Pointer to iocb object.
+ * @vport: Pointer to driver virtual port object.
+ *
+ * This function acts as an iocb filter for functions which abort FCP iocbs.
+ *
+ * Return values
+ * -ENODEV, if a null iocb or vport ptr is encountered
+ * -EINVAL, if the iocb is not an FCP I/O, not on the TX cmpl queue, premarked as
+ * driver already started the abort process, or is an abort iocb itself
+ * 0, passes criteria for aborting the FCP I/O iocb
+ **/
+static int
+lpfc_sli_validate_fcp_iocb_for_abort(struct lpfc_iocbq *iocbq,
+ struct lpfc_vport *vport)
+{
+ IOCB_t *icmd = NULL;
+
+ /* No null ptr vports */
+ if (!iocbq || iocbq->vport != vport)
+ return -ENODEV;
+
+ /* iocb must be for FCP IO, already exists on the TX cmpl queue,
+ * can't be premarked as driver aborted, nor be an ABORT iocb itself
+ */
+ icmd = &iocbq->iocb;
+ if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
+ !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ) ||
+ (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
+ (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
+ icmd->ulpCommand == CMD_CLOSE_XRI_CN))
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * lpfc_sli_validate_fcp_iocb - validate commands associated with a SCSI target
* @iocbq: Pointer to driver iocb object.
* @vport: Pointer to driver virtual port object.
* @tgt_id: SCSI ID of the target.
* @lun_id: LUN ID of the scsi device.
* @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
*
- * This function acts as an iocb filter for functions which abort or count
- * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
+ * This function acts as an iocb filter for validating a lun/SCSI target/SCSI
+ * host.
+ *
+ * It will return
* 0 if the filtering criteria is met for the given iocb and will return
* 1 if the filtering criteria is not met.
* If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
@@ -12512,22 +12567,8 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
lpfc_ctx_cmd ctx_cmd)
{
struct lpfc_io_buf *lpfc_cmd;
- IOCB_t *icmd = NULL;
int rc = 1;
- if (!iocbq || iocbq->vport != vport)
- return rc;
-
- if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
- !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ) ||
- iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
- return rc;
-
- icmd = &iocbq->iocb;
- if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
- icmd->ulpCommand == CMD_CLOSE_XRI_CN)
- return rc;
-
lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
if (lpfc_cmd->pCmd == NULL)
@@ -12582,17 +12623,33 @@ lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *iocbq;
+ IOCB_t *icmd = NULL;
int sum, i;
+ unsigned long iflags;
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
iocbq = phba->sli.iocbq_lookup[i];
- if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
- ctx_cmd) == 0)
+ if (!iocbq || iocbq->vport != vport)
+ continue;
+ if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
+ !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
+ continue;
+
+ /* Include counting outstanding aborts */
+ icmd = &iocbq->iocb;
+ if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
+ icmd->ulpCommand == CMD_CLOSE_XRI_CN) {
+ sum++;
+ continue;
+ }
+
+ if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
+ ctx_cmd) == 0)
sum++;
}
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
return sum;
}
@@ -12659,7 +12716,11 @@ lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
*
* This function sends an abort command for every SCSI command
* associated with the given virtual port pending on the ring
- * filtered by lpfc_sli_validate_fcp_iocb function.
+ * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then by
+ * lpfc_sli_validate_fcp_iocb. Before an abort iocb is submitted, each iocb
+ * must pass lpfc_sli_validate_fcp_iocb_for_abort first and
+ * lpfc_sli_validate_fcp_iocb second.
+ *
* When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
* FCP iocbs associated with lun specified by tgt_id and lun_id
* parameters
@@ -12691,6 +12752,9 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
for (i = 1; i <= phba->sli.last_iotag; i++) {
iocbq = phba->sli.iocbq_lookup[i];
+ if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
+ continue;
+
if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
abort_cmd) != 0)
continue;
@@ -12723,7 +12787,11 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
*
* This function sends an abort command for every SCSI command
* associated with the given virtual port pending on the ring
- * filtered by lpfc_sli_validate_fcp_iocb function.
+ * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then by
+ * lpfc_sli_validate_fcp_iocb. Before an abort iocb is submitted, each iocb
+ * must pass lpfc_sli_validate_fcp_iocb_for_abort first and
+ * lpfc_sli_validate_fcp_iocb second.
+ *
* When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the
* FCP iocbs associated with lun specified by tgt_id and lun_id
* parameters
@@ -12761,6 +12829,9 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
for (i = 1; i <= phba->sli.last_iotag; i++) {
iocbq = phba->sli.iocbq_lookup[i];
+ if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
+ continue;
+
if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
cmd) != 0)
continue;
@@ -21104,6 +21175,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
fail_msg,
piocbq->iotag, piocbq->sli4_xritag);
list_add_tail(&piocbq->list, &completions);
+ fail_msg = NULL;
}
spin_unlock_irqrestore(&pring->ring_lock, iflags);
}
@@ -21963,8 +22035,26 @@ lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
qp = &phba->sli4_hba.hdwq[hwqid];
lpfc_ncmd = NULL;
+ if (!qp) {
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
+ "5556 NULL qp for hwqid x%x\n", hwqid);
+ return lpfc_ncmd;
+ }
multixri_pool = qp->p_multixri_pool;
+ if (!multixri_pool) {
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
+ "5557 NULL multixri for hwqid x%x\n", hwqid);
+ return lpfc_ncmd;
+ }
pvt_pool = &multixri_pool->pvt_pool;
+ if (!pvt_pool) {
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
+ "5558 NULL pvt_pool for hwqid x%x\n", hwqid);
+ return lpfc_ncmd;
+ }
multixri_pool->io_req_count++;
/* If pvt_pool is empty, move some XRIs from public to private pool */
@@ -22040,6 +22130,12 @@ struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
qp = &phba->sli4_hba.hdwq[hwqid];
lpfc_cmd = NULL;
+ if (!qp) {
+ lpfc_printf_log(phba, KERN_WARNING,
+ LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
+ "5555 NULL qp for hwqid x%x\n", hwqid);
+ return lpfc_cmd;
+ }
if (phba->cfg_xri_rebalancing)
lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 99c5d1e4da5efb..5962cf508842f7 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -1116,6 +1116,8 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba);
+void lpfc_sli4_nvme_pci_offline_aborted(struct lpfc_hba *phba,
+ struct lpfc_io_buf *lpfc_ncmd);
void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
struct sli4_wcqe_xri_aborted *axri,
struct lpfc_io_buf *lpfc_ncmd);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index a7aba78334257a..dec71775f67770 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.0.0.1"
+#define LPFC_DRIVER_VERSION "14.0.0.2"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index ec05c42e8ee6c3..b25e447aa3bd88 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -409,6 +409,7 @@ static ssize_t pm8001_ctl_ib_queue_log_show(struct device *cdev,
char *str = buf;
int start = 0;
u32 ib_offset = pm8001_ha->ib_offset;
+ u32 queue_size = pm8001_ha->max_q_num * PM8001_MPI_QUEUE * 128;
#define IB_MEMMAP(c) \
(*(u32 *)((u8 *)pm8001_ha-> \
memoryMap.region[ib_offset].virt_ptr + \
@@ -419,7 +420,7 @@ static ssize_t pm8001_ctl_ib_queue_log_show(struct device *cdev,
start = start + 4;
}
pm8001_ha->evtlog_ib_offset += SYSFS_OFFSET;
- if (((pm8001_ha->evtlog_ib_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0)
+ if (((pm8001_ha->evtlog_ib_offset) % queue_size) == 0)
pm8001_ha->evtlog_ib_offset = 0;
return str - buf;
@@ -445,6 +446,7 @@ static ssize_t pm8001_ctl_ob_queue_log_show(struct device *cdev,
char *str = buf;
int start = 0;
u32 ob_offset = pm8001_ha->ob_offset;
+ u32 queue_size = pm8001_ha->max_q_num * PM8001_MPI_QUEUE * 128;
#define OB_MEMMAP(c) \
(*(u32 *)((u8 *)pm8001_ha-> \
memoryMap.region[ob_offset].virt_ptr + \
@@ -455,7 +457,7 @@ static ssize_t pm8001_ctl_ob_queue_log_show(struct device *cdev,
start = start + 4;
}
pm8001_ha->evtlog_ob_offset += SYSFS_OFFSET;
- if (((pm8001_ha->evtlog_ob_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0)
+ if (((pm8001_ha->evtlog_ob_offset) % queue_size) == 0)
pm8001_ha->evtlog_ob_offset = 0;
return str - buf;
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 63690508313b76..c9ecddd0d719c9 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -3358,6 +3358,8 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
unsigned long flags;
u8 deviceType = pPayload->sas_identify.dev_type;
+ phy->port = port;
+ port->port_id = port_id;
port->port_state = portstate;
phy->phy_state = PHY_STATE_LINK_UP_SPC;
pm8001_dbg(pm8001_ha, MSG,
@@ -3434,6 +3436,8 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
unsigned long flags;
pm8001_dbg(pm8001_ha, DEVIO, "HW_EVENT_SATA_PHY_UP port id = %d, phy id = %d\n",
port_id, phy_id);
+ phy->port = port;
+ port->port_id = port_id;
port->port_state = portstate;
phy->phy_state = PHY_STATE_LINK_UP_SPC;
port->port_attached = 1;
@@ -4460,6 +4464,7 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
u16 ITNT = 2000;
struct domain_device *dev = pm8001_dev->sas_device;
struct domain_device *parent_dev = dev->parent;
+ struct pm8001_port *port = dev->port->lldd_port;
circularQ = &pm8001_ha->inbnd_q_tbl[0];
memset(&payload, 0, sizeof(payload));
@@ -4488,7 +4493,7 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
linkrate = (pm8001_dev->sas_device->linkrate < dev->port->linkrate) ?
pm8001_dev->sas_device->linkrate : dev->port->linkrate;
payload.phyid_portid =
- cpu_to_le32(((pm8001_dev->sas_device->port->id) & 0x0F) |
+ cpu_to_le32(((port->port_id) & 0x0F) |
((phy_id & 0x0F) << 4));
payload.dtype_dlr_retry = cpu_to_le32((retryFlag & 0x01) |
((linkrate & 0x0F) * 0x1000000) |
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 47db7e0beae6f6..7082fecf7ce808 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -128,6 +128,7 @@ static struct sas_domain_function_template pm8001_transport_ops = {
.lldd_I_T_nexus_reset = pm8001_I_T_nexus_reset,
.lldd_lu_reset = pm8001_lu_reset,
.lldd_query_task = pm8001_query_task,
+ .lldd_port_formed = pm8001_port_formed,
};
/**
@@ -1198,6 +1199,7 @@ pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha, struct Scsi_Host *shost,
goto err_out;
/* Memory region for ccb_info*/
+ pm8001_ha->ccb_count = ccb_count;
pm8001_ha->ccb_info =
kcalloc(ccb_count, sizeof(struct pm8001_ccb_info), GFP_KERNEL);
if (!pm8001_ha->ccb_info) {
@@ -1259,6 +1261,16 @@ static void pm8001_pci_remove(struct pci_dev *pdev)
tasklet_kill(&pm8001_ha->tasklet[j]);
#endif
scsi_host_put(pm8001_ha->shost);
+
+ for (i = 0; i < pm8001_ha->ccb_count; i++) {
+ dma_free_coherent(&pm8001_ha->pdev->dev,
+ sizeof(struct pm8001_prd) * PM8001_MAX_DMA_SG,
+ pm8001_ha->ccb_info[i].buf_prd,
+ pm8001_ha->ccb_info[i].ccb_dma_handle);
+ }
+ kfree(pm8001_ha->ccb_info);
+ kfree(pm8001_ha->devices);
+
pm8001_free(pm8001_ha);
kfree(sha->sas_phy);
kfree(sha->sas_port);
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 32e60f0c3b1483..83e73009db5cd3 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -1355,3 +1355,18 @@ int pm8001_clear_task_set(struct domain_device *dev, u8 *lun)
tmf_task.tmf = TMF_CLEAR_TASK_SET;
return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
}
+
+void pm8001_port_formed(struct asd_sas_phy *sas_phy)
+{
+ struct sas_ha_struct *sas_ha = sas_phy->ha;
+ struct pm8001_hba_info *pm8001_ha = sas_ha->lldd_ha;
+ struct pm8001_phy *phy = sas_phy->lldd_phy;
+ struct asd_sas_port *sas_port = sas_phy->port;
+ struct pm8001_port *port = phy->port;
+
+ if (!sas_port) {
+ pm8001_dbg(pm8001_ha, FAIL, "Received null port\n");
+ return;
+ }
+ sas_port->lldd_port = port;
+}
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 62d08b535a4b6c..7e999768bfd2d0 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -230,6 +230,7 @@ struct pm8001_port {
u8 port_attached;
u16 wide_port_phymap;
u8 port_state;
+ u8 port_id;
struct list_head list;
};
@@ -457,6 +458,7 @@ struct outbound_queue_table {
__le32 producer_index;
u32 consumer_idx;
spinlock_t oq_lock;
+ unsigned long lock_flags;
};
struct pm8001_hba_memspace {
void __iomem *memvirtaddr;
@@ -516,6 +518,7 @@ struct pm8001_hba_info {
u32 iomb_size; /* SPC and SPCV IOMB size */
struct pm8001_device *devices;
struct pm8001_ccb_info *ccb_info;
+ u32 ccb_count;
#ifdef PM8001_USE_MSIX
int number_of_intr;/*will be used in remove()*/
char intr_drvname[PM8001_MAX_MSIX_VEC]
@@ -651,6 +654,7 @@ int pm8001_lu_reset(struct domain_device *dev, u8 *lun);
int pm8001_I_T_nexus_reset(struct domain_device *dev);
int pm8001_I_T_nexus_event_handler(struct domain_device *dev);
int pm8001_query_task(struct sas_task *task);
+void pm8001_port_formed(struct asd_sas_phy *sas_phy);
void pm8001_open_reject_retry(
struct pm8001_hba_info *pm8001_ha,
struct sas_task *task_to_close,
@@ -738,9 +742,7 @@ pm8001_ccb_task_free_done(struct pm8001_hba_info *pm8001_ha,
{
pm8001_ccb_task_free(pm8001_ha, task, ccb, ccb_idx);
smp_mb(); /*in order to force CPU ordering*/
- spin_unlock(&pm8001_ha->lock);
task->task_done(task);
- spin_lock(&pm8001_ha->lock);
}
#endif
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 6ffe17b849ae84..1ae2f5c6042cca 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -2379,7 +2379,8 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
/*See the comments for mpi_ssp_completion */
static void
-mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
+ struct outbound_queue_table *circularQ, void *piomb)
{
struct sas_task *t;
struct pm8001_ccb_info *ccb;
@@ -2616,7 +2617,11 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
+ spin_unlock_irqrestore(&circularQ->oq_lock,
+ circularQ->lock_flags);
pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ spin_lock_irqsave(&circularQ->oq_lock,
+ circularQ->lock_flags);
return;
}
break;
@@ -2632,7 +2637,11 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
+ spin_unlock_irqrestore(&circularQ->oq_lock,
+ circularQ->lock_flags);
pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ spin_lock_irqsave(&circularQ->oq_lock,
+ circularQ->lock_flags);
return;
}
break;
@@ -2656,7 +2665,11 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
+ spin_unlock_irqrestore(&circularQ->oq_lock,
+ circularQ->lock_flags);
pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ spin_lock_irqsave(&circularQ->oq_lock,
+ circularQ->lock_flags);
return;
}
break;
@@ -2727,7 +2740,11 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_DS_NON_OPERATIONAL);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
+ spin_unlock_irqrestore(&circularQ->oq_lock,
+ circularQ->lock_flags);
pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ spin_lock_irqsave(&circularQ->oq_lock,
+ circularQ->lock_flags);
return;
}
break;
@@ -2747,7 +2764,11 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_DS_IN_ERROR);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
+ spin_unlock_irqrestore(&circularQ->oq_lock,
+ circularQ->lock_flags);
pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ spin_lock_irqsave(&circularQ->oq_lock,
+ circularQ->lock_flags);
return;
}
break;
@@ -2785,12 +2806,17 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
+ spin_unlock_irqrestore(&circularQ->oq_lock,
+ circularQ->lock_flags);
pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ spin_lock_irqsave(&circularQ->oq_lock,
+ circularQ->lock_flags);
}
}
/*See the comments for mpi_ssp_completion */
-static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
+ struct outbound_queue_table *circularQ, void *piomb)
{
struct sas_task *t;
struct task_status_struct *ts;
@@ -2890,7 +2916,11 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_QUEUE_FULL;
+ spin_unlock_irqrestore(&circularQ->oq_lock,
+ circularQ->lock_flags);
pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ spin_lock_irqsave(&circularQ->oq_lock,
+ circularQ->lock_flags);
return;
}
break;
@@ -3002,7 +3032,11 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
+ spin_unlock_irqrestore(&circularQ->oq_lock,
+ circularQ->lock_flags);
pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ spin_lock_irqsave(&circularQ->oq_lock,
+ circularQ->lock_flags);
}
}
@@ -3299,6 +3333,8 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
unsigned long flags;
u8 deviceType = pPayload->sas_identify.dev_type;
+ phy->port = port;
+ port->port_id = port_id;
port->port_state = portstate;
port->wide_port_phymap |= (1U << phy_id);
phy->phy_state = PHY_STATE_LINK_UP_SPCV;
@@ -3380,6 +3416,8 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
"port id %d, phy id %d link_rate %d portstate 0x%x\n",
port_id, phy_id, link_rate, portstate);
+ phy->port = port;
+ port->port_id = port_id;
port->port_state = portstate;
phy->phy_state = PHY_STATE_LINK_UP_SPCV;
port->port_attached = 1;
@@ -3902,7 +3940,8 @@ static int ssp_coalesced_comp_resp(struct pm8001_hba_info *pm8001_ha,
* @pm8001_ha: our hba card information
* @piomb: IO message buffer
*/
-static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
+static void process_one_iomb(struct pm8001_hba_info *pm8001_ha,
+ struct outbound_queue_table *circularQ, void *piomb)
{
__le32 pHeader = *(__le32 *)piomb;
u32 opc = (u32)((le32_to_cpu(pHeader)) & 0xFFF);
@@ -3944,11 +3983,11 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
break;
case OPC_OUB_SATA_COMP:
pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_COMP\n");
- mpi_sata_completion(pm8001_ha, piomb);
+ mpi_sata_completion(pm8001_ha, circularQ, piomb);
break;
case OPC_OUB_SATA_EVENT:
pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_EVENT\n");
- mpi_sata_event(pm8001_ha, piomb);
+ mpi_sata_event(pm8001_ha, circularQ, piomb);
break;
case OPC_OUB_SSP_EVENT:
pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_EVENT\n");
@@ -4117,7 +4156,6 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
void *pMsg1 = NULL;
u8 bc;
u32 ret = MPI_IO_STATUS_FAIL;
- unsigned long flags;
u32 regval;
if (vec == (pm8001_ha->max_q_num - 1)) {
@@ -4134,7 +4172,7 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
}
}
circularQ = &pm8001_ha->outbnd_q_tbl[vec];
- spin_lock_irqsave(&circularQ->oq_lock, flags);
+ spin_lock_irqsave(&circularQ->oq_lock, circularQ->lock_flags);
do {
/* spurious interrupt during setup if kexec-ing and
* driver doing a doorbell access w/ the pre-kexec oq
@@ -4145,7 +4183,8 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
if (MPI_IO_STATUS_SUCCESS == ret) {
/* process the outbound message */
- process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4));
+ process_one_iomb(pm8001_ha, circularQ,
+ (void *)(pMsg1 - 4));
/* free the message from the outbound circular buffer */
pm8001_mpi_msg_free_set(pm8001_ha, pMsg1,
circularQ, bc);
@@ -4160,7 +4199,7 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
break;
}
} while (1);
- spin_unlock_irqrestore(&circularQ->oq_lock, flags);
+ spin_unlock_irqrestore(&circularQ->oq_lock, circularQ->lock_flags);
return ret;
}
@@ -4808,6 +4847,7 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
u16 ITNT = 2000;
struct domain_device *dev = pm8001_dev->sas_device;
struct domain_device *parent_dev = dev->parent;
+ struct pm8001_port *port = dev->port->lldd_port;
circularQ = &pm8001_ha->inbnd_q_tbl[0];
memset(&payload, 0, sizeof(payload));
@@ -4840,7 +4880,7 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
pm8001_dev->sas_device->linkrate : dev->port->linkrate;
payload.phyid_portid =
- cpu_to_le32(((pm8001_dev->sas_device->port->id) & 0xFF) |
+ cpu_to_le32(((port->port_id) & 0xFF) |
((phy_id & 0xFF) << 8));
payload.dtype_dlr_mcn_ir_retry = cpu_to_le32((retryFlag & 0x01) |
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index d09776b77af2e2..cb5f2ecb652d35 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1868,6 +1868,18 @@ qla2x00_port_speed_store(struct device *dev, struct device_attribute *attr,
return strlen(buf);
}
+static const struct {
+ u16 rate;
+ char *str;
+} port_speed_str[] = {
+ { PORT_SPEED_4GB, "4" },
+ { PORT_SPEED_8GB, "8" },
+ { PORT_SPEED_16GB, "16" },
+ { PORT_SPEED_32GB, "32" },
+ { PORT_SPEED_64GB, "64" },
+ { PORT_SPEED_10GB, "10" },
+};
+
static ssize_t
qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -1875,7 +1887,8 @@ qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr,
struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
ssize_t rval;
- char *spd[7] = {"0", "0", "0", "4", "8", "16", "32"};
+ u16 i;
+ char *speed = "Unknown";
rval = qla2x00_get_data_rate(vha);
if (rval != QLA_SUCCESS) {
@@ -1884,7 +1897,14 @@ qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr,
return -EINVAL;
}
- return scnprintf(buf, PAGE_SIZE, "%s\n", spd[ha->link_data_rate]);
+ for (i = 0; i < ARRAY_SIZE(port_speed_str); i++) {
+ if (port_speed_str[i].rate != ha->link_data_rate)
+ continue;
+ speed = port_speed_str[i].str;
+ break;
+ }
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", speed);
}
static ssize_t
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 4b5d28d89d6906..0c33fb0de21a40 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -2877,6 +2877,9 @@ qla2x00_process_vendor_specific(struct scsi_qla_host *vha, struct bsg_job *bsg_j
case QL_VND_MANAGE_HOST_PORT:
return qla2x00_manage_host_port(bsg_job);
+ case QL_VND_MBX_PASSTHRU:
+ return qla2x00_mailbox_passthru(bsg_job);
+
default:
return -ENOSYS;
}
@@ -3013,3 +3016,48 @@ done:
sp->free(sp);
return 0;
}
+
+int qla2x00_mailbox_passthru(struct bsg_job *bsg_job)
+{
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
+ int ret = -EINVAL;
+ int ptsize = sizeof(struct qla_mbx_passthru);
+ struct qla_mbx_passthru *req_data = NULL;
+ uint32_t req_data_len;
+
+ req_data_len = bsg_job->request_payload.payload_len;
+ if (req_data_len != ptsize) {
+ ql_log(ql_log_warn, vha, 0xf0a3, "req_data_len invalid.\n");
+ return -EIO;
+ }
+ req_data = kzalloc(ptsize, GFP_KERNEL);
+ if (!req_data) {
+ ql_log(ql_log_warn, vha, 0xf0a4,
+ "req_data memory allocation failure.\n");
+ return -ENOMEM;
+ }
+
+ /* Copy the request buffer into req_data */
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, req_data, ptsize);
+ ret = qla_mailbox_passthru(vha, req_data->mbx_in, req_data->mbx_out);
+
+ /* Copy req_data into the reply buffer */
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, req_data, ptsize);
+
+ bsg_reply->reply_payload_rcv_len = ptsize;
+ if (ret == QLA_SUCCESS)
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
+ else
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_ERR;
+
+ bsg_job->reply_len = sizeof(*bsg_job->reply);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+
+ kfree(req_data);
+
+ return ret;
+}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index dd793cf8bc1e87..0f8a4c7e52a233 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -36,6 +36,7 @@
#define QL_VND_GET_HOST_STATS 0x24
#define QL_VND_GET_TGT_STATS 0x25
#define QL_VND_MANAGE_HOST_PORT 0x26
+#define QL_VND_MBX_PASSTHRU 0x2B
/* BSG Vendor specific subcode returns */
#define EXT_STATUS_OK 0
@@ -187,6 +188,12 @@ struct qla_port_param {
uint16_t speed;
} __attribute__ ((packed));
+struct qla_mbx_passthru {
+ uint16_t reserved1[2];
+ uint16_t mbx_in[32];
+ uint16_t mbx_out[32];
+ uint32_t reserved2[16];
+} __packed;
/* FRU VPD */
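The QL_VND_MBX_PASSTHRU subcode and struct qla_mbx_passthru above define the payload that user space exchanges with qla2x00_mailbox_passthru(): 32 mailbox registers in, 32 registers back out. Below is a minimal sketch of how a management tool might prepare that payload; the struct mirrors the header above, while the surrounding FC BSG submission (an SG_IO v4 vendor request against the fc_host bsg node) is assumed and not shown.

/* Hypothetical user-space helper: build the mailbox passthru payload for
 * the QL_VND_MBX_PASSTHRU vendor command. The BSG plumbing that delivers
 * it to the driver is assumed and not shown. */
#include <stdint.h>
#include <string.h>

struct qla_mbx_passthru {		/* mirrors drivers/scsi/qla2xxx/qla_bsg.h */
	uint16_t reserved1[2];
	uint16_t mbx_in[32];		/* registers sent to the firmware */
	uint16_t mbx_out[32];		/* registers returned by the firmware */
	uint32_t reserved2[16];
} __attribute__((packed));

static void build_mbx_passthru(struct qla_mbx_passthru *p,
			       const uint16_t in_regs[32])
{
	memset(p, 0, sizeof(*p));
	memcpy(p->mbx_in, in_regs, sizeof(p->mbx_in));
	/* After the request completes, p->mbx_out holds the reply registers
	 * and vendor_rsp[0] reports EXT_STATUS_OK or EXT_STATUS_ERR. */
}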
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index be2eb75ee1a376..8924eeb9367dae 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -3750,6 +3750,7 @@ struct qla_qpair {
struct qla_fw_resources fwres ____cacheline_aligned;
u32 cmd_cnt;
u32 cmd_completion_cnt;
+ u32 prev_completion_cnt;
};
/* Place holder for FW buffer parameters */
@@ -4607,6 +4608,7 @@ struct qla_hw_data {
struct qla_chip_state_84xx *cs84xx;
struct isp_operations *isp_ops;
struct workqueue_struct *wq;
+ struct work_struct heartbeat_work;
struct qlfc_fw fw_buf;
/* FCP_CMND priority support */
@@ -4708,7 +4710,6 @@ struct qla_hw_data {
struct qla_hw_data_stat stat;
pci_error_state_t pci_error_state;
- u64 prev_cmd_cnt;
struct dma_pool *purex_dma_pool;
struct btree_head32 host_map;
@@ -4854,7 +4855,6 @@ typedef struct scsi_qla_host {
#define SET_ZIO_THRESHOLD_NEEDED 32
#define ISP_ABORT_TO_ROM 33
#define VPORT_DELETE 34
-#define HEARTBEAT_CHK 38
#define PROCESS_PUREX_IOCB 63
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 1c3f055d41b8e8..8aadcdeca6cb96 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -662,9 +662,13 @@ extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
extern void qla2xxx_flash_npiv_conf(scsi_qla_host_t *);
extern int qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *);
+extern int qla2x00_mailbox_passthru(struct bsg_job *bsg_job);
int __qla_copy_purex_to_buffer(struct scsi_qla_host *vha, void **pkt,
struct rsp_que **rsp, u8 *buf, u32 buf_len);
+int qla_mailbox_passthru(scsi_qla_host_t *vha, uint16_t *mbx_in,
+ uint16_t *mbx_out);
+
/*
* Global Function Prototypes in qla_dbg.c source file.
*/
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index ebc8fdb0b43d34..28b574e20ef323 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1537,7 +1537,8 @@ qla25xx_fdmi_port_speed_capability(struct qla_hw_data *ha)
}
if (IS_QLA2031(ha)) {
if ((ha->pdev->subsystem_vendor == 0x103C) &&
- (ha->pdev->subsystem_device == 0x8002)) {
+ ((ha->pdev->subsystem_device == 0x8002) ||
+ (ha->pdev->subsystem_device == 0x8086))) {
speeds = FDMI_PORT_SPEED_16GB;
} else {
speeds = FDMI_PORT_SPEED_16GB|FDMI_PORT_SPEED_8GB|
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 5fc7697f0af4c1..908a72ebf7c2aa 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -5335,15 +5335,14 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
"LOOP READY.\n");
ha->flags.fw_init_done = 1;
+ /*
+ * Use a link-up event to wake up the app so it can get ready for
+ * authentication.
+ */
if (ha->flags.edif_enabled &&
- !(vha->e_dbell.db_flags & EDB_ACTIVE) &&
- N2N_TOPO(vha->hw)) {
- /*
- * use port online to wake up app to get ready
- * for authentication
- */
- qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE, 0);
- }
+ !(vha->e_dbell.db_flags & EDB_ACTIVE))
+ qla2x00_post_aen_work(vha, FCH_EVT_LINKUP,
+ ha->link_data_rate);
/*
* Process any ATIO queue entries that came in
@@ -7026,12 +7025,14 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
ha->chip_reset++;
ha->base_qpair->chip_reset = ha->chip_reset;
ha->base_qpair->cmd_cnt = ha->base_qpair->cmd_completion_cnt = 0;
+ ha->base_qpair->prev_completion_cnt = 0;
for (i = 0; i < ha->max_qpairs; i++) {
if (ha->queue_pair_map[i]) {
ha->queue_pair_map[i]->chip_reset =
ha->base_qpair->chip_reset;
ha->queue_pair_map[i]->cmd_cnt =
ha->queue_pair_map[i]->cmd_completion_cnt = 0;
+ ha->base_qpair->prev_completion_cnt = 0;
}
}
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 7811c4952035b3..73a353153d33bf 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -3236,7 +3236,7 @@ qla24xx_abort_command(srb_t *sp)
fc_port_t *fcport = sp->fcport;
struct scsi_qla_host *vha = fcport->vha;
struct qla_hw_data *ha = vha->hw;
- struct req_que *req = vha->req;
+ struct req_que *req;
struct qla_qpair *qpair = sp->qpair;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
@@ -7011,3 +7011,36 @@ void qla_no_op_mb(struct scsi_qla_host *vha)
"Failed %s %x\n", __func__, rval);
}
}
+
+int qla_mailbox_passthru(scsi_qla_host_t *vha,
+ uint16_t *mbx_in, uint16_t *mbx_out)
+{
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ int rval = -EINVAL;
+
+ memset(&mc, 0, sizeof(mc));
+ /* Receive the contents of all 32 mailbox registers */
+ memcpy(&mcp->mb, (char *)mbx_in, (32 * sizeof(uint16_t)));
+
+ mcp->out_mb = 0xFFFFFFFF;
+ mcp->in_mb = 0xFFFFFFFF;
+
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ mcp->bufp = NULL;
+
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0xf0a2,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xf0a3, "Done %s.\n",
+ __func__);
+ /* Pass the contents of all 32 mailbox registers back */
+ memcpy(mbx_out, &mcp->mb, 32 * sizeof(uint16_t));
+ }
+
+ return rval;
+}
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 1c5da2dbd6f978..0ae1e081cb0357 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -228,6 +228,8 @@ static void qla_nvme_abort_work(struct work_struct *work)
fc_port_t *fcport = sp->fcport;
struct qla_hw_data *ha = fcport->vha->hw;
int rval, abts_done_called = 1;
+ bool io_wait_for_abort_done;
+ uint32_t handle;
ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
"%s called for sp=%p, hndl=%x on fcport=%p desc=%p deleted=%d\n",
@@ -244,12 +246,20 @@ static void qla_nvme_abort_work(struct work_struct *work)
goto out;
}
+ /*
+ * sp may not be valid after abort_command if the return code is either
+ * SUCCESS or ERR_FROM_FW, so cache the needed values here.
+ */
+ io_wait_for_abort_done = ql2xabts_wait_nvme &&
+ QLA_ABTS_WAIT_ENABLED(sp);
+ handle = sp->handle;
+
rval = ha->isp_ops->abort_command(sp);
ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
"%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
__func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
- sp, sp->handle, fcport, rval);
+ sp, handle, fcport, rval);
/*
* If async tmf is enabled, the abort callback is called only on
@@ -264,7 +274,7 @@ static void qla_nvme_abort_work(struct work_struct *work)
* are waited until ABTS complete. This kref is decreased
* at qla24xx_abort_sp_done function.
*/
- if (abts_done_called && ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(sp))
+ if (abts_done_called && io_wait_for_abort_done)
return;
out:
/* kref_get was done before work was schedule. */
@@ -389,6 +399,7 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
uint16_t avail_dsds;
struct dsd64 *cur_dsd;
struct req_que *req = NULL;
+ struct rsp_que *rsp = NULL;
struct scsi_qla_host *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
struct qla_qpair *qpair = sp->qpair;
@@ -400,6 +411,7 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
/* Setup qpair pointers */
req = qpair->req;
+ rsp = qpair->rsp;
tot_dsds = fd->sg_cnt;
/* Acquire qpair specific lock */
@@ -561,6 +573,10 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
/* Set chip new ring index. */
wrt_reg_dword(req->req_q_in, req->ring_index);
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla24xx_process_response_queue(vha, rsp);
+
queuing_error:
spin_unlock_irqrestore(&qpair->qp_lock, flags);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index d2e40aaba734de..03ff2596715b9a 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1258,6 +1258,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
uint32_t ratov_j;
struct qla_qpair *qpair;
unsigned long flags;
+ int fast_fail_status = SUCCESS;
if (qla2x00_isp_reg_stat(ha)) {
ql_log(ql_log_info, vha, 0x8042,
@@ -1266,9 +1267,10 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
return FAILED;
}
+ /* Save any FAST_IO_FAIL value to return later if abort succeeds */
ret = fc_block_scsi_eh(cmd);
if (ret != 0)
- return ret;
+ fast_fail_status = ret;
sp = scsi_cmd_priv(cmd);
qpair = sp->qpair;
@@ -1276,7 +1278,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
vha->cmd_timeout_cnt++;
if ((sp->fcport && sp->fcport->deleted) || !qpair)
- return SUCCESS;
+ return fast_fail_status != SUCCESS ? fast_fail_status : FAILED;
spin_lock_irqsave(qpair->qp_lock_ptr, flags);
sp->comp = &comp;
@@ -1311,7 +1313,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
__func__, ha->r_a_tov/10);
ret = FAILED;
} else {
- ret = SUCCESS;
+ ret = fast_fail_status;
}
break;
default:
@@ -2794,6 +2796,16 @@ qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
return atomic_read(&vha->loop_state) == LOOP_READY;
}
+static void qla_heartbeat_work_fn(struct work_struct *work)
+{
+ struct qla_hw_data *ha = container_of(work,
+ struct qla_hw_data, heartbeat_work);
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+
+ if (!ha->flags.mbox_busy && base_vha->flags.init_done)
+ qla_no_op_mb(base_vha);
+}
+
static void qla2x00_iocb_work_fn(struct work_struct *work)
{
struct scsi_qla_host *vha = container_of(work,
@@ -3232,6 +3244,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->transportt, sht->vendor_id);
INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
+ INIT_WORK(&ha->heartbeat_work, qla_heartbeat_work_fn);
/* Set up the irqs */
ret = qla2x00_request_irqs(ha, rsp);
@@ -3364,6 +3377,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->can_queue, base_vha->req,
base_vha->mgmt_svr_loop_id, host->sg_tablesize);
+ /* Check whether the firmware supports MQ for ISP25xx */
+ if (IS_QLA25XX(ha) && !(ha->fw_attributes & BIT_6))
+ ha->mqenable = 0;
+
if (ha->mqenable) {
bool startit = false;
@@ -7114,17 +7131,6 @@ intr_on_check:
qla2x00_lip_reset(base_vha);
}
- if (test_bit(HEARTBEAT_CHK, &base_vha->dpc_flags)) {
- /*
- * if there is a mb in progress then that's
- * enough of a check to see if fw is still ticking.
- */
- if (!ha->flags.mbox_busy && base_vha->flags.init_done)
- qla_no_op_mb(base_vha);
-
- clear_bit(HEARTBEAT_CHK, &base_vha->dpc_flags);
- }
-
ha->dpc_active = 0;
end_loop:
set_current_state(TASK_INTERRUPTIBLE);
@@ -7183,57 +7189,51 @@ qla2x00_rst_aen(scsi_qla_host_t *vha)
static bool qla_do_heartbeat(struct scsi_qla_host *vha)
{
- u64 cmd_cnt, prev_cmd_cnt;
- bool do_hb = false;
struct qla_hw_data *ha = vha->hw;
- int i;
+ u32 cmpl_cnt;
+ u16 i;
+ bool do_heartbeat = false;
- /* if cmds are still pending down in fw, then do hb */
- if (ha->base_qpair->cmd_cnt != ha->base_qpair->cmd_completion_cnt) {
- do_hb = true;
+ /*
+ * Allow do_heartbeat only if we don't have any active interrupts,
+ * but there are still IOs outstanding with firmware.
+ */
+ cmpl_cnt = ha->base_qpair->cmd_completion_cnt;
+ if (cmpl_cnt == ha->base_qpair->prev_completion_cnt &&
+ cmpl_cnt != ha->base_qpair->cmd_cnt) {
+ do_heartbeat = true;
goto skip;
}
+ ha->base_qpair->prev_completion_cnt = cmpl_cnt;
for (i = 0; i < ha->max_qpairs; i++) {
- if (ha->queue_pair_map[i] &&
- ha->queue_pair_map[i]->cmd_cnt !=
- ha->queue_pair_map[i]->cmd_completion_cnt) {
- do_hb = true;
- break;
+ if (ha->queue_pair_map[i]) {
+ cmpl_cnt = ha->queue_pair_map[i]->cmd_completion_cnt;
+ if (cmpl_cnt == ha->queue_pair_map[i]->prev_completion_cnt &&
+ cmpl_cnt != ha->queue_pair_map[i]->cmd_cnt) {
+ do_heartbeat = true;
+ break;
+ }
+ ha->queue_pair_map[i]->prev_completion_cnt = cmpl_cnt;
}
}
skip:
- prev_cmd_cnt = ha->prev_cmd_cnt;
- cmd_cnt = ha->base_qpair->cmd_cnt;
- for (i = 0; i < ha->max_qpairs; i++) {
- if (ha->queue_pair_map[i])
- cmd_cnt += ha->queue_pair_map[i]->cmd_cnt;
- }
- ha->prev_cmd_cnt = cmd_cnt;
-
- if (!do_hb && ((cmd_cnt - prev_cmd_cnt) > 50))
- /*
- * IOs are completing before periodic hb check.
- * IOs seems to be running, do hb for sanity check.
- */
- do_hb = true;
-
- return do_hb;
+ return do_heartbeat;
}
static void qla_heart_beat(struct scsi_qla_host *vha)
{
+ struct qla_hw_data *ha = vha->hw;
+
if (vha->vp_idx)
return;
if (vha->hw->flags.eeh_busy || qla2x00_chip_is_down(vha))
return;
- if (qla_do_heartbeat(vha)) {
- set_bit(HEARTBEAT_CHK, &vha->dpc_flags);
- qla2xxx_wake_dpc(vha);
- }
+ if (qla_do_heartbeat(vha))
+ queue_work(ha->wq, &ha->heartbeat_work);
}
/**************************************************************************
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 055040cbef9b83..4b117165bf8b27 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -6,9 +6,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.02.06.200-k"
+#define QLA2XXX_VERSION "10.02.07.100-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 2
-#define QLA_DRIVER_PATCH_VER 6
-#define QLA_DRIVER_BETA_VER 200
+#define QLA_DRIVER_PATCH_VER 7
+#define QLA_DRIVER_BETA_VER 100
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 432df76e6318a6..565e8aa6319d42 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -199,3 +199,12 @@ config SCSI_UFS_FAULT_INJECTION
help
Enable fault injection support in the UFS driver. This makes it easier
to test the UFS error handler and abort handler.
+
+config SCSI_UFS_HWMON
+ bool "UFS Temperature Notification"
+ depends on SCSI_UFSHCD && HWMON
+ help
+ This provides support for UFS hardware monitoring. If enabled,
+ a hardware monitoring device will be created for the UFS device.
+
+ If unsure, say N.
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index c407da9b517132..966048875b5038 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -10,6 +10,7 @@ ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o
ufshcd-core-$(CONFIG_SCSI_UFS_HPB) += ufshpb.o
ufshcd-core-$(CONFIG_SCSI_UFS_FAULT_INJECTION) += ufs-fault-injection.o
+ufshcd-core-$(CONFIG_SCSI_UFS_HWMON) += ufs-hwmon.o
obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o
obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o
diff --git a/drivers/scsi/ufs/ufs-hwmon.c b/drivers/scsi/ufs/ufs-hwmon.c
new file mode 100644
index 00000000000000..74855491dc8f6c
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-hwmon.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * UFS hardware monitoring support
+ * Copyright (c) 2021, Western Digital Corporation
+ */
+
+#include <linux/hwmon.h>
+#include <linux/units.h>
+
+#include "ufshcd.h"
+
+struct ufs_hwmon_data {
+ struct ufs_hba *hba;
+ u8 mask;
+};
+
+static int ufs_read_temp_enable(struct ufs_hba *hba, u8 mask, long *val)
+{
+ u32 ee_mask;
+ int err;
+
+ err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, QUERY_ATTR_IDN_EE_CONTROL, 0, 0,
+ &ee_mask);
+ if (err)
+ return err;
+
+ *val = (mask & ee_mask & MASK_EE_TOO_HIGH_TEMP) || (mask & ee_mask & MASK_EE_TOO_LOW_TEMP);
+
+ return 0;
+}
+
+static int ufs_get_temp(struct ufs_hba *hba, enum attr_idn idn, long *val)
+{
+ u32 value;
+ int err;
+
+ err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, idn, 0, 0, &value);
+ if (err)
+ return err;
+
+ if (value == 0)
+ return -ENODATA;
+
+ *val = ((long)value - 80) * MILLIDEGREE_PER_DEGREE;
+
+ return 0;
+}
+
+static int ufs_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel,
+ long *val)
+{
+ struct ufs_hwmon_data *data = dev_get_drvdata(dev);
+ struct ufs_hba *hba = data->hba;
+ int err;
+
+ down(&hba->host_sem);
+
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ up(&hba->host_sem);
+ return -EBUSY;
+ }
+
+ ufshcd_rpm_get_sync(hba);
+
+ switch (attr) {
+ case hwmon_temp_enable:
+ err = ufs_read_temp_enable(hba, data->mask, val);
+
+ break;
+ case hwmon_temp_crit:
+ err = ufs_get_temp(hba, QUERY_ATTR_IDN_HIGH_TEMP_BOUND, val);
+
+ break;
+ case hwmon_temp_lcrit:
+ err = ufs_get_temp(hba, QUERY_ATTR_IDN_LOW_TEMP_BOUND, val);
+
+ break;
+ case hwmon_temp_input:
+ err = ufs_get_temp(hba, QUERY_ATTR_IDN_CASE_ROUGH_TEMP, val);
+
+ break;
+ default:
+ err = -EOPNOTSUPP;
+
+ break;
+ }
+
+ ufshcd_rpm_put_sync(hba);
+
+ up(&hba->host_sem);
+
+ return err;
+}
+
+static int ufs_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel,
+ long val)
+{
+ struct ufs_hwmon_data *data = dev_get_drvdata(dev);
+ struct ufs_hba *hba = data->hba;
+ int err;
+
+ if (attr != hwmon_temp_enable)
+ return -EINVAL;
+
+ if (val != 0 && val != 1)
+ return -EINVAL;
+
+ down(&hba->host_sem);
+
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ up(&hba->host_sem);
+ return -EBUSY;
+ }
+
+ ufshcd_rpm_get_sync(hba);
+
+ if (val == 1)
+ err = ufshcd_update_ee_usr_mask(hba, MASK_EE_URGENT_TEMP, 0);
+ else
+ err = ufshcd_update_ee_usr_mask(hba, 0, MASK_EE_URGENT_TEMP);
+
+ ufshcd_rpm_put_sync(hba);
+
+ up(&hba->host_sem);
+
+ return err;
+}
+
+static umode_t ufs_hwmon_is_visible(const void *_data, enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ if (type != hwmon_temp)
+ return 0;
+
+ switch (attr) {
+ case hwmon_temp_enable:
+ return 0644;
+ case hwmon_temp_crit:
+ case hwmon_temp_lcrit:
+ case hwmon_temp_input:
+ return 0444;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const struct hwmon_channel_info *ufs_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_ENABLE | HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_LCRIT),
+ NULL
+};
+
+static const struct hwmon_ops ufs_hwmon_ops = {
+ .is_visible = ufs_hwmon_is_visible,
+ .read = ufs_hwmon_read,
+ .write = ufs_hwmon_write,
+};
+
+static const struct hwmon_chip_info ufs_hwmon_hba_info = {
+ .ops = &ufs_hwmon_ops,
+ .info = ufs_hwmon_info,
+};
+
+void ufs_hwmon_probe(struct ufs_hba *hba, u8 mask)
+{
+ struct device *dev = hba->dev;
+ struct ufs_hwmon_data *data;
+ struct device *hwmon;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return;
+
+ data->hba = hba;
+ data->mask = mask;
+
+ hwmon = hwmon_device_register_with_info(dev, "ufs", data, &ufs_hwmon_hba_info, NULL);
+ if (IS_ERR(hwmon)) {
+ dev_warn(dev, "Failed to instantiate hwmon device\n");
+ kfree(data);
+ return;
+ }
+
+ hba->hwmon_device = hwmon;
+}
+
+void ufs_hwmon_remove(struct ufs_hba *hba)
+{
+ struct ufs_hwmon_data *data;
+
+ if (!hba->hwmon_device)
+ return;
+
+ data = dev_get_drvdata(hba->hwmon_device);
+ hwmon_device_unregister(hba->hwmon_device);
+ hba->hwmon_device = NULL;
+ kfree(data);
+}
+
+void ufs_hwmon_notify_event(struct ufs_hba *hba, u8 ee_mask)
+{
+ if (!hba->hwmon_device)
+ return;
+
+ if (ee_mask & MASK_EE_TOO_HIGH_TEMP)
+ hwmon_notify_event(hba->hwmon_device, hwmon_temp, hwmon_temp_max_alarm, 0);
+
+ if (ee_mask & MASK_EE_TOO_LOW_TEMP)
+ hwmon_notify_event(hba->hwmon_device, hwmon_temp, hwmon_temp_min_alarm, 0);
+}
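Once ufs_hwmon_probe() registers the device, readings are exposed through the generic hwmon sysfs ABI, so no UFS-specific tooling is needed. A small sketch follows, assuming the sensor shows up as hwmon0/temp1 (both indices are hypothetical); as in ufs_get_temp() above, the driver already strips the 80-degree offset from the raw attribute and reports millidegrees Celsius.

/* Sketch: read the UFS case temperature via the standard hwmon sysfs ABI.
 * The hwmon0/temp1 indices are hypothetical; values are in millidegrees C. */
#include <stdio.h>

int main(void)
{
	long temp_mdeg;
	FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%ld", &temp_mdeg) == 1)
		printf("UFS Tcase: %ld mC\n", temp_mdeg);
	fclose(f);
	return 0;
}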
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index 80b3545dd17d64..d2d7e76c5ec85d 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -296,6 +296,21 @@ static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
host->ref_clk_ungating_wait_us = ungating_us;
}
+static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ if (((host->ip_ver >> 16) & 0xFF) >= 0x36) {
+ ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL);
+ ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0);
+ ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1);
+ ufshcd_writel(hba, 0xaaaaaaaa, REG_UFS_DEBUG_SEL_B2);
+ ufshcd_writel(hba, 0xffffffff, REG_UFS_DEBUG_SEL_B3);
+ } else {
+ ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
+ }
+}
+
static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
unsigned long max_wait_ms)
{
@@ -305,7 +320,7 @@ static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
timeout = ktime_add_ms(ktime_get(), max_wait_ms);
do {
time_checked = ktime_get();
- ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
+ ufs_mtk_dbg_sel(hba);
val = ufshcd_readl(hba, REG_UFS_PROBE);
val = val >> 28;
@@ -689,6 +704,8 @@ static int ufs_mtk_init(struct ufs_hba *hba)
ufs_mtk_mphy_power_on(hba, true);
ufs_mtk_setup_clocks(hba, true, POST_CHANGE);
+ host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);
+
goto out;
out_variant_clear:
@@ -1001,7 +1018,7 @@ static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
"MPHY Ctrl ");
/* Direct debugging information to REG_MTK_PROBE */
- ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
+ ufs_mtk_dbg_sel(hba);
ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
}
diff --git a/drivers/scsi/ufs/ufs-mediatek.h b/drivers/scsi/ufs/ufs-mediatek.h
index 3f0d3bb769e89b..524c8e2c1e6ffd 100644
--- a/drivers/scsi/ufs/ufs-mediatek.h
+++ b/drivers/scsi/ufs/ufs-mediatek.h
@@ -15,9 +15,14 @@
#define REG_UFS_REFCLK_CTRL 0x144
#define REG_UFS_EXTREG 0x2100
#define REG_UFS_MPHYCTRL 0x2200
+#define REG_UFS_MTK_IP_VER 0x2240
#define REG_UFS_REJECT_MON 0x22AC
#define REG_UFS_DEBUG_SEL 0x22C0
#define REG_UFS_PROBE 0x22C8
+#define REG_UFS_DEBUG_SEL_B0 0x22D0
+#define REG_UFS_DEBUG_SEL_B1 0x22D4
+#define REG_UFS_DEBUG_SEL_B2 0x22D8
+#define REG_UFS_DEBUG_SEL_B3 0x22DC
/*
* Ref-clk control
@@ -113,6 +118,7 @@ struct ufs_mtk_host {
bool ref_clk_enabled;
u16 ref_clk_ungating_wait_us;
u16 ref_clk_gating_wait_us;
+ u32 ip_ver;
};
#endif /* !_UFS_MEDIATEK_H */
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 9d9770f1db4fb8..92d4c61fc9d0a4 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -888,7 +888,6 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- int err = 0;
/*
* In case ufs_qcom_init() is not yet done, simply ignore.
@@ -916,7 +915,7 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
break;
}
- return err;
+ return 0;
}
static int
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 8c6b38b1b14221..0bfdca3e648eb0 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -152,6 +152,9 @@ enum attr_idn {
QUERY_ATTR_IDN_PSA_STATE = 0x15,
QUERY_ATTR_IDN_PSA_DATA_SIZE = 0x16,
QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME = 0x17,
+ QUERY_ATTR_IDN_CASE_ROUGH_TEMP = 0x18,
+ QUERY_ATTR_IDN_HIGH_TEMP_BOUND = 0x19,
+ QUERY_ATTR_IDN_LOW_TEMP_BOUND = 0x1A,
QUERY_ATTR_IDN_WB_FLUSH_STATUS = 0x1C,
QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE = 0x1D,
QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST = 0x1E,
@@ -338,6 +341,9 @@ enum {
/* Possible values for dExtendedUFSFeaturesSupport */
enum {
+ UFS_DEV_LOW_TEMP_NOTIF = BIT(4),
+ UFS_DEV_HIGH_TEMP_NOTIF = BIT(5),
+ UFS_DEV_EXT_TEMP_NOTIF = BIT(6),
UFS_DEV_HPB_SUPPORT = BIT(7),
UFS_DEV_WRITE_BOOSTER_SUP = BIT(8),
};
@@ -370,6 +376,7 @@ enum {
MASK_EE_WRITEBOOSTER_EVENT = BIT(5),
MASK_EE_PERFORMANCE_THROTTLING = BIT(6),
};
+#define MASK_EE_URGENT_TEMP (MASK_EE_TOO_HIGH_TEMP | MASK_EE_TOO_LOW_TEMP)
/* Background operation status */
enum bkops_status {
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 188de6f9105000..5dbf2d7458e806 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -5611,6 +5611,24 @@ out:
__func__, err);
}
+static void ufshcd_temp_exception_event_handler(struct ufs_hba *hba, u16 status)
+{
+ u32 value;
+
+ if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 0, 0, &value))
+ return;
+
+ dev_info(hba->dev, "exception Tcase %d\n", value - 80);
+
+ ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);
+
+ /*
+ * A placeholder for platform vendors to add whatever additional
+ * steps are required.
+ */
+}
+
static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
{
u8 index;
@@ -5790,6 +5808,9 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS)
ufshcd_bkops_exception_event_handler(hba);
+ if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP)
+ ufshcd_temp_exception_event_handler(hba, status);
+
ufs_debugfs_exception_event(hba, status);
out:
ufshcd_scsi_unblock_requests(hba);
@@ -7456,6 +7477,29 @@ wb_disabled:
hba->caps &= ~UFSHCD_CAP_WB_EN;
}
+static void ufshcd_temp_notif_probe(struct ufs_hba *hba, u8 *desc_buf)
+{
+ struct ufs_dev_info *dev_info = &hba->dev_info;
+ u32 ext_ufs_feature;
+ u8 mask = 0;
+
+ if (!(hba->caps & UFSHCD_CAP_TEMP_NOTIF) || dev_info->wspecversion < 0x300)
+ return;
+
+ ext_ufs_feature = get_unaligned_be32(desc_buf + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
+
+ if (ext_ufs_feature & UFS_DEV_LOW_TEMP_NOTIF)
+ mask |= MASK_EE_TOO_LOW_TEMP;
+
+ if (ext_ufs_feature & UFS_DEV_HIGH_TEMP_NOTIF)
+ mask |= MASK_EE_TOO_HIGH_TEMP;
+
+ if (mask) {
+ ufshcd_enable_ee(hba, mask);
+ ufs_hwmon_probe(hba, mask);
+ }
+}
+
void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups)
{
struct ufs_dev_fix *f;
@@ -7551,6 +7595,8 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
ufshcd_wb_probe(hba, desc_buf);
+ ufshcd_temp_notif_probe(hba, desc_buf);
+
/*
* ufshcd_read_string_desc returns size of the string
* reset the error value
@@ -9397,6 +9443,7 @@ void ufshcd_remove(struct ufs_hba *hba)
{
if (hba->sdev_ufs_device)
ufshcd_rpm_get_sync(hba);
+ ufs_hwmon_remove(hba);
ufs_bsg_remove(hba);
ufshpb_remove(hba);
ufs_sysfs_remove_nodes(hba->dev);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index f0da5d3db1fa70..05ba64b9ddab34 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -653,6 +653,12 @@ enum ufshcd_caps {
* in order to exit DeepSleep state.
*/
UFSHCD_CAP_DEEPSLEEP = 1 << 10,
+
+ /*
+ * This capability allows the host controller driver to use temperature
+ * notification if it is supported by the UFS device.
+ */
+ UFSHCD_CAP_TEMP_NOTIF = 1 << 11,
};
struct ufs_hba_variant_params {
@@ -791,6 +797,10 @@ struct ufs_hba {
struct scsi_device *sdev_ufs_device;
struct scsi_device *sdev_rpmb;
+#ifdef CONFIG_SCSI_UFS_HWMON
+ struct device *hwmon_device;
+#endif
+
enum ufs_dev_pwr_mode curr_dev_pwr_mode;
enum uic_link_state uic_link_state;
/* Desired UFS power management level during runtime PM */
@@ -1054,6 +1064,16 @@ static inline u8 ufshcd_wb_get_query_index(struct ufs_hba *hba)
return 0;
}
+#ifdef CONFIG_SCSI_UFS_HWMON
+void ufs_hwmon_probe(struct ufs_hba *hba, u8 mask);
+void ufs_hwmon_remove(struct ufs_hba *hba);
+void ufs_hwmon_notify_event(struct ufs_hba *hba, u8 ee_mask);
+#else
+static inline void ufs_hwmon_probe(struct ufs_hba *hba, u8 mask) {}
+static inline void ufs_hwmon_remove(struct ufs_hba *hba) {}
+static inline void ufs_hwmon_notify_event(struct ufs_hba *hba, u8 ee_mask) {}
+#endif
+
#ifdef CONFIG_PM
extern int ufshcd_runtime_suspend(struct device *dev);
extern int ufshcd_runtime_resume(struct device *dev);
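UFSHCD_CAP_TEMP_NOTIF is opt-in: ufshcd_temp_notif_probe() only enables the exception events and calls ufs_hwmon_probe() when a host driver sets this capability and the attached device reports UFS 3.0 or later with the low/high temperature notification bits. A minimal sketch of how a variant driver might opt in from its init hook follows; the driver and function names are hypothetical.

/* Hypothetical UFS host variant driver opting in to temperature
 * notification; the ufshcd core does the rest during device probe. */
#include "ufshcd.h"

static int example_ufs_hba_init(struct ufs_hba *hba)
{
	/* Ask ufshcd_temp_notif_probe() to enable the TOO_HIGH/TOO_LOW
	 * exception events and register the hwmon device if the attached
	 * UFS device advertises the feature. */
	hba->caps |= UFSHCD_CAP_TEMP_NOTIF;

	return 0;
}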
diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
index 589af5f6b940aa..386e24e023a671 100644
--- a/drivers/scsi/ufs/ufshpb.c
+++ b/drivers/scsi/ufs/ufshpb.c
@@ -2648,11 +2648,11 @@ static int ufshpb_get_lu_info(struct ufs_hba *hba, int lun,
ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_UNIT, &size);
- pm_runtime_get_sync(hba->dev);
+ ufshcd_rpm_get_sync(hba);
ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
QUERY_DESC_IDN_UNIT, lun, 0,
desc_buf, &size);
- pm_runtime_put_sync(hba->dev);
+ ufshcd_rpm_put_sync(hba);
if (ret) {
dev_err(hba->dev,
@@ -2875,10 +2875,10 @@ void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf)
if (version == HPB_SUPPORT_LEGACY_VERSION)
hpb_dev_info->is_legacy = true;
- pm_runtime_get_sync(hba->dev);
+ ufshcd_rpm_get_sync(hba);
ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_hpb_single_cmd);
- pm_runtime_put_sync(hba->dev);
+ ufshcd_rpm_put_sync(hba);
if (ret)
dev_err(hba->dev, "%s: idn: read max size of single hpb cmd query request failed",
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index d4fe7cb2bd0032..6bb20aa9c5bc51 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -295,8 +295,7 @@ out:
return -EINVAL;
}
-static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op *xop,
- unsigned char *p)
+static int target_xcopy_parse_segdesc_02(struct xcopy_op *xop, unsigned char *p)
{
unsigned char *desc = p;
int dc = (desc[1] & 0x02);
@@ -332,9 +331,9 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op
return 0;
}
-static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd,
- struct xcopy_op *xop, unsigned char *p,
- unsigned int sdll, sense_reason_t *sense_ret)
+static int target_xcopy_parse_segment_descriptors(struct xcopy_op *xop,
+ unsigned char *p, unsigned int sdll,
+ sense_reason_t *sense_ret)
{
unsigned char *desc = p;
unsigned int start = 0;
@@ -362,7 +361,7 @@ static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd,
*/
switch (desc[0]) {
case 0x02:
- rc = target_xcopy_parse_segdesc_02(se_cmd, xop, desc);
+ rc = target_xcopy_parse_segdesc_02(xop, desc);
if (rc < 0)
goto out;
@@ -840,8 +839,7 @@ static sense_reason_t target_parse_xcopy_cmd(struct xcopy_op *xop)
*/
seg_desc = &p[16] + tdll;
- rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc,
- sdll, &ret);
+ rc = target_xcopy_parse_segment_descriptors(xop, seg_desc, sdll, &ret);
if (rc <= 0)
goto out;
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index eaf04c9a1dfcb5..a2315aac93c7e1 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -10,7 +10,6 @@
#include <linux/timer.h>
#include <linux/scatterlist.h>
#include <scsi/scsi_device.h>
-#include <scsi/scsi_host.h>
#include <scsi/scsi_request.h>
struct Scsi_Host;