author    Linus Torvalds <torvalds@linux-foundation.org>  2021-04-09 10:09:51 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2021-04-09 10:09:51 -0700
commit    189fefc7a4f0401d0f799de96b772319a6541fc1 (patch)
tree      e56075286127609d34a3103eae695c43e6d4b0d9
parent    3766fcf5d318046e0ae58659e03ead35d40cb9dd (diff)
parent    bc04d93ea30a0a8eb2a2648b848cef35d1f6f798 (diff)
Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
Pull vdpa/mlx5 fixes from Michael Tsirkin:
 "Last minute fixes. These all look like something we are better off
  having than not ..."

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  vdpa/mlx5: Fix suspend/resume index restoration
  vdpa/mlx5: Fix wrong use of bit numbers
  vdpa/mlx5: Retrieve BAR address suitable any function
  vdpa/mlx5: Use the correct dma device when registering memory
  vdpa/mlx5: should exclude header length and fcs from mtu
 drivers/vdpa/mlx5/core/mlx5_vdpa.h |  4
 drivers/vdpa/mlx5/core/mr.c        |  9
 drivers/vdpa/mlx5/core/resources.c |  3
 drivers/vdpa/mlx5/net/mlx5_vnet.c  | 40
 4 files changed, 37 insertions(+), 19 deletions(-)
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index 08f742fd24099e..b6cc53ba980ccc 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -4,9 +4,13 @@
#ifndef __MLX5_VDPA_H__
#define __MLX5_VDPA_H__
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
#include <linux/vdpa.h>
#include <linux/mlx5/driver.h>
+#define MLX5V_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
+
struct mlx5_vdpa_direct_mr {
u64 start;
u64 end;
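
Aside (not part of the patch): the new MLX5V_ETH_HARD_MTU constant is the per-frame overhead the NIC counts in its port MTU but that must not be reported to virtio as payload MTU: the Ethernet header (ETH_HLEN, 14 bytes), one VLAN tag (VLAN_HLEN, 4 bytes) and the frame check sequence (ETH_FCS_LEN, 4 bytes), 22 bytes in total. The standalone sketch below, with those standard values spelled out and an assumed example port MTU of 1522, shows the subtraction that query_mtu() performs later in this patch.

/* Standalone sketch, not driver code: checks the MLX5V_ETH_HARD_MTU
 * arithmetic using the standard values of the kernel constants. */
#include <stdio.h>

#define ETH_HLEN      14  /* dst MAC + src MAC + ethertype */
#define VLAN_HLEN      4  /* one 802.1Q tag */
#define ETH_FCS_LEN    4  /* frame check sequence */
#define MLX5V_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)

int main(void)
{
	unsigned short hw_mtu = 1522;  /* assumed example port MTU */

	/* Same subtraction query_mtu() does before reporting the MTU:
	 * 1522 - 22 = 1500. */
	printf("virtio mtu = %u\n", hw_mtu - MLX5V_ETH_HARD_MTU);
	return 0;
}
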
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
index d300f799efcd1f..3908ff28eec00e 100644
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -219,6 +219,11 @@ static void destroy_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_m
mlx5_vdpa_destroy_mkey(mvdev, &mkey->mkey);
}
+static struct device *get_dma_device(struct mlx5_vdpa_dev *mvdev)
+{
+ return &mvdev->mdev->pdev->dev;
+}
+
static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr,
struct vhost_iotlb *iotlb)
{
@@ -234,7 +239,7 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr
u64 pa;
u64 paend;
struct scatterlist *sg;
- struct device *dma = mvdev->mdev->device;
+ struct device *dma = get_dma_device(mvdev);
for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
map; map = vhost_iotlb_itree_next(map, start, mr->end - 1)) {
@@ -291,7 +296,7 @@ err_map:
static void unmap_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
{
- struct device *dma = mvdev->mdev->device;
+ struct device *dma = get_dma_device(mvdev);
destroy_direct_mr(mvdev, mr);
dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
diff --git a/drivers/vdpa/mlx5/core/resources.c b/drivers/vdpa/mlx5/core/resources.c
index 96e6421c5d1cf8..6521cbd0f5c278 100644
--- a/drivers/vdpa/mlx5/core/resources.c
+++ b/drivers/vdpa/mlx5/core/resources.c
@@ -246,7 +246,8 @@ int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
if (err)
goto err_key;
- kick_addr = pci_resource_start(mdev->pdev, 0) + offset;
+ kick_addr = mdev->bar_addr + offset;
+
res->kick_addr = ioremap(kick_addr, PAGE_SIZE);
if (!res->kick_addr) {
err = -ENOMEM;
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 71397fdafa6a4c..4d2809c7d4e32f 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -820,7 +820,7 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->fwqp.mqp.qpn);
MLX5_SET(virtio_q, vq_ctx, queue_size, mvq->num_ent);
MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0,
- !!(ndev->mvdev.actual_features & VIRTIO_F_VERSION_1));
+ !!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_F_VERSION_1)));
MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr);
MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr);
MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr);
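
Aside (not part of the patch): the "wrong use of bit numbers" fix above, and the matching change in mlx5_vdpa_is_little_endian() further down, hinges on VIRTIO_F_VERSION_1 being a feature bit number (32), not a mask. The standalone sketch below reproduces the difference; the BIT_ULL definition mirrors the kernel macro.

/* Standalone sketch, not driver code: why `features & VIRTIO_F_VERSION_1`
 * is wrong while `features & BIT_ULL(VIRTIO_F_VERSION_1)` is right. */
#include <stdint.h>
#include <stdio.h>

#define VIRTIO_F_VERSION_1 32           /* feature bit *number* */
#define BIT_ULL(nr) (1ULL << (nr))      /* same shape as the kernel macro */

int main(void)
{
	uint64_t features = BIT_ULL(VIRTIO_F_VERSION_1);

	/* Buggy form: 32 is 0x20, so this really tests bit 5 and
	 * misreports VERSION_1 as not negotiated. */
	printf("buggy: %d\n", !!(features & VIRTIO_F_VERSION_1));

	/* Fixed form: turn the bit number into a mask first. */
	printf("fixed: %d\n", !!(features & BIT_ULL(VIRTIO_F_VERSION_1)));
	return 0;
}
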
@@ -1169,6 +1169,7 @@ static void suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *m
return;
}
mvq->avail_idx = attr.available_index;
+ mvq->used_idx = attr.used_index;
}
static void suspend_vqs(struct mlx5_vdpa_net *ndev)
@@ -1426,6 +1427,7 @@ static int mlx5_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx,
return -EINVAL;
}
+ mvq->used_idx = state->avail_index;
mvq->avail_idx = state->avail_index;
return 0;
}
@@ -1443,7 +1445,11 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa
* that cares about emulating the index after vq is stopped.
*/
if (!mvq->initialized) {
- state->avail_index = mvq->avail_idx;
+ /* Firmware returns a wrong value for the available index.
+ * Since both values should be identical, we take the value of
+ * used_idx which is reported correctly.
+ */
+ state->avail_index = mvq->used_idx;
return 0;
}
@@ -1452,7 +1458,7 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa
mlx5_vdpa_warn(mvdev, "failed to query virtqueue\n");
return err;
}
- state->avail_index = attr.available_index;
+ state->avail_index = attr.used_index;
return 0;
}
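
Aside (not part of the patch): the index-restoration changes above all follow the same idea; once a virtqueue is suspended, its available and used indices must be equal, so the reliably reported used index can stand in for both when state is saved, reported, or restored. A minimal model of that flow, with illustrative (non-driver) names, could look like:

/* Standalone sketch, not driver code: model of the save/report logic. */
#include <stdint.h>
#include <stdio.h>

struct vq_state {
	uint16_t avail_idx;
	uint16_t used_idx;
};

/* On suspend, capture both indices from the (modeled) hardware object. */
static void save_on_suspend(struct vq_state *s, uint16_t hw_avail, uint16_t hw_used)
{
	s->avail_idx = hw_avail;
	s->used_idx = hw_used;
}

/* When reporting state, prefer used_idx, which the patch treats as the
 * trustworthy value; on a quiesced queue it must equal avail_idx anyway. */
static uint16_t report_index(const struct vq_state *s)
{
	return s->used_idx;
}

int main(void)
{
	struct vq_state s;

	save_on_suspend(&s, 7, 7);
	printf("index to restore: %u\n", report_index(&s));
	return 0;
}
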
@@ -1540,21 +1546,11 @@ static void teardown_virtqueues(struct mlx5_vdpa_net *ndev)
}
}
-static void clear_virtqueues(struct mlx5_vdpa_net *ndev)
-{
- int i;
-
- for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) {
- ndev->vqs[i].avail_idx = 0;
- ndev->vqs[i].used_idx = 0;
- }
-}
-
/* TODO: cross-endian support */
static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev)
{
return virtio_legacy_is_little_endian() ||
- (mvdev->actual_features & (1ULL << VIRTIO_F_VERSION_1));
+ (mvdev->actual_features & BIT_ULL(VIRTIO_F_VERSION_1));
}
static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val)
@@ -1785,7 +1781,6 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
if (!status) {
mlx5_vdpa_info(mvdev, "performing device reset\n");
teardown_driver(ndev);
- clear_virtqueues(ndev);
mlx5_vdpa_destroy_mr(&ndev->mvdev);
ndev->mvdev.status = 0;
ndev->mvdev.mlx_features = 0;
@@ -1907,6 +1902,19 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
.free = mlx5_vdpa_free,
};
+static int query_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
+{
+ u16 hw_mtu;
+ int err;
+
+ err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
+ if (err)
+ return err;
+
+ *mtu = hw_mtu - MLX5V_ETH_HARD_MTU;
+ return 0;
+}
+
static int alloc_resources(struct mlx5_vdpa_net *ndev)
{
struct mlx5_vdpa_net_resources *res = &ndev->res;
@@ -1992,7 +2000,7 @@ static int mlx5v_probe(struct auxiliary_device *adev,
init_mvqs(ndev);
mutex_init(&ndev->reslock);
config = &ndev->config;
- err = mlx5_query_nic_vport_mtu(mdev, &ndev->mtu);
+ err = query_mtu(mdev, &ndev->mtu);
if (err)
goto err_mtu;