Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
vdpa: support virtio_map
Virtio core has switched from the DMA device to virtio_map; let's do the same for vDPA.

Signed-off-by: Jason Wang <jasowang@redhat.com>
Message-Id: <20250821064641.5025-8-jasowang@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Tested-by: Lei Yang <leiyang@redhat.com>
Reviewed-by: Eugenio Pérez <eperezma@redhat.com>
parent bee8c7c24b
commit 58aca3dbc7
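
Before the per-driver hunks below, the shape of the change in one place: struct vdpa_device no longer exposes a bare dma_dev pointer but carries a union virtio_map vmap; providers fill in vmap.dma_dev at probe time, and consumers go through vdpa_get_map() or the per-virtqueue get_vq_map op instead of vdpa_get_dma_dev()/get_vq_dma_dev. A minimal sketch of that pattern, assuming only what the diff itself shows (union virtio_map carrying a dma_dev member); the my_* names are hypothetical and not part of the patch:

        #include <linux/pci.h>
        #include <linux/vdpa.h>
        #include <linux/virtio.h>

        /* Hypothetical adapter embedding a vdpa_device, for illustration only. */
        struct my_adapter {
                struct vdpa_device vdpa;
        };

        /* Provider side: publish the DMA device through the map metadata
         * instead of the old bare dma_dev pointer. */
        static void my_publish_map(struct my_adapter *adapter, struct pci_dev *pdev)
        {
                /* was: adapter->vdpa.dma_dev = &pdev->dev; */
                adapter->vdpa.vmap.dma_dev = &pdev->dev;
        }

        /* Consumer side: fetch the map token and pull the DMA device out of
         * the union when that is what the caller actually needs. */
        static struct device *my_map_to_dma_dev(struct vdpa_device *vdpa)
        {
                union virtio_map map = vdpa_get_map(vdpa);

                return map.dma_dev;
        }
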
@@ -496,7 +496,7 @@ static int eni_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
         pci_set_master(pdev);
         pci_set_drvdata(pdev, eni_vdpa);
 
-        eni_vdpa->vdpa.dma_dev = &pdev->dev;
+        eni_vdpa->vdpa.vmap.dma_dev = &pdev->dev;
         eni_vdpa->queues = eni_vdpa_get_num_queues(eni_vdpa);
 
         eni_vdpa->vring = devm_kcalloc(&pdev->dev, eni_vdpa->queues,
@@ -713,7 +713,7 @@ static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
 
         ifcvf_mgmt_dev->adapter = adapter;
         adapter->pdev = pdev;
-        adapter->vdpa.dma_dev = &pdev->dev;
+        adapter->vdpa.vmap.dma_dev = &pdev->dev;
         adapter->vdpa.mdev = mdev;
         adapter->vf = vf;
         vdpa_dev = &adapter->vdpa;
@@ -378,7 +378,7 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr
         u64 pa, offset;
         u64 paend;
         struct scatterlist *sg;
-        struct device *dma = mvdev->vdev.dma_dev;
+        struct device *dma = mvdev->vdev.vmap.dma_dev;
 
         for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
              map; map = vhost_iotlb_itree_next(map, mr->start, mr->end - 1)) {
@@ -432,7 +432,7 @@ err_map:
 
 static void unmap_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
 {
-        struct device *dma = mvdev->vdev.dma_dev;
+        struct device *dma = mvdev->vdev.vmap.dma_dev;
 
         destroy_direct_mr(mvdev, mr);
         dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
@@ -3395,14 +3395,17 @@ static int mlx5_vdpa_reset_map(struct vdpa_device *vdev, unsigned int asid)
         return err;
 }
 
-static struct device *mlx5_get_vq_dma_dev(struct vdpa_device *vdev, u16 idx)
+static union virtio_map mlx5_get_vq_map(struct vdpa_device *vdev, u16 idx)
 {
         struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+        union virtio_map map;
 
         if (is_ctrl_vq_idx(mvdev, idx))
-                return &vdev->dev;
+                map.dma_dev = &vdev->dev;
+        else
+                map.dma_dev = mvdev->vdev.vmap.dma_dev;
 
-        return mvdev->vdev.dma_dev;
+        return map;
 }
 
 static void free_irqs(struct mlx5_vdpa_net *ndev)
@@ -3686,7 +3689,7 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
         .set_map = mlx5_vdpa_set_map,
         .reset_map = mlx5_vdpa_reset_map,
         .set_group_asid = mlx5_set_group_asid,
-        .get_vq_dma_dev = mlx5_get_vq_dma_dev,
+        .get_vq_map = mlx5_get_vq_map,
         .free = mlx5_vdpa_free,
         .suspend = mlx5_vdpa_suspend,
         .resume = mlx5_vdpa_resume, /* Op disabled if not supported. */
@@ -3965,7 +3968,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
         }
 
         ndev->mvdev.mlx_features = device_features;
-        mvdev->vdev.dma_dev = &mdev->pdev->dev;
+        mvdev->vdev.vmap.dma_dev = &mdev->pdev->dev;
         err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
         if (err)
                 goto err_alloc;
@@ -516,7 +516,7 @@ static int octep_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
         }
 
         oct_vdpa->pdev = pdev;
-        oct_vdpa->vdpa.dma_dev = &pdev->dev;
+        oct_vdpa->vdpa.vmap.dma_dev = &pdev->dev;
         oct_vdpa->vdpa.mdev = mdev;
         oct_vdpa->oct_hw = oct_hw;
         vdpa_dev = &oct_vdpa->vdpa;
@@ -643,7 +643,7 @@ static int pds_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
 
         pdev = vdpa_aux->padev->vf_pdev;
         dma_dev = &pdev->dev;
-        pdsv->vdpa_dev.dma_dev = dma_dev;
+        pdsv->vdpa_dev.vmap.dma_dev = dma_dev;
 
         status = pds_vdpa_get_status(&pdsv->vdpa_dev);
         if (status == 0xff) {
@@ -1052,8 +1052,8 @@ static int snet_vdpa_probe_vf(struct pci_dev *pdev)
          */
         snet_reserve_irq_idx(pf_irqs ? pdev_pf : pdev, snet);
 
-        /*set DMA device*/
-        snet->vdpa.dma_dev = &pdev->dev;
+        /* set map metadata */
+        snet->vdpa.vmap.dma_dev = &pdev->dev;
 
         /* Register VDPA device */
         ret = vdpa_register_device(&snet->vdpa, snet->cfg->vq_num);
@@ -151,7 +151,7 @@ static void vdpa_release_dev(struct device *d)
  * Driver should use vdpa_alloc_device() wrapper macro instead of
  * using this directly.
  *
- * Return: Returns an error when parent/config/dma_dev is not set or fail to get
+ * Return: Returns an error when parent/config/map is not set or fail to get
  * ida.
  */
 struct vdpa_device *__vdpa_alloc_device(struct device *parent,
@@ -272,7 +272,7 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr,
                 vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
                                  &vdpasim->iommu_lock);
 
-        vdpasim->vdpa.dma_dev = dev;
+        vdpasim->vdpa.vmap.dma_dev = dev;
 
         return vdpasim;
 
@@ -2022,7 +2022,7 @@ static int vduse_dev_init_vdpa(struct vduse_dev *dev, const char *name)
                 return ret;
         }
         set_dma_ops(&vdev->vdpa.dev, &vduse_dev_dma_ops);
-        vdev->vdpa.dma_dev = &vdev->vdpa.dev;
+        vdev->vdpa.vmap.dma_dev = &vdev->vdpa.dev;
         vdev->vdpa.mdev = &vduse_mgmt->mgmt_dev;
 
         return 0;
@@ -520,7 +520,7 @@ static int vp_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
 
         vp_vdpa_mgtdev->vp_vdpa = vp_vdpa;
 
-        vp_vdpa->vdpa.dma_dev = &pdev->dev;
+        vp_vdpa->vdpa.vmap.dma_dev = &pdev->dev;
         vp_vdpa->queues = vp_modern_get_num_queues(mdev);
         vp_vdpa->mdev = mdev;
 
@@ -1318,7 +1318,8 @@ static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
 {
         struct vdpa_device *vdpa = v->vdpa;
         const struct vdpa_config_ops *ops = vdpa->config;
-        struct device *dma_dev = vdpa_get_dma_dev(vdpa);
+        union virtio_map map = vdpa_get_map(vdpa);
+        struct device *dma_dev = map.dma_dev;
         int ret;
 
         /* Device want to do DMA by itself */
@@ -1353,7 +1354,8 @@ err_attach:
 static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
 {
         struct vdpa_device *vdpa = v->vdpa;
-        struct device *dma_dev = vdpa_get_dma_dev(vdpa);
+        union virtio_map map = vdpa_get_map(vdpa);
+        struct device *dma_dev = map.dma_dev;
 
         if (v->domain) {
                 iommu_detach_device(v->domain, dma_dev);
@@ -133,7 +133,6 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
                      const char *name, bool ctx)
 {
         struct vdpa_device *vdpa = vd_get_vdpa(vdev);
-        struct device *dma_dev;
         const struct vdpa_config_ops *ops = vdpa->config;
         bool (*notify)(struct virtqueue *vq) = virtio_vdpa_notify;
         struct vdpa_callback cb;
@@ -182,11 +181,11 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
         /* Create the vring */
         align = ops->get_vq_align(vdpa);
 
-        if (ops->get_vq_dma_dev)
-                dma_dev = ops->get_vq_dma_dev(vdpa, index);
+        if (ops->get_vq_map)
+                map = ops->get_vq_map(vdpa, index);
         else
-                dma_dev = vdpa_get_dma_dev(vdpa);
-        map.dma_dev = dma_dev;
+                map = vdpa_get_map(vdpa);
+
         vq = vring_create_virtqueue_map(index, max_num, align, vdev,
                                         true, may_reduce_num, ctx,
                                         notify, callback, name, map);
@@ -467,7 +466,7 @@ static int virtio_vdpa_probe(struct vdpa_device *vdpa)
         if (!vd_dev)
                 return -ENOMEM;
 
-        vd_dev->vdev.dev.parent = vdpa_get_dma_dev(vdpa);
+        vd_dev->vdev.dev.parent = vdpa_get_map(vdpa).dma_dev;
         vd_dev->vdev.dev.release = virtio_vdpa_release_dev;
         vd_dev->vdev.config = &virtio_vdpa_config_ops;
         vd_dev->vdpa = vdpa;
@@ -5,6 +5,7 @@
 #include <linux/kernel.h>
 #include <linux/device.h>
 #include <linux/interrupt.h>
+#include <linux/virtio.h>
 #include <linux/vhost_iotlb.h>
 #include <linux/virtio_net.h>
 #include <linux/virtio_blk.h>
@@ -70,7 +71,7 @@ struct vdpa_mgmt_dev;
 /**
  * struct vdpa_device - representation of a vDPA device
  * @dev: underlying device
- * @dma_dev: the actual device that is performing DMA
+ * @vmap: the metadata passed to upper layer to be used for mapping
 * @driver_override: driver name to force a match; do not set directly,
 *                   because core frees it; use driver_set_override() to
 *                   set or clear it.
@@ -87,7 +88,7 @@ struct vdpa_mgmt_dev;
  */
 struct vdpa_device {
         struct device dev;
-        struct device *dma_dev;
+        union virtio_map vmap;
         const char *driver_override;
         const struct vdpa_config_ops *config;
         struct rw_semaphore cf_lock; /* Protects get/set config */
@@ -352,11 +353,11 @@ struct vdpa_map_file {
  *                              @vdev: vdpa device
 *                              @asid: address space identifier
 *                              Returns integer: success (0) or error (< 0)
- * @get_vq_dma_dev:             Get the dma device for a specific
+ * @get_vq_map:                 Get the map metadata for a specific
 *                              virtqueue (optional)
 *                              @vdev: vdpa device
 *                              @idx: virtqueue index
-*                              Returns pointer to structure device or error (NULL)
+*                              Returns map token union error (NULL)
  * @bind_mm:                    Bind the device to a specific address space
 *                              so the vDPA framework can use VA when this
 *                              callback is implemented. (optional)
@@ -436,7 +437,7 @@ struct vdpa_config_ops {
         int (*reset_map)(struct vdpa_device *vdev, unsigned int asid);
         int (*set_group_asid)(struct vdpa_device *vdev, unsigned int group,
                               unsigned int asid);
-        struct device *(*get_vq_dma_dev)(struct vdpa_device *vdev, u16 idx);
+        union virtio_map (*get_vq_map)(struct vdpa_device *vdev, u16 idx);
         int (*bind_mm)(struct vdpa_device *vdev, struct mm_struct *mm);
         void (*unbind_mm)(struct vdpa_device *vdev);
 
@@ -520,9 +521,9 @@ static inline void vdpa_set_drvdata(struct vdpa_device *vdev, void *data)
         dev_set_drvdata(&vdev->dev, data);
 }
 
-static inline struct device *vdpa_get_dma_dev(struct vdpa_device *vdev)
+static inline union virtio_map vdpa_get_map(struct vdpa_device *vdev)
 {
-        return vdev->dma_dev;
+        return vdev->vmap;
 }
 
 static inline int vdpa_reset(struct vdpa_device *vdev, u32 flags)
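
For drivers that report per-virtqueue mapping metadata, get_vq_map replaces get_vq_dma_dev, as the mlx5 and virtio_vdpa hunks above show. A minimal sketch of how a driver might wire up the new op, assuming nothing beyond the ops signature in this diff; the my_* names are illustrative only:

        #include <linux/vdpa.h>
        #include <linux/virtio.h>

        /* Hand every virtqueue the device-wide map metadata set at probe
         * time; a driver with per-vq requirements (e.g. a control vq
         * handled in software, as mlx5 does above) would special-case
         * idx here. */
        static union virtio_map my_get_vq_map(struct vdpa_device *vdev, u16 idx)
        {
                return vdpa_get_map(vdev);
        }

        static const struct vdpa_config_ops my_vdpa_ops = {
                /* ... the mandatory ops are omitted from this sketch ... */
                .get_vq_map = my_get_vq_map,
        };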