return ret;
eni_vdpa = vdpa_alloc_device(struct eni_vdpa, vdpa,
- dev, &eni_vdpa_ops, NULL, false);
+ dev, &eni_vdpa_ops, 1, NULL, false);
if (IS_ERR(eni_vdpa)) {
ENI_ERR(pdev, "failed to allocate vDPA structure\n");
return PTR_ERR(eni_vdpa);
return vf->config_size;
}
+static u32 ifcvf_vdpa_get_vq_group(struct vdpa_device *vdpa, u16 idx)
+{
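+	/* Only a single virtqueue group is supported; every vq maps to group 0 */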
+ return 0;
+}
+
static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
unsigned int offset,
void *buf, unsigned int len)
.get_device_id = ifcvf_vdpa_get_device_id,
.get_vendor_id = ifcvf_vdpa_get_vendor_id,
.get_vq_align = ifcvf_vdpa_get_vq_align,
+ .get_vq_group = ifcvf_vdpa_get_vq_group,
.get_config_size = ifcvf_vdpa_get_config_size,
.get_config = ifcvf_vdpa_get_config,
.set_config = ifcvf_vdpa_set_config,
pdev = ifcvf_mgmt_dev->pdev;
dev = &pdev->dev;
adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
- dev, &ifc_vdpa_ops, name, false);
+ dev, &ifc_vdpa_ops, 1, name, false);
if (IS_ERR(adapter)) {
IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
return PTR_ERR(adapter);
return PAGE_SIZE;
}
+static u32 mlx5_vdpa_get_vq_group(struct vdpa_device *vdpa, u16 idx)
+{
+ return 0;
+}
+
enum { MLX5_VIRTIO_NET_F_GUEST_CSUM = 1 << 9,
MLX5_VIRTIO_NET_F_CSUM = 1 << 10,
MLX5_VIRTIO_NET_F_HOST_TSO6 = 1 << 11,
.get_vq_notification = mlx5_get_vq_notification,
.get_vq_irq = mlx5_get_vq_irq,
.get_vq_align = mlx5_vdpa_get_vq_align,
+ .get_vq_group = mlx5_vdpa_get_vq_group,
.get_device_features = mlx5_vdpa_get_device_features,
.set_driver_features = mlx5_vdpa_set_driver_features,
.get_driver_features = mlx5_vdpa_get_driver_features,
}
ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
- name, false);
+ 1, name, false);
if (IS_ERR(ndev))
return PTR_ERR(ndev);
* initialized but before registered.
* @parent: the parent device
* @config: the bus operations that are supported by this device
+ * @ngroups: the number of virtqueue groups supported by this device
* @size: size of the parent structure that contains private data
* @name: name of the vdpa device; optional.
* @use_va: indicate whether virtual address must be used by this device
*/
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
const struct vdpa_config_ops *config,
+ unsigned int ngroups,
size_t size, const char *name,
bool use_va)
{
vdev->config = config;
vdev->features_valid = false;
vdev->use_va = use_va;
+ vdev->ngroups = ngroups;
if (name)
err = dev_set_name(&vdev->dev, "%s", name);
else
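
The core now records the group count on the device itself, so a range check against it could look like the following sketch (vdpa_group_is_valid is a hypothetical helper used for illustration, not part of this patch):

static inline bool vdpa_group_is_valid(struct vdpa_device *vdev, u32 group)
{
	/* Valid group ids run from 0 to ngroups - 1. */
	return group < vdev->ngroups;
}
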
ops = &vdpasim_config_ops;
- vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
+ vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops, 1,
dev_attr->name, false);
if (IS_ERR(vdpasim)) {
ret = PTR_ERR(vdpasim);
return VDPASIM_QUEUE_ALIGN;
}
+static u32 vdpasim_get_vq_group(struct vdpa_device *vdpa, u16 idx)
+{
+ return 0;
+}
+
static u64 vdpasim_get_device_features(struct vdpa_device *vdpa)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
.set_vq_state = vdpasim_set_vq_state,
.get_vq_state = vdpasim_get_vq_state,
.get_vq_align = vdpasim_get_vq_align,
+ .get_vq_group = vdpasim_get_vq_group,
.get_device_features = vdpasim_get_device_features,
.set_driver_features = vdpasim_set_driver_features,
.get_driver_features = vdpasim_get_driver_features,
.set_vq_state = vdpasim_set_vq_state,
.get_vq_state = vdpasim_get_vq_state,
.get_vq_align = vdpasim_get_vq_align,
+ .get_vq_group = vdpasim_get_vq_group,
.get_device_features = vdpasim_get_device_features,
.set_driver_features = vdpasim_set_driver_features,
.get_driver_features = vdpasim_get_driver_features,
u32 status;
u32 generation;
u64 features;
+ u32 groups;
/* spinlock to synchronize iommu table */
spinlock_t iommu_lock;
};
return -EEXIST;
vdev = vdpa_alloc_device(struct vduse_vdpa, vdpa, dev->dev,
- &vduse_vdpa_config_ops, name, true);
+ &vduse_vdpa_config_ops, 1, name, true);
if (IS_ERR(vdev))
return PTR_ERR(vdev);
return ret;
vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa,
- dev, &vp_vdpa_ops, NULL, false);
+ dev, &vp_vdpa_ops, 1, NULL, false);
if (IS_ERR(vp_vdpa)) {
dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n");
return PTR_ERR(vp_vdpa);
bool use_va;
u32 nvqs;
struct vdpa_mgmt_dev *mdev;
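+	/* number of virtqueue groups supported by the device */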
+ unsigned int ngroups;
};
/**
* for the device
* @vdev: vdpa device
* Returns virtqueue align requirement
+ * @get_vq_group: Get the group id for a specific virtqueue
+ * @vdev: vdpa device
+ * @idx: virtqueue index
+ * Returns u32: group id for this virtqueue
* @get_device_features: Get virtio features supported by the device
* @vdev: vdpa device
* Returns the virtio features supported by the
/* Device ops */
u32 (*get_vq_align)(struct vdpa_device *vdev);
+ u32 (*get_vq_group)(struct vdpa_device *vdev, u16 idx);
u64 (*get_device_features)(struct vdpa_device *vdev);
int (*set_driver_features)(struct vdpa_device *vdev, u64 features);
u64 (*get_driver_features)(struct vdpa_device *vdev);
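
For a device that actually uses more than one group, an implementation of the new op could look like the sketch below (hypothetical driver that keeps its data virtqueues in group 0 and isolates the control virtqueue in group 1; my_vdpa_get_vq_group and MY_CTRL_VQ_IDX are illustrative names, not part of this patch):

static u32 my_vdpa_get_vq_group(struct vdpa_device *vdev, u16 idx)
{
	/* Place the control virtqueue in its own group. */
	if (idx == MY_CTRL_VQ_IDX)
		return 1;
	return 0;
}
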
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
const struct vdpa_config_ops *config,
+ unsigned int ngroups,
size_t size, const char *name,
bool use_va);
* @member: the name of struct vdpa_device within the @dev_struct
* @parent: the parent device
* @config: the bus operations that are supported by this device
+ * @ngroups: the number of virtqueue groups supported by this device
* @name: name of the vdpa device
* @use_va: indicate whether virtual address must be used by this device
*
* Return allocated data structure or ERR_PTR upon error
*/
-#define vdpa_alloc_device(dev_struct, member, parent, config, name, use_va) \
- container_of(__vdpa_alloc_device( \
- parent, config, \
+#define vdpa_alloc_device(dev_struct, member, parent, config, ngroups, name, use_va) \
+ container_of((__vdpa_alloc_device( \
+ parent, config, ngroups, \
sizeof(dev_struct) + \
BUILD_BUG_ON_ZERO(offsetof( \
- dev_struct, member)), name, use_va), \
+ dev_struct, member)), name, use_va)), \
dev_struct, member)
int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs);
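
With the updated macro signature, an allocation in a driver that wants two virtqueue groups would look roughly like the sketch below (struct my_vdpa and my_vdpa_ops are illustrative names; every caller converted in this patch simply passes 1):

struct my_vdpa *my;

/* Request two virtqueue groups; error handling is unchanged. */
my = vdpa_alloc_device(struct my_vdpa, vdpa, dev, &my_vdpa_ops,
		       2, NULL, false);
if (IS_ERR(my))
	return PTR_ERR(my);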