drivers/vfio: Import OnePlus Changes
Signed-off-by: Cyber Knight <cyberknight755@gmail.com>
parent 93c8425cbd
commit 1e0bb18a32
@@ -29,7 +29,6 @@
#include <linux/vfio.h>
#include <linux/vgaarb.h>
#include <linux/nospec.h>
#include <linux/sched/mm.h>

#include "vfio_pci_private.h"

@@ -182,7 +181,6 @@ no_mmap:

static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);
static void vfio_pci_disable(struct vfio_pci_device *vdev);
static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data);

/*
 * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
@@ -399,19 +397,6 @@ static void vfio_pci_release(void *device_data)
    if (!(--vdev->refcnt)) {
        vfio_spapr_pci_eeh_release(vdev->pdev);
        vfio_pci_disable(vdev);
        mutex_lock(&vdev->igate);
        if (vdev->err_trigger) {
            eventfd_ctx_put(vdev->err_trigger);
            vdev->err_trigger = NULL;
        }
        mutex_unlock(&vdev->igate);

        mutex_lock(&vdev->igate);
        if (vdev->req_trigger) {
            eventfd_ctx_put(vdev->req_trigger);
            vdev->req_trigger = NULL;
        }
        mutex_unlock(&vdev->igate);
    }

    mutex_unlock(&driver_lock);

@@ -659,12 +644,6 @@ int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
    return 0;
}

struct vfio_devices {
    struct vfio_device **devices;
    int cur_index;
    int max_index;
};

static long vfio_pci_ioctl(void *device_data,
                           unsigned int cmd, unsigned long arg)
{
@@ -738,7 +717,7 @@ static long vfio_pci_ioctl(void *device_data,
    {
        void __iomem *io;
        size_t size;
        u16 cmd;
        u16 orig_cmd;

        info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
        info.flags = 0;

@@ -758,7 +737,10 @@ static long vfio_pci_ioctl(void *device_data,
         * Is it really there? Enable memory decode for
         * implicit access in pci_map_rom().
         */
        cmd = vfio_pci_memory_lock_and_enable(vdev);
        pci_read_config_word(pdev, PCI_COMMAND, &orig_cmd);
        pci_write_config_word(pdev, PCI_COMMAND,
                              orig_cmd | PCI_COMMAND_MEMORY);

        io = pci_map_rom(pdev, &size);
        if (io) {
            info.flags = VFIO_REGION_INFO_FLAG_READ;

@@ -766,8 +748,8 @@ static long vfio_pci_ioctl(void *device_data,
        } else {
            info.size = 0;
        }
        vfio_pci_memory_unlock_and_restore(vdev, cmd);

        pci_write_config_word(pdev, PCI_COMMAND, orig_cmd);
        break;
    }
    case VFIO_PCI_VGA_REGION_INDEX:
@@ -903,16 +885,8 @@ static long vfio_pci_ioctl(void *device_data,
        return ret;

    } else if (cmd == VFIO_DEVICE_RESET) {
        int ret;

        if (!vdev->reset_works)
            return -EINVAL;

        vfio_pci_zap_and_down_write_memory_lock(vdev);
        ret = pci_try_reset_function(vdev->pdev);
        up_write(&vdev->memory_lock);

        return ret;
        return vdev->reset_works ?
            pci_try_reset_function(vdev->pdev) : -EINVAL;

    } else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
        struct vfio_pci_hot_reset_info hdr;

@@ -992,9 +966,8 @@ reset_info_exit:
        int32_t *group_fds;
        struct vfio_pci_group_entry *groups;
        struct vfio_pci_group_info info;
        struct vfio_devices devs = { .cur_index = 0 };
        bool slot = false;
        int i, group_idx, mem_idx = 0, count = 0, ret = 0;
        int i, count = 0, ret = 0;

        minsz = offsetofend(struct vfio_pci_hot_reset, count);

@@ -1046,9 +1019,9 @@ reset_info_exit:
         * user interface and store the group and iommu ID. This
         * ensures the group is held across the reset.
         */
        for (group_idx = 0; group_idx < hdr.count; group_idx++) {
        for (i = 0; i < hdr.count; i++) {
            struct vfio_group *group;
            struct fd f = fdget(group_fds[group_idx]);
            struct fd f = fdget(group_fds[i]);
            if (!f.file) {
                ret = -EBADF;
                break;

@@ -1061,9 +1034,8 @@ reset_info_exit:
                break;
            }

            groups[group_idx].group = group;
            groups[group_idx].id =
                    vfio_external_user_iommu_id(group);
            groups[i].group = group;
            groups[i].id = vfio_external_user_iommu_id(group);
        }

        kfree(group_fds);
@@ -1082,65 +1054,14 @@ reset_info_exit:
        ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
                                            vfio_pci_validate_devs,
                                            &info, slot);

        if (ret)
            goto hot_reset_release;

        devs.max_index = count;
        devs.devices = kcalloc(count, sizeof(struct vfio_device *),
                               GFP_KERNEL);
        if (!devs.devices) {
            ret = -ENOMEM;
            goto hot_reset_release;
        }

        /*
         * We need to get memory_lock for each device, but devices
         * can share mmap_sem, therefore we need to zap and hold
         * the vma_lock for each device, and only then get each
         * memory_lock.
         */
        ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
                                            vfio_pci_try_zap_and_vma_lock_cb,
                                            &devs, slot);
        if (ret)
            goto hot_reset_release;

        for (; mem_idx < devs.cur_index; mem_idx++) {
            struct vfio_pci_device *tmp;

            tmp = vfio_device_data(devs.devices[mem_idx]);

            ret = down_write_trylock(&tmp->memory_lock);
            if (!ret) {
                ret = -EBUSY;
                goto hot_reset_release;
            }
            mutex_unlock(&tmp->vma_lock);
        }

        /* User has access, do the reset */
        ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
                     pci_try_reset_bus(vdev->pdev->bus);
        if (!ret)
        /* User has access, do the reset */
        ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
                     pci_try_reset_bus(vdev->pdev->bus);

hot_reset_release:
        for (i = 0; i < devs.cur_index; i++) {
            struct vfio_device *device;
            struct vfio_pci_device *tmp;

            device = devs.devices[i];
            tmp = vfio_device_data(device);

            if (i < mem_idx)
                up_write(&tmp->memory_lock);
            else
                mutex_unlock(&tmp->vma_lock);
            vfio_device_put(device);
        }
        kfree(devs.devices);

        for (group_idx--; group_idx >= 0; group_idx--)
            vfio_group_put_external_user(groups[group_idx].group);
        for (i--; i >= 0; i--)
            vfio_group_put_external_user(groups[i].group);

        kfree(groups);
        return ret;
@@ -1199,202 +1120,6 @@ static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
    return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true);
}

/* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */
static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try)
{
    struct vfio_pci_mmap_vma *mmap_vma, *tmp;

    /*
     * Lock ordering:
     * vma_lock is nested under mmap_sem for vm_ops callback paths.
     * The memory_lock semaphore is used by both code paths calling
     * into this function to zap vmas and the vm_ops.fault callback
     * to protect the memory enable state of the device.
     *
     * When zapping vmas we need to maintain the mmap_sem => vma_lock
     * ordering, which requires using vma_lock to walk vma_list to
     * acquire an mm, then dropping vma_lock to get the mmap_sem and
     * reacquiring vma_lock. This logic is derived from similar
     * requirements in uverbs_user_mmap_disassociate().
     *
     * mmap_sem must always be the top-level lock when it is taken.
     * Therefore we can only hold the memory_lock write lock when
     * vma_list is empty, as we'd need to take mmap_sem to clear
     * entries. vma_list can only be guaranteed empty when holding
     * vma_lock, thus memory_lock is nested under vma_lock.
     *
     * This enables the vm_ops.fault callback to acquire vma_lock,
     * followed by memory_lock read lock, while already holding
     * mmap_sem without risk of deadlock.
     */
    while (1) {
        struct mm_struct *mm = NULL;

        if (try) {
            if (!mutex_trylock(&vdev->vma_lock))
                return 0;
        } else {
            mutex_lock(&vdev->vma_lock);
        }
        while (!list_empty(&vdev->vma_list)) {
            mmap_vma = list_first_entry(&vdev->vma_list,
                                        struct vfio_pci_mmap_vma,
                                        vma_next);
            mm = mmap_vma->vma->vm_mm;
            if (mmget_not_zero(mm))
                break;

            list_del(&mmap_vma->vma_next);
            kfree(mmap_vma);
            mm = NULL;
        }
        if (!mm)
            return 1;
        mutex_unlock(&vdev->vma_lock);

        if (try) {
            if (!down_read_trylock(&mm->mmap_sem)) {
                mmput(mm);
                return 0;
            }
        } else {
            down_read(&mm->mmap_sem);
        }
        if (mmget_still_valid(mm)) {
            if (try) {
                if (!mutex_trylock(&vdev->vma_lock)) {
                    up_read(&mm->mmap_sem);
                    mmput(mm);
                    return 0;
                }
            } else {
                mutex_lock(&vdev->vma_lock);
            }
            list_for_each_entry_safe(mmap_vma, tmp,
                                     &vdev->vma_list, vma_next) {
                struct vm_area_struct *vma = mmap_vma->vma;

                if (vma->vm_mm != mm)
                    continue;

                list_del(&mmap_vma->vma_next);
                kfree(mmap_vma);

                zap_vma_ptes(vma, vma->vm_start,
                             vma->vm_end - vma->vm_start);
            }
            mutex_unlock(&vdev->vma_lock);
        }
        up_read(&mm->mmap_sem);
        mmput(mm);
    }
}

void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device *vdev)
{
    vfio_pci_zap_and_vma_lock(vdev, false);
    down_write(&vdev->memory_lock);
    mutex_unlock(&vdev->vma_lock);
}

u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev)
{
    u16 cmd;

    down_write(&vdev->memory_lock);
    pci_read_config_word(vdev->pdev, PCI_COMMAND, &cmd);
    if (!(cmd & PCI_COMMAND_MEMORY))
        pci_write_config_word(vdev->pdev, PCI_COMMAND,
                              cmd | PCI_COMMAND_MEMORY);

    return cmd;
}

void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev, u16 cmd)
{
    pci_write_config_word(vdev->pdev, PCI_COMMAND, cmd);
    up_write(&vdev->memory_lock);
}

/* Caller holds vma_lock */
static int __vfio_pci_add_vma(struct vfio_pci_device *vdev,
                              struct vm_area_struct *vma)
{
    struct vfio_pci_mmap_vma *mmap_vma;

    mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL);
    if (!mmap_vma)
        return -ENOMEM;

    mmap_vma->vma = vma;
    list_add(&mmap_vma->vma_next, &vdev->vma_list);

    return 0;
}

/*
 * Zap mmaps on open so that we can fault them in on access and therefore
 * our vma_list only tracks mappings accessed since last zap.
 */
static void vfio_pci_mmap_open(struct vm_area_struct *vma)
{
    zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}

static void vfio_pci_mmap_close(struct vm_area_struct *vma)
{
    struct vfio_pci_device *vdev = vma->vm_private_data;
    struct vfio_pci_mmap_vma *mmap_vma;

    mutex_lock(&vdev->vma_lock);
    list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
        if (mmap_vma->vma == vma) {
            list_del(&mmap_vma->vma_next);
            kfree(mmap_vma);
            break;
        }
    }
    mutex_unlock(&vdev->vma_lock);
}

static int vfio_pci_mmap_fault(struct vm_fault *vmf)
{
    struct vm_area_struct *vma = vmf->vma;
    struct vfio_pci_device *vdev = vma->vm_private_data;
    int ret = VM_FAULT_NOPAGE;

    mutex_lock(&vdev->vma_lock);
    down_read(&vdev->memory_lock);

    if (!__vfio_pci_memory_enabled(vdev)) {
        ret = VM_FAULT_SIGBUS;
        mutex_unlock(&vdev->vma_lock);
        goto up_out;
    }

    if (__vfio_pci_add_vma(vdev, vma)) {
        ret = VM_FAULT_OOM;
        mutex_unlock(&vdev->vma_lock);
        goto up_out;
    }

    mutex_unlock(&vdev->vma_lock);

    if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                        vma->vm_end - vma->vm_start, vma->vm_page_prot))
        ret = VM_FAULT_SIGBUS;

up_out:
    up_read(&vdev->memory_lock);
    return ret;
}

static const struct vm_operations_struct vfio_pci_mmap_ops = {
    .open = vfio_pci_mmap_open,
    .close = vfio_pci_mmap_close,
    .fault = vfio_pci_mmap_fault,
};

static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
{
    struct vfio_pci_device *vdev = device_data;
@@ -1460,14 +1185,8 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
    vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
    vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;

    /*
     * See remap_pfn_range(), called from vfio_pci_fault() but we can't
     * change vm_flags within the fault handler. Set them now.
     */
    vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
    vma->vm_ops = &vfio_pci_mmap_ops;

    return 0;
    return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                           req_len, vma->vm_page_prot);
}

static void vfio_pci_request(void *device_data, unsigned int count)

@@ -1524,9 +1243,6 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    vdev->irq_type = VFIO_PCI_NUM_IRQS;
    mutex_init(&vdev->igate);
    spin_lock_init(&vdev->irqlock);
    mutex_init(&vdev->vma_lock);
    INIT_LIST_HEAD(&vdev->vma_list);
    init_rwsem(&vdev->memory_lock);

    ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
    if (ret) {

@@ -1621,6 +1337,12 @@ static struct pci_driver vfio_pci_driver = {
    .err_handler = &vfio_err_handlers,
};

struct vfio_devices {
    struct vfio_device **devices;
    int cur_index;
    int max_index;
};

static int vfio_pci_get_devs(struct pci_dev *pdev, void *data)
{
    struct vfio_devices *devs = data;
@@ -1642,39 +1364,6 @@ static int vfio_pci_get_devs(struct pci_dev *pdev, void *data)
    return 0;
}

static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data)
{
    struct vfio_devices *devs = data;
    struct vfio_device *device;
    struct vfio_pci_device *vdev;

    if (devs->cur_index == devs->max_index)
        return -ENOSPC;

    device = vfio_device_get_from_dev(&pdev->dev);
    if (!device)
        return -EINVAL;

    if (pci_dev_driver(pdev) != &vfio_pci_driver) {
        vfio_device_put(device);
        return -EBUSY;
    }

    vdev = vfio_device_data(device);

    /*
     * Locking multiple devices is prone to deadlock, runaway and
     * unwind if we hit contention.
     */
    if (!vfio_pci_zap_and_vma_lock(vdev, true)) {
        vfio_device_put(device);
        return -EBUSY;
    }

    devs->devices[devs->cur_index++] = device;
    return 0;
}

/*
 * Attempt to do a bus/slot reset if there are devices affected by a reset for
 * this device that are needs_reset and all of the affected devices are unused
@@ -398,20 +398,6 @@ static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write)
    *(__le32 *)(&p->write[off]) = cpu_to_le32(write);
}

/* Caller should hold memory_lock semaphore */
bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev)
{
    struct pci_dev *pdev = vdev->pdev;
    u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);

    /*
     * SR-IOV VF memory enable is handled by the MSE bit in the
     * PF SR-IOV capability, there's therefore no need to trigger
     * faults based on the virtual value.
     */
    return pdev->is_virtfn || (cmd & PCI_COMMAND_MEMORY);
}

/*
 * Restore the *real* BARs after we detect a FLR or backdoor reset.
 * (backdoor = some device specific technique that we didn't catch)
@@ -572,18 +558,13 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,

    new_cmd = le32_to_cpu(val);

    phys_io = !!(phys_cmd & PCI_COMMAND_IO);
    virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO);
    new_io = !!(new_cmd & PCI_COMMAND_IO);

    phys_mem = !!(phys_cmd & PCI_COMMAND_MEMORY);
    virt_mem = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_MEMORY);
    new_mem = !!(new_cmd & PCI_COMMAND_MEMORY);

    if (!new_mem)
        vfio_pci_zap_and_down_write_memory_lock(vdev);
    else
        down_write(&vdev->memory_lock);
    phys_io = !!(phys_cmd & PCI_COMMAND_IO);
    virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO);
    new_io = !!(new_cmd & PCI_COMMAND_IO);

    /*
     * If the user is writing mem/io enable (new_mem/io) and we

@@ -600,11 +581,8 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
    }

    count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
    if (count < 0) {
        if (offset == PCI_COMMAND)
            up_write(&vdev->memory_lock);
    if (count < 0)
        return count;
    }

    /*
     * Save current memory/io enable bits in vconfig to allow for

@@ -615,8 +593,6 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,

    *virt_cmd &= cpu_to_le16(~mask);
    *virt_cmd |= cpu_to_le16(new_cmd & mask);

    up_write(&vdev->memory_lock);
}

/* Emulate INTx disable */
@@ -854,11 +830,8 @@ static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
                                 pos - offset + PCI_EXP_DEVCAP,
                                 &cap);

        if (!ret && (cap & PCI_EXP_DEVCAP_FLR)) {
            vfio_pci_zap_and_down_write_memory_lock(vdev);
        if (!ret && (cap & PCI_EXP_DEVCAP_FLR))
            pci_try_reset_function(vdev->pdev);
            up_write(&vdev->memory_lock);
        }
    }

    /*

@@ -936,11 +909,8 @@ static int vfio_af_config_write(struct vfio_pci_device *vdev, int pos,
                                pos - offset + PCI_AF_CAP,
                                &cap);

        if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP)) {
            vfio_pci_zap_and_down_write_memory_lock(vdev);
        if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP))
            pci_try_reset_function(vdev->pdev);
            up_write(&vdev->memory_lock);
        }
    }

    return count;
@@ -1738,15 +1708,6 @@ int vfio_config_init(struct vfio_pci_device *vdev)
                 vconfig[PCI_INTERRUPT_PIN]);

        vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */

        /*
         * VFs do no implement the memory enable bit of the COMMAND
         * register therefore we'll not have it set in our initial
         * copy of config space after pci_enable_device(). For
         * consistency with PFs, set the virtual enable bit here.
         */
        *(__le16 *)&vconfig[PCI_COMMAND] |=
                cpu_to_le16(PCI_COMMAND_MEMORY);
    }

    if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx)
@@ -252,7 +252,6 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
    struct pci_dev *pdev = vdev->pdev;
    unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
    int ret;
    u16 cmd;

    if (!is_irq_none(vdev))
        return -EINVAL;

@@ -262,16 +261,13 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
        return -ENOMEM;

    /* return the number of supported vectors if we can't get all: */
    cmd = vfio_pci_memory_lock_and_enable(vdev);
    ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag);
    if (ret < nvec) {
        if (ret > 0)
            pci_free_irq_vectors(pdev);
        vfio_pci_memory_unlock_and_restore(vdev, cmd);
        kfree(vdev->ctx);
        return ret;
    }
    vfio_pci_memory_unlock_and_restore(vdev, cmd);

    vdev->num_ctx = nvec;
    vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
@@ -294,7 +290,6 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
    struct pci_dev *pdev = vdev->pdev;
    struct eventfd_ctx *trigger;
    int irq, ret;
    u16 cmd;

    if (vector < 0 || vector >= vdev->num_ctx)
        return -EINVAL;

@@ -303,11 +298,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,

    if (vdev->ctx[vector].trigger) {
        irq_bypass_unregister_producer(&vdev->ctx[vector].producer);

        cmd = vfio_pci_memory_lock_and_enable(vdev);
        free_irq(irq, vdev->ctx[vector].trigger);
        vfio_pci_memory_unlock_and_restore(vdev, cmd);

        kfree(vdev->ctx[vector].name);
        eventfd_ctx_put(vdev->ctx[vector].trigger);
        vdev->ctx[vector].trigger = NULL;

@@ -335,7 +326,6 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
     * such a reset it would be unsuccessful. To avoid this, restore the
     * cached value of the message prior to enabling.
     */
    cmd = vfio_pci_memory_lock_and_enable(vdev);
    if (msix) {
        struct msi_msg msg;

@@ -345,7 +335,6 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,

    ret = request_irq(irq, vfio_msihandler, 0,
                      vdev->ctx[vector].name, trigger);
    vfio_pci_memory_unlock_and_restore(vdev, cmd);
    if (ret) {
        kfree(vdev->ctx[vector].name);
        eventfd_ctx_put(trigger);

@@ -355,13 +344,11 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
    vdev->ctx[vector].producer.token = trigger;
    vdev->ctx[vector].producer.irq = irq;
    ret = irq_bypass_register_producer(&vdev->ctx[vector].producer);
    if (unlikely(ret)) {
    if (unlikely(ret))
        dev_info(&pdev->dev,
        "irq bypass producer (token %p) registration fails: %d\n",
        vdev->ctx[vector].producer.token, ret);

        vdev->ctx[vector].producer.token = NULL;
    }
    vdev->ctx[vector].trigger = trigger;

    return 0;
@@ -392,7 +379,6 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
{
    struct pci_dev *pdev = vdev->pdev;
    int i;
    u16 cmd;

    for (i = 0; i < vdev->num_ctx; i++) {
        vfio_virqfd_disable(&vdev->ctx[i].unmask);

@@ -401,9 +387,7 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)

    vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);

    cmd = vfio_pci_memory_lock_and_enable(vdev);
    pci_free_irq_vectors(pdev);
    vfio_pci_memory_unlock_and_restore(vdev, cmd);

    /*
     * Both disable paths above use pci_intx_for_msi() to clear DisINTx
@@ -63,11 +63,6 @@ struct vfio_pci_dummy_resource {
    struct list_head res_next;
};

struct vfio_pci_mmap_vma {
    struct vm_area_struct *vma;
    struct list_head vma_next;
};

struct vfio_pci_device {
    struct pci_dev *pdev;
    void __iomem *barmap[PCI_STD_RESOURCE_END + 1];

@@ -100,9 +95,6 @@ struct vfio_pci_device {
    struct eventfd_ctx *err_trigger;
    struct eventfd_ctx *req_trigger;
    struct list_head dummy_resources_list;
    struct mutex vma_lock;
    struct list_head vma_list;
    struct rw_semaphore memory_lock;
};

#define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX)

@@ -138,14 +130,6 @@ extern int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
                    unsigned int type, unsigned int subtype,
                    const struct vfio_pci_regops *ops,
                    size_t size, u32 flags, void *data);

extern bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev);
extern void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device
                                                    *vdev);
extern u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev);
extern void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev,
                                               u16 cmd);

#ifdef CONFIG_VFIO_PCI_IGD
extern int vfio_pci_igd_init(struct vfio_pci_device *vdev);
#else
@@ -122,7 +122,6 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
    size_t x_start = 0, x_end = 0;
    resource_size_t end;
    void __iomem *io;
    struct resource *res = &vdev->pdev->resource[bar];
    ssize_t done;

    if (pci_resource_start(pdev, bar))

@@ -138,14 +137,6 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,

    count = min(count, (size_t)(end - pos));

    if (res->flags & IORESOURCE_MEM) {
        down_read(&vdev->memory_lock);
        if (!__vfio_pci_memory_enabled(vdev)) {
            up_read(&vdev->memory_lock);
            return -EIO;
        }
    }

    if (bar == PCI_ROM_RESOURCE) {
        /*
         * The ROM can fill less space than the BAR, so we start the
@@ -153,21 +144,20 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
         * filling large ROM BARs much faster.
         */
        io = pci_map_rom(pdev, &x_start);
        if (!io) {
            done = -ENOMEM;
            goto out;
        }
        if (!io)
            return -ENOMEM;
        x_end = end;
    } else if (!vdev->barmap[bar]) {
        done = pci_request_selected_regions(pdev, 1 << bar, "vfio");
        if (done)
            goto out;
        int ret;

        ret = pci_request_selected_regions(pdev, 1 << bar, "vfio");
        if (ret)
            return ret;

        io = pci_iomap(pdev, bar, 0);
        if (!io) {
            pci_release_selected_regions(pdev, 1 << bar);
            done = -ENOMEM;
            goto out;
            return -ENOMEM;
        }

        vdev->barmap[bar] = io;

@@ -186,9 +176,6 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,

    if (bar == PCI_ROM_RESOURCE)
        pci_unmap_rom(pdev, io);
out:
    if (res->flags & IORESOURCE_MEM)
        up_read(&vdev->memory_lock);

    return done;
}
@@ -336,32 +336,6 @@ static int put_pfn(unsigned long pfn, int prot)
    return 0;
}

static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
                            unsigned long vaddr, unsigned long *pfn,
                            bool write_fault)
{
    int ret;

    ret = follow_pfn(vma, vaddr, pfn);
    if (ret) {
        bool unlocked = false;

        ret = fixup_user_fault(NULL, mm, vaddr,
                               FAULT_FLAG_REMOTE |
                               (write_fault ? FAULT_FLAG_WRITE : 0),
                               &unlocked);
        if (unlocked)
            return -EAGAIN;

        if (ret)
            return ret;

        ret = follow_pfn(vma, vaddr, pfn);
    }

    return ret;
}

static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
                         int prot, unsigned long *pfn)
{

@@ -403,16 +377,12 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,

    vaddr = untagged_addr(vaddr);

retry:
    vma = find_vma_intersection(mm, vaddr, vaddr + 1);

    if (vma && vma->vm_flags & VM_PFNMAP) {
        ret = follow_fault_pfn(vma, mm, vaddr, pfn, prot & IOMMU_WRITE);
        if (ret == -EAGAIN)
            goto retry;

        if (!ret && !is_invalid_reserved_pfn(*pfn))
            ret = -EFAULT;
        if (!follow_pfn(vma, vaddr, pfn) &&
            is_invalid_reserved_pfn(*pfn))
            ret = 0;
    }

    up_read(&mm->mmap_sem);
@@ -631,8 +601,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,

        ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]);
        if (ret) {
            if (put_pfn(phys_pfn[i], dma->prot) && do_accounting)
                vfio_lock_acct(dma, -1, true);
            vfio_unpin_page_external(dma, iova, do_accounting);
            goto pin_unwind;
        }
    }

@@ -1119,16 +1088,13 @@ static int vfio_bus_type(struct device *dev, void *data)
static int vfio_iommu_replay(struct vfio_iommu *iommu,
                             struct vfio_domain *domain)
{
    struct vfio_domain *d = NULL;
    struct vfio_domain *d;
    struct rb_node *n;
    unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
    int ret;

    /* Arbitrarily pick the first domain in the list for lookups */
    if (!list_empty(&iommu->domain_list))
        d = list_first_entry(&iommu->domain_list,
                             struct vfio_domain, next);

    d = list_first_entry(&iommu->domain_list, struct vfio_domain, next);
    n = rb_first(&iommu->dma_list);

    for (; n; n = rb_next(n)) {

@@ -1146,11 +1112,6 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
            phys_addr_t p;
            dma_addr_t i;

            if (WARN_ON(!d)) { /* mapped w/o a domain?! */
                ret = -EINVAL;
                goto unwind;
            }

            phys = iommu_iova_to_phys(d->domain, iova);

            if (WARN_ON(!phys)) {

@@ -1180,7 +1141,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
            if (npage <= 0) {
                WARN_ON(!npage);
                ret = (int)npage;
                goto unwind;
                return ret;
            }

            phys = pfn << PAGE_SHIFT;
@@ -1189,67 +1150,14 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,

            ret = iommu_map(domain->domain, iova, phys,
                            size, dma->prot | domain->prot);
            if (ret) {
                if (!dma->iommu_mapped)
                    vfio_unpin_pages_remote(dma, iova,
                                            phys >> PAGE_SHIFT,
                                            size >> PAGE_SHIFT,
                                            true);
                goto unwind;
            }
            if (ret)
                return ret;

            iova += size;
        }
    }

    /* All dmas are now mapped, defer to second tree walk for unwind */
    for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
        struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);

        dma->iommu_mapped = true;
    }

    return 0;

unwind:
    for (; n; n = rb_prev(n)) {
        struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
        dma_addr_t iova;

        if (dma->iommu_mapped) {
            iommu_unmap(domain->domain, dma->iova, dma->size);
            continue;
        }

        iova = dma->iova;
        while (iova < dma->iova + dma->size) {
            phys_addr_t phys, p;
            size_t size;
            dma_addr_t i;

            phys = iommu_iova_to_phys(domain->domain, iova);
            if (!phys) {
                iova += PAGE_SIZE;
                continue;
            }

            size = PAGE_SIZE;
            p = phys + size;
            i = iova + size;
            while (i < dma->iova + dma->size &&
                   p == iommu_iova_to_phys(domain->domain, i)) {
                size += PAGE_SIZE;
                p += PAGE_SIZE;
                i += PAGE_SIZE;
            }

            iommu_unmap(domain->domain, iova, size);
            vfio_unpin_pages_remote(dma, iova, phys >> PAGE_SHIFT,
                                    size >> PAGE_SHIFT, true);
        }
    }

    return ret;
}

/*