diff options
author | Michael S. Tsirkin <mst@redhat.com> | 2010-04-28 12:27:38 +0300 |
---|---|---|
committer | Doug Goldstein <cardoe@gentoo.org> | 2010-07-20 18:34:30 -0500 |
commit | 58171759d148b740715eebeaed2828382377d8a6 (patch) | |
tree | b3a3103ec6dfb3ed1a9dd25a3781e4bda3917f3e | |
parent | target-mips: fix DINSU instruction (diff) | |
download | qemu-kvm-0.12.4-gentoo.tar.gz qemu-kvm-0.12.4-gentoo.tar.bz2 qemu-kvm-0.12.4-gentoo.zip |
qemu-kvm: fix crash on reboot with vhost-net (tags: qemu-kvm-0.12.4-gentoo-2, qemu-kvm-0.12.4-gentoo)
When vhost-net is disabled on reboot, we set msix mask notifier
to NULL to disable further mask/unmask notifications.
The code currently tries to pass this NULL to the notifier,
leading to a crash. The right thing to do is
to add explicit APIs to enable/disable notifications.
Now when disabling notifications:
- if vector is masked, we don't need to notify backend,
just disable future notifications
- if vector is unmasked, invoke callback to unassign backend,
then disable future notifications
This patch also polls notifier before closing it,
to make sure we don't lose events if poll callback
didn't have time to run.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
-rw-r--r-- | hw/msix.c | 44 | ||||
-rw-r--r-- | hw/msix.h | 2 | ||||
-rw-r--r-- | hw/virtio-pci.c | 93 |
3 files changed, 139 insertions, 0 deletions
@@ -587,3 +587,47 @@ void msix_unuse_all_vectors(PCIDevice *dev) return; msix_free_irq_entries(dev); } + +int msix_set_mask_notifier(PCIDevice *dev, unsigned vector, void *opaque) +{ + int r; + if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector]) + return 0; + + assert(dev->msix_mask_notifier); + assert(opaque); + assert(!dev->msix_mask_notifier_opaque[vector]); + + if (msix_is_masked(dev, vector)) { + return 0; + } + r = dev->msix_mask_notifier(dev, vector, opaque, + msix_is_masked(dev, vector)); + if (r < 0) { + return r; + } + dev->msix_mask_notifier_opaque[vector] = opaque; + return r; +} + +int msix_unset_mask_notifier(PCIDevice *dev, unsigned vector) +{ + int r = 0; + if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector]) + return 0; + + assert(dev->msix_mask_notifier); + assert(dev->msix_mask_notifier_opaque[vector]); + + if (msix_is_masked(dev, vector)) { + return 0; + } + r = dev->msix_mask_notifier(dev, vector, + dev->msix_mask_notifier_opaque[vector], + msix_is_masked(dev, vector)); + if (r < 0) { + return r; + } + dev->msix_mask_notifier_opaque[vector] = NULL; + return r; +} @@ -33,4 +33,6 @@ void msix_reset(PCIDevice *dev); extern int msix_supported; +int msix_set_mask_notifier(PCIDevice *dev, unsigned vector, void *opaque); +int msix_unset_mask_notifier(PCIDevice *dev, unsigned vector); #endif diff --git a/hw/virtio-pci.c b/hw/virtio-pci.c index 359415226..59609476d 100644 --- a/hw/virtio-pci.c +++ b/hw/virtio-pci.c @@ -382,11 +382,104 @@ static void virtio_write_config(PCIDevice *pci_dev, uint32_t address, static unsigned virtio_pci_get_features(void *opaque) { +<<<<<<< HEAD unsigned ret = 0; ret |= (1 << VIRTIO_F_NOTIFY_ON_EMPTY); ret |= (1 << VIRTIO_RING_F_INDIRECT_DESC); ret |= (1 << VIRTIO_F_BAD_FEATURE); return ret; +======= + VirtIOPCIProxy *proxy = opaque; + return proxy->host_features; +} + +static void virtio_pci_guest_notifier_read(void *opaque) +{ + VirtQueue *vq = opaque; + EventNotifier *n = 
virtio_queue_get_guest_notifier(vq); + if (event_notifier_test_and_clear(n)) { + virtio_irq(vq); + } +} + +static int virtio_pci_mask_notifier(PCIDevice *dev, unsigned vector, + void *opaque, int masked) +{ + VirtQueue *vq = opaque; + EventNotifier *notifier = virtio_queue_get_guest_notifier(vq); + int r = kvm_set_irqfd(dev->msix_irq_entries[vector].gsi, + event_notifier_get_fd(notifier), + !masked); + if (r < 0) { + return (r == -ENOSYS) ? 0 : r; + } + if (masked) { + qemu_set_fd_handler(event_notifier_get_fd(notifier), + virtio_pci_guest_notifier_read, NULL, vq); + } else { + qemu_set_fd_handler(event_notifier_get_fd(notifier), + NULL, NULL, NULL); + } + return 0; +} + +static int virtio_pci_set_guest_notifier(void *opaque, int n, bool assign) +{ + VirtIOPCIProxy *proxy = opaque; + VirtQueue *vq = virtio_get_queue(proxy->vdev, n); + EventNotifier *notifier = virtio_queue_get_guest_notifier(vq); + + if (assign) { + int r = event_notifier_init(notifier, 0); + if (r < 0) { + return r; + } + qemu_set_fd_handler(event_notifier_get_fd(notifier), + virtio_pci_guest_notifier_read, NULL, vq); + msix_set_mask_notifier(&proxy->pci_dev, + virtio_queue_vector(proxy->vdev, n), vq); + } else { + msix_unset_mask_notifier(&proxy->pci_dev, + virtio_queue_vector(proxy->vdev, n)); + qemu_set_fd_handler(event_notifier_get_fd(notifier), + NULL, NULL, NULL); + /* Test and clear notifier before closing it, + * in case poll callback didn't have time to run. 
*/ + virtio_pci_guest_notifier_read(vq); + event_notifier_cleanup(notifier); + } + + return 0; +} + +static int virtio_pci_set_host_notifier(void *opaque, int n, bool assign) +{ + VirtIOPCIProxy *proxy = opaque; + VirtQueue *vq = virtio_get_queue(proxy->vdev, n); + EventNotifier *notifier = virtio_queue_get_host_notifier(vq); + int r; + if (assign) { + r = event_notifier_init(notifier, 1); + if (r < 0) { + return r; + } + r = kvm_set_ioeventfd_pio_word(event_notifier_get_fd(notifier), + proxy->addr + VIRTIO_PCI_QUEUE_NOTIFY, + n, assign); + if (r < 0) { + event_notifier_cleanup(notifier); + } + } else { + r = kvm_set_ioeventfd_pio_word(event_notifier_get_fd(notifier), + proxy->addr + VIRTIO_PCI_QUEUE_NOTIFY, + n, assign); + if (r < 0) { + return r; + } + event_notifier_cleanup(notifier); + } + return r; +>>>>>>> 992cc81... qemu-kvm: fix crash on reboot with vhost-net } static const VirtIOBindings virtio_pci_bindings = { |