summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSam James <sam@gentoo.org>2022-06-04 03:59:28 +0100
committerSam James <sam@gentoo.org>2022-06-04 04:01:29 +0100
commita2440aa2b4c8ceaf4195e30f6b4888ede061d8dd (patch)
tree3ebe00c83e28fb1b3f98cf43eb18bd88ac954e3c /app-emulation/qemu/files
parentvirtual/tmpfiles: Stabilize 0-r3 sparc, #848435 (diff)
downloadgentoo-a2440aa2b4c8ceaf4195e30f6b4888ede061d8dd.tar.gz
gentoo-a2440aa2b4c8ceaf4195e30f6b4888ede061d8dd.tar.bz2
gentoo-a2440aa2b4c8ceaf4195e30f6b4888ede061d8dd.zip
app-emulation/qemu: backport virtio-iscsi CPU usage fix; FORTIFY_SOURCE=3 fixes
- Backport virtio-iscsi CPU usage fix; - Don't force -D_FORTIFY_SOURCE=2 (we patch it into toolchain so no need to set it, and by doing -U... -D...=2, it prevents usage of =3) - Backport FORTIFY_SOURCE=3 crash fix Closes: https://bugs.gentoo.org/849587 Closes: https://bugs.gentoo.org/849500 Signed-off-by: Sam James <sam@gentoo.org>
Diffstat (limited to 'app-emulation/qemu/files')
-rw-r--r--app-emulation/qemu/files/qemu-7.0.0-pci-overflow-fortify-source-3.patch94
-rw-r--r--app-emulation/qemu/files/qemu-7.0.0-virtio-scsi-fixes.patch182
2 files changed, 276 insertions, 0 deletions
diff --git a/app-emulation/qemu/files/qemu-7.0.0-pci-overflow-fortify-source-3.patch b/app-emulation/qemu/files/qemu-7.0.0-pci-overflow-fortify-source-3.patch
new file mode 100644
index 000000000000..767f66243fcc
--- /dev/null
+++ b/app-emulation/qemu/files/qemu-7.0.0-pci-overflow-fortify-source-3.patch
@@ -0,0 +1,94 @@
+https://bugs.gentoo.org/849587
+https://bugzilla.opensuse.org/show_bug.cgi?id=1199924
+https://lists.gnu.org/archive/html/qemu-devel/2022-05/msg06183.html
+
+From qemu-devel Tue May 31 11:47:07 2022
+From: Claudio Fontana <cfontana () suse ! de>
+Date: Tue, 31 May 2022 11:47:07 +0000
+To: qemu-devel
+Subject: [PATCH] pci: fix overflow in snprintf string formatting
+Message-Id: <20220531114707.18830-1-cfontana () suse ! de>
+X-MARC-Message: https://marc.info/?l=qemu-devel&m=165399772310578
+
+the code in pcibus_get_fw_dev_path contained the potential for a
+stack buffer overflow of 1 byte, potentially writing to the stack an
+extra NUL byte.
+
+This overflow could happen if the PCI slot is >= 0x10000000,
+and the PCI function is >= 0x10000000, due to the size parameter
+of snprintf being incorrectly calculated in the call:
+
+ if (PCI_FUNC(d->devfn))
+ snprintf(path + off, sizeof(path) + off, ",%x", PCI_FUNC(d->devfn));
+
+since the off obtained from a previous call to snprintf is added
+instead of subtracted from the total available size of the buffer.
+
+Without the accurate size guard from snprintf, we end up writing in the
+worst case:
+
+name (32) + "@" (1) + SLOT (8) + "," (1) + FUNC (8) + term NUL (1) = 51 bytes
+
+In order to provide something more robust, replace all of the code in
+pcibus_get_fw_dev_path with a single call to g_strdup_printf,
+so there is no need to rely on manual calculations.
+
+Found by compiling QEMU with FORTIFY_SOURCE=3 as the error:
+
+*** buffer overflow detected ***: terminated
+
+Thread 1 "qemu-system-x86" received signal SIGABRT, Aborted.
+[Switching to Thread 0x7ffff642c380 (LWP 121307)]
+0x00007ffff71ff55c in __pthread_kill_implementation () from /lib64/libc.so.6
+(gdb) bt
+ #0 0x00007ffff71ff55c in __pthread_kill_implementation () at /lib64/libc.so.6
+ #1 0x00007ffff71ac6f6 in raise () at /lib64/libc.so.6
+ #2 0x00007ffff7195814 in abort () at /lib64/libc.so.6
+ #3 0x00007ffff71f279e in __libc_message () at /lib64/libc.so.6
+ #4 0x00007ffff729767a in __fortify_fail () at /lib64/libc.so.6
+ #5 0x00007ffff7295c36 in () at /lib64/libc.so.6
+ #6 0x00007ffff72957f5 in __snprintf_chk () at /lib64/libc.so.6
+ #7 0x0000555555b1c1fd in pcibus_get_fw_dev_path ()
+ #8 0x0000555555f2bde4 in qdev_get_fw_dev_path_helper.constprop ()
+ #9 0x0000555555f2bd86 in qdev_get_fw_dev_path_helper.constprop ()
+ #10 0x00005555559a6e5d in get_boot_device_path ()
+ #11 0x00005555559a712c in get_boot_devices_list ()
+ #12 0x0000555555b1a3d0 in fw_cfg_machine_reset ()
+ #13 0x0000555555bf4c2d in pc_machine_reset ()
+ #14 0x0000555555c66988 in qemu_system_reset ()
+ #15 0x0000555555a6dff6 in qdev_machine_creation_done ()
+ #16 0x0000555555c79186 in qmp_x_exit_preconfig.part ()
+ #17 0x0000555555c7b459 in qemu_init ()
+ #18 0x0000555555960a29 in main ()
+
+Found-by: Dario Faggioli <dfaggioli@suse.com>
+Found-by: Martin Liška <martin.liska@suse.com>
+Cc: qemu-stable@nongnu.org
+Signed-off-by: Claudio Fontana <cfontana@suse.de>
+--- a/hw/pci/pci.c
++++ b/hw/pci/pci.c
+@@ -2640,15 +2640,15 @@ static char *pci_dev_fw_name(DeviceState *dev, char *buf, int len)
+ static char *pcibus_get_fw_dev_path(DeviceState *dev)
+ {
+ PCIDevice *d = (PCIDevice *)dev;
+- char path[50], name[33];
+- int off;
+-
+- off = snprintf(path, sizeof(path), "%s@%x",
+- pci_dev_fw_name(dev, name, sizeof name),
+- PCI_SLOT(d->devfn));
+- if (PCI_FUNC(d->devfn))
+- snprintf(path + off, sizeof(path) + off, ",%x", PCI_FUNC(d->devfn));
+- return g_strdup(path);
++ char name[33];
++ int has_func = !!PCI_FUNC(d->devfn);
++
++ return g_strdup_printf("%s@%x%s%.*x",
++ pci_dev_fw_name(dev, name, sizeof(name)),
++ PCI_SLOT(d->devfn),
++ has_func ? "," : "",
++ has_func,
++ PCI_FUNC(d->devfn));
+ }
+
+ static char *pcibus_get_dev_path(DeviceState *dev)
diff --git a/app-emulation/qemu/files/qemu-7.0.0-virtio-scsi-fixes.patch b/app-emulation/qemu/files/qemu-7.0.0-virtio-scsi-fixes.patch
new file mode 100644
index 000000000000..9ec6ede80896
--- /dev/null
+++ b/app-emulation/qemu/files/qemu-7.0.0-virtio-scsi-fixes.patch
@@ -0,0 +1,182 @@
+https://bugs.gentoo.org/849500
+https://gitlab.com/qemu-project/qemu/-/commit/2f743ef6366c2df4ef51ef3ae318138cdc0125ab.patch
+https://gitlab.com/qemu-project/qemu/-/commit/38738f7dbbda90fbc161757b7f4be35b52205552.patch
+
+From: Stefan Hajnoczi <stefanha@redhat.com>
+Date: Wed, 27 Apr 2022 15:35:36 +0100
+Subject: [PATCH] virtio-scsi: fix ctrl and event handler functions in
+ dataplane mode
+
+Commit f34e8d8b8d48d73f36a67b6d5e492ef9784b5012 ("virtio-scsi: prepare
+virtio_scsi_handle_cmd for dataplane") prepared the virtio-scsi cmd
+virtqueue handler function to be used in both the dataplane and
+non-dataplane code paths.
+
+It failed to convert the ctrl and event virtqueue handler functions,
+which are not designed to be called from the dataplane code path but
+will be since the ioeventfd is set up for those virtqueues when
+dataplane starts.
+
+Convert the ctrl and event virtqueue handler functions now so they
+operate correctly when called from the dataplane code path. Avoid code
+duplication by extracting this code into a helper function.
+
+Fixes: f34e8d8b8d48d73f36a67b6d5e492ef9784b5012 ("virtio-scsi: prepare virtio_scsi_handle_cmd for dataplane")
+Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
+Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
+Message-id: 20220427143541.119567-2-stefanha@redhat.com
+[Fixed s/by used/be used/ typo pointed out by Michael Tokarev
+<mjt@tls.msk.ru>.
+--Stefan]
+Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
+--- a/hw/scsi/virtio-scsi.c
++++ b/hw/scsi/virtio-scsi.c
+@@ -472,16 +472,32 @@ bool virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq)
+ return progress;
+ }
+
++/*
++ * If dataplane is configured but not yet started, do so now and return true on
++ * success.
++ *
++ * Dataplane is started by the core virtio code but virtqueue handler functions
++ * can also be invoked when a guest kicks before DRIVER_OK, so this helper
++ * function helps us deal with manually starting ioeventfd in that case.
++ */
++static bool virtio_scsi_defer_to_dataplane(VirtIOSCSI *s)
++{
++ if (!s->ctx || s->dataplane_started) {
++ return false;
++ }
++
++ virtio_device_start_ioeventfd(&s->parent_obj.parent_obj);
++ return !s->dataplane_fenced;
++}
++
+ static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
+ {
+ VirtIOSCSI *s = (VirtIOSCSI *)vdev;
+
+- if (s->ctx) {
+- virtio_device_start_ioeventfd(vdev);
+- if (!s->dataplane_fenced) {
+- return;
+- }
++ if (virtio_scsi_defer_to_dataplane(s)) {
++ return;
+ }
++
+ virtio_scsi_acquire(s);
+ virtio_scsi_handle_ctrl_vq(s, vq);
+ virtio_scsi_release(s);
+@@ -720,12 +736,10 @@ static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq)
+ /* use non-QOM casts in the data path */
+ VirtIOSCSI *s = (VirtIOSCSI *)vdev;
+
+- if (s->ctx && !s->dataplane_started) {
+- virtio_device_start_ioeventfd(vdev);
+- if (!s->dataplane_fenced) {
+- return;
+- }
++ if (virtio_scsi_defer_to_dataplane(s)) {
++ return;
+ }
++
+ virtio_scsi_acquire(s);
+ virtio_scsi_handle_cmd_vq(s, vq);
+ virtio_scsi_release(s);
+@@ -855,12 +869,10 @@ static void virtio_scsi_handle_event(VirtIODevice *vdev, VirtQueue *vq)
+ {
+ VirtIOSCSI *s = VIRTIO_SCSI(vdev);
+
+- if (s->ctx) {
+- virtio_device_start_ioeventfd(vdev);
+- if (!s->dataplane_fenced) {
+- return;
+- }
++ if (virtio_scsi_defer_to_dataplane(s)) {
++ return;
+ }
++
+ virtio_scsi_acquire(s);
+ virtio_scsi_handle_event_vq(s, vq);
+ virtio_scsi_release(s);
+GitLab
+
+From: Stefan Hajnoczi <stefanha@redhat.com>
+Date: Wed, 27 Apr 2022 15:35:37 +0100
+Subject: [PATCH] virtio-scsi: don't waste CPU polling the event virtqueue
+
+The virtio-scsi event virtqueue is not emptied by its handler function.
+This is typical for rx virtqueues where the device uses buffers when
+some event occurs (e.g. a packet is received, an error condition
+happens, etc).
+
+Polling non-empty virtqueues wastes CPU cycles. We are not waiting for
+new buffers to become available, we are waiting for an event to occur,
+so it's a misuse of CPU resources to poll for buffers.
+
+Introduce the new virtio_queue_aio_attach_host_notifier_no_poll() API,
+which is identical to virtio_queue_aio_attach_host_notifier() except
+that it does not poll the virtqueue.
+
+Before this patch the following command-line consumed 100% CPU in the
+IOThread polling and calling virtio_scsi_handle_event():
+
+ $ qemu-system-x86_64 -M accel=kvm -m 1G -cpu host \
+ --object iothread,id=iothread0 \
+ --device virtio-scsi-pci,iothread=iothread0 \
+ --blockdev file,filename=test.img,aio=native,cache.direct=on,node-name=drive0 \
+ --device scsi-hd,drive=drive0
+
+After this patch CPU is no longer wasted.
+
+Reported-by: Nir Soffer <nsoffer@redhat.com>
+Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
+Tested-by: Nir Soffer <nsoffer@redhat.com>
+Message-id: 20220427143541.119567-3-stefanha@redhat.com
+Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
+--- a/hw/scsi/virtio-scsi-dataplane.c
++++ b/hw/scsi/virtio-scsi-dataplane.c
+@@ -138,7 +138,7 @@ int virtio_scsi_dataplane_start(VirtIODevice *vdev)
+
+ aio_context_acquire(s->ctx);
+ virtio_queue_aio_attach_host_notifier(vs->ctrl_vq, s->ctx);
+- virtio_queue_aio_attach_host_notifier(vs->event_vq, s->ctx);
++ virtio_queue_aio_attach_host_notifier_no_poll(vs->event_vq, s->ctx);
+
+ for (i = 0; i < vs->conf.num_queues; i++) {
+ virtio_queue_aio_attach_host_notifier(vs->cmd_vqs[i], s->ctx);
+--- a/hw/virtio/virtio.c
++++ b/hw/virtio/virtio.c
+@@ -3534,6 +3534,19 @@ void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx)
+ virtio_queue_host_notifier_aio_poll_end);
+ }
+
++/*
++ * Same as virtio_queue_aio_attach_host_notifier() but without polling. Use
++ * this for rx virtqueues and similar cases where the virtqueue handler
++ * function does not pop all elements. When the virtqueue is left non-empty
++ * polling consumes CPU cycles and should not be used.
++ */
++void virtio_queue_aio_attach_host_notifier_no_poll(VirtQueue *vq, AioContext *ctx)
++{
++ aio_set_event_notifier(ctx, &vq->host_notifier, true,
++ virtio_queue_host_notifier_read,
++ NULL, NULL);
++}
++
+ void virtio_queue_aio_detach_host_notifier(VirtQueue *vq, AioContext *ctx)
+ {
+ aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL, NULL);
+--- a/include/hw/virtio/virtio.h
++++ b/include/hw/virtio/virtio.h
+@@ -317,6 +317,7 @@ EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq);
+ void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled);
+ void virtio_queue_host_notifier_read(EventNotifier *n);
+ void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx);
++void virtio_queue_aio_attach_host_notifier_no_poll(VirtQueue *vq, AioContext *ctx);
+ void virtio_queue_aio_detach_host_notifier(VirtQueue *vq, AioContext *ctx);
+ VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector);
+ VirtQueue *virtio_vector_next_queue(VirtQueue *vq);
+GitLab