From feb93f361739071778ca2d23df3876db399548f7 Mon Sep 17 00:00:00 2001
From: Jason Wang
Date: Fri, 17 Jul 2015 15:19:18 +0800
Subject: virtio-net: unbreak any layout

Commit 032a74a1c0fcdd5fd1c69e56126b4c857ee36611 ("virtio-net: byteswap
virtio-net header") breaks any layout by requiring
out_sg[0].iov_len >= n->guest_hdr_len.

Fix this by copying the header to a temporary buffer if a swap is needed,
and then using this buffer as the first part of out_sg.

Fixes: 032a74a1c0fcdd5fd1c69e56126b4c857ee36611 ("virtio-net: byteswap virtio-net header")
Cc: qemu-stable@nongnu.org
Cc: clg@fr.ibm.com
Signed-off-by: Jason Wang
Reviewed-by: Michael S. Tsirkin
Signed-off-by: Michael S. Tsirkin
Reviewed-by: Eric Blake
---
 hw/net/virtio-net.c | 23 ++++++++++++++++++-----
 1 file changed, 18 insertions(+), 5 deletions(-)

(limited to 'hw/net/virtio-net.c')

diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index e3c2db3250..9f7e91ddce 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -1142,7 +1142,8 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
         ssize_t ret, len;
         unsigned int out_num = elem.out_num;
         struct iovec *out_sg = &elem.out_sg[0];
-        struct iovec sg[VIRTQUEUE_MAX_SIZE];
+        struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1];
+        struct virtio_net_hdr_mrg_rxbuf mhdr;
 
         if (out_num < 1) {
             error_report("virtio-net header not in first element");
@@ -1150,13 +1151,25 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
         }
 
         if (n->has_vnet_hdr) {
-            if (out_sg[0].iov_len < n->guest_hdr_len) {
+            if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) <
+                n->guest_hdr_len) {
                 error_report("virtio-net header incorrect");
                 exit(1);
             }
-            virtio_net_hdr_swap(vdev, (void *) out_sg[0].iov_base);
+            if (virtio_needs_swap(vdev)) {
+                virtio_net_hdr_swap(vdev, (void *) &mhdr);
+                sg2[0].iov_base = &mhdr;
+                sg2[0].iov_len = n->guest_hdr_len;
+                out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1,
+                                   out_sg, out_num,
+                                   n->guest_hdr_len, -1);
+                if (out_num == VIRTQUEUE_MAX_SIZE) {
+                    goto drop;
+                }
+                out_num += 1;
+                out_sg = sg2;
+            }
         }
-
         /*
          * If host wants to see the guest header as is, we can
          * pass it on unchanged. Otherwise, copy just the parts
@@ -1186,7 +1199,7 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
         }
 
         len += ret;
-
+drop:
         virtqueue_push(q->tx_vq, &elem, 0);
         virtio_notify(vdev, q->tx_vq);
--
cgit 1.4.1
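The fix above boils down to an iovec splicing pattern: copy the (possibly fragmented) header out of the guest's scatter-gather list, byteswap the copy, then build a new list whose first element is the swapped copy followed by the remainder of the original elements. The standalone sketch below only illustrates that pattern; iov_to_buf_simple() and iov_copy_skip() are simplified stand-ins written for this example, not QEMU's iov_to_buf()/iov_copy(), and the "swap" is faked by reversing bytes.

/* Standalone sketch of "copy the header out, prepend a swapped copy".
 * The helpers are simplified stand-ins written for this example only. */
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

/* Copy up to 'bytes' bytes from the start of an iovec array into 'buf'. */
static size_t iov_to_buf_simple(const struct iovec *iov, unsigned iov_cnt,
                                void *buf, size_t bytes)
{
    size_t done = 0;
    for (unsigned i = 0; i < iov_cnt && done < bytes; i++) {
        size_t n = iov[i].iov_len;
        if (n > bytes - done) {
            n = bytes - done;
        }
        memcpy((char *)buf + done, iov[i].iov_base, n);
        done += n;
    }
    return done;
}

/* Build 'dst' from 'src' while skipping the first 'skip' bytes; returns the
 * number of destination elements actually used. */
static unsigned iov_copy_skip(struct iovec *dst, unsigned dst_cnt,
                              const struct iovec *src, unsigned src_cnt,
                              size_t skip)
{
    unsigned out = 0;
    for (unsigned i = 0; i < src_cnt && out < dst_cnt; i++) {
        if (skip >= src[i].iov_len) {
            skip -= src[i].iov_len;
            continue;
        }
        dst[out].iov_base = (char *)src[i].iov_base + skip;
        dst[out].iov_len = src[i].iov_len - skip;
        skip = 0;
        out++;
    }
    return out;
}

int main(void)
{
    /* A 4-byte "header" split across two elements, followed by payload. */
    char h0[2] = { 0x12, 0x34 }, h1[2] = { 0x56, 0x78 }, payload[] = "data";
    struct iovec out_sg[3] = {
        { h0, sizeof(h0) }, { h1, sizeof(h1) }, { payload, sizeof(payload) }
    };
    char hdr[4];
    struct iovec sg2[4];

    /* 1. Copy the header out, even though it straddles iovec elements. */
    if (iov_to_buf_simple(out_sg, 3, hdr, sizeof(hdr)) < sizeof(hdr)) {
        return 1;
    }
    /* 2. "Byteswap" the local copy (reversed here just for the demo). */
    for (int i = 0; i < 2; i++) {
        char t = hdr[i]; hdr[i] = hdr[3 - i]; hdr[3 - i] = t;
    }
    /* 3. Prepend the swapped copy and skip the original header bytes. */
    sg2[0].iov_base = hdr;
    sg2[0].iov_len = sizeof(hdr);
    unsigned n = iov_copy_skip(&sg2[1], 3, out_sg, 3, sizeof(hdr)) + 1;

    printf("new sg has %u elements, payload starts with '%c'\n",
           n, *(char *)sg2[1].iov_base);
    return 0;
}

Keeping the swapped copy in a local buffer (mhdr in the patch) means the guest-visible memory is never modified, which is what makes the fix safe for any descriptor layout.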
From 38705bb57bf1cd9e3f837cf11bcdee3876786c07 Mon Sep 17 00:00:00 2001
From: Fam Zheng
Date: Wed, 15 Jul 2015 11:02:27 +0800
Subject: virtio-net: Flush incoming queues when DRIVER_OK is being set

This patch fixes a network hang after "stop" then "cont", while network
packets keep arriving. Tested both manually (tap, host pinging guest) and
with Jason's qtest series (plus his "[PATCH 2.4] socket: pass correct size
in net_socket_send()" fix).

As virtio_net_set_status is called when the guest driver is setting the
status byte and when the vm state is changing, it is a good opportunity to
flush queued packets. This is necessary because during vm stop the backend
(e.g. tap) would stop rx processing after .can_receive returns false, until
the queue is explicitly flushed or purged.

The other interesting condition in .can_receive, virtio_queue_ready(), is
handled by virtio_net_handle_rx() when the guest kicks; the third condition
is an invalid queue index, which doesn't need flushing.

Signed-off-by: Fam Zheng
Reviewed-by: Michael S. Tsirkin
Signed-off-by: Michael S. Tsirkin
---
 hw/net/virtio-net.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

(limited to 'hw/net/virtio-net.c')

diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 9f7e91ddce..e1d9cbf722 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -162,6 +162,8 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
     virtio_net_vhost_status(n, status);
 
     for (i = 0; i < n->max_queues; i++) {
+        NetClientState *ncs = qemu_get_subqueue(n->nic, i);
+        bool queue_started;
         q = &n->vqs[i];
 
         if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
@@ -169,12 +171,18 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
         } else {
             queue_status = status;
         }
+        queue_started =
+            virtio_net_started(n, queue_status) && !n->vhost_started;
+
+        if (queue_started) {
+            qemu_flush_queued_packets(ncs);
+        }
 
         if (!q->tx_waiting) {
             continue;
         }
 
-        if (virtio_net_started(n, queue_status) && !n->vhost_started) {
+        if (queue_started) {
             if (q->tx_timer) {
                 timer_mod(q->tx_timer,
                                qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
--
cgit 1.4.1
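The hang described above comes from the queue-and-flush contract of the net layer: once .can_receive returns false, the backend parks incoming packets and will not retry until someone explicitly flushes the queue. The toy model below is only meant to show that contract; struct toy_queue, toy_send() and toy_set_running() are invented names for the illustration, not QEMU APIs.

/* Toy model of "packets queue up while rx is stopped and must be flushed
 * explicitly when it starts again".  All names are invented for the example. */
#include <stdbool.h>
#include <stdio.h>

#define BACKLOG_MAX 16

struct toy_queue {
    bool can_receive;               /* mirrors .can_receive returning true  */
    int backlog[BACKLOG_MAX];       /* packets the backend had to hold back */
    int backlog_len;
    long delivered;
};

/* Deliver directly if possible, otherwise park the packet in the backlog
 * (this is what the net layer does when .can_receive returns false). */
static void toy_send(struct toy_queue *q, int pkt)
{
    if (q->can_receive) {
        q->delivered++;
    } else if (q->backlog_len < BACKLOG_MAX) {
        q->backlog[q->backlog_len++] = pkt;
    } /* else: dropped */
}

/* Status change: when the queue starts, flush the backlog, or rx stays stuck. */
static void toy_set_running(struct toy_queue *q, bool running)
{
    q->can_receive = running;
    if (running) {
        for (int i = 0; i < q->backlog_len; i++) {
            q->delivered++;          /* qemu_flush_queued_packets() analogue */
        }
        q->backlog_len = 0;
    }
}

int main(void)
{
    struct toy_queue q = { .can_receive = true };

    toy_send(&q, 1);                 /* delivered immediately                */
    toy_set_running(&q, false);      /* "stop": rx processing pauses         */
    toy_send(&q, 2);                 /* parked in the backlog                */
    toy_send(&q, 3);
    toy_set_running(&q, true);       /* "cont": without this flush, packets  */
                                     /* 2 and 3 would never be delivered     */
    printf("delivered %ld packets, backlog %d\n", q.delivered, q.backlog_len);
    return 0;
}

In the patch, qemu_flush_queued_packets() plays the role of the flush in toy_set_running(), and virtio_net_set_status() is a convenient single place to call it because it runs both when the guest sets DRIVER_OK and when the vm state changes.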
From f9d6dbf0bf6e91b8ed896369ab1b7e91e5a1a4df Mon Sep 17 00:00:00 2001
From: Wen Congyang
Date: Wed, 15 Jul 2015 17:20:59 +0800
Subject: virtio-net: remove virtio queues if the guest doesn't support multiqueue

Commit da51a335 adds all queues in .realize(), but if the guest doesn't
support multiqueue, we forget to remove them. As a result we cannot handle
the ctrl vq correctly, and the guest hangs.

Signed-off-by: Wen Congyang
Reviewed-by: Michael S. Tsirkin
Signed-off-by: Michael S. Tsirkin
Acked-by: Jason Wang
---
 hw/net/virtio-net.c | 110 +++++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 82 insertions(+), 28 deletions(-)

(limited to 'hw/net/virtio-net.c')

diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index e1d9cbf722..304d3ddde1 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -1327,9 +1327,86 @@ static void virtio_net_tx_bh(void *opaque)
     }
 }
 
+static void virtio_net_add_queue(VirtIONet *n, int index)
+{
+    VirtIODevice *vdev = VIRTIO_DEVICE(n);
+
+    n->vqs[index].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
+    if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
+        n->vqs[index].tx_vq =
+            virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
+        n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+                                              virtio_net_tx_timer,
+                                              &n->vqs[index]);
+    } else {
+        n->vqs[index].tx_vq =
+            virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
+        n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
+    }
+
+    n->vqs[index].tx_waiting = 0;
+    n->vqs[index].n = n;
+}
+
+static void virtio_net_del_queue(VirtIONet *n, int index)
+{
+    VirtIODevice *vdev = VIRTIO_DEVICE(n);
+    VirtIONetQueue *q = &n->vqs[index];
+    NetClientState *nc = qemu_get_subqueue(n->nic, index);
+
+    qemu_purge_queued_packets(nc);
+
+    virtio_del_queue(vdev, index * 2);
+    if (q->tx_timer) {
+        timer_del(q->tx_timer);
+        timer_free(q->tx_timer);
+    } else {
+        qemu_bh_delete(q->tx_bh);
+    }
+    virtio_del_queue(vdev, index * 2 + 1);
+}
+
+static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues)
+{
+    VirtIODevice *vdev = VIRTIO_DEVICE(n);
+    int old_num_queues = virtio_get_num_queues(vdev);
+    int new_num_queues = new_max_queues * 2 + 1;
+    int i;
+
+    assert(old_num_queues >= 3);
+    assert(old_num_queues % 2 == 1);
+
+    if (old_num_queues == new_num_queues) {
+        return;
+    }
+
+    /*
+     * We always need to remove and add ctrl vq if
+     * old_num_queues != new_num_queues. Remove ctrl_vq first,
+     * and then we only enter one of the following two loops.
+     */
+    virtio_del_queue(vdev, old_num_queues - 1);
+
+    for (i = new_num_queues - 1; i < old_num_queues - 1; i += 2) {
+        /* new_num_queues < old_num_queues */
+        virtio_net_del_queue(n, i / 2);
+    }
+
+    for (i = old_num_queues - 1; i < new_num_queues - 1; i += 2) {
+        /* new_num_queues > old_num_queues */
+        virtio_net_add_queue(n, i / 2);
+    }
+
+    /* add ctrl_vq last */
+    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
+}
+
 static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
 {
+    int max = multiqueue ? n->max_queues : 1;
+
     n->multiqueue = multiqueue;
+    virtio_net_change_num_queues(n, max);
 
     virtio_net_set_queues(n);
 }
@@ -1604,21 +1681,7 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
     }
 
     for (i = 0; i < n->max_queues; i++) {
-        n->vqs[i].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
-        if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
-            n->vqs[i].tx_vq =
-                virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
-            n->vqs[i].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
-                                              virtio_net_tx_timer,
-                                              &n->vqs[i]);
-        } else {
-            n->vqs[i].tx_vq =
-                virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
-            n->vqs[i].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[i]);
-        }
-
-        n->vqs[i].tx_waiting = 0;
-        n->vqs[i].n = n;
+        virtio_net_add_queue(n, i);
     }
 
     n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
@@ -1672,7 +1735,7 @@ static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
 {
     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
     VirtIONet *n = VIRTIO_NET(dev);
-    int i;
+    int i, max_queues;
 
     /* This will stop vhost backend if appropriate. */
     virtio_net_set_status(vdev, 0);
@@ -1687,18 +1750,9 @@ static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
     g_free(n->mac_table.macs);
     g_free(n->vlans);
 
-    for (i = 0; i < n->max_queues; i++) {
-        VirtIONetQueue *q = &n->vqs[i];
-        NetClientState *nc = qemu_get_subqueue(n->nic, i);
-
-        qemu_purge_queued_packets(nc);
-
-        if (q->tx_timer) {
-            timer_del(q->tx_timer);
-            timer_free(q->tx_timer);
-        } else if (q->tx_bh) {
-            qemu_bh_delete(q->tx_bh);
-        }
+    max_queues = n->multiqueue ? n->max_queues : 1;
+    for (i = 0; i < max_queues; i++) {
+        virtio_net_del_queue(n, i);
     }
 
     timer_del(n->announce_timer);
--
cgit 1.4.1
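The refactoring above concentrates the virtqueue index arithmetic in one place: queue pair i owns vq indices 2*i (rx) and 2*i + 1 (tx), the ctrl vq is always the last index, and changing the number of pairs removes the ctrl vq first, runs exactly one of the two loops, then re-adds the ctrl vq at its new position. The standalone sketch below simply replays that arithmetic with printf; the driver in main() is invented for the example and is not QEMU code.

/* Standalone sketch of the queue-index arithmetic used by
 * virtio_net_change_num_queues(): pair i owns vq indices 2*i (rx) and
 * 2*i + 1 (tx), and the ctrl vq is always the last index.  The printing
 * driver is invented for this example. */
#include <stdio.h>
#include <assert.h>

static void change_num_queues(int old_max_queues, int new_max_queues)
{
    int old_num_queues = old_max_queues * 2 + 1;   /* pairs + ctrl vq */
    int new_num_queues = new_max_queues * 2 + 1;

    assert(old_num_queues >= 3 && old_num_queues % 2 == 1);
    if (old_num_queues == new_num_queues) {
        return;
    }

    /* The ctrl vq sits at the highest index, so it is removed first ... */
    printf("del ctrl vq at index %d\n", old_num_queues - 1);

    /* ... then exactly one of these two loops runs ... */
    for (int i = new_num_queues - 1; i < old_num_queues - 1; i += 2) {
        printf("del pair %d (vq %d rx, vq %d tx)\n", i / 2, i, i + 1);
    }
    for (int i = old_num_queues - 1; i < new_num_queues - 1; i += 2) {
        printf("add pair %d (vq %d rx, vq %d tx)\n", i / 2, i, i + 1);
    }

    /* ... and the ctrl vq is re-added at the new last index. */
    printf("add ctrl vq at index %d\n", new_num_queues - 1);
}

int main(void)
{
    printf("shrink 4 -> 1:\n");
    change_num_queues(4, 1);
    printf("grow 1 -> 2:\n");
    change_num_queues(1, 2);
    return 0;
}

Because the ctrl vq sits behind all the queue pairs, any change in the number of pairs moves its index, which is why the patch always removes it first and re-adds it last.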