Diffstat (limited to 'hw')
 hw/file-op-9p.h      |   2
 hw/pc_piix.c         |  20
 hw/pci.c             |  24
 hw/pci.h             |   7
 hw/pcie.c            |   8
 hw/pcie_aer.c        | 111
 hw/virtio-9p-debug.c |   4
 hw/virtio-9p-local.c |  12
 hw/virtio-9p-xattr.c |   3
 hw/virtio-9p.c       |  11
 hw/virtio-net.c      |  69
 11 files changed, 159 insertions(+), 112 deletions(-)
diff --git a/hw/file-op-9p.h b/hw/file-op-9p.h
index 21d60b5855..c7731c2993 100644
--- a/hw/file-op-9p.h
+++ b/hw/file-op-9p.h
@@ -86,7 +86,7 @@ typedef struct FileOperations
     int (*fstat)(FsContext *, int, struct stat *);
     int (*rename)(FsContext *, const char *, const char *);
     int (*truncate)(FsContext *, const char *, off_t);
-    int (*fsync)(FsContext *, int);
+    int (*fsync)(FsContext *, int, int);
     int (*statfs)(FsContext *s, const char *path, struct statfs *stbuf);
     ssize_t (*lgetxattr)(FsContext *, const char *, const char *,
                          void *, size_t);
diff --git a/hw/pc_piix.c b/hw/pc_piix.c
index 7d29d43190..a2fb554aa2 100644
--- a/hw/pc_piix.c
+++ b/hw/pc_piix.c
@@ -217,6 +217,14 @@ static QEMUMachine pc_machine = {
     .desc = "Standard PC",
     .init = pc_init_pci,
     .max_cpus = 255,
+    .compat_props = (GlobalProperty[]) {
+        {
+            .driver   = "PCI",
+            .property = "command_serr_enable",
+            .value    = "off",
+        },
+        { /* end of list */ }
+    },
     .is_default = 1,
 };
@@ -265,6 +273,10 @@ static QEMUMachine pc_machine_v0_12 = {
             .driver   = "vmware-svga",
             .property = "rombar",
             .value    = stringify(0),
+        },{
+            .driver   = "PCI",
+            .property = "command_serr_enable",
+            .value    = "off",
         },
         { /* end of list */ }
     }
@@ -300,6 +312,10 @@ static QEMUMachine pc_machine_v0_11 = {
             .driver   = "PCI",
             .property = "rombar",
             .value    = stringify(0),
+        },{
+            .driver   = "PCI",
+            .property = "command_serr_enable",
+            .value    = "off",
         },
         { /* end of list */ }
     }
@@ -347,6 +363,10 @@ static QEMUMachine pc_machine_v0_10 = {
             .driver   = "PCI",
             .property = "rombar",
             .value    = stringify(0),
+        },{
+            .driver   = "PCI",
+            .property = "command_serr_enable",
+            .value    = "off",
         },
         { /* end of list */ }
     },
diff --git a/hw/pci.c b/hw/pci.c
index 24e650a442..ef00d20d5f 100644
--- a/hw/pci.c
+++ b/hw/pci.c
@@ -25,8 +25,6 @@
 #include "pci.h"
 #include "pci_bridge.h"
 #include "pci_internals.h"
-#include "msix.h"
-#include "msi.h"
 #include "monitor.h"
 #include "net.h"
 #include "sysemu.h"
@@ -59,6 +57,8 @@ struct BusInfo pci_bus_info = {
         DEFINE_PROP_UINT32("rombar", PCIDevice, rom_bar, 1),
         DEFINE_PROP_BIT("multifunction", PCIDevice, cap_present,
                         QEMU_PCI_CAP_MULTIFUNCTION_BITNR, false),
+        DEFINE_PROP_BIT("command_serr_enable", PCIDevice, cap_present,
+                        QEMU_PCI_CAP_SERR_BITNR, true),
         DEFINE_PROP_END_OF_LIST()
     }
 };
@@ -570,6 +570,9 @@ static void pci_init_wmask(PCIDevice *dev)
     pci_set_word(dev->wmask + PCI_COMMAND,
                  PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
                  PCI_COMMAND_INTX_DISABLE);
+    if (dev->cap_present & QEMU_PCI_CAP_SERR) {
+        pci_word_test_and_set_mask(dev->wmask + PCI_COMMAND, PCI_COMMAND_SERR);
+    }
 
     memset(dev->wmask + PCI_CONFIG_HEADER_SIZE, 0xff,
            config_size - PCI_CONFIG_HEADER_SIZE);
@@ -1096,23 +1099,6 @@ static void pci_set_irq(void *opaque, int irq_num, int level)
     pci_change_irq_level(pci_dev, irq_num, change);
 }
 
-bool pci_msi_enabled(PCIDevice *dev)
-{
-    return msix_enabled(dev) || msi_enabled(dev);
-}
-
-void pci_msi_notify(PCIDevice *dev, unsigned int vector)
-{
-    if (msix_enabled(dev)) {
-        msix_notify(dev, vector);
-    } else if (msi_enabled(dev)) {
-        msi_notify(dev, vector);
-    } else {
-        /* MSI/MSI-X must be enabled */
-        abort();
-    }
-}
-
 /***********************************************************/
 /* monitor info on PCI */
diff --git a/hw/pci.h b/hw/pci.h
index 89f7b761e7..aa3afe9684 100644
--- a/hw/pci.h
+++ b/hw/pci.h
@@ -118,6 +118,10 @@ enum {
     /* multifunction capable device */
 #define QEMU_PCI_CAP_MULTIFUNCTION_BITNR 3
     QEMU_PCI_CAP_MULTIFUNCTION = (1 << QEMU_PCI_CAP_MULTIFUNCTION_BITNR),
+
+    /* command register SERR bit enabled */
+#define QEMU_PCI_CAP_SERR_BITNR 4
+    QEMU_PCI_CAP_SERR = (1 << QEMU_PCI_CAP_SERR_BITNR),
 };
 
 struct PCIDevice {
@@ -257,9 +261,6 @@ void do_pci_info_print(Monitor *mon, const QObject *data);
 void do_pci_info(Monitor *mon, QObject **ret_data);
 void pci_bridge_update_mappings(PCIBus *b);
 
-bool pci_msi_enabled(PCIDevice *dev);
-void pci_msi_notify(PCIDevice *dev, unsigned int vector);
-
 static inline void
 pci_set_byte(uint8_t *config, uint8_t val)
 {
diff --git a/hw/pcie.c b/hw/pcie.c
index f461c1cfbe..d1f0086559 100644
--- a/hw/pcie.c
+++ b/hw/pcie.c
@@ -167,10 +167,12 @@ static void hotplug_event_notify(PCIDevice *dev)
      * The Port may optionally send an MSI when there are hot-plug events that
      * occur while interrupt generation is disabled, and interrupt generation is
      * subsequently enabled. */
-    if (!pci_msi_enabled(dev)) {
+    if (msix_enabled(dev)) {
+        msix_notify(dev, pcie_cap_flags_get_vector(dev));
+    } else if (msi_enabled(dev)) {
+        msi_notify(dev, pcie_cap_flags_get_vector(dev));
+    } else {
         qemu_set_irq(dev->irq[dev->exp.hpev_intx], dev->exp.hpev_notified);
-    } else if (dev->exp.hpev_notified) {
-        pci_msi_notify(dev, pcie_cap_flags_get_vector(dev));
     }
 }
diff --git a/hw/pcie_aer.c b/hw/pcie_aer.c
index 47d64003fc..cb97a95d61 100644
--- a/hw/pcie_aer.c
+++ b/hw/pcie_aer.c
@@ -257,30 +257,49 @@ static unsigned int pcie_aer_root_get_vector(PCIDevice *dev)
     return (root_status & PCI_ERR_ROOT_IRQ) >> PCI_ERR_ROOT_IRQ_SHIFT;
 }
 
+/* Given a status register, get corresponding bits in the command register */
+static uint32_t pcie_aer_status_to_cmd(uint32_t status)
+{
+    uint32_t cmd = 0;
+    if (status & PCI_ERR_ROOT_COR_RCV) {
+        cmd |= PCI_ERR_ROOT_CMD_COR_EN;
+    }
+    if (status & PCI_ERR_ROOT_NONFATAL_RCV) {
+        cmd |= PCI_ERR_ROOT_CMD_NONFATAL_EN;
+    }
+    if (status & PCI_ERR_ROOT_FATAL_RCV) {
+        cmd |= PCI_ERR_ROOT_CMD_FATAL_EN;
+    }
+    return cmd;
+}
+
+static void pcie_aer_root_notify(PCIDevice *dev)
+{
+    if (msix_enabled(dev)) {
+        msix_notify(dev, pcie_aer_root_get_vector(dev));
+    } else if (msi_enabled(dev)) {
+        msi_notify(dev, pcie_aer_root_get_vector(dev));
+    } else {
+        qemu_set_irq(dev->irq[dev->exp.aer_intx], 1);
+    }
+}
+
 /*
- * return value:
- *      true: error message is sent up
- *      false: error message is masked
- *
  * 6.2.6 Error Message Control
  * Figure 6-3
  * root port part
  */
-static bool pcie_aer_msg_root_port(PCIDevice *dev, const PCIEAERMsg *msg)
+static void pcie_aer_msg_root_port(PCIDevice *dev, const PCIEAERMsg *msg)
 {
-    bool msg_sent;
     uint16_t cmd;
     uint8_t *aer_cap;
     uint32_t root_cmd;
-    uint32_t root_status;
-    bool msi_trigger;
+    uint32_t root_status, prev_status;
 
-    msg_sent = false;
     cmd = pci_get_word(dev->config + PCI_COMMAND);
     aer_cap = dev->config + dev->exp.aer_cap;
     root_cmd = pci_get_long(aer_cap + PCI_ERR_ROOT_COMMAND);
-    root_status = pci_get_long(aer_cap + PCI_ERR_ROOT_STATUS);
-    msi_trigger = false;
+    prev_status = root_status = pci_get_long(aer_cap + PCI_ERR_ROOT_STATUS);
 
     if (cmd & PCI_COMMAND_SERR) {
         /* System Error.
@@ -299,25 +318,14 @@ static bool pcie_aer_msg_root_port(PCIDevice *dev, const PCIEAERMsg *msg)
         if (root_status & PCI_ERR_ROOT_COR_RCV) {
             root_status |= PCI_ERR_ROOT_MULTI_COR_RCV;
         } else {
-            if (root_cmd & PCI_ERR_ROOT_CMD_COR_EN) {
-                msi_trigger = true;
-            }
             pci_set_word(aer_cap + PCI_ERR_ROOT_COR_SRC, msg->source_id);
         }
         root_status |= PCI_ERR_ROOT_COR_RCV;
         break;
     case PCI_ERR_ROOT_CMD_NONFATAL_EN:
-        if (!(root_status & PCI_ERR_ROOT_NONFATAL_RCV) &&
-            root_cmd & PCI_ERR_ROOT_CMD_NONFATAL_EN) {
-            msi_trigger = true;
-        }
         root_status |= PCI_ERR_ROOT_NONFATAL_RCV;
         break;
     case PCI_ERR_ROOT_CMD_FATAL_EN:
-        if (!(root_status & PCI_ERR_ROOT_FATAL_RCV) &&
-            root_cmd & PCI_ERR_ROOT_CMD_FATAL_EN) {
-            msi_trigger = true;
-        }
         if (!(root_status & PCI_ERR_ROOT_UNCOR_RCV)) {
             root_status |= PCI_ERR_ROOT_FIRST_FATAL;
         }
@@ -337,18 +345,17 @@ static bool pcie_aer_msg_root_port(PCIDevice *dev, const PCIEAERMsg *msg)
     }
     pci_set_long(aer_cap + PCI_ERR_ROOT_STATUS, root_status);
 
-    if (root_cmd & msg->severity) {
-        /* 6.2.4.1.2 Interrupt Generation */
-        if (pci_msi_enabled(dev)) {
-            if (msi_trigger) {
-                pci_msi_notify(dev, pcie_aer_root_get_vector(dev));
-            }
-        } else {
-            qemu_set_irq(dev->irq[dev->exp.aer_intx], 1);
-        }
-        msg_sent = true;
+    /* 6.2.4.1.2 Interrupt Generation */
+    /* All the above did was set some bits in the status register.
+     * Specifically these that match message severity.
+     * The below code relies on this fact. */
+    if (!(root_cmd & msg->severity) ||
+        (pcie_aer_status_to_cmd(prev_status) & root_cmd)) {
+        /* Condition is not being set or was already true so nothing to do. */
+        return;
     }
-    return msg_sent;
+
+    pcie_aer_root_notify(dev);
 }
 
 /*
@@ -739,40 +746,26 @@ void pcie_aer_root_reset(PCIDevice *dev)
      */
 }
 
-static bool pcie_aer_root_does_trigger(uint32_t cmd, uint32_t status)
-{
-    return
-        ((cmd & PCI_ERR_ROOT_CMD_COR_EN) && (status & PCI_ERR_ROOT_COR_RCV)) ||
-        ((cmd & PCI_ERR_ROOT_CMD_NONFATAL_EN) &&
-         (status & PCI_ERR_ROOT_NONFATAL_RCV)) ||
-        ((cmd & PCI_ERR_ROOT_CMD_FATAL_EN) &&
-         (status & PCI_ERR_ROOT_FATAL_RCV));
-}
-
 void pcie_aer_root_write_config(PCIDevice *dev,
                                 uint32_t addr, uint32_t val, int len,
                                 uint32_t root_cmd_prev)
 {
     uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
-
-    /* root command register */
+    uint32_t root_status = pci_get_long(aer_cap + PCI_ERR_ROOT_STATUS);
+    uint32_t enabled_cmd = pcie_aer_status_to_cmd(root_status);
     uint32_t root_cmd = pci_get_long(aer_cap + PCI_ERR_ROOT_COMMAND);
-    if (root_cmd & PCI_ERR_ROOT_CMD_EN_MASK) {
-        /* 6.2.4.1.2 Interrupt Generation */
-
-        /* 0 -> 1 */
-        uint32_t root_cmd_set = (root_cmd_prev ^ root_cmd) & root_cmd;
-        uint32_t root_status = pci_get_long(aer_cap + PCI_ERR_ROOT_STATUS);
+    /* 6.2.4.1.2 Interrupt Generation */
+    if (!msix_enabled(dev) && !msi_enabled(dev)) {
+        qemu_set_irq(dev->irq[dev->exp.aer_intx], !!(root_cmd & enabled_cmd));
+        return;
+    }
 
-        if (pci_msi_enabled(dev)) {
-            if (pcie_aer_root_does_trigger(root_cmd_set, root_status)) {
-                pci_msi_notify(dev, pcie_aer_root_get_vector(dev));
-            }
-        } else {
-            int int_level = pcie_aer_root_does_trigger(root_cmd, root_status);
-            qemu_set_irq(dev->irq[dev->exp.aer_intx], int_level);
-        }
+    if ((root_cmd_prev & enabled_cmd) || !(root_cmd & enabled_cmd)) {
+        /* Send MSI on transition from false to true. */
+        return;
     }
+
+    pcie_aer_root_notify(dev);
 }
 
 static const VMStateDescription vmstate_pcie_aer_err = {
diff --git a/hw/virtio-9p-debug.c b/hw/virtio-9p-debug.c
index cff5b07297..6b18842fd4 100644
--- a/hw/virtio-9p-debug.c
+++ b/hw/virtio-9p-debug.c
@@ -552,8 +552,8 @@ void pprint_pdu(V9fsPDU *pdu)
         break;
     case P9_TLINK:
         fprintf(llogfile, "TLINK: (");
-        pprint_int32(pdu, 0, &offset, "fid");
-        pprint_str(pdu, 0, &offset, ", oldpath");
+        pprint_int32(pdu, 0, &offset, "dfid");
+        pprint_int32(pdu, 0, &offset, ", fid");
         pprint_str(pdu, 0, &offset, ", newpath");
         break;
     case P9_RLINK:
diff --git a/hw/virtio-9p-local.c b/hw/virtio-9p-local.c
index 0d520201b4..a8e7525bf6 100644
--- a/hw/virtio-9p-local.c
+++ b/hw/virtio-9p-local.c
@@ -480,9 +480,9 @@ static int local_chown(FsContext *fs_ctx, const char *path, FsCred *credp)
 }
 
 static int local_utimensat(FsContext *s, const char *path,
-		       const struct timespec *buf)
+                           const struct timespec *buf)
 {
-    return utimensat(AT_FDCWD, rpath(s, path), buf, AT_SYMLINK_NOFOLLOW);
+    return qemu_utimensat(AT_FDCWD, rpath(s, path), buf, AT_SYMLINK_NOFOLLOW);
 }
 
 static int local_remove(FsContext *ctx, const char *path)
@@ -490,9 +490,13 @@ static int local_remove(FsContext *ctx, const char *path)
     return remove(rpath(ctx, path));
 }
 
-static int local_fsync(FsContext *ctx, int fd)
+static int local_fsync(FsContext *ctx, int fd, int datasync)
 {
-    return fsync(fd);
+    if (datasync) {
+        return qemu_fdatasync(fd);
+    } else {
+        return fsync(fd);
+    }
 }
 
 static int local_statfs(FsContext *s, const char *path, struct statfs *stbuf)
diff --git a/hw/virtio-9p-xattr.c b/hw/virtio-9p-xattr.c
index 175f372c39..1aab081de2 100644
--- a/hw/virtio-9p-xattr.c
+++ b/hw/virtio-9p-xattr.c
@@ -73,6 +73,9 @@ ssize_t v9fs_list_xattr(FsContext *ctx, const char *path,
 
     /* Get the actual len */
     xattr_len = llistxattr(rpath(ctx, path), value, 0);
+    if (xattr_len <= 0) {
+        return xattr_len;
+    }
 
     /* Now fetch the xattr and find the actual size */
     orig_value = qemu_malloc(xattr_len);
diff --git a/hw/virtio-9p.c b/hw/virtio-9p.c
index daade77ed9..7c59988a51 100644
--- a/hw/virtio-9p.c
+++ b/hw/virtio-9p.c
@@ -248,9 +248,9 @@ static int v9fs_do_remove(V9fsState *s, V9fsString *path)
     return s->ops->remove(&s->ctx, path->data);
 }
 
-static int v9fs_do_fsync(V9fsState *s, int fd)
+static int v9fs_do_fsync(V9fsState *s, int fd, int datasync)
 {
-    return s->ops->fsync(&s->ctx, fd);
+    return s->ops->fsync(&s->ctx, fd, datasync);
 }
 
 static int v9fs_do_statfs(V9fsState *s, V9fsString *path, struct statfs *stbuf)
@@ -1868,16 +1868,17 @@ static void v9fs_fsync(V9fsState *s, V9fsPDU *pdu)
     int32_t fid;
     size_t offset = 7;
     V9fsFidState *fidp;
+    int datasync;
     int err;
 
-    pdu_unmarshal(pdu, offset, "d", &fid);
+    pdu_unmarshal(pdu, offset, "dd", &fid, &datasync);
     fidp = lookup_fid(s, fid);
     if (fidp == NULL) {
         err = -ENOENT;
         v9fs_post_do_fsync(s, pdu, err);
         return;
     }
-    err = v9fs_do_fsync(s, fidp->fs.fd);
+    err = v9fs_do_fsync(s, fidp->fs.fd, datasync);
     v9fs_post_do_fsync(s, pdu, err);
 }
 
@@ -3001,7 +3002,7 @@ static void v9fs_wstat(V9fsState *s, V9fsPDU *pdu)
 
     /* do we need to sync the file? */
     if (donttouch_stat(&vs->v9stat)) {
-        err = v9fs_do_fsync(s, vs->fidp->fs.fd);
+        err = v9fs_do_fsync(s, vs->fidp->fs.fd, 0);
         v9fs_wstat_post_fsync(s, vs, err);
         return;
     }
diff --git a/hw/virtio-net.c b/hw/virtio-net.c
index 3472f6b28d..ec1bf8dda7 100644
--- a/hw/virtio-net.c
+++ b/hw/virtio-net.c
@@ -99,9 +99,14 @@ static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
     }
 }
 
-static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
+static bool virtio_net_started(VirtIONet *n, uint8_t status)
+{
+    return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
+        (n->status & VIRTIO_NET_S_LINK_UP) && n->vm_running;
+}
+
+static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
 {
-    VirtIONet *n = to_virtio_net(vdev);
     if (!n->nic->nc.peer) {
         return;
     }
@@ -112,9 +117,7 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
     if (!tap_get_vhost_net(n->nic->nc.peer)) {
         return;
     }
-    if (!!n->vhost_started == ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
-                               (n->status & VIRTIO_NET_S_LINK_UP) &&
-                               n->vm_running)) {
+    if (!!n->vhost_started == virtio_net_started(n, status)) {
         return;
     }
     if (!n->vhost_started) {
@@ -131,6 +134,32 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
     }
 }
 
+static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
+{
+    VirtIONet *n = to_virtio_net(vdev);
+
+    virtio_net_vhost_status(n, status);
+
+    if (!n->tx_waiting) {
+        return;
+    }
+
+    if (virtio_net_started(n, status) && !n->vhost_started) {
+        if (n->tx_timer) {
+            qemu_mod_timer(n->tx_timer,
+                           qemu_get_clock(vm_clock) + n->tx_timeout);
+        } else {
+            qemu_bh_schedule(n->tx_bh);
+        }
+    } else {
+        if (n->tx_timer) {
+            qemu_del_timer(n->tx_timer);
+        } else {
+            qemu_bh_cancel(n->tx_bh);
+        }
+    }
+}
+
 static void virtio_net_set_link_status(VLANClientState *nc)
 {
     VirtIONet *n = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -424,6 +453,9 @@ static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
 static int virtio_net_can_receive(VLANClientState *nc)
 {
     VirtIONet *n = DO_UPCAST(NICState, nc, nc)->opaque;
+    if (!n->vm_running) {
+        return 0;
+    }
 
     if (!virtio_queue_ready(n->rx_vq) ||
         !(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
@@ -672,11 +704,12 @@ static int32_t virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq)
 {
     VirtQueueElement elem;
     int32_t num_packets = 0;
-
     if (!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK)) {
         return num_packets;
     }
 
+    assert(n->vm_running);
+
     if (n->async_tx.elem.out_num) {
         virtio_queue_set_notification(n->tx_vq, 0);
         return num_packets;
@@ -735,6 +768,12 @@ static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
 {
     VirtIONet *n = to_virtio_net(vdev);
 
+    /* This happens when device was stopped but VCPU wasn't. */
+    if (!n->vm_running) {
+        n->tx_waiting = 1;
+        return;
+    }
+
     if (n->tx_waiting) {
         virtio_queue_set_notification(vq, 1);
         qemu_del_timer(n->tx_timer);
@@ -755,14 +794,19 @@ static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
     if (unlikely(n->tx_waiting)) {
         return;
     }
+    n->tx_waiting = 1;
+    /* This happens when device was stopped but VCPU wasn't. */
+    if (!n->vm_running) {
+        return;
+    }
     virtio_queue_set_notification(vq, 0);
     qemu_bh_schedule(n->tx_bh);
-    n->tx_waiting = 1;
 }
 
 static void virtio_net_tx_timer(void *opaque)
 {
     VirtIONet *n = opaque;
+    assert(n->vm_running);
 
     n->tx_waiting = 0;
 
@@ -779,6 +823,8 @@ static void virtio_net_tx_bh(void *opaque)
     VirtIONet *n = opaque;
     int32_t ret;
 
+    assert(n->vm_running);
+
     n->tx_waiting = 0;
 
     /* Just in case the driver is not ready on more */
@@ -923,15 +969,6 @@ static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
         }
     }
     n->mac_table.first_multi = i;
-
-    if (n->tx_waiting) {
-        if (n->tx_timer) {
-            qemu_mod_timer(n->tx_timer,
-                           qemu_get_clock(vm_clock) + n->tx_timeout);
-        } else {
-            qemu_bh_schedule(n->tx_bh);
-        }
-    }
     return 0;
 }