71 files changed, 1407 insertions, 509 deletions
diff --git a/accel/accel.c b/accel/accel.c index 7c079a5611..fa8584488e 100644 --- a/accel/accel.c +++ b/accel/accel.c @@ -120,6 +120,12 @@ void configure_accelerator(MachineState *ms) } } +void accel_register_compat_props(AccelState *accel) +{ + AccelClass *class = ACCEL_GET_CLASS(accel); + register_compat_props_array(class->global_props); +} + static void register_accel_types(void) { type_register_static(&accel_type); diff --git a/chardev/char.c b/chardev/char.c index bcfc065d16..2b679a2295 100644 --- a/chardev/char.c +++ b/chardev/char.c @@ -951,6 +951,18 @@ void qmp_chardev_remove(const char *id, Error **errp) object_unparent(OBJECT(chr)); } +void qmp_chardev_send_break(const char *id, Error **errp) +{ + Chardev *chr; + + chr = qemu_chr_find(id); + if (chr == NULL) { + error_setg(errp, "Chardev '%s' not found", id); + return; + } + qemu_chr_be_event(chr, CHR_EVENT_BREAK); +} + void qemu_chr_cleanup(void) { object_unparent(get_chardevs_root()); diff --git a/fsdev/file-op-9p.h b/fsdev/file-op-9p.h index 0844a403dc..474c79d003 100644 --- a/fsdev/file-op-9p.h +++ b/fsdev/file-op-9p.h @@ -76,6 +76,8 @@ typedef struct FsDriverEntry { int export_flags; FileOperations *ops; FsThrottle fst; + mode_t fmode; + mode_t dmode; } FsDriverEntry; typedef struct FsContext @@ -88,6 +90,8 @@ typedef struct FsContext FsThrottle *fst; /* fs driver specific data */ void *private; + mode_t fmode; + mode_t dmode; } FsContext; typedef struct V9fsPath { diff --git a/fsdev/qemu-fsdev-opts.c b/fsdev/qemu-fsdev-opts.c index bf5713008a..7c31ffffaf 100644 --- a/fsdev/qemu-fsdev-opts.c +++ b/fsdev/qemu-fsdev-opts.c @@ -38,6 +38,12 @@ static QemuOptsList qemu_fsdev_opts = { }, { .name = "sock_fd", .type = QEMU_OPT_NUMBER, + }, { + .name = "fmode", + .type = QEMU_OPT_NUMBER, + }, { + .name = "dmode", + .type = QEMU_OPT_NUMBER, }, THROTTLE_OPTS, @@ -75,6 +81,12 @@ static QemuOptsList qemu_virtfs_opts = { }, { .name = "sock_fd", .type = QEMU_OPT_NUMBER, + }, { + .name = "fmode", + .type = QEMU_OPT_NUMBER, + }, { + .name = "dmode", + .type = QEMU_OPT_NUMBER, }, { /*End of list */ } diff --git a/hmp-commands-info.hx b/hmp-commands-info.hx index ae169011b1..ba98e581ab 100644 --- a/hmp-commands-info.hx +++ b/hmp-commands-info.hx @@ -100,9 +100,9 @@ ETEXI { .name = "registers", - .args_type = "", - .params = "", - .help = "show the cpu registers", + .args_type = "cpustate_all:-a", + .params = "[-a]", + .help = "show the cpu registers (-a: all - show register info for all cpus)", .cmd = hmp_info_registers, }, diff --git a/hmp-commands.hx b/hmp-commands.hx index e763606fe5..275ccdfbc7 100644 --- a/hmp-commands.hx +++ b/hmp-commands.hx @@ -1745,6 +1745,22 @@ Removes the chardev @var{id}. ETEXI { + .name = "chardev-send-break", + .args_type = "id:s", + .params = "id", + .help = "send a break on chardev", + .cmd = hmp_chardev_send_break, + .command_completion = chardev_remove_completion, + }, + +STEXI +@item chardev-send-break id +@findex chardev-send-break +Send a break on the chardev @var{id}. 
+ +ETEXI + + { .name = "qemu-io", .args_type = "device:B,command:s", .params = "[device] \"[command]\"", diff --git a/hmp.c b/hmp.c index 8c72c58b20..dee40284c1 100644 --- a/hmp.c +++ b/hmp.c @@ -43,6 +43,7 @@ #include "exec/ramlist.h" #include "hw/intc/intc.h" #include "migration/snapshot.h" +#include "migration/misc.h" #ifdef CONFIG_SPICE #include <spice/enums.h> @@ -164,6 +165,8 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict) info = qmp_query_migrate(NULL); caps = qmp_query_migrate_capabilities(NULL); + migration_global_dump(mon); + /* do not display parameters during setup */ if (info->has_status && caps) { monitor_printf(mon, "capabilities: "); @@ -2233,6 +2236,14 @@ void hmp_chardev_remove(Monitor *mon, const QDict *qdict) hmp_handle_error(mon, &local_err); } +void hmp_chardev_send_break(Monitor *mon, const QDict *qdict) +{ + Error *local_err = NULL; + + qmp_chardev_send_break(qdict_get_str(qdict, "id"), &local_err); + hmp_handle_error(mon, &local_err); +} + void hmp_qemu_io(Monitor *mon, const QDict *qdict) { BlockBackend *blk; diff --git a/hmp.h b/hmp.h index d8b94ce9dc..214b2617e7 100644 --- a/hmp.h +++ b/hmp.h @@ -103,6 +103,7 @@ void hmp_nbd_server_add(Monitor *mon, const QDict *qdict); void hmp_nbd_server_stop(Monitor *mon, const QDict *qdict); void hmp_chardev_add(Monitor *mon, const QDict *qdict); void hmp_chardev_remove(Monitor *mon, const QDict *qdict); +void hmp_chardev_send_break(Monitor *mon, const QDict *qdict); void hmp_qemu_io(Monitor *mon, const QDict *qdict); void hmp_cpu_add(Monitor *mon, const QDict *qdict); void hmp_object_add(Monitor *mon, const QDict *qdict); diff --git a/hw/9pfs/9p-local.c b/hw/9pfs/9p-local.c index 1e78b7c9e9..6e478f4765 100644 --- a/hw/9pfs/9p-local.c +++ b/hw/9pfs/9p-local.c @@ -633,7 +633,7 @@ static int local_mknod(FsContext *fs_ctx, V9fsPath *dir_path, if (fs_ctx->export_flags & V9FS_SM_MAPPED || fs_ctx->export_flags & V9FS_SM_MAPPED_FILE) { - err = mknodat(dirfd, name, SM_LOCAL_MODE_BITS | S_IFREG, 0); + err = mknodat(dirfd, name, fs_ctx->fmode | S_IFREG, 0); if (err == -1) { goto out; } @@ -685,7 +685,7 @@ static int local_mkdir(FsContext *fs_ctx, V9fsPath *dir_path, if (fs_ctx->export_flags & V9FS_SM_MAPPED || fs_ctx->export_flags & V9FS_SM_MAPPED_FILE) { - err = mkdirat(dirfd, name, SM_LOCAL_DIR_MODE_BITS); + err = mkdirat(dirfd, name, fs_ctx->dmode); if (err == -1) { goto out; } @@ -786,7 +786,7 @@ static int local_open2(FsContext *fs_ctx, V9fsPath *dir_path, const char *name, /* Determine the security model */ if (fs_ctx->export_flags & V9FS_SM_MAPPED || fs_ctx->export_flags & V9FS_SM_MAPPED_FILE) { - fd = openat_file(dirfd, name, flags, SM_LOCAL_MODE_BITS); + fd = openat_file(dirfd, name, flags, fs_ctx->fmode); if (fd == -1) { goto out; } @@ -849,7 +849,7 @@ static int local_symlink(FsContext *fs_ctx, const char *oldpath, ssize_t oldpath_size, write_size; fd = openat_file(dirfd, name, O_CREAT | O_EXCL | O_RDWR, - SM_LOCAL_MODE_BITS); + fs_ctx->fmode); if (fd == -1) { goto out; } @@ -1100,7 +1100,7 @@ static int local_remove(FsContext *ctx, const char *path) goto out; } - if (fstatat(dirfd, path, &stbuf, AT_SYMLINK_NOFOLLOW) < 0) { + if (fstatat(dirfd, name, &stbuf, AT_SYMLINK_NOFOLLOW) < 0) { goto err_out; } @@ -1467,6 +1467,23 @@ static int local_parse_opts(QemuOpts *opts, struct FsDriverEntry *fse) return -1; } + if (fse->export_flags & V9FS_SM_MAPPED || + fse->export_flags & V9FS_SM_MAPPED_FILE) { + fse->fmode = + qemu_opt_get_number(opts, "fmode", SM_LOCAL_MODE_BITS) & 0777; + fse->dmode = + 
qemu_opt_get_number(opts, "dmode", SM_LOCAL_DIR_MODE_BITS) & 0777; + } else { + if (qemu_opt_find(opts, "fmode")) { + error_report("fmode is only valid for mapped 9p modes"); + return -1; + } + if (qemu_opt_find(opts, "dmode")) { + error_report("dmode is only valid for mapped 9p modes"); + return -1; + } + } + fse->path = g_strdup(path); return 0; diff --git a/hw/9pfs/9p-synth.c b/hw/9pfs/9p-synth.c index 4b6d4e6a3f..df0a8de08a 100644 --- a/hw/9pfs/9p-synth.c +++ b/hw/9pfs/9p-synth.c @@ -494,8 +494,7 @@ static int synth_name_to_path(FsContext *ctx, V9fsPath *dir_path, } out: /* Copy the node pointer to fid */ - target->data = g_malloc(sizeof(void *)); - memcpy(target->data, &node, sizeof(void *)); + target->data = g_memdup(&node, sizeof(void *)); target->size = sizeof(void *); return 0; } diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c index 96d2683348..6c92bad5b3 100644 --- a/hw/9pfs/9p.c +++ b/hw/9pfs/9p.c @@ -624,15 +624,11 @@ void pdu_free(V9fsPDU *pdu) QLIST_INSERT_HEAD(&s->free_list, pdu, next); } -/* - * We don't do error checking for pdu_marshal/unmarshal here - * because we always expect to have enough space to encode - * error details - */ static void coroutine_fn pdu_complete(V9fsPDU *pdu, ssize_t len) { int8_t id = pdu->id + 1; /* Response */ V9fsState *s = pdu->s; + int ret; if (len < 0) { int err = -len; @@ -644,11 +640,19 @@ static void coroutine_fn pdu_complete(V9fsPDU *pdu, ssize_t len) str.data = strerror(err); str.size = strlen(str.data); - len += pdu_marshal(pdu, len, "s", &str); + ret = pdu_marshal(pdu, len, "s", &str); + if (ret < 0) { + goto out_notify; + } + len += ret; id = P9_RERROR; } - len += pdu_marshal(pdu, len, "d", err); + ret = pdu_marshal(pdu, len, "d", err); + if (ret < 0) { + goto out_notify; + } + len += ret; if (s->proto_version == V9FS_PROTO_2000L) { id = P9_RLERROR; @@ -657,12 +661,15 @@ static void coroutine_fn pdu_complete(V9fsPDU *pdu, ssize_t len) } /* fill out the header */ - pdu_marshal(pdu, 0, "dbw", (int32_t)len, id, pdu->tag); + if (pdu_marshal(pdu, 0, "dbw", (int32_t)len, id, pdu->tag) < 0) { + goto out_notify; + } /* keep these in sync */ pdu->size = len; pdu->id = id; +out_notify: pdu->s->transport->push_and_notify(pdu); /* Now wakeup anybody waiting in flush for this request */ @@ -1664,7 +1671,7 @@ static void v9fs_init_qiov_from_pdu(QEMUIOVector *qiov, V9fsPDU *pdu, unsigned int niov; if (is_write) { - pdu->s->transport->init_out_iov_from_pdu(pdu, &iov, &niov); + pdu->s->transport->init_out_iov_from_pdu(pdu, &iov, &niov, size + skip); } else { pdu->s->transport->init_in_iov_from_pdu(pdu, &iov, &niov, size + skip); } @@ -3533,6 +3540,9 @@ int v9fs_device_realize_common(V9fsState *s, Error **errp) s->ops = fse->ops; + s->ctx.fmode = fse->fmode; + s->ctx.dmode = fse->dmode; + s->fid_list = NULL; qemu_co_rwlock_init(&s->rename_lock); diff --git a/hw/9pfs/9p.h b/hw/9pfs/9p.h index c886ba78d2..d1cfeaf10e 100644 --- a/hw/9pfs/9p.h +++ b/hw/9pfs/9p.h @@ -124,6 +124,11 @@ typedef struct { uint8_t id; uint16_t tag_le; } QEMU_PACKED P9MsgHeader; +/* According to the specification, 9p messages start with a 7-byte header. + * Since most of the code uses this header size in literal form, we must be + * sure this is indeed the case. 
+ */ +QEMU_BUILD_BUG_ON(sizeof(P9MsgHeader) != 7); struct V9fsPDU { @@ -358,7 +363,7 @@ struct V9fsTransport { void (*init_in_iov_from_pdu)(V9fsPDU *pdu, struct iovec **piov, unsigned int *pniov, size_t size); void (*init_out_iov_from_pdu)(V9fsPDU *pdu, struct iovec **piov, - unsigned int *pniov); + unsigned int *pniov, size_t size); void (*push_and_notify)(V9fsPDU *pdu); }; diff --git a/hw/9pfs/virtio-9p-device.c b/hw/9pfs/virtio-9p-device.c index 245abd8aae..62650b0a6b 100644 --- a/hw/9pfs/virtio-9p-device.c +++ b/hw/9pfs/virtio-9p-device.c @@ -53,23 +53,22 @@ static void handle_9p_output(VirtIODevice *vdev, VirtQueue *vq) goto out_free_pdu; } - if (elem->in_num == 0) { + if (iov_size(elem->in_sg, elem->in_num) < 7) { virtio_error(vdev, "The guest sent a VirtFS request without space for " "the reply"); goto out_free_req; } - QEMU_BUILD_BUG_ON(sizeof(out) != 7); - v->elems[pdu->idx] = elem; - len = iov_to_buf(elem->out_sg, elem->out_num, 0, - &out, sizeof(out)); - if (len != sizeof(out)) { + len = iov_to_buf(elem->out_sg, elem->out_num, 0, &out, 7); + if (len != 7) { virtio_error(vdev, "The guest sent a malformed VirtFS request: " "header size is %zd, should be 7", len); goto out_free_req; } + v->elems[pdu->idx] = elem; + pdu_submit(pdu, &out); } @@ -147,8 +146,16 @@ static ssize_t virtio_pdu_vmarshal(V9fsPDU *pdu, size_t offset, V9fsState *s = pdu->s; V9fsVirtioState *v = container_of(s, V9fsVirtioState, state); VirtQueueElement *elem = v->elems[pdu->idx]; + ssize_t ret; + + ret = v9fs_iov_vmarshal(elem->in_sg, elem->in_num, offset, 1, fmt, ap); + if (ret < 0) { + VirtIODevice *vdev = VIRTIO_DEVICE(v); - return v9fs_iov_vmarshal(elem->in_sg, elem->in_num, offset, 1, fmt, ap); + virtio_error(vdev, "Failed to encode VirtFS reply type %d", + pdu->id + 1); + } + return ret; } static ssize_t virtio_pdu_vunmarshal(V9fsPDU *pdu, size_t offset, @@ -157,28 +164,52 @@ static ssize_t virtio_pdu_vunmarshal(V9fsPDU *pdu, size_t offset, V9fsState *s = pdu->s; V9fsVirtioState *v = container_of(s, V9fsVirtioState, state); VirtQueueElement *elem = v->elems[pdu->idx]; + ssize_t ret; + + ret = v9fs_iov_vunmarshal(elem->out_sg, elem->out_num, offset, 1, fmt, ap); + if (ret < 0) { + VirtIODevice *vdev = VIRTIO_DEVICE(v); - return v9fs_iov_vunmarshal(elem->out_sg, elem->out_num, offset, 1, fmt, ap); + virtio_error(vdev, "Failed to decode VirtFS request type %d", pdu->id); + } + return ret; } -/* The size parameter is used by other transports. Do not drop it. 
*/ static void virtio_init_in_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov, unsigned int *pniov, size_t size) { V9fsState *s = pdu->s; V9fsVirtioState *v = container_of(s, V9fsVirtioState, state); VirtQueueElement *elem = v->elems[pdu->idx]; + size_t buf_size = iov_size(elem->in_sg, elem->in_num); + + if (buf_size < size) { + VirtIODevice *vdev = VIRTIO_DEVICE(v); + + virtio_error(vdev, + "VirtFS reply type %d needs %zu bytes, buffer has %zu", + pdu->id + 1, size, buf_size); + } *piov = elem->in_sg; *pniov = elem->in_num; } static void virtio_init_out_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov, - unsigned int *pniov) + unsigned int *pniov, size_t size) { V9fsState *s = pdu->s; V9fsVirtioState *v = container_of(s, V9fsVirtioState, state); VirtQueueElement *elem = v->elems[pdu->idx]; + size_t buf_size = iov_size(elem->out_sg, elem->out_num); + + if (buf_size < size) { + VirtIODevice *vdev = VIRTIO_DEVICE(v); + + virtio_error(vdev, + "VirtFS request type %d needs %zu bytes, buffer has %zu", + pdu->id, size, buf_size); + } *piov = elem->out_sg; *pniov = elem->out_num; diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c index 922cc967be..ee87f08926 100644 --- a/hw/9pfs/xen-9p-backend.c +++ b/hw/9pfs/xen-9p-backend.c @@ -54,6 +54,8 @@ typedef struct Xen9pfsDev { Xen9pfsRing *rings; } Xen9pfsDev; +static void xen_9pfs_disconnect(struct XenDevice *xendev); + static void xen_9pfs_in_sg(Xen9pfsRing *ring, struct iovec *in_sg, int *num, @@ -125,10 +127,19 @@ static ssize_t xen_9pfs_pdu_vmarshal(V9fsPDU *pdu, Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state); struct iovec in_sg[2]; int num; + ssize_t ret; xen_9pfs_in_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings], in_sg, &num, pdu->idx, ROUND_UP(offset + 128, 512)); - return v9fs_iov_vmarshal(in_sg, num, offset, 0, fmt, ap); + + ret = v9fs_iov_vmarshal(in_sg, num, offset, 0, fmt, ap); + if (ret < 0) { + xen_pv_printf(&xen_9pfs->xendev, 0, + "Failed to encode VirtFS request type %d\n", pdu->id + 1); + xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing); + xen_9pfs_disconnect(&xen_9pfs->xendev); + } + return ret; } static ssize_t xen_9pfs_pdu_vunmarshal(V9fsPDU *pdu, @@ -139,15 +150,25 @@ static ssize_t xen_9pfs_pdu_vunmarshal(V9fsPDU *pdu, Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state); struct iovec out_sg[2]; int num; + ssize_t ret; xen_9pfs_out_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings], out_sg, &num, pdu->idx); - return v9fs_iov_vunmarshal(out_sg, num, offset, 0, fmt, ap); + + ret = v9fs_iov_vunmarshal(out_sg, num, offset, 0, fmt, ap); + if (ret < 0) { + xen_pv_printf(&xen_9pfs->xendev, 0, + "Failed to decode VirtFS request type %d\n", pdu->id); + xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing); + xen_9pfs_disconnect(&xen_9pfs->xendev); + } + return ret; } static void xen_9pfs_init_out_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov, - unsigned int *pniov) + unsigned int *pniov, + size_t size) { Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state); Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings]; @@ -169,11 +190,22 @@ static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu, Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state); Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings]; int num; + size_t buf_size; g_free(ring->sg); ring->sg = g_malloc0(sizeof(*ring->sg) * 2); xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, size); + + buf_size = iov_size(ring->sg, num); + if (buf_size < size) { + xen_pv_printf(&xen_9pfs->xendev, 0, 
"Xen 9pfs request type %d" + "needs %zu bytes, buffer has %zu\n", pdu->id, size, + buf_size); + xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing); + xen_9pfs_disconnect(&xen_9pfs->xendev); + } + *piov = ring->sg; *pniov = num; } @@ -217,7 +249,7 @@ static int xen_9pfs_init(struct XenDevice *xendev) static int xen_9pfs_receive(Xen9pfsRing *ring) { P9MsgHeader h; - RING_IDX cons, prod, masked_prod, masked_cons; + RING_IDX cons, prod, masked_prod, masked_cons, queued; V9fsPDU *pdu; if (ring->inprogress) { @@ -228,8 +260,8 @@ static int xen_9pfs_receive(Xen9pfsRing *ring) prod = ring->intf->out_prod; xen_rmb(); - if (xen_9pfs_queued(prod, cons, XEN_FLEX_RING_SIZE(ring->ring_order)) < - sizeof(h)) { + queued = xen_9pfs_queued(prod, cons, XEN_FLEX_RING_SIZE(ring->ring_order)); + if (queued < sizeof(h)) { return 0; } ring->inprogress = true; @@ -240,6 +272,9 @@ static int xen_9pfs_receive(Xen9pfsRing *ring) xen_9pfs_read_packet((uint8_t *) &h, ring->ring.out, sizeof(h), masked_prod, &masked_cons, XEN_FLEX_RING_SIZE(ring->ring_order)); + if (queued < le32_to_cpu(h.size_le)) { + return 0; + } /* cannot fail, because we only handle one request per ring at a time */ pdu = pdu_alloc(&ring->priv->state); @@ -268,15 +303,30 @@ static void xen_9pfs_evtchn_event(void *opaque) qemu_bh_schedule(ring->bh); } -static int xen_9pfs_free(struct XenDevice *xendev) +static void xen_9pfs_disconnect(struct XenDevice *xendev) { + Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev); int i; + + for (i = 0; i < xen_9pdev->num_rings; i++) { + if (xen_9pdev->rings[i].evtchndev != NULL) { + qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev), + NULL, NULL, NULL); + xenevtchn_unbind(xen_9pdev->rings[i].evtchndev, + xen_9pdev->rings[i].local_port); + xen_9pdev->rings[i].evtchndev = NULL; + } + } +} + +static int xen_9pfs_free(struct XenDevice *xendev) +{ Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev); + int i; - g_free(xen_9pdev->id); - g_free(xen_9pdev->tag); - g_free(xen_9pdev->path); - g_free(xen_9pdev->security_model); + if (xen_9pdev->rings[0].evtchndev != NULL) { + xen_9pfs_disconnect(xendev); + } for (i = 0; i < xen_9pdev->num_rings; i++) { if (xen_9pdev->rings[i].data != NULL) { @@ -289,16 +339,15 @@ static int xen_9pfs_free(struct XenDevice *xendev) xen_9pdev->rings[i].intf, 1); } - if (xen_9pdev->rings[i].evtchndev > 0) { - qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev), - NULL, NULL, NULL); - xenevtchn_unbind(xen_9pdev->rings[i].evtchndev, - xen_9pdev->rings[i].local_port); - } if (xen_9pdev->rings[i].bh != NULL) { qemu_bh_delete(xen_9pdev->rings[i].bh); } } + + g_free(xen_9pdev->id); + g_free(xen_9pdev->tag); + g_free(xen_9pdev->path); + g_free(xen_9pdev->security_model); g_free(xen_9pdev->rings); return 0; } @@ -422,11 +471,6 @@ static void xen_9pfs_alloc(struct XenDevice *xendev) xenstore_write_be_int(xendev, "max-ring-page-order", MAX_RING_ORDER); } -static void xen_9pfs_disconnect(struct XenDevice *xendev) -{ - /* Dynamic hotplug of PV filesystems at runtime is not supported. 
*/ -} - struct XenDevOps xen_9pfs_ops = { .size = sizeof(Xen9pfsDev), .flags = DEVOPS_FLAG_NEED_GNTDEV, diff --git a/hw/block/fdc.c b/hw/block/fdc.c index 28f6b6ee35..401129073b 100644 --- a/hw/block/fdc.c +++ b/hw/block/fdc.c @@ -1217,7 +1217,7 @@ static const VMStateDescription vmstate_fdc = { VMSTATE_UINT8(config, FDCtrl), VMSTATE_UINT8(lock, FDCtrl), VMSTATE_UINT8(pwrd, FDCtrl), - VMSTATE_UINT8_EQUAL(num_floppies, FDCtrl), + VMSTATE_UINT8_EQUAL(num_floppies, FDCtrl, NULL), VMSTATE_STRUCT_ARRAY(drives, FDCtrl, MAX_FD, 1, vmstate_fdrive, FDrive), VMSTATE_END_OF_LIST() diff --git a/hw/core/machine.c b/hw/core/machine.c index 2e7e9778cd..ecb55528e8 100644 --- a/hw/core/machine.c +++ b/hw/core/machine.c @@ -770,19 +770,6 @@ static void machine_class_finalize(ObjectClass *klass, void *data) g_free(mc->name); } -static void register_compat_prop(const char *driver, - const char *property, - const char *value) -{ - GlobalProperty *p = g_new0(GlobalProperty, 1); - /* Machine compat_props must never cause errors: */ - p->errp = &error_abort; - p->driver = driver; - p->property = property; - p->value = value; - qdev_prop_register_global(p); -} - static void machine_register_compat_for_subclass(ObjectClass *oc, void *opaque) { GlobalProperty *p = opaque; diff --git a/hw/core/qdev-properties.c b/hw/core/qdev-properties.c index 68cd65345c..f11d57831b 100644 --- a/hw/core/qdev-properties.c +++ b/hw/core/qdev-properties.c @@ -1084,6 +1084,27 @@ void qdev_prop_register_global(GlobalProperty *prop) global_props = g_list_append(global_props, prop); } +void register_compat_prop(const char *driver, + const char *property, + const char *value) +{ + GlobalProperty *p = g_new0(GlobalProperty, 1); + + /* Any compat_props must never cause error */ + p->errp = &error_abort; + p->driver = driver; + p->property = property; + p->value = value; + qdev_prop_register_global(p); +} + +void register_compat_props_array(GlobalProperty *prop) +{ + for (; prop && prop->driver; prop++) { + register_compat_prop(prop->driver, prop->property, prop->value); + } +} + void qdev_prop_register_global_list(GlobalProperty *props) { int i; diff --git a/hw/display/qxl.c b/hw/display/qxl.c index ad09bb98f9..3c1688e7cb 100644 --- a/hw/display/qxl.c +++ b/hw/display/qxl.c @@ -2373,12 +2373,12 @@ static VMStateDescription qxl_vmstate = { VMSTATE_UINT32(last_release_offset, PCIQXLDevice), VMSTATE_UINT32(mode, PCIQXLDevice), VMSTATE_UINT32(ssd.unique, PCIQXLDevice), - VMSTATE_INT32_EQUAL(num_memslots, PCIQXLDevice), + VMSTATE_INT32_EQUAL(num_memslots, PCIQXLDevice, NULL), VMSTATE_STRUCT_ARRAY(guest_slots, PCIQXLDevice, NUM_MEMSLOTS, 0, qxl_memslot, struct guest_slots), VMSTATE_STRUCT(guest_primary.surface, PCIQXLDevice, 0, qxl_surface, QXLSurfaceCreate), - VMSTATE_INT32_EQUAL(ssd.num_surfaces, PCIQXLDevice), + VMSTATE_INT32_EQUAL(ssd.num_surfaces, PCIQXLDevice, NULL), VMSTATE_VARRAY_INT32(guest_surfaces.cmds, PCIQXLDevice, ssd.num_surfaces, 0, vmstate_info_uint64, uint64_t), diff --git a/hw/display/vga.c b/hw/display/vga.c index dcc95f88e2..80508b83f4 100644 --- a/hw/display/vga.c +++ b/hw/display/vga.c @@ -2099,7 +2099,7 @@ const VMStateDescription vmstate_vga_common = { VMSTATE_BUFFER(palette, VGACommonState), VMSTATE_INT32(bank_offset, VGACommonState), - VMSTATE_UINT8_EQUAL(is_vbe_vmstate, VGACommonState), + VMSTATE_UINT8_EQUAL(is_vbe_vmstate, VGACommonState, NULL), VMSTATE_UINT16(vbe_index, VGACommonState), VMSTATE_UINT16_ARRAY(vbe_regs, VGACommonState, VBE_DISPI_INDEX_NB), VMSTATE_UINT32(vbe_start_addr, VGACommonState), diff --git 
a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c index 58dc0b2737..0506d2c1b0 100644 --- a/hw/display/virtio-gpu.c +++ b/hw/display/virtio-gpu.c @@ -962,7 +962,7 @@ static const VMStateDescription vmstate_virtio_gpu_scanouts = { .version_id = 1, .fields = (VMStateField[]) { VMSTATE_INT32(enable, struct VirtIOGPU), - VMSTATE_UINT32_EQUAL(conf.max_outputs, struct VirtIOGPU), + VMSTATE_UINT32_EQUAL(conf.max_outputs, struct VirtIOGPU, NULL), VMSTATE_STRUCT_VARRAY_UINT32(scanout, struct VirtIOGPU, conf.max_outputs, 1, vmstate_virtio_gpu_scanout, diff --git a/hw/display/vmware_vga.c b/hw/display/vmware_vga.c index ec5f27d67e..c989cef1cd 100644 --- a/hw/display/vmware_vga.c +++ b/hw/display/vmware_vga.c @@ -1192,7 +1192,7 @@ static const VMStateDescription vmstate_vmware_vga_internal = { .minimum_version_id = 0, .post_load = vmsvga_post_load, .fields = (VMStateField[]) { - VMSTATE_INT32_EQUAL(new_depth, struct vmsvga_state_s), + VMSTATE_INT32_EQUAL(new_depth, struct vmsvga_state_s, NULL), VMSTATE_INT32(enable, struct vmsvga_state_s), VMSTATE_INT32(config, struct vmsvga_state_s), VMSTATE_INT32(cursor.id, struct vmsvga_state_s), diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c index 46a2bc41ab..22dbef64c6 100644 --- a/hw/i386/pc_piix.c +++ b/hw/i386/pc_piix.c @@ -314,12 +314,9 @@ static void pc_init1(MachineState *machine, static void pc_compat_2_3(MachineState *machine) { PCMachineState *pcms = PC_MACHINE(machine); - savevm_skip_section_footers(); if (kvm_enabled()) { pcms->smm = ON_OFF_AUTO_OFF; } - global_state_set_optional(); - savevm_skip_configuration(); } static void pc_compat_2_2(MachineState *machine) diff --git a/hw/ide/ahci.c b/hw/ide/ahci.c index f60826d6e0..874d3fe280 100644 --- a/hw/ide/ahci.c +++ b/hw/ide/ahci.c @@ -1669,7 +1669,7 @@ const VMStateDescription vmstate_ahci = { VMSTATE_UINT32(control_regs.impl, AHCIState), VMSTATE_UINT32(control_regs.version, AHCIState), VMSTATE_UINT32(idp_index, AHCIState), - VMSTATE_INT32_EQUAL(ports, AHCIState), + VMSTATE_INT32_EQUAL(ports, AHCIState, NULL), VMSTATE_END_OF_LIST() }, }; diff --git a/hw/input/vmmouse.c b/hw/input/vmmouse.c index 4747da9a8d..b6d22086f4 100644 --- a/hw/input/vmmouse.c +++ b/hw/input/vmmouse.c @@ -243,7 +243,7 @@ static const VMStateDescription vmstate_vmmouse = { .minimum_version_id = 0, .post_load = vmmouse_post_load, .fields = (VMStateField[]) { - VMSTATE_INT32_EQUAL(queue_size, VMMouseState), + VMSTATE_INT32_EQUAL(queue_size, VMMouseState, NULL), VMSTATE_UINT32_ARRAY(queue, VMMouseState, VMMOUSE_QUEUE_SIZE), VMSTATE_UINT16(nb_queue, VMMouseState), VMSTATE_UINT16(status, VMMouseState), diff --git a/hw/intc/openpic.c b/hw/intc/openpic.c index f966d0604a..9dd285b923 100644 --- a/hw/intc/openpic.c +++ b/hw/intc/openpic.c @@ -45,6 +45,7 @@ #include "qemu/bitops.h" #include "qapi/qmp/qerror.h" #include "qemu/log.h" +#include "qemu/timer.h" //#define DEBUG_OPENPIC @@ -54,8 +55,10 @@ static const int debug_openpic = 1; static const int debug_openpic = 0; #endif +static int get_current_cpu(void); #define DPRINTF(fmt, ...) do { \ if (debug_openpic) { \ + printf("Core%d: ", get_current_cpu()); \ printf(fmt , ## __VA_ARGS__); \ } \ } while (0) @@ -246,9 +249,31 @@ typedef struct IRQSource { #define IDR_EP 0x80000000 /* external pin */ #define IDR_CI 0x40000000 /* critical interrupt */ +/* Convert between openpic clock ticks and nanosecs. In the hardware the clock + frequency is driven by board inputs to the PIC which the PIC would then + divide by 4 or 8. For now hard code to 25MZ. 
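A minimal standalone sketch of the tick/nanosecond conversion defined just below (the hard-coded 25 MHz clock — "25MZ" above is presumably 25 MHz — gives 40 ns per tick). The main() driver and the example count are illustrative only, not part of the patch:

#include <stdint.h>
#include <stdio.h>

#define OPENPIC_TIMER_FREQ_MHZ    25
#define OPENPIC_TIMER_NS_PER_TICK (1000 / OPENPIC_TIMER_FREQ_MHZ)   /* 40 ns */

static uint64_t ticks_to_ns(uint64_t ticks)
{
    return ticks * OPENPIC_TIMER_NS_PER_TICK;
}

static uint64_t ns_to_ticks(uint64_t ns)
{
    return ns / OPENPIC_TIMER_NS_PER_TICK;
}

int main(void)
{
    uint64_t count = 0x100000;                 /* example TBCR base count */

    /* 1048576 ticks * 40 ns = 41943040 ns, i.e. the timer fires ~42 ms out */
    printf("%llu ticks -> %llu ns\n",
           (unsigned long long)count, (unsigned long long)ticks_to_ns(count));

    /* going the other way: 1 ms of virtual time is 25000 timer ticks */
    printf("1000000 ns -> %llu ticks\n",
           (unsigned long long)ns_to_ticks(1000000));
    return 0;
}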
+*/ +#define OPENPIC_TIMER_FREQ_MHZ 25 +#define OPENPIC_TIMER_NS_PER_TICK (1000 / OPENPIC_TIMER_FREQ_MHZ) +static inline uint64_t ns_to_ticks(uint64_t ns) +{ + return ns / OPENPIC_TIMER_NS_PER_TICK; +} +static inline uint64_t ticks_to_ns(uint64_t ticks) +{ + return ticks * OPENPIC_TIMER_NS_PER_TICK; +} + typedef struct OpenPICTimer { uint32_t tccr; /* Global timer current count register */ uint32_t tbcr; /* Global timer base count register */ + int n_IRQ; + bool qemu_timer_active; /* Is the qemu_timer is running? */ + struct QEMUTimer *qemu_timer; + struct OpenPICState *opp; /* Device timer is part of. */ + /* The QEMU_CLOCK_VIRTUAL time (in ns) corresponding to the last + current_count written or read, only defined if qemu_timer_active. */ + uint64_t origin_time; } OpenPICTimer; typedef struct OpenPICMSI { @@ -795,6 +820,65 @@ static uint64_t openpic_gbl_read(void *opaque, hwaddr addr, unsigned len) return retval; } +static void openpic_tmr_set_tmr(OpenPICTimer *tmr, uint32_t val, bool enabled); + +static void qemu_timer_cb(void *opaque) +{ + OpenPICTimer *tmr = opaque; + OpenPICState *opp = tmr->opp; + uint32_t n_IRQ = tmr->n_IRQ; + uint32_t val = tmr->tbcr & ~TBCR_CI; + uint32_t tog = ((tmr->tccr & TCCR_TOG) ^ TCCR_TOG); /* invert toggle. */ + + DPRINTF("%s n_IRQ=%d\n", __func__, n_IRQ); + /* Reload current count from base count and setup timer. */ + tmr->tccr = val | tog; + openpic_tmr_set_tmr(tmr, val, /*enabled=*/true); + /* Raise the interrupt. */ + opp->src[n_IRQ].destmask = read_IRQreg_idr(opp, n_IRQ); + openpic_set_irq(opp, n_IRQ, 1); + openpic_set_irq(opp, n_IRQ, 0); +} + +/* If enabled is true, arranges for an interrupt to be raised val clocks into + the future, if enabled is false cancels the timer. */ +static void openpic_tmr_set_tmr(OpenPICTimer *tmr, uint32_t val, bool enabled) +{ + uint64_t ns = ticks_to_ns(val & ~TCCR_TOG); + /* A count of zero causes a timer to be set to expire immediately. This + effectively stops the simulation since the timer is constantly expiring + which prevents guest code execution, so we don't honor that + configuration. On real hardware, this situation would generate an + interrupt on every clock cycle if the interrupt was unmasked. */ + if ((ns == 0) || !enabled) { + tmr->qemu_timer_active = false; + tmr->tccr = tmr->tccr & TCCR_TOG; + timer_del(tmr->qemu_timer); /* set timer to never expire. */ + } else { + tmr->qemu_timer_active = true; + uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + tmr->origin_time = now; + timer_mod(tmr->qemu_timer, now + ns); /* set timer expiration. */ + } +} + +/* Returns the currrent tccr value, i.e., timer value (in clocks) with + appropriate TOG. 
*/ +static uint64_t openpic_tmr_get_timer(OpenPICTimer *tmr) +{ + uint64_t retval; + if (!tmr->qemu_timer_active) { + retval = tmr->tccr; + } else { + uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + uint64_t used = now - tmr->origin_time; /* nsecs */ + uint32_t used_ticks = (uint32_t)ns_to_ticks(used); + uint32_t count = (tmr->tccr & ~TCCR_TOG) - used_ticks; + retval = (uint32_t)((tmr->tccr & TCCR_TOG) | (count & ~TCCR_TOG)); + } + return retval; +} + static void openpic_tmr_write(void *opaque, hwaddr addr, uint64_t val, unsigned len) { @@ -819,10 +903,15 @@ static void openpic_tmr_write(void *opaque, hwaddr addr, uint64_t val, case 0x00: /* TCCR */ break; case 0x10: /* TBCR */ - if ((opp->timers[idx].tccr & TCCR_TOG) != 0 && - (val & TBCR_CI) == 0 && - (opp->timers[idx].tbcr & TBCR_CI) != 0) { - opp->timers[idx].tccr &= ~TCCR_TOG; + /* Did the enable status change? */ + if ((opp->timers[idx].tbcr & TBCR_CI) != (val & TBCR_CI)) { + /* Did "Count Inhibit" transition from 1 to 0? */ + if ((val & TBCR_CI) == 0) { + opp->timers[idx].tccr = val & ~TCCR_TOG; + } + openpic_tmr_set_tmr(&opp->timers[idx], + (val & ~TBCR_CI), + /*enabled=*/((val & TBCR_CI) == 0)); } opp->timers[idx].tbcr = val; break; @@ -854,7 +943,7 @@ static uint64_t openpic_tmr_read(void *opaque, hwaddr addr, unsigned len) idx = (addr >> 6) & 0x3; switch (addr & 0x30) { case 0x00: /* TCCR */ - retval = opp->timers[idx].tccr; + retval = openpic_tmr_get_timer(&opp->timers[idx]); break; case 0x10: /* TBCR */ retval = opp->timers[idx].tbcr; @@ -1136,7 +1225,10 @@ static uint32_t openpic_iack(OpenPICState *opp, IRQDest *dst, int cpu) IRQ_resetbit(&dst->raised, irq); } - if ((irq >= opp->irq_ipi0) && (irq < (opp->irq_ipi0 + OPENPIC_MAX_IPI))) { + /* Timers and IPIs support multicast. */ + if (((irq >= opp->irq_ipi0) && (irq < (opp->irq_ipi0 + OPENPIC_MAX_IPI))) || + ((irq >= opp->irq_tim0) && (irq < (opp->irq_tim0 + OPENPIC_MAX_TMR)))) { + DPRINTF("irq is IPI or TMR\n"); src->destmask &= ~(1 << cpu); if (src->destmask && !src->level) { /* trigger on CPUs that didn't know about it yet */ @@ -1341,6 +1433,10 @@ static void openpic_reset(DeviceState *d) for (i = 0; i < OPENPIC_MAX_TMR; i++) { opp->timers[i].tccr = 0; opp->timers[i].tbcr = TBCR_CI; + if (opp->timers[i].qemu_timer_active) { + timer_del(opp->timers[i].qemu_timer); /* Inhibit timer */ + opp->timers[i].qemu_timer_active = false; + } } /* Go out of RESET state */ opp->gcr = 0; @@ -1391,6 +1487,15 @@ static void fsl_common_init(OpenPICState *opp) opp->src[i].type = IRQ_TYPE_FSLSPECIAL; opp->src[i].level = false; } + + for (i = 0; i < OPENPIC_MAX_TMR; i++) { + opp->timers[i].n_IRQ = opp->irq_tim0 + i; + opp->timers[i].qemu_timer_active = false; + opp->timers[i].qemu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + &qemu_timer_cb, + &opp->timers[i]); + opp->timers[i].opp = opp; + } } static void map_list(OpenPICState *opp, const MemReg *list, int *count) @@ -1499,7 +1604,7 @@ static const VMStateDescription vmstate_openpic = { VMSTATE_UINT32(max_irq, OpenPICState), VMSTATE_STRUCT_VARRAY_UINT32(src, OpenPICState, max_irq, 0, vmstate_openpic_irqsource, IRQSource), - VMSTATE_UINT32_EQUAL(nb_cpus, OpenPICState), + VMSTATE_UINT32_EQUAL(nb_cpus, OpenPICState, NULL), VMSTATE_STRUCT_VARRAY_UINT32(dst, OpenPICState, nb_cpus, 0, vmstate_openpic_irqdest, IRQDest), VMSTATE_STRUCT_ARRAY(timers, OpenPICState, OPENPIC_MAX_TMR, 0, diff --git a/hw/intc/xics.c b/hw/intc/xics.c index 7ccfb53c55..a84ba51ad8 100644 --- a/hw/intc/xics.c +++ b/hw/intc/xics.c @@ -344,10 +344,14 @@ static void 
icp_realize(DeviceState *dev, Error **errp) } qemu_register_reset(icp_reset, dev); + vmstate_register(NULL, icp->cs->cpu_index, &vmstate_icp_server, icp); } static void icp_unrealize(DeviceState *dev, Error **errp) { + ICPState *icp = ICP(dev); + + vmstate_unregister(NULL, &vmstate_icp_server, icp); qemu_unregister_reset(icp_reset, dev); } @@ -355,7 +359,6 @@ static void icp_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->vmsd = &vmstate_icp_server; dc->realize = icp_realize; dc->unrealize = icp_unrealize; } @@ -574,7 +577,7 @@ static const VMStateDescription vmstate_ics_simple = { .post_load = ics_simple_dispatch_post_load, .fields = (VMStateField[]) { /* Sanity check */ - VMSTATE_UINT32_EQUAL(nr_irqs, ICSState), + VMSTATE_UINT32_EQUAL(nr_irqs, ICSState, NULL), VMSTATE_STRUCT_VARRAY_POINTER_UINT32(irqs, ICSState, nr_irqs, vmstate_ics_simple_irq, diff --git a/hw/misc/max111x.c b/hw/misc/max111x.c index 2a277bdb86..6dbdc03677 100644 --- a/hw/misc/max111x.c +++ b/hw/misc/max111x.c @@ -116,7 +116,7 @@ static const VMStateDescription vmstate_max111x = { VMSTATE_UINT8(tb1, MAX111xState), VMSTATE_UINT8(rb2, MAX111xState), VMSTATE_UINT8(rb3, MAX111xState), - VMSTATE_INT32_EQUAL(inputs, MAX111xState), + VMSTATE_INT32_EQUAL(inputs, MAX111xState, NULL), VMSTATE_INT32(com, MAX111xState), VMSTATE_ARRAY_INT32_UNSAFE(input, MAX111xState, inputs, vmstate_info_uint8, uint8_t), diff --git a/hw/nvram/eeprom93xx.c b/hw/nvram/eeprom93xx.c index 848692abc0..2fd0e3c29f 100644 --- a/hw/nvram/eeprom93xx.c +++ b/hw/nvram/eeprom93xx.c @@ -143,7 +143,7 @@ static const VMStateDescription vmstate_eeprom = { VMSTATE_UINT8(addrbits, eeprom_t), VMSTATE_UINT16_HACK_TEST(size, eeprom_t, is_old_eeprom_version), VMSTATE_UNUSED_TEST(is_old_eeprom_version, 1), - VMSTATE_UINT16_EQUAL_V(size, eeprom_t, EEPROM_VERSION), + VMSTATE_UINT16_EQUAL_V(size, eeprom_t, EEPROM_VERSION, NULL), VMSTATE_UINT16(data, eeprom_t), VMSTATE_VARRAY_UINT16_UNSAFE(contents, eeprom_t, size, 0, vmstate_info_uint16, uint16_t), diff --git a/hw/pci/pci.c b/hw/pci/pci.c index 98ccc27533..b7fee4bdf2 100644 --- a/hw/pci/pci.c +++ b/hw/pci/pci.c @@ -74,7 +74,7 @@ static const VMStateDescription vmstate_pcibus = { .version_id = 1, .minimum_version_id = 1, .fields = (VMStateField[]) { - VMSTATE_INT32_EQUAL(nirq, PCIBus), + VMSTATE_INT32_EQUAL(nirq, PCIBus, NULL), VMSTATE_VARRAY_INT32(irq_count, PCIBus, nirq, 0, vmstate_info_int32, int32_t), diff --git a/hw/pci/pcie_aer.c b/hw/pci/pcie_aer.c index 828052b0c0..97200742b4 100644 --- a/hw/pci/pcie_aer.c +++ b/hw/pci/pcie_aer.c @@ -813,7 +813,7 @@ const VMStateDescription vmstate_pcie_aer_log = { .minimum_version_id = 1, .fields = (VMStateField[]) { VMSTATE_UINT16(log_num, PCIEAERLog), - VMSTATE_UINT16_EQUAL(log_max, PCIEAERLog), + VMSTATE_UINT16_EQUAL(log_max, PCIEAERLog, NULL), VMSTATE_VALIDATE("log_num <= log_max", pcie_aer_state_log_num_valid), VMSTATE_STRUCT_VARRAY_POINTER_UINT16(log, PCIEAERLog, log_num, vmstate_pcie_aer_err, PCIEAERErr), diff --git a/hw/ppc/prep.c b/hw/ppc/prep.c index d16646c95d..36d3dcd89a 100644 --- a/hw/ppc/prep.c +++ b/hw/ppc/prep.c @@ -36,7 +36,6 @@ #include "hw/pci/pci_host.h" #include "hw/ppc/ppc.h" #include "hw/boards.h" -#include "hw/audio/soundhw.h" #include "qemu/error-report.h" #include "qemu/log.h" #include "hw/ide.h" @@ -782,9 +781,6 @@ static void ibm_40p_init(MachineState *machine) qbus_walk_children(BUS(isa_bus), prep_set_cmos_checksum, NULL, NULL, NULL, &cmos_checksum); - /* initialize audio subsystem */ - soundhw_init(); - /* add 
some more devices */ if (defaults_enabled()) { isa_create_simple(isa_bus, "i8042"); diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index ede5167bc0..0ee9fac50b 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -127,9 +127,49 @@ error: return NULL; } +static bool pre_2_10_vmstate_dummy_icp_needed(void *opaque) +{ + /* Dummy entries correspond to unused ICPState objects in older QEMUs, + * and newer QEMUs don't even have them. In both cases, we don't want + * to send anything on the wire. + */ + return false; +} + +static const VMStateDescription pre_2_10_vmstate_dummy_icp = { + .name = "icp/server", + .version_id = 1, + .minimum_version_id = 1, + .needed = pre_2_10_vmstate_dummy_icp_needed, + .fields = (VMStateField[]) { + VMSTATE_UNUSED(4), /* uint32_t xirr */ + VMSTATE_UNUSED(1), /* uint8_t pending_priority */ + VMSTATE_UNUSED(1), /* uint8_t mfrr */ + VMSTATE_END_OF_LIST() + }, +}; + +static void pre_2_10_vmstate_register_dummy_icp(int i) +{ + vmstate_register(NULL, i, &pre_2_10_vmstate_dummy_icp, + (void *)(uintptr_t) i); +} + +static void pre_2_10_vmstate_unregister_dummy_icp(int i) +{ + vmstate_unregister(NULL, &pre_2_10_vmstate_dummy_icp, + (void *)(uintptr_t) i); +} + +static inline int xics_max_server_number(void) +{ + return DIV_ROUND_UP(max_cpus * kvmppc_smt_threads(), smp_threads); +} + static void xics_system_init(MachineState *machine, int nr_irqs, Error **errp) { sPAPRMachineState *spapr = SPAPR_MACHINE(machine); + sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine); if (kvm_enabled()) { if (machine_kernel_irqchip_allowed(machine) && @@ -151,6 +191,17 @@ static void xics_system_init(MachineState *machine, int nr_irqs, Error **errp) return; } } + + if (smc->pre_2_10_has_unused_icps) { + int i; + + for (i = 0; i < xics_max_server_number(); i++) { + /* Dummy entries get deregistered when real ICPState objects + * are registered during CPU core hotplug. + */ + pre_2_10_vmstate_register_dummy_icp(i); + } + } } static int spapr_fixup_cpu_smt_dt(void *fdt, int offset, PowerPCCPU *cpu, @@ -979,7 +1030,6 @@ static void *spapr_build_fdt(sPAPRMachineState *spapr, void *fdt; sPAPRPHBState *phb; char *buf; - int smt = kvmppc_smt_threads(); fdt = g_malloc0(FDT_MAX_SIZE); _FDT((fdt_create_empty_tree(fdt, FDT_MAX_SIZE))); @@ -1019,7 +1069,7 @@ static void *spapr_build_fdt(sPAPRMachineState *spapr, _FDT(fdt_setprop_cell(fdt, 0, "#size-cells", 2)); /* /interrupt controller */ - spapr_dt_xics(DIV_ROUND_UP(max_cpus * smt, smp_threads), fdt, PHANDLE_XICP); + spapr_dt_xics(xics_max_server_number(), fdt, PHANDLE_XICP); ret = spapr_populate_memory(spapr, fdt); if (ret < 0) { @@ -1326,7 +1376,6 @@ static void ppc_spapr_reset(void) * Set the GR bit in PATB so that we know there is no HPT. 
*/ spapr->patb_entry = PATBE1_GR; } else { - spapr->patb_entry = 0; spapr_setup_hpt_and_vrma(spapr); } @@ -1346,6 +1395,8 @@ static void ppc_spapr_reset(void) if (!spapr->cas_reboot) { spapr_ovec_cleanup(spapr->ov5_cas); spapr->ov5_cas = spapr_ovec_new(); + + ppc_set_compat_all(spapr->max_compat_pvr, &error_fatal); } fdt = spapr_build_fdt(spapr, rtas_addr, spapr->rtas_size); @@ -1443,6 +1494,18 @@ static int spapr_post_load(void *opaque, int version_id) err = spapr_rtc_import_offset(&spapr->rtc, spapr->rtc_offset); } + if (spapr->patb_entry) { + PowerPCCPU *cpu = POWERPC_CPU(first_cpu); + bool radix = !!(spapr->patb_entry & PATBE1_GR); + bool gtse = !!(cpu->env.spr[SPR_LPCR] & LPCR_GTSE); + + err = kvmppc_configure_v3_mmu(cpu, radix, gtse, spapr->patb_entry); + if (err) { + error_report("Process table config unsupported by the host"); + return -EINVAL; + } + } + return err; } @@ -1558,13 +1621,19 @@ static int htab_save_setup(QEMUFile *f, void *opaque) sPAPRMachineState *spapr = opaque; /* "Iteration" header */ - qemu_put_be32(f, spapr->htab_shift); + if (!spapr->htab_shift) { + qemu_put_be32(f, -1); + } else { + qemu_put_be32(f, spapr->htab_shift); + } if (spapr->htab) { spapr->htab_save_index = 0; spapr->htab_first_pass = true; } else { - assert(kvm_enabled()); + if (spapr->htab_shift) { + assert(kvm_enabled()); + } } @@ -1710,7 +1779,12 @@ static int htab_save_iterate(QEMUFile *f, void *opaque) int rc = 0; /* Iteration header */ - qemu_put_be32(f, 0); + if (!spapr->htab_shift) { + qemu_put_be32(f, -1); + return 0; + } else { + qemu_put_be32(f, 0); + } if (!spapr->htab) { assert(kvm_enabled()); @@ -1744,7 +1818,12 @@ static int htab_save_complete(QEMUFile *f, void *opaque) int fd; /* Iteration header */ - qemu_put_be32(f, 0); + if (!spapr->htab_shift) { + qemu_put_be32(f, -1); + return 0; + } else { + qemu_put_be32(f, 0); + } if (!spapr->htab) { int rc; @@ -1788,6 +1867,11 @@ static int htab_load(QEMUFile *f, void *opaque, int version_id) section_hdr = qemu_get_be32(f); + if (section_hdr == -1) { + spapr_free_hpt(spapr); + return 0; + } + if (section_hdr) { Error *local_err = NULL; @@ -2131,7 +2215,7 @@ static void ppc_spapr_init(MachineState *machine) machine->cpu_model = kvm_enabled() ? 
"host" : smc->tcg_default_cpu; } - ppc_cpu_parse_features(machine->cpu_model); + spapr_cpu_parse_features(spapr); spapr_init_cpus(spapr); @@ -2503,6 +2587,10 @@ static void spapr_machine_initfn(Object *obj) " place of standard EPOW events when possible" " (required for memory hot-unplug support)", NULL); + + ppc_compat_add_property(obj, "max-cpu-compat", &spapr->max_compat_pvr, + "Maximum permitted CPU compatibility mode", + &error_fatal); } static void spapr_machine_finalizefn(Object *obj) @@ -2548,12 +2636,6 @@ static void spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size, spapr_drc_attach(drc, dev, fdt, fdt_offset, !dev->hotplugged, errp); addr += SPAPR_MEMORY_BLOCK_SIZE; - if (!dev->hotplugged) { - sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc); - /* guests expect coldplugged LMBs to be pre-allocated */ - drck->set_allocation_state(drc, SPAPR_DR_ALLOCATION_STATE_USABLE); - drck->set_isolation_state(drc, SPAPR_DR_ISOLATION_STATE_UNISOLATED); - } } /* send hotplug notification to the * guest only in case of hotplugged memory @@ -2806,9 +2888,24 @@ static void spapr_core_unplug(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { MachineState *ms = MACHINE(qdev_get_machine()); + sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(ms); CPUCore *cc = CPU_CORE(dev); CPUArchId *core_slot = spapr_find_cpu_slot(ms, cc->core_id, NULL); + if (smc->pre_2_10_has_unused_icps) { + sPAPRCPUCore *sc = SPAPR_CPU_CORE(OBJECT(dev)); + sPAPRCPUCoreClass *scc = SPAPR_CPU_CORE_GET_CLASS(OBJECT(cc)); + const char *typename = object_class_get_name(scc->cpu_class); + size_t size = object_type_get_instance_size(typename); + int i; + + for (i = 0; i < cc->nr_threads; i++) { + CPUState *cs = CPU(sc->threads + i * size); + + pre_2_10_vmstate_register_dummy_icp(cs->cpu_index); + } + } + assert(core_slot); core_slot->cpu = NULL; object_unparent(OBJECT(dev)); @@ -2860,6 +2957,7 @@ static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev, { sPAPRMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); MachineClass *mc = MACHINE_GET_CLASS(spapr); + sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc); sPAPRCPUCore *core = SPAPR_CPU_CORE(OBJECT(dev)); CPUCore *cc = CPU_CORE(dev); CPUState *cs = CPU(core->threads); @@ -2905,17 +3003,23 @@ static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev, * of hotplugged CPUs. */ spapr_hotplug_req_add_by_index(drc); - } else { - /* - * Set the right DRC states for cold plugged CPU. 
- */ - if (drc) { - sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc); - drck->set_allocation_state(drc, SPAPR_DR_ALLOCATION_STATE_USABLE); - drck->set_isolation_state(drc, SPAPR_DR_ISOLATION_STATE_UNISOLATED); - } } core_slot->cpu = OBJECT(dev); + + if (smc->pre_2_10_has_unused_icps) { + sPAPRCPUCoreClass *scc = SPAPR_CPU_CORE_GET_CLASS(OBJECT(cc)); + const char *typename = object_class_get_name(scc->cpu_class); + size_t size = object_type_get_instance_size(typename); + int i; + + for (i = 0; i < cc->nr_threads; i++) { + sPAPRCPUCore *sc = SPAPR_CPU_CORE(dev); + void *obj = sc->threads + i * size; + + cs = CPU(obj); + pre_2_10_vmstate_unregister_dummy_icp(cs->cpu_index); + } + } } static void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, @@ -3356,7 +3460,12 @@ DEFINE_SPAPR_MACHINE(2_10, "2.10", true); * pseries-2.9 */ #define SPAPR_COMPAT_2_9 \ - HW_COMPAT_2_9 + HW_COMPAT_2_9 \ + { \ + .driver = TYPE_POWERPC_CPU, \ + .property = "pre-2.10-migration", \ + .value = "on", \ + }, \ static void spapr_machine_2_9_instance_options(MachineState *machine) { @@ -3365,9 +3474,12 @@ static void spapr_machine_2_9_instance_options(MachineState *machine) static void spapr_machine_2_9_class_options(MachineClass *mc) { + sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc); + spapr_machine_2_10_class_options(mc); SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_9); mc->numa_auto_assign_ram = numa_legacy_auto_assign_ram; + smc->pre_2_10_has_unused_icps = true; } DEFINE_SPAPR_MACHINE(2_9, "2.9", false); @@ -3580,9 +3692,6 @@ DEFINE_SPAPR_MACHINE(2_4, "2.4", false); static void spapr_machine_2_3_instance_options(MachineState *machine) { spapr_machine_2_4_instance_options(machine); - savevm_skip_section_footers(); - global_state_set_optional(); - savevm_skip_configuration(); } static void spapr_machine_2_3_class_options(MachineClass *mc) diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c index 9fb896b407..ea278ce2a7 100644 --- a/hw/ppc/spapr_cpu_core.c +++ b/hw/ppc/spapr_cpu_core.c @@ -20,6 +20,57 @@ #include "sysemu/numa.h" #include "qemu/error-report.h" +void spapr_cpu_parse_features(sPAPRMachineState *spapr) +{ + /* + * Backwards compatibility hack: + * + * CPUs had a "compat=" property which didn't make sense for + * anything except pseries. It was replaced by "max-cpu-compat" + * machine option. This supports old command lines like + * -cpu POWER8,compat=power7 + * By stripping the compat option and applying it to the machine + * before passing it on to the cpu level parser. 
+ */ + gchar **inpieces; + int i, j; + gchar *compat_str = NULL; + + inpieces = g_strsplit(MACHINE(spapr)->cpu_model, ",", 0); + + /* inpieces[0] is the actual model string */ + i = 1; + j = 1; + while (inpieces[i]) { + if (g_str_has_prefix(inpieces[i], "compat=")) { + /* in case of multiple compat= options */ + g_free(compat_str); + compat_str = inpieces[i]; + } else { + j++; + } + + i++; + /* Excise compat options from list */ + inpieces[j] = inpieces[i]; + } + + if (compat_str) { + char *val = compat_str + strlen("compat="); + gchar *newprops = g_strjoinv(",", inpieces); + + object_property_set_str(OBJECT(spapr), val, "max-cpu-compat", + &error_fatal); + + ppc_cpu_parse_features(newprops); + g_free(newprops); + } else { + ppc_cpu_parse_features(MACHINE(spapr)->cpu_model); + } + + g_strfreev(inpieces); +} + static void spapr_cpu_reset(void *opaque) { sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); @@ -67,16 +118,6 @@ static void spapr_cpu_init(sPAPRMachineState *spapr, PowerPCCPU *cpu, /* Enable PAPR mode in TCG or KVM */ cpu_ppc_set_papr(cpu, PPC_VIRTUAL_HYPERVISOR(spapr)); - if (cpu->max_compat) { - Error *local_err = NULL; - - ppc_set_compat(cpu, cpu->max_compat, &local_err); - if (local_err) { - error_propagate(errp, local_err); - return; - } - } - qemu_register_reset(spapr_cpu_reset, cpu); spapr_cpu_reset(cpu); } @@ -137,7 +178,7 @@ static void spapr_cpu_core_realize_child(Object *child, Error **errp) sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); CPUState *cs = CPU(child); PowerPCCPU *cpu = POWERPC_CPU(cs); - Object *obj = NULL; + Object *obj; object_property_set_bool(child, true, "realized", &local_err); if (local_err) { @@ -157,13 +198,14 @@ static void spapr_cpu_core_realize_child(Object *child, Error **errp) object_property_add_const_link(obj, ICP_PROP_CPU, child, &error_abort); object_property_set_bool(obj, true, "realized", &local_err); if (local_err) { - goto error; + goto free_icp; } return; -error: +free_icp: object_unparent(obj); +error: error_propagate(errp, local_err); } diff --git a/hw/ppc/spapr_drc.c b/hw/ppc/spapr_drc.c index 5cb75bbf34..bd40b84cfc 100644 --- a/hw/ppc/spapr_drc.c +++ b/hw/ppc/spapr_drc.c @@ -46,30 +46,64 @@ uint32_t spapr_drc_index(sPAPRDRConnector *drc) | (drc->id & DRC_INDEX_ID_MASK); } -static uint32_t set_isolation_state(sPAPRDRConnector *drc, - sPAPRDRIsolationState state) +static uint32_t drc_isolate_physical(sPAPRDRConnector *drc) { - trace_spapr_drc_set_isolation_state(spapr_drc_index(drc), state); - /* if the guest is configuring a device attached to this DRC, we * should reset the configuration state at this point since it may * no longer be reliable (guest released device and needs to start * over, or unplug occurred so the FDT is no longer valid) */ - if (state == SPAPR_DR_ISOLATION_STATE_ISOLATED) { - g_free(drc->ccs); - drc->ccs = NULL; - } + g_free(drc->ccs); + drc->ccs = NULL; - if (state == SPAPR_DR_ISOLATION_STATE_UNISOLATED) { - /* cannot unisolate a non-existent resource, and, or resources - * which are in an 'UNUSABLE' allocation state. (PAPR 2.7, 13.5.3.5) - */ - if (!drc->dev || - drc->allocation_state == SPAPR_DR_ALLOCATION_STATE_UNUSABLE) { - return RTAS_OUT_NO_SUCH_INDICATOR; + drc->isolation_state = SPAPR_DR_ISOLATION_STATE_ISOLATED; + + /* if we're awaiting release, but still in an unconfigured state, + * it's likely the guest is still in the process of configuring + * the device and is transitioning the devices to an ISOLATED + * state as a part of that process. 
so we only complete the + * removal when this transition happens for a device in a + * configured state, as suggested by the state diagram from PAPR+ + * 2.7, 13.4 + */ + if (drc->awaiting_release) { + uint32_t drc_index = spapr_drc_index(drc); + if (drc->configured) { + trace_spapr_drc_set_isolation_state_finalizing(drc_index); + spapr_drc_detach(drc, DEVICE(drc->dev), NULL); + } else { + trace_spapr_drc_set_isolation_state_deferring(drc_index); } } + drc->configured = false; + + return RTAS_OUT_SUCCESS; +} + +static uint32_t drc_unisolate_physical(sPAPRDRConnector *drc) +{ + /* cannot unisolate a non-existent resource, and, or resources + * which are in an 'UNUSABLE' allocation state. (PAPR 2.7, + * 13.5.3.5) + */ + if (!drc->dev) { + return RTAS_OUT_NO_SUCH_INDICATOR; + } + + drc->isolation_state = SPAPR_DR_ISOLATION_STATE_UNISOLATED; + + return RTAS_OUT_SUCCESS; +} + +static uint32_t drc_isolate_logical(sPAPRDRConnector *drc) +{ + /* if the guest is configuring a device attached to this DRC, we + * should reset the configuration state at this point since it may + * no longer be reliable (guest released device and needs to start + * over, or unplug occurred so the FDT is no longer valid) + */ + g_free(drc->ccs); + drc->ccs = NULL; /* * Fail any requests to ISOLATE the LMB DRC if this LMB doesn't @@ -81,66 +115,87 @@ static uint32_t set_isolation_state(sPAPRDRConnector *drc, * If the LMB being removed doesn't belong to a DIMM device that is * actually being unplugged, fail the isolation request here. */ - if (spapr_drc_type(drc) == SPAPR_DR_CONNECTOR_TYPE_LMB) { - if ((state == SPAPR_DR_ISOLATION_STATE_ISOLATED) && - !drc->awaiting_release) { - return RTAS_OUT_HW_ERROR; - } + if (spapr_drc_type(drc) == SPAPR_DR_CONNECTOR_TYPE_LMB + && !drc->awaiting_release) { + return RTAS_OUT_HW_ERROR; } - drc->isolation_state = state; + drc->isolation_state = SPAPR_DR_ISOLATION_STATE_ISOLATED; - if (drc->isolation_state == SPAPR_DR_ISOLATION_STATE_ISOLATED) { - /* if we're awaiting release, but still in an unconfigured state, - * it's likely the guest is still in the process of configuring - * the device and is transitioning the devices to an ISOLATED - * state as a part of that process. so we only complete the - * removal when this transition happens for a device in a - * configured state, as suggested by the state diagram from - * PAPR+ 2.7, 13.4 - */ - if (drc->awaiting_release) { - uint32_t drc_index = spapr_drc_index(drc); - if (drc->configured) { - trace_spapr_drc_set_isolation_state_finalizing(drc_index); - spapr_drc_detach(drc, DEVICE(drc->dev), NULL); - } else { - trace_spapr_drc_set_isolation_state_deferring(drc_index); - } + /* if we're awaiting release, but still in an unconfigured state, + * it's likely the guest is still in the process of configuring + * the device and is transitioning the devices to an ISOLATED + * state as a part of that process. 
so we only complete the + * removal when this transition happens for a device in a + * configured state, as suggested by the state diagram from PAPR+ + * 2.7, 13.4 + */ + if (drc->awaiting_release) { + uint32_t drc_index = spapr_drc_index(drc); + if (drc->configured) { + trace_spapr_drc_set_isolation_state_finalizing(drc_index); + spapr_drc_detach(drc, DEVICE(drc->dev), NULL); + } else { + trace_spapr_drc_set_isolation_state_deferring(drc_index); } - drc->configured = false; } + drc->configured = false; return RTAS_OUT_SUCCESS; } -static uint32_t set_allocation_state(sPAPRDRConnector *drc, - sPAPRDRAllocationState state) +static uint32_t drc_unisolate_logical(sPAPRDRConnector *drc) { - trace_spapr_drc_set_allocation_state(spapr_drc_index(drc), state); + /* cannot unisolate a non-existent resource, and, or resources + * which are in an 'UNUSABLE' allocation state. (PAPR 2.7, + * 13.5.3.5) + */ + if (!drc->dev || + drc->allocation_state == SPAPR_DR_ALLOCATION_STATE_UNUSABLE) { + return RTAS_OUT_NO_SUCH_INDICATOR; + } + + drc->isolation_state = SPAPR_DR_ISOLATION_STATE_UNISOLATED; + + return RTAS_OUT_SUCCESS; +} - if (state == SPAPR_DR_ALLOCATION_STATE_USABLE) { - /* if there's no resource/device associated with the DRC, there's - * no way for us to put it in an allocation state consistent with - * being 'USABLE'. PAPR 2.7, 13.5.3.4 documents that this should - * result in an RTAS return code of -3 / "no such indicator" +static uint32_t drc_set_usable(sPAPRDRConnector *drc) +{ + /* if there's no resource/device associated with the DRC, there's + * no way for us to put it in an allocation state consistent with + * being 'USABLE'. PAPR 2.7, 13.5.3.4 documents that this should + * result in an RTAS return code of -3 / "no such indicator" + */ + if (!drc->dev) { + return RTAS_OUT_NO_SUCH_INDICATOR; + } + if (drc->awaiting_release && drc->awaiting_allocation) { + /* kernel is acknowledging a previous hotplug event + * while we are already removing it. + * it's safe to ignore awaiting_allocation here since we know the + * situation is predicated on the guest either already having done + * so (boot-time hotplug), or never being able to acquire in the + * first place (hotplug followed by immediate unplug). */ - if (!drc->dev) { - return RTAS_OUT_NO_SUCH_INDICATOR; - } + return RTAS_OUT_NO_SUCH_INDICATOR; } - if (spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PCI) { - drc->allocation_state = state; - if (drc->awaiting_release && - drc->allocation_state == SPAPR_DR_ALLOCATION_STATE_UNUSABLE) { - uint32_t drc_index = spapr_drc_index(drc); - trace_spapr_drc_set_allocation_state_finalizing(drc_index); - spapr_drc_detach(drc, DEVICE(drc->dev), NULL); - } else if (drc->allocation_state == SPAPR_DR_ALLOCATION_STATE_USABLE) { - drc->awaiting_allocation = false; - } + drc->allocation_state = SPAPR_DR_ALLOCATION_STATE_USABLE; + drc->awaiting_allocation = false; + + return RTAS_OUT_SUCCESS; +} + +static uint32_t drc_set_unusable(sPAPRDRConnector *drc) +{ + drc->allocation_state = SPAPR_DR_ALLOCATION_STATE_UNUSABLE; + if (drc->awaiting_release) { + uint32_t drc_index = spapr_drc_index(drc); + trace_spapr_drc_set_allocation_state_finalizing(drc_index); + spapr_drc_detach(drc, DEVICE(drc->dev), NULL); } + return RTAS_OUT_SUCCESS; } @@ -172,12 +227,6 @@ static const char *spapr_drc_name(sPAPRDRConnector *drc) return g_strdup_printf("%s%d", drck->drc_name_prefix, drc->id); } -/* has the guest been notified of device attachment? 
*/ -static void set_signalled(sPAPRDRConnector *drc) -{ - drc->signalled = true; -} - /* * dr-entity-sense sensor value * returned via get-sensor-state RTAS calls @@ -304,33 +353,12 @@ void spapr_drc_attach(sPAPRDRConnector *drc, DeviceState *d, void *fdt, } g_assert(fdt || coldplug); - /* NOTE: setting initial isolation state to UNISOLATED means we can't - * detach unless guest has a userspace/kernel that moves this state - * back to ISOLATED in response to an unplug event, or this is done - * manually by the admin prior. if we force things while the guest - * may be accessing the device, we can easily crash the guest, so we - * we defer completion of removal in such cases to the reset() hook. - */ - if (spapr_drc_type(drc) == SPAPR_DR_CONNECTOR_TYPE_PCI) { - drc->isolation_state = SPAPR_DR_ISOLATION_STATE_UNISOLATED; - } drc->dr_indicator = SPAPR_DR_INDICATOR_ACTIVE; drc->dev = d; drc->fdt = fdt; drc->fdt_start_offset = fdt_start_offset; drc->configured = coldplug; - /* 'logical' DR resources such as memory/cpus are in some cases treated - * as a pool of resources from which the guest is free to choose from - * based on only a count. for resources that can be assigned in this - * fashion, we must assume the resource is signalled immediately - * since a single hotplug request might make an arbitrary number of - * such attached resources available to the guest, as opposed to - * 'physical' DR resources such as PCI where each device/resource is - * signalled individually. - */ - drc->signalled = (spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PCI) - ? true : coldplug; if (spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PCI) { drc->awaiting_allocation = true; @@ -342,49 +370,8 @@ void spapr_drc_attach(sPAPRDRConnector *drc, DeviceState *d, void *fdt, NULL, 0, NULL); } -void spapr_drc_detach(sPAPRDRConnector *drc, DeviceState *d, Error **errp) +static void spapr_drc_release(sPAPRDRConnector *drc) { - trace_spapr_drc_detach(spapr_drc_index(drc)); - - /* if we've signalled device presence to the guest, or if the guest - * has gone ahead and configured the device (via manually-executed - * device add via drmgr in guest, namely), we need to wait - * for the guest to quiesce the device before completing detach. - * Otherwise, we can assume the guest hasn't seen it and complete the - * detach immediately. Note that there is a small race window - * just before, or during, configuration, which is this context - * refers mainly to fetching the device tree via RTAS. - * During this window the device access will be arbitrated by - * associated DRC, which will simply fail the RTAS calls as invalid. - * This is recoverable within guest and current implementations of - * drmgr should be able to cope. 
- */ - if (!drc->signalled && !drc->configured) { - /* if the guest hasn't seen the device we can't rely on it to - * set it back to an isolated state via RTAS, so do it here manually - */ - drc->isolation_state = SPAPR_DR_ISOLATION_STATE_ISOLATED; - } - - if (drc->isolation_state != SPAPR_DR_ISOLATION_STATE_ISOLATED) { - trace_spapr_drc_awaiting_isolated(spapr_drc_index(drc)); - drc->awaiting_release = true; - return; - } - - if (spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PCI && - drc->allocation_state != SPAPR_DR_ALLOCATION_STATE_UNUSABLE) { - trace_spapr_drc_awaiting_unusable(spapr_drc_index(drc)); - drc->awaiting_release = true; - return; - } - - if (drc->awaiting_allocation) { - drc->awaiting_release = true; - trace_spapr_drc_awaiting_allocation(spapr_drc_index(drc)); - return; - } - drc->dr_indicator = SPAPR_DR_INDICATOR_INACTIVE; /* Calling release callbacks based on spapr_drc_type(drc). */ @@ -412,6 +399,32 @@ void spapr_drc_detach(sPAPRDRConnector *drc, DeviceState *d, Error **errp) drc->dev = NULL; } +void spapr_drc_detach(sPAPRDRConnector *drc, DeviceState *d, Error **errp) +{ + trace_spapr_drc_detach(spapr_drc_index(drc)); + + if (drc->isolation_state != SPAPR_DR_ISOLATION_STATE_ISOLATED) { + trace_spapr_drc_awaiting_isolated(spapr_drc_index(drc)); + drc->awaiting_release = true; + return; + } + + if (spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PCI && + drc->allocation_state != SPAPR_DR_ALLOCATION_STATE_UNUSABLE) { + trace_spapr_drc_awaiting_unusable(spapr_drc_index(drc)); + drc->awaiting_release = true; + return; + } + + if (drc->awaiting_allocation) { + drc->awaiting_release = true; + trace_spapr_drc_awaiting_allocation(spapr_drc_index(drc)); + return; + } + + spapr_drc_release(drc); +} + static bool release_pending(sPAPRDRConnector *drc) { return drc->awaiting_release; @@ -420,7 +433,6 @@ static bool release_pending(sPAPRDRConnector *drc) static void reset(DeviceState *d) { sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(d); - sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc); trace_spapr_drc_reset(spapr_drc_index(drc)); @@ -428,32 +440,26 @@ static void reset(DeviceState *d) drc->ccs = NULL; /* immediately upon reset we can safely assume DRCs whose devices - * are pending removal can be safely removed, and that they will - * subsequently be left in an ISOLATED state. move the DRC to this - * state in these cases (which will in turn complete any pending - * device removals) + * are pending removal can be safely removed. */ if (drc->awaiting_release) { - drck->set_isolation_state(drc, SPAPR_DR_ISOLATION_STATE_ISOLATED); - /* generally this should also finalize the removal, but if the device - * hasn't yet been configured we normally defer removal under the - * assumption that this transition is taking place as part of device - * configuration. 
so check if we're still waiting after this, and - * force removal if we are - */ - if (drc->awaiting_release) { - spapr_drc_detach(drc, DEVICE(drc->dev), NULL); - } - - /* non-PCI devices may be awaiting a transition to UNUSABLE */ - if (spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PCI && - drc->awaiting_release) { - drck->set_allocation_state(drc, SPAPR_DR_ALLOCATION_STATE_UNUSABLE); - } + spapr_drc_release(drc); } - if (drck->dr_entity_sense(drc) == SPAPR_DR_ENTITY_SENSE_PRESENT) { - drck->set_signalled(drc); + drc->awaiting_allocation = false; + + if (drc->dev) { + /* A device present at reset is coldplugged */ + drc->isolation_state = SPAPR_DR_ISOLATION_STATE_UNISOLATED; + if (spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PCI) { + drc->allocation_state = SPAPR_DR_ALLOCATION_STATE_USABLE; + } + } else { + /* Otherwise device is absent, but might be hotplugged */ + drc->isolation_state = SPAPR_DR_ISOLATION_STATE_ISOLATED; + if (spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PCI) { + drc->allocation_state = SPAPR_DR_ALLOCATION_STATE_UNUSABLE; + } } } @@ -479,7 +485,7 @@ static bool spapr_drc_needed(void *opaque) case SPAPR_DR_CONNECTOR_TYPE_LMB: rc = !((drc->isolation_state == SPAPR_DR_ISOLATION_STATE_UNISOLATED) && (drc->allocation_state == SPAPR_DR_ALLOCATION_STATE_USABLE) && - drc->configured && drc->signalled && !drc->awaiting_release); + drc->configured && !drc->awaiting_release); break; case SPAPR_DR_CONNECTOR_TYPE_PHB: case SPAPR_DR_CONNECTOR_TYPE_VIO: @@ -501,7 +507,6 @@ static const VMStateDescription vmstate_spapr_drc = { VMSTATE_BOOL(configured, sPAPRDRConnector), VMSTATE_BOOL(awaiting_release, sPAPRDRConnector), VMSTATE_BOOL(awaiting_allocation, sPAPRDRConnector), - VMSTATE_BOOL(signalled, sPAPRDRConnector), VMSTATE_END_OF_LIST() } }; @@ -596,10 +601,7 @@ static void spapr_dr_connector_class_init(ObjectClass *k, void *data) dk->reset = reset; dk->realize = realize; dk->unrealize = unrealize; - drck->set_isolation_state = set_isolation_state; - drck->set_allocation_state = set_allocation_state; drck->release_pending = release_pending; - drck->set_signalled = set_signalled; /* * Reason: it crashes FIXME find and document the real reason */ @@ -611,6 +613,8 @@ static void spapr_drc_physical_class_init(ObjectClass *k, void *data) sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_CLASS(k); drck->dr_entity_sense = physical_entity_sense; + drck->isolate = drc_isolate_physical; + drck->unisolate = drc_unisolate_physical; } static void spapr_drc_logical_class_init(ObjectClass *k, void *data) @@ -618,6 +622,8 @@ static void spapr_drc_logical_class_init(ObjectClass *k, void *data) sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_CLASS(k); drck->dr_entity_sense = logical_entity_sense; + drck->isolate = drc_isolate_logical; + drck->unisolate = drc_unisolate_logical; } static void spapr_drc_cpu_class_init(ObjectClass *k, void *data) @@ -858,24 +864,45 @@ static uint32_t rtas_set_isolation_state(uint32_t idx, uint32_t state) sPAPRDRConnectorClass *drck; if (!drc) { - return RTAS_OUT_PARAM_ERROR; + return RTAS_OUT_NO_SUCH_INDICATOR; } + trace_spapr_drc_set_isolation_state(spapr_drc_index(drc), state); + drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc); - return drck->set_isolation_state(drc, state); + + switch (state) { + case SPAPR_DR_ISOLATION_STATE_ISOLATED: + return drck->isolate(drc); + + case SPAPR_DR_ISOLATION_STATE_UNISOLATED: + return drck->unisolate(drc); + + default: + return RTAS_OUT_PARAM_ERROR; + } } static uint32_t rtas_set_allocation_state(uint32_t idx, uint32_t state) { 
sPAPRDRConnector *drc = spapr_drc_by_index(idx); - sPAPRDRConnectorClass *drck; - if (!drc) { - return RTAS_OUT_PARAM_ERROR; + if (!drc || !object_dynamic_cast(OBJECT(drc), TYPE_SPAPR_DRC_LOGICAL)) { + return RTAS_OUT_NO_SUCH_INDICATOR; } - drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc); - return drck->set_allocation_state(drc, state); + trace_spapr_drc_set_allocation_state(spapr_drc_index(drc), state); + + switch (state) { + case SPAPR_DR_ALLOCATION_STATE_USABLE: + return drc_set_usable(drc); + + case SPAPR_DR_ALLOCATION_STATE_UNUSABLE: + return drc_set_unusable(drc); + + default: + return RTAS_OUT_PARAM_ERROR; + } } static uint32_t rtas_set_dr_indicator(uint32_t idx, uint32_t state) diff --git a/hw/ppc/spapr_events.c b/hw/ppc/spapr_events.c index 171aedc7e0..587a3dacb2 100644 --- a/hw/ppc/spapr_events.c +++ b/hw/ppc/spapr_events.c @@ -475,13 +475,6 @@ static void spapr_powerdown_req(Notifier *n, void *opaque) RTAS_LOG_TYPE_EPOW))); } -static void spapr_hotplug_set_signalled(uint32_t drc_index) -{ - sPAPRDRConnector *drc = spapr_drc_by_index(drc_index); - sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc); - drck->set_signalled(drc); -} - static void spapr_hotplug_req_event(uint8_t hp_id, uint8_t hp_action, sPAPRDRConnectorType drc_type, union drc_identifier *drc_id) @@ -528,9 +521,6 @@ static void spapr_hotplug_req_event(uint8_t hp_id, uint8_t hp_action, switch (drc_type) { case SPAPR_DR_CONNECTOR_TYPE_PCI: hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_PCI; - if (hp->hotplug_action == RTAS_LOG_V6_HP_ACTION_ADD) { - spapr_hotplug_set_signalled(drc_id->index); - } break; case SPAPR_DR_CONNECTOR_TYPE_LMB: hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_MEMORY; diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c index aa1ffea9e5..8624ce8d5b 100644 --- a/hw/ppc/spapr_hcall.c +++ b/hw/ppc/spapr_hcall.c @@ -1045,11 +1045,11 @@ static target_ulong h_signal_sys_reset(PowerPCCPU *cpu, } } -static uint32_t cas_check_pvr(PowerPCCPU *cpu, target_ulong *addr, - Error **errp) +static uint32_t cas_check_pvr(sPAPRMachineState *spapr, PowerPCCPU *cpu, + target_ulong *addr, Error **errp) { bool explicit_match = false; /* Matched the CPU's real PVR */ - uint32_t max_compat = cpu->max_compat; + uint32_t max_compat = spapr->max_compat_pvr; uint32_t best_compat = 0; int i; @@ -1105,7 +1105,7 @@ static target_ulong h_client_architecture_support(PowerPCCPU *cpu, bool guest_radix; Error *local_err = NULL; - cas_pvr = cas_check_pvr(cpu, &addr, &local_err); + cas_pvr = cas_check_pvr(spapr, cpu, &addr, &local_err); if (local_err) { error_report_err(local_err); return H_HARDWARE; diff --git a/hw/ppc/spapr_iommu.c b/hw/ppc/spapr_iommu.c index 0341bc069d..8656a54a3e 100644 --- a/hw/ppc/spapr_iommu.c +++ b/hw/ppc/spapr_iommu.c @@ -231,7 +231,7 @@ static const VMStateDescription vmstate_spapr_tce_table = { .post_load = spapr_tce_table_post_load, .fields = (VMStateField []) { /* Sanity check */ - VMSTATE_UINT32_EQUAL(liobn, sPAPRTCETable), + VMSTATE_UINT32_EQUAL(liobn, sPAPRTCETable, NULL), /* IOMMU state */ VMSTATE_UINT32(mig_nb_table, sPAPRTCETable), diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c index 0b447f2eed..3b37dcdc09 100644 --- a/hw/ppc/spapr_pci.c +++ b/hw/ppc/spapr_pci.c @@ -1848,7 +1848,7 @@ static const VMStateDescription vmstate_spapr_pci_lsi = { .version_id = 1, .minimum_version_id = 1, .fields = (VMStateField[]) { - VMSTATE_UINT32_EQUAL(irq, struct spapr_pci_lsi), + VMSTATE_UINT32_EQUAL(irq, struct spapr_pci_lsi, NULL), VMSTATE_END_OF_LIST() }, @@ -1936,7 +1936,7 @@ static const VMStateDescription 
vmstate_spapr_pci = { .pre_save = spapr_pci_pre_save, .post_load = spapr_pci_post_load, .fields = (VMStateField[]) { - VMSTATE_UINT64_EQUAL(buid, sPAPRPHBState), + VMSTATE_UINT64_EQUAL(buid, sPAPRPHBState, NULL), VMSTATE_UINT32_TEST(mig_liobn, sPAPRPHBState, pre_2_8_migration), VMSTATE_UINT64_TEST(mig_mem_win_addr, sPAPRPHBState, pre_2_8_migration), VMSTATE_UINT64_TEST(mig_mem_win_size, sPAPRPHBState, pre_2_8_migration), diff --git a/hw/ppc/spapr_vio.c b/hw/ppc/spapr_vio.c index a0ee4fd265..ea3bc8bd9e 100644 --- a/hw/ppc/spapr_vio.c +++ b/hw/ppc/spapr_vio.c @@ -557,8 +557,8 @@ const VMStateDescription vmstate_spapr_vio = { .minimum_version_id = 1, .fields = (VMStateField[]) { /* Sanity check */ - VMSTATE_UINT32_EQUAL(reg, VIOsPAPRDevice), - VMSTATE_UINT32_EQUAL(irq, VIOsPAPRDevice), + VMSTATE_UINT32_EQUAL(reg, VIOsPAPRDevice, NULL), + VMSTATE_UINT32_EQUAL(irq, VIOsPAPRDevice, NULL), /* General VIO device state */ VMSTATE_UINT64(signal_state, VIOsPAPRDevice), diff --git a/hw/usb/hcd-uhci.c b/hw/usb/hcd-uhci.c index ca72a80f27..e3562a4c60 100644 --- a/hw/usb/hcd-uhci.c +++ b/hw/usb/hcd-uhci.c @@ -415,7 +415,7 @@ static const VMStateDescription vmstate_uhci = { .post_load = uhci_post_load, .fields = (VMStateField[]) { VMSTATE_PCI_DEVICE(dev, UHCIState), - VMSTATE_UINT8_EQUAL(num_ports_vmstate, UHCIState), + VMSTATE_UINT8_EQUAL(num_ports_vmstate, UHCIState, NULL), VMSTATE_STRUCT_ARRAY(ports, UHCIState, NB_PORTS, 1, vmstate_uhci_port, UHCIPort), VMSTATE_UINT16(cmd, UHCIState), diff --git a/hw/xen/xen-common.c b/hw/xen/xen-common.c index d3fa705a82..632a938dcc 100644 --- a/hw/xen/xen-common.c +++ b/hw/xen/xen-common.c @@ -138,20 +138,35 @@ static int xen_init(MachineState *ms) return -1; } qemu_add_vm_change_state_handler(xen_change_state_handler, NULL); - - global_state_set_optional(); - savevm_skip_configuration(); - savevm_skip_section_footers(); - return 0; } +static GlobalProperty xen_compat_props[] = { + { + .driver = "migration", + .property = "store-global-state", + .value = "off", + }, + { + .driver = "migration", + .property = "send-configuration", + .value = "off", + }, + { + .driver = "migration", + .property = "send-section-footer", + .value = "off", + }, + { /* end of list */ }, +}; + static void xen_accel_class_init(ObjectClass *oc, void *data) { AccelClass *ac = ACCEL_CLASS(oc); ac->name = "Xen"; ac->init_machine = xen_init; ac->allowed = &xen_allowed; + ac->global_props = xen_compat_props; } #define TYPE_XEN_ACCEL ACCEL_CLASS_NAME("xen") diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h index 73d1bea8b6..c04f4f67f6 100644 --- a/include/exec/ram_addr.h +++ b/include/exec/ram_addr.h @@ -386,8 +386,9 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb, int k; int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS); unsigned long * const *src; - unsigned long idx = (page * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE; - unsigned long offset = BIT_WORD((page * BITS_PER_LONG) % + unsigned long word = BIT_WORD((start + rb->offset) >> TARGET_PAGE_BITS); + unsigned long idx = (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE; + unsigned long offset = BIT_WORD((word * BITS_PER_LONG) % DIRTY_MEMORY_BLOCK_SIZE); rcu_read_lock(); @@ -414,9 +415,11 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb, rcu_read_unlock(); } else { + ram_addr_t offset = rb->offset; + for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) { if (cpu_physical_memory_test_and_clear_dirty( - start + addr, + start + addr + offset, TARGET_PAGE_SIZE, DIRTY_MEMORY_MIGRATION)) { *real_dirty_pages 
+= 1; diff --git a/include/hw/compat.h b/include/hw/compat.h index 26cd5851a5..08f36004da 100644 --- a/include/hw/compat.h +++ b/include/hw/compat.h @@ -181,6 +181,18 @@ .driver = TYPE_PCI_DEVICE,\ .property = "x-pcie-lnksta-dllla",\ .value = "off",\ + },{\ + .driver = "migration",\ + .property = "send-configuration",\ + .value = "off",\ + },{\ + .driver = "migration",\ + .property = "send-section-footer",\ + .value = "off",\ + },{\ + .driver = "migration",\ + .property = "store-global-state",\ + .value = "off",\ }, #define HW_COMPAT_2_2 \ diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h index f973b02845..a66bbac352 100644 --- a/include/hw/ppc/spapr.h +++ b/include/hw/ppc/spapr.h @@ -53,6 +53,7 @@ struct sPAPRMachineClass { bool dr_lmb_enabled; /* enable dynamic-reconfig/hotplug of LMBs */ bool use_ohci_by_default; /* use USB-OHCI instead of XHCI */ const char *tcg_default_cpu; /* which (TCG) CPU to simulate by default */ + bool pre_2_10_has_unused_icps; void (*phb_placement)(sPAPRMachineState *spapr, uint32_t index, uint64_t *buid, hwaddr *pio, hwaddr *mmio32, hwaddr *mmio64, @@ -86,16 +87,19 @@ struct sPAPRMachineState { uint64_t rtc_offset; /* Now used only during incoming migration */ struct PPCTimebase tb; bool has_graphics; - sPAPROptionVector *ov5; /* QEMU-supported option vectors */ - sPAPROptionVector *ov5_cas; /* negotiated (via CAS) option vectors */ - bool cas_reboot; - bool cas_legacy_guest_workaround; Notifier epow_notifier; QTAILQ_HEAD(, sPAPREventLogEntry) pending_events; bool use_hotplug_event_source; sPAPREventSource *event_sources; + /* ibm,client-architecture-support option negotiation */ + bool cas_reboot; + bool cas_legacy_guest_workaround; + sPAPROptionVector *ov5; /* QEMU-supported option vectors */ + sPAPROptionVector *ov5_cas; /* negotiated (via CAS) option vectors */ + uint32_t max_compat_pvr; + /* Migration state */ int htab_save_index; bool htab_first_pass; @@ -635,6 +639,7 @@ void spapr_hotplug_req_add_by_count_indexed(sPAPRDRConnectorType drc_type, uint32_t count, uint32_t index); void spapr_hotplug_req_remove_by_count_indexed(sPAPRDRConnectorType drc_type, uint32_t count, uint32_t index); +void spapr_cpu_parse_features(sPAPRMachineState *spapr); void *spapr_populate_hotplug_cpu_dt(CPUState *cs, int *fdt_offset, sPAPRMachineState *spapr); diff --git a/include/hw/ppc/spapr_drc.h b/include/hw/ppc/spapr_drc.h index bc9f98851e..d9cacb368f 100644 --- a/include/hw/ppc/spapr_drc.h +++ b/include/hw/ppc/spapr_drc.h @@ -199,7 +199,6 @@ typedef struct sPAPRDRConnector { sPAPRConfigureConnectorState *ccs; bool awaiting_release; - bool signalled; bool awaiting_allocation; /* device pointer, via link property */ @@ -216,16 +215,11 @@ typedef struct sPAPRDRConnectorClass { const char *drc_name_prefix; /* used other places in device tree */ sPAPRDREntitySense (*dr_entity_sense)(sPAPRDRConnector *drc); - - /* accessors for guest-visible (generally via RTAS) DR state */ - uint32_t (*set_isolation_state)(sPAPRDRConnector *drc, - sPAPRDRIsolationState state); - uint32_t (*set_allocation_state)(sPAPRDRConnector *drc, - sPAPRDRAllocationState state); + uint32_t (*isolate)(sPAPRDRConnector *drc); + uint32_t (*unisolate)(sPAPRDRConnector *drc); /* QEMU interfaces for managing hotplug operations */ bool (*release_pending)(sPAPRDRConnector *drc); - void (*set_signalled)(sPAPRDRConnector *drc); } sPAPRDRConnectorClass; uint32_t spapr_drc_index(sPAPRDRConnector *drc); diff --git a/include/hw/qdev-properties.h b/include/hw/qdev-properties.h index 39bf4b292e..0604c337e0 100644 
--- a/include/hw/qdev-properties.h +++ b/include/hw/qdev-properties.h @@ -210,6 +210,35 @@ void error_set_from_qdev_prop_error(Error **errp, int ret, DeviceState *dev, Property *prop, const char *value); /** + * register_compat_prop: + * + * Register internal (not user-provided) global property, changing the + * default value of a given property in a device type. This can be used + * for enabling machine-type compatibility or for enabling + * accelerator-specific defaults in devices. + * + * The property values set using this function must be always valid and + * never report setter errors, as the property will have + * GlobalProperty::errp set to &error_abort. + * + * User-provided global properties should override internal global + * properties, so callers of this function should ensure that it is + * called before user-provided global properties are registered. + * + * @driver: Device type to be affected + * @property: Property whose default value is going to be changed + * @value: New default value for the property + */ +void register_compat_prop(const char *driver, const char *property, + const char *value); +/* + * register_compat_props_array(): using register_compat_prop(), which + * only registers internal global properties (which has lower priority + * than user-provided global properties) + */ +void register_compat_props_array(GlobalProperty *prop); + +/** * qdev_property_add_static: * @dev: Device to add the property to. * @prop: The qdev property definition. diff --git a/include/migration/global_state.h b/include/migration/global_state.h index 90faea72b4..d307de8350 100644 --- a/include/migration/global_state.h +++ b/include/migration/global_state.h @@ -16,7 +16,6 @@ #include "sysemu/sysemu.h" void register_global_state(void); -void global_state_set_optional(void); int global_state_store(void); void global_state_store_running(void); bool global_state_received(void); diff --git a/include/migration/misc.h b/include/migration/misc.h index 65c7070262..22551216bb 100644 --- a/include/migration/misc.h +++ b/include/migration/misc.h @@ -41,10 +41,9 @@ int64_t self_announce_delay(int round) /* migration/savevm.c */ void dump_vmstate_json_to_file(FILE *out_fp); -void savevm_skip_section_footers(void); -void savevm_skip_configuration(void); /* migration/migration.c */ +void migration_object_init(void); void qemu_start_incoming_migration(const char *uri, Error **errp); bool migration_is_idle(void); void add_migration_state_change_notifier(Notifier *notify); @@ -54,4 +53,7 @@ bool migration_has_finished(MigrationState *); bool migration_has_failed(MigrationState *); /* ...and after the device transmission */ bool migration_in_postcopy_after_devices(MigrationState *); +void migration_only_migratable_set(void); +void migration_global_dump(Monitor *mon); + #endif diff --git a/include/migration/vmstate.h b/include/migration/vmstate.h index e85fbd81fc..85e43da568 100644 --- a/include/migration/vmstate.h +++ b/include/migration/vmstate.h @@ -155,6 +155,7 @@ typedef enum { struct VMStateField { const char *name; + const char *err_hint; size_t offset; size_t size; size_t start; @@ -256,6 +257,18 @@ extern const VMStateInfo vmstate_info_qtailq; .offset = vmstate_offset_value(_state, _field, _type), \ } +#define VMSTATE_SINGLE_FULL(_field, _state, _test, _version, _info, \ + _type, _err_hint) { \ + .name = (stringify(_field)), \ + .err_hint = (_err_hint), \ + .version_id = (_version), \ + .field_exists = (_test), \ + .size = sizeof(_type), \ + .info = &(_info), \ + .flags = VMS_SINGLE, \ + .offset = 
vmstate_offset_value(_state, _field, _type), \ +} + /* Validate state using a boolean predicate. */ #define VMSTATE_VALIDATE(_name, _test) { \ .name = (_name), \ @@ -762,29 +775,35 @@ extern const VMStateInfo vmstate_info_qtailq; #define VMSTATE_UINT64(_f, _s) \ VMSTATE_UINT64_V(_f, _s, 0) -#define VMSTATE_UINT8_EQUAL(_f, _s) \ - VMSTATE_SINGLE(_f, _s, 0, vmstate_info_uint8_equal, uint8_t) +#define VMSTATE_UINT8_EQUAL(_f, _s, _err_hint) \ + VMSTATE_SINGLE_FULL(_f, _s, 0, 0, \ + vmstate_info_uint8_equal, uint8_t, _err_hint) -#define VMSTATE_UINT16_EQUAL(_f, _s) \ - VMSTATE_SINGLE(_f, _s, 0, vmstate_info_uint16_equal, uint16_t) +#define VMSTATE_UINT16_EQUAL(_f, _s, _err_hint) \ + VMSTATE_SINGLE_FULL(_f, _s, 0, 0, \ + vmstate_info_uint16_equal, uint16_t, _err_hint) -#define VMSTATE_UINT16_EQUAL_V(_f, _s, _v) \ - VMSTATE_SINGLE(_f, _s, _v, vmstate_info_uint16_equal, uint16_t) +#define VMSTATE_UINT16_EQUAL_V(_f, _s, _v, _err_hint) \ + VMSTATE_SINGLE_FULL(_f, _s, 0, _v, \ + vmstate_info_uint16_equal, uint16_t, _err_hint) -#define VMSTATE_INT32_EQUAL(_f, _s) \ - VMSTATE_SINGLE(_f, _s, 0, vmstate_info_int32_equal, int32_t) +#define VMSTATE_INT32_EQUAL(_f, _s, _err_hint) \ + VMSTATE_SINGLE_FULL(_f, _s, 0, 0, \ + vmstate_info_int32_equal, int32_t, _err_hint) -#define VMSTATE_UINT32_EQUAL_V(_f, _s, _v) \ - VMSTATE_SINGLE(_f, _s, _v, vmstate_info_uint32_equal, uint32_t) +#define VMSTATE_UINT32_EQUAL_V(_f, _s, _v, _err_hint) \ + VMSTATE_SINGLE_FULL(_f, _s, 0, _v, \ + vmstate_info_uint32_equal, uint32_t, _err_hint) -#define VMSTATE_UINT32_EQUAL(_f, _s) \ - VMSTATE_UINT32_EQUAL_V(_f, _s, 0) +#define VMSTATE_UINT32_EQUAL(_f, _s, _err_hint) \ + VMSTATE_UINT32_EQUAL_V(_f, _s, 0, _err_hint) -#define VMSTATE_UINT64_EQUAL_V(_f, _s, _v) \ - VMSTATE_SINGLE(_f, _s, _v, vmstate_info_uint64_equal, uint64_t) +#define VMSTATE_UINT64_EQUAL_V(_f, _s, _v, _err_hint) \ + VMSTATE_SINGLE_FULL(_f, _s, 0, _v, \ + vmstate_info_uint64_equal, uint64_t, _err_hint) -#define VMSTATE_UINT64_EQUAL(_f, _s) \ - VMSTATE_UINT64_EQUAL_V(_f, _s, 0) +#define VMSTATE_UINT64_EQUAL(_f, _s, _err_hint) \ + VMSTATE_UINT64_EQUAL_V(_f, _s, 0, _err_hint) #define VMSTATE_INT32_POSITIVE_LE(_f, _s) \ VMSTATE_SINGLE(_f, _s, 0, vmstate_info_int32_le, int32_t) diff --git a/include/sysemu/accel.h b/include/sysemu/accel.h index 15944c152c..ecc5c84621 100644 --- a/include/sysemu/accel.h +++ b/include/sysemu/accel.h @@ -24,6 +24,7 @@ #define HW_ACCEL_H #include "qom/object.h" +#include "hw/qdev-properties.h" typedef struct AccelState { /*< private >*/ @@ -40,6 +41,14 @@ typedef struct AccelClass { int (*available)(void); int (*init_machine)(MachineState *ms); bool *allowed; + /* + * Array of global properties that would be applied when specific + * accelerator is chosen. It works like MachineClass.compat_props + * but it's for accelerators not machines. Accelerator-provided + * global properties may be overridden by machine-type + * compat_props or user-provided global properties. 
+ */ + GlobalProperty *global_props; } AccelClass; #define TYPE_ACCEL "accel" @@ -57,5 +66,7 @@ typedef struct AccelClass { extern int tcg_tb_size; void configure_accelerator(MachineState *ms); +/* Register accelerator specific global properties */ +void accel_register_compat_props(AccelState *accel); #endif diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h index 9841a527a1..b21369672a 100644 --- a/include/sysemu/sysemu.h +++ b/include/sysemu/sysemu.h @@ -15,7 +15,6 @@ /* vl.c */ extern const char *bios_name; -extern int only_migratable; extern const char *qemu_name; extern QemuUUID qemu_uuid; extern bool qemu_uuid_set; diff --git a/migration/global_state.c b/migration/global_state.c index f792cf5242..dcbbcb28be 100644 --- a/migration/global_state.c +++ b/migration/global_state.c @@ -15,12 +15,12 @@ #include "qemu/error-report.h" #include "qapi/error.h" #include "qapi/util.h" +#include "migration.h" #include "migration/global_state.h" #include "migration/vmstate.h" #include "trace.h" typedef struct { - bool optional; uint32_t size; uint8_t runstate[100]; RunState state; @@ -57,11 +57,6 @@ RunState global_state_get_runstate(void) return global_state.state; } -void global_state_set_optional(void) -{ - global_state.optional = true; -} - static bool global_state_needed(void *opaque) { GlobalState *s = opaque; @@ -69,7 +64,7 @@ static bool global_state_needed(void *opaque) /* If it is not optional, it is mandatory */ - if (s->optional == false) { + if (migrate_get_current()->store_global_state) { return true; } diff --git a/migration/migration.c b/migration/migration.c index f588329f4c..51ccd1a4c5 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -42,6 +42,8 @@ #include "exec/target_page.h" #include "io/channel-buffer.h" #include "migration/colo.h" +#include "hw/boards.h" +#include "monitor/monitor.h" #define MAX_THROTTLE (32 << 20) /* Migration transfer speed throttling */ @@ -98,32 +100,37 @@ enum mig_rp_message_type { migrations at once. For now we don't need to add dynamic creation of migration */ +static MigrationState *current_migration; + +void migration_object_init(void) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + + /* This can only be called once. */ + assert(!current_migration); + current_migration = MIGRATION_OBJ(object_new(TYPE_MIGRATION)); + + /* + * We cannot really do this in migration_instance_init() since at + * that time global properties are not yet applied, then this + * value will be definitely replaced by something else. + */ + if (ms->enforce_config_section) { + current_migration->send_configuration = true; + } +} + /* For outgoing */ MigrationState *migrate_get_current(void) { - static bool once; - static MigrationState current_migration = { - .state = MIGRATION_STATUS_NONE, - .xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE, - .mbps = -1, - .parameters = { - .compress_level = DEFAULT_MIGRATE_COMPRESS_LEVEL, - .compress_threads = DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT, - .decompress_threads = DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT, - .cpu_throttle_initial = DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL, - .cpu_throttle_increment = DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT, - .max_bandwidth = MAX_THROTTLE, - .downtime_limit = DEFAULT_MIGRATE_SET_DOWNTIME, - .x_checkpoint_delay = DEFAULT_MIGRATE_X_CHECKPOINT_DELAY, - }, - }; + /* This can only be called after the object created. 
*/ + assert(current_migration); + return current_migration; +} - if (!once) { - current_migration.parameters.tls_creds = g_strdup(""); - current_migration.parameters.tls_hostname = g_strdup(""); - once = true; - } - return ¤t_migration; +void migration_only_migratable_set(void) +{ + migrate_get_current()->only_migratable = true; } MigrationIncomingState *migration_incoming_get_current(void) @@ -997,7 +1004,7 @@ static GSList *migration_blockers; int migrate_add_blocker(Error *reason, Error **errp) { - if (only_migratable) { + if (migrate_get_current()->only_migratable) { error_propagate(errp, error_copy(reason)); error_prepend(errp, "disallowing migration blocker " "(--only_migratable) for: "); @@ -1304,6 +1311,15 @@ bool migrate_use_block(void) return s->enabled_capabilities[MIGRATION_CAPABILITY_BLOCK]; } +bool migrate_use_return_path(void) +{ + MigrationState *s; + + s = migrate_get_current(); + + return s->enabled_capabilities[MIGRATION_CAPABILITY_RETURN_PATH]; +} + bool migrate_use_block_incremental(void) { MigrationState *s; @@ -1968,10 +1984,11 @@ void migrate_fd_connect(MigrationState *s) notifier_list_notify(&migration_state_notifiers, s); /* - * Open the return path; currently for postcopy but other things might - * also want it. + * Open the return path. For postcopy, it is used exclusively. For + * precopy, only if user specified "return-path" capability would + * QEMU uses the return path. */ - if (migrate_postcopy_ram()) { + if (migrate_postcopy_ram() || migrate_use_return_path()) { if (open_return_path_on_source(s)) { error_report("Unable to open return-path for postcopy"); migrate_set_state(&s->state, MIGRATION_STATUS_SETUP, @@ -1987,3 +2004,76 @@ void migrate_fd_connect(MigrationState *s) s->migration_thread_running = true; } +void migration_global_dump(Monitor *mon) +{ + MigrationState *ms = migrate_get_current(); + + monitor_printf(mon, "globals: store-global-state=%d, only_migratable=%d, " + "send-configuration=%d, send-section-footer=%d\n", + ms->store_global_state, ms->only_migratable, + ms->send_configuration, ms->send_section_footer); +} + +static Property migration_properties[] = { + DEFINE_PROP_BOOL("store-global-state", MigrationState, + store_global_state, true), + DEFINE_PROP_BOOL("only-migratable", MigrationState, only_migratable, false), + DEFINE_PROP_BOOL("send-configuration", MigrationState, + send_configuration, true), + DEFINE_PROP_BOOL("send-section-footer", MigrationState, + send_section_footer, true), + DEFINE_PROP_END_OF_LIST(), +}; + +static void migration_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->user_creatable = false; + dc->props = migration_properties; +} + +static void migration_instance_init(Object *obj) +{ + MigrationState *ms = MIGRATION_OBJ(obj); + + ms->state = MIGRATION_STATUS_NONE; + ms->xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE; + ms->mbps = -1; + ms->parameters = (MigrationParameters) { + .compress_level = DEFAULT_MIGRATE_COMPRESS_LEVEL, + .compress_threads = DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT, + .decompress_threads = DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT, + .cpu_throttle_initial = DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL, + .cpu_throttle_increment = DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT, + .max_bandwidth = MAX_THROTTLE, + .downtime_limit = DEFAULT_MIGRATE_SET_DOWNTIME, + .x_checkpoint_delay = DEFAULT_MIGRATE_X_CHECKPOINT_DELAY, + }; + ms->parameters.tls_creds = g_strdup(""); + ms->parameters.tls_hostname = g_strdup(""); +} + +static const TypeInfo migration_type = { + .name = 
TYPE_MIGRATION, + /* + * NOTE: "migration" itself is not really a device. We used + * TYPE_DEVICE here only to leverage some existing QDev features + * like "-global" properties, and HW_COMPAT_* fields (which are + * finally applied as global properties as well). If one day the + * global property feature can be migrated from QDev to QObject in + * general, then we can switch to QObject as well. + */ + .parent = TYPE_DEVICE, + .class_init = migration_class_init, + .class_size = sizeof(MigrationClass), + .instance_size = sizeof(MigrationState), + .instance_init = migration_instance_init, +}; + +static void register_migration_types(void) +{ + type_register_static(&migration_type); +} + +type_init(register_migration_types); diff --git a/migration/migration.h b/migration/migration.h index d9a268a3af..148c9facbc 100644 --- a/migration/migration.h +++ b/migration/migration.h @@ -19,6 +19,7 @@ #include "qapi-types.h" #include "exec/cpu-common.h" #include "qemu/coroutine_int.h" +#include "hw/qdev.h" /* State for the incoming migration */ struct MigrationIncomingState { @@ -62,8 +63,26 @@ struct MigrationIncomingState { MigrationIncomingState *migration_incoming_get_current(void); void migration_incoming_state_destroy(void); +#define TYPE_MIGRATION "migration" + +#define MIGRATION_CLASS(klass) \ + OBJECT_CLASS_CHECK(MigrationClass, (klass), TYPE_MIGRATION) +#define MIGRATION_OBJ(obj) \ + OBJECT_CHECK(MigrationState, (obj), TYPE_MIGRATION) +#define MIGRATION_GET_CLASS(obj) \ + OBJECT_GET_CLASS(MigrationClass, (obj), TYPE_MIGRATION) + +typedef struct MigrationClass { + /*< private >*/ + DeviceClass parent_class; +} MigrationClass; + struct MigrationState { + /*< private >*/ + DeviceState parent_obj; + + /*< public >*/ size_t bytes_xfer; size_t xfer_limit; QemuThread thread; @@ -114,6 +133,20 @@ struct MigrationState /* Do we have to clean up -b/-i from old migrate parameters */ /* This feature is deprecated and will be removed */ bool must_remove_block_options; + + /* + * Global switch on whether we need to store the global state + * during migration. 
+ */ + bool store_global_state; + + /* Whether the VM is only allowing for migratable devices */ + bool only_migratable; + + /* Whether we send QEMU_VM_CONFIGURATION during migration */ + bool send_configuration; + /* Whether we send section footer during migration */ + bool send_section_footer; }; void migrate_set_state(int *state, int old_state, int new_state); @@ -144,6 +177,7 @@ bool migrate_colo_enabled(void); bool migrate_use_block(void); bool migrate_use_block_incremental(void); +bool migrate_use_return_path(void); bool migrate_use_compression(void); int migrate_compress_level(void); diff --git a/migration/savevm.c b/migration/savevm.c index c7a49c93c5..be3f885119 100644 --- a/migration/savevm.c +++ b/migration/savevm.c @@ -62,8 +62,6 @@ const unsigned int postcopy_ram_discard_version = 0; -static bool skip_section_footers; - /* Subcommands for QEMU_VM_COMMAND */ enum qemu_vm_cmd { MIG_CMD_INVALID = 0, /* Must be 0 */ @@ -287,7 +285,6 @@ typedef struct SaveStateEntry { typedef struct SaveState { QTAILQ_HEAD(, SaveStateEntry) handlers; int global_section_id; - bool skip_configuration; uint32_t len; const char *name; uint32_t target_page_bits; @@ -296,15 +293,8 @@ typedef struct SaveState { static SaveState savevm_state = { .handlers = QTAILQ_HEAD_INITIALIZER(savevm_state.handlers), .global_section_id = 0, - .skip_configuration = false, }; -void savevm_skip_configuration(void) -{ - savevm_state.skip_configuration = true; -} - - static void configuration_pre_save(void *opaque) { SaveState *state = opaque; @@ -769,11 +759,6 @@ static void vmstate_save(QEMUFile *f, SaveStateEntry *se, QJSON *vmdesc) vmstate_save_state(f, se->vmsd, se->opaque, vmdesc); } -void savevm_skip_section_footers(void) -{ - skip_section_footers = true; -} - /* * Write the header for device section (QEMU_VM_SECTION START/END/PART/FULL) */ @@ -801,7 +786,7 @@ static void save_section_header(QEMUFile *f, SaveStateEntry *se, */ static void save_section_footer(QEMUFile *f, SaveStateEntry *se) { - if (!skip_section_footers) { + if (migrate_get_current()->send_section_footer) { qemu_put_byte(f, QEMU_VM_SECTION_FOOTER); qemu_put_be32(f, se->section_id); } @@ -958,23 +943,16 @@ bool qemu_savevm_state_blocked(Error **errp) return false; } -static bool enforce_config_section(void) -{ - MachineState *machine = MACHINE(qdev_get_machine()); - return machine->enforce_config_section; -} - void qemu_savevm_state_header(QEMUFile *f) { trace_savevm_state_header(); qemu_put_be32(f, QEMU_VM_FILE_MAGIC); qemu_put_be32(f, QEMU_VM_FILE_VERSION); - if (!savevm_state.skip_configuration || enforce_config_section()) { + if (migrate_get_current()->send_configuration) { qemu_put_byte(f, QEMU_VM_CONFIGURATION); vmstate_save_state(f, &vmstate_configuration, &savevm_state, 0); } - } void qemu_savevm_state_begin(QEMUFile *f) @@ -1810,7 +1788,7 @@ static bool check_section_footer(QEMUFile *f, SaveStateEntry *se) uint8_t read_mark; uint32_t read_section_id; - if (skip_section_footers) { + if (!migrate_get_current()->send_section_footer) { /* No footer to check */ return true; } @@ -1995,7 +1973,7 @@ int qemu_loadvm_state(QEMUFile *f) return -ENOTSUP; } - if (!savevm_state.skip_configuration || enforce_config_section()) { + if (migrate_get_current()->send_configuration) { if (qemu_get_byte(f) != QEMU_VM_CONFIGURATION) { error_report("Configuration section missing"); return -EINVAL; @@ -2336,7 +2314,7 @@ void vmstate_register_ram_global(MemoryRegion *mr) bool vmstate_check_only_migratable(const VMStateDescription *vmsd) { /* check needed if 
--only-migratable is specified */ - if (!only_migratable) { + if (!migrate_get_current()->only_migratable) { return true; } diff --git a/migration/vmstate-types.c b/migration/vmstate-types.c index 02f05a3359..c056c98bdb 100644 --- a/migration/vmstate-types.c +++ b/migration/vmstate-types.c @@ -126,6 +126,9 @@ static int get_int32_equal(QEMUFile *f, void *pv, size_t size, return 0; } error_report("%" PRIx32 " != %" PRIx32, *v, v2); + if (field->err_hint) { + error_printf("%s\n", field->err_hint); + } return -EINVAL; } @@ -267,6 +270,9 @@ static int get_uint32_equal(QEMUFile *f, void *pv, size_t size, return 0; } error_report("%" PRIx32 " != %" PRIx32, *v, v2); + if (field->err_hint) { + error_printf("%s\n", field->err_hint); + } return -EINVAL; } @@ -341,6 +347,9 @@ static int get_uint64_equal(QEMUFile *f, void *pv, size_t size, return 0; } error_report("%" PRIx64 " != %" PRIx64, *v, v2); + if (field->err_hint) { + error_printf("%s\n", field->err_hint); + } return -EINVAL; } @@ -364,6 +373,9 @@ static int get_uint8_equal(QEMUFile *f, void *pv, size_t size, return 0; } error_report("%x != %x", *v, v2); + if (field->err_hint) { + error_printf("%s\n", field->err_hint); + } return -EINVAL; } @@ -387,6 +399,9 @@ static int get_uint16_equal(QEMUFile *f, void *pv, size_t size, return 0; } error_report("%x != %x", *v, v2); + if (field->err_hint) { + error_printf("%s\n", field->err_hint); + } return -EINVAL; } diff --git a/monitor.c b/monitor.c index 3c369f4dd5..4031876411 100644 --- a/monitor.c +++ b/monitor.c @@ -1078,13 +1078,24 @@ int monitor_get_cpu_index(void) static void hmp_info_registers(Monitor *mon, const QDict *qdict) { - CPUState *cs = mon_get_cpu(); + bool all_cpus = qdict_get_try_bool(qdict, "cpustate_all", false); + CPUState *cs; - if (!cs) { - monitor_printf(mon, "No CPU available\n"); - return; + if (all_cpus) { + CPU_FOREACH(cs) { + monitor_printf(mon, "\nCPU#%d\n", cs->cpu_index); + cpu_dump_state(cs, (FILE *)mon, monitor_fprintf, CPU_DUMP_FPU); + } + } else { + cs = mon_get_cpu(); + + if (!cs) { + monitor_printf(mon, "No CPU available\n"); + return; + } + + cpu_dump_state(cs, (FILE *)mon, monitor_fprintf, CPU_DUMP_FPU); } - cpu_dump_state(cs, (FILE *)mon, monitor_fprintf, CPU_DUMP_FPU); } static void hmp_info_jit(Monitor *mon, const QDict *qdict) diff --git a/qapi-schema.json b/qapi-schema.json index 4b50b652d3..37c4b95aad 100644 --- a/qapi-schema.json +++ b/qapi-schema.json @@ -900,12 +900,15 @@ # offers more flexibility. # (Since 2.10) # +# @return-path: If enabled, migration will use the return path even +# for precopy. 
(since 2.10) +# # Since: 1.2 ## { 'enum': 'MigrationCapability', 'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks', 'compress', 'events', 'postcopy-ram', 'x-colo', 'release-ram', - 'block' ] } + 'block', 'return-path' ] } ## # @MigrationCapabilityStatus: @@ -5114,6 +5117,26 @@ { 'command': 'chardev-remove', 'data': {'id': 'str'} } ## +# @chardev-send-break: +# +# Send a break to a character device +# +# @id: the chardev's ID, must exist +# +# Returns: Nothing on success +# +# Since: 2.10 +# +# Example: +# +# -> { "execute": "chardev-send-break", "arguments": { "id" : "foo" } } +# <- { "return": {} } +# +## +{ 'command': 'chardev-send-break', 'data': {'id': 'str'} } + + +## # @TpmModel: # # An enumeration of TPM models diff --git a/qapi/string-input-visitor.c b/qapi/string-input-visitor.c index c089491c24..63ae115b2a 100644 --- a/qapi/string-input-visitor.c +++ b/qapi/string-input-visitor.c @@ -326,6 +326,16 @@ static void parse_type_number(Visitor *v, const char *name, double *obj, *obj = val; } +static void parse_type_null(Visitor *v, const char *name, Error **errp) +{ + StringInputVisitor *siv = to_siv(v); + + if (!siv->string || siv->string[0]) { + error_setg(errp, QERR_INVALID_PARAMETER_TYPE, name ? name : "null", + "null"); + } +} + static void string_input_free(Visitor *v) { StringInputVisitor *siv = to_siv(v); @@ -349,6 +359,7 @@ Visitor *string_input_visitor_new(const char *str) v->visitor.type_bool = parse_type_bool; v->visitor.type_str = parse_type_str; v->visitor.type_number = parse_type_number; + v->visitor.type_null = parse_type_null; v->visitor.start_list = start_list; v->visitor.next_list = next_list; v->visitor.check_list = check_list; diff --git a/qapi/string-output-visitor.c b/qapi/string-output-visitor.c index 53c2175d81..af649e1d6e 100644 --- a/qapi/string-output-visitor.c +++ b/qapi/string-output-visitor.c @@ -256,6 +256,19 @@ static void print_type_number(Visitor *v, const char *name, double *obj, string_output_set(sov, g_strdup_printf("%f", *obj)); } +static void print_type_null(Visitor *v, const char *name, Error **errp) +{ + StringOutputVisitor *sov = to_sov(v); + char *out; + + if (sov->human) { + out = g_strdup("<null>"); + } else { + out = g_strdup(""); + } + string_output_set(sov, out); +} + static void start_list(Visitor *v, const char *name, GenericList **list, size_t size, Error **errp) @@ -341,6 +354,7 @@ Visitor *string_output_visitor_new(bool human, char **result) v->visitor.type_bool = print_type_bool; v->visitor.type_str = print_type_str; v->visitor.type_number = print_type_number; + v->visitor.type_null = print_type_null; v->visitor.start_list = start_list; v->visitor.next_list = next_list; v->visitor.end_list = end_list; diff --git a/qemu-options.hx b/qemu-options.hx index 896ff177c3..297bd8aca4 100644 --- a/qemu-options.hx +++ b/qemu-options.hx @@ -1010,7 +1010,7 @@ ETEXI DEF("fsdev", HAS_ARG, QEMU_OPTION_fsdev, "-fsdev fsdriver,id=id[,path=path,][security_model={mapped-xattr|mapped-file|passthrough|none}]\n" - " [,writeout=immediate][,readonly][,socket=socket|sock_fd=sock_fd]\n" + " [,writeout=immediate][,readonly][,socket=socket|sock_fd=sock_fd][,fmode=fmode][,dmode=dmode]\n" " [[,throttling.bps-total=b]|[[,throttling.bps-read=r][,throttling.bps-write=w]]]\n" " [[,throttling.iops-total=i]|[[,throttling.iops-read=r][,throttling.iops-write=w]]]\n" " [[,throttling.bps-total-max=bm]|[[,throttling.bps-read-max=rm][,throttling.bps-write-max=wm]]]\n" @@ -1020,7 +1020,7 @@ DEF("fsdev", HAS_ARG, QEMU_OPTION_fsdev, STEXI -@item -fsdev 
@var{fsdriver},id=@var{id},path=@var{path},[security_model=@var{security_model}][,writeout=@var{writeout}][,readonly][,socket=@var{socket}|sock_fd=@var{sock_fd}] +@item -fsdev @var{fsdriver},id=@var{id},path=@var{path},[security_model=@var{security_model}][,writeout=@var{writeout}][,readonly][,socket=@var{socket}|sock_fd=@var{sock_fd}][,fmode=@var{fmode}][,dmode=@var{dmode}] @findex -fsdev Define a new file system device. Valid options are: @table @option @@ -1061,6 +1061,12 @@ with virtfs-proxy-helper Enables proxy filesystem driver to use passed socket descriptor for communicating with virtfs-proxy-helper. Usually a helper like libvirt will create socketpair and pass one of the fds as sock_fd +@item fmode=@var{fmode} +Specifies the default mode for newly created files on the host. Works only +with security models "mapped-xattr" and "mapped-file". +@item dmode=@var{dmode} +Specifies the default mode for newly created directories on the host. Works +only with security models "mapped-xattr" and "mapped-file". @end table -fsdev option is used along with -device driver "virtio-9p-pci". @@ -1077,12 +1083,12 @@ ETEXI DEF("virtfs", HAS_ARG, QEMU_OPTION_virtfs, "-virtfs local,path=path,mount_tag=tag,security_model=[mapped-xattr|mapped-file|passthrough|none]\n" - " [,id=id][,writeout=immediate][,readonly][,socket=socket|sock_fd=sock_fd]\n", + " [,id=id][,writeout=immediate][,readonly][,socket=socket|sock_fd=sock_fd][,fmode=fmode][,dmode=dmode]\n", QEMU_ARCH_ALL) STEXI -@item -virtfs @var{fsdriver}[,path=@var{path}],mount_tag=@var{mount_tag}[,security_model=@var{security_model}][,writeout=@var{writeout}][,readonly][,socket=@var{socket}|sock_fd=@var{sock_fd}] +@item -virtfs @var{fsdriver}[,path=@var{path}],mount_tag=@var{mount_tag}[,security_model=@var{security_model}][,writeout=@var{writeout}][,readonly][,socket=@var{socket}|sock_fd=@var{sock_fd}][,fmode=@var{fmode}][,dmode=@var{dmode}] @findex -virtfs The general form of a Virtual File system pass-through options are: @@ -1124,6 +1130,12 @@ will create socketpair and pass one of the fds as sock_fd @item sock_fd Enables proxy filesystem driver to use passed 'sock_fd' as the socket descriptor for interfacing with virtfs-proxy-helper +@item fmode=@var{fmode} +Specifies the default mode for newly created files on the host. Works only +with security models "mapped-xattr" and "mapped-file". +@item dmode=@var{dmode} +Specifies the default mode for newly created directories on the host. Works +only with security models "mapped-xattr" and "mapped-file". 
@end table ETEXI diff --git a/target/ppc/compat.c b/target/ppc/compat.c index e8ec1e19e7..f1b67faa97 100644 --- a/target/ppc/compat.c +++ b/target/ppc/compat.c @@ -24,9 +24,11 @@ #include "sysemu/cpus.h" #include "qemu/error-report.h" #include "qapi/error.h" +#include "qapi/visitor.h" #include "cpu-models.h" typedef struct { + const char *name; uint32_t pvr; uint64_t pcr; uint64_t pcr_level; @@ -38,6 +40,7 @@ static const CompatInfo compat_table[] = { * Ordered from oldest to newest - the code relies on this */ { /* POWER6, ISA2.05 */ + .name = "power6", .pvr = CPU_POWERPC_LOGICAL_2_05, .pcr = PCR_COMPAT_3_00 | PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_COMPAT_2_05 | PCR_TM_DIS | PCR_VSX_DIS, @@ -45,24 +48,28 @@ static const CompatInfo compat_table[] = { .max_threads = 2, }, { /* POWER7, ISA2.06 */ + .name = "power7", .pvr = CPU_POWERPC_LOGICAL_2_06, .pcr = PCR_COMPAT_3_00 | PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_TM_DIS, .pcr_level = PCR_COMPAT_2_06, .max_threads = 4, }, { + .name = "power7+", .pvr = CPU_POWERPC_LOGICAL_2_06_PLUS, .pcr = PCR_COMPAT_3_00 | PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_TM_DIS, .pcr_level = PCR_COMPAT_2_06, .max_threads = 4, }, { /* POWER8, ISA2.07 */ + .name = "power8", .pvr = CPU_POWERPC_LOGICAL_2_07, .pcr = PCR_COMPAT_3_00 | PCR_COMPAT_2_07, .pcr_level = PCR_COMPAT_2_07, .max_threads = 8, }, { /* POWER9, ISA3.00 */ + .name = "power9", .pvr = CPU_POWERPC_LOGICAL_3_00, .pcr = PCR_COMPAT_3_00, .pcr_level = PCR_COMPAT_3_00, @@ -189,3 +196,98 @@ int ppc_compat_max_threads(PowerPCCPU *cpu) return n_threads; } + +static void ppc_compat_prop_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + uint32_t compat_pvr = *((uint32_t *)opaque); + const char *value; + + if (!compat_pvr) { + value = ""; + } else { + const CompatInfo *compat = compat_by_pvr(compat_pvr); + + g_assert(compat); + + value = compat->name; + } + + visit_type_str(v, name, (char **)&value, errp); +} + +static void ppc_compat_prop_set(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Error *local_err = NULL; + char *value; + uint32_t compat_pvr; + + visit_type_str(v, name, &value, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + if (strcmp(value, "") == 0) { + compat_pvr = 0; + } else { + int i; + const CompatInfo *compat = NULL; + + for (i = 0; i < ARRAY_SIZE(compat_table); i++) { + if (strcmp(value, compat_table[i].name) == 0) { + compat = &compat_table[i]; + break; + + } + } + + if (!compat) { + error_setg(errp, "Invalid compatibility mode \"%s\"", value); + goto out; + } + compat_pvr = compat->pvr; + } + + *((uint32_t *)opaque) = compat_pvr; + +out: + g_free(value); +} + +void ppc_compat_add_property(Object *obj, const char *name, + uint32_t *compat_pvr, const char *basedesc, + Error **errp) +{ + Error *local_err = NULL; + gchar *namesv[ARRAY_SIZE(compat_table) + 1]; + gchar *names, *desc; + int i; + + object_property_add(obj, name, "string", + ppc_compat_prop_get, ppc_compat_prop_set, NULL, + compat_pvr, &local_err); + if (local_err) { + goto out; + } + + for (i = 0; i < ARRAY_SIZE(compat_table); i++) { + /* + * Have to discard const here, because g_strjoinv() takes + * (gchar **), not (const gchar **) :( + */ + namesv[i] = (gchar *)compat_table[i].name; + } + namesv[ARRAY_SIZE(compat_table)] = NULL; + + names = g_strjoinv(", ", namesv); + desc = g_strdup_printf("%s. 
Valid values are %s.", basedesc, names); + object_property_set_description(obj, name, desc, &local_err); + + g_free(names); + g_free(desc); + +out: + error_propagate(errp, local_err); +} diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h index d10808d9f4..6ee2a26a96 100644 --- a/target/ppc/cpu.h +++ b/target/ppc/cpu.h @@ -1189,7 +1189,6 @@ typedef struct PPCVirtualHypervisorClass PPCVirtualHypervisorClass; * PowerPCCPU: * @env: #CPUPPCState * @cpu_dt_id: CPU index used in the device tree. KVM uses this index too - * @max_compat: Maximal supported logical PVR from the command line * @compat_pvr: Current logical PVR, zero if in "raw" mode * * A PowerPC CPU. @@ -1201,7 +1200,6 @@ struct PowerPCCPU { CPUPPCState env; int cpu_dt_id; - uint32_t max_compat; uint32_t compat_pvr; PPCVirtualHypervisor *vhyp; Object *intc; @@ -1213,6 +1211,7 @@ struct PowerPCCPU { uint64_t mig_insns_flags; uint64_t mig_insns_flags2; uint32_t mig_nb_BATs; + bool pre_2_10_migration; }; static inline PowerPCCPU *ppc_env_get_cpu(CPUPPCState *env) @@ -1375,6 +1374,9 @@ void ppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr, Error **errp); void ppc_set_compat_all(uint32_t compat_pvr, Error **errp); #endif int ppc_compat_max_threads(PowerPCCPU *cpu); +void ppc_compat_add_property(Object *obj, const char *name, + uint32_t *compat_pvr, const char *basedesc, + Error **errp); #endif /* defined(TARGET_PPC64) */ #include "exec/cpu-all.h" diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c index 9cb2123187..3a9f0861e7 100644 --- a/target/ppc/excp_helper.c +++ b/target/ppc/excp_helper.c @@ -17,6 +17,7 @@ * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" +#include "qemu/main-loop.h" #include "cpu.h" #include "exec/helper-proto.h" #include "exec/exec-all.h" @@ -1132,6 +1133,7 @@ void helper_msgsnd(target_ulong rb) return; } + qemu_mutex_lock_iothread(); CPU_FOREACH(cs) { PowerPCCPU *cpu = POWERPC_CPU(cs); CPUPPCState *cenv = &cpu->env; @@ -1141,5 +1143,6 @@ void helper_msgsnd(target_ulong rb) cpu_interrupt(cs, CPU_INTERRUPT_HARD); } } + qemu_mutex_unlock_iothread(); } #endif diff --git a/target/ppc/machine.c b/target/ppc/machine.c index 6cb3a48db1..f578156dd4 100644 --- a/target/ppc/machine.c +++ b/target/ppc/machine.c @@ -8,6 +8,7 @@ #include "helper_regs.h" #include "mmu-hash64.h" #include "migration/cpu.h" +#include "qapi/error.h" static int cpu_load_old(QEMUFile *f, void *opaque, int version_id) { @@ -195,6 +196,27 @@ static void cpu_pre_save(void *opaque) } } +/* + * Determine if a given PVR is a "close enough" match to the CPU + * object. For TCG and KVM PR it would probably be sufficient to + * require an exact PVR match. However for KVM HV the user is + * restricted to a PVR exactly matching the host CPU. The correct way + * to handle this is to put the guest into an architected + * compatibility mode. However, to allow a more forgiving transition + * and migration from before this was widely done, we allow migration + * between sufficiently similar PVRs, as determined by the CPU class's + * pvr_match() hook. + */ +static bool pvr_match(PowerPCCPU *cpu, uint32_t pvr) +{ + PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); + + if (pvr == pcc->pvr) { + return true; + } + return pcc->pvr_match(pcc, pvr); +} + static int cpu_post_load(void *opaque, int version_id) { PowerPCCPU *cpu = opaque; @@ -203,10 +225,31 @@ static int cpu_post_load(void *opaque, int version_id) target_ulong msr; /* - * We always ignore the source PVR. 
The user or management - * software has to take care of running QEMU in a compatible mode. + * If we're operating in compat mode, we should be ok as long as + * the destination supports the same compatiblity mode. + * + * Otherwise, however, we require that the destination has exactly + * the same CPU model as the source. */ - env->spr[SPR_PVR] = env->spr_cb[SPR_PVR].default_value; + +#if defined(TARGET_PPC64) + if (cpu->compat_pvr) { + Error *local_err = NULL; + + ppc_set_compat(cpu, cpu->compat_pvr, &local_err); + if (local_err) { + error_report_err(local_err); + error_free(local_err); + return -1; + } + } else +#endif + { + if (!pvr_match(cpu, env->spr[SPR_PVR])) { + return -1; + } + } + env->lr = env->spr[SPR_LR]; env->ctr = env->spr[SPR_CTR]; cpu_write_xer(env, env->spr[SPR_XER]); @@ -419,7 +462,7 @@ static const VMStateDescription vmstate_slb = { .needed = slb_needed, .post_load = slb_post_load, .fields = (VMStateField[]) { - VMSTATE_INT32_EQUAL(env.slb_nr, PowerPCCPU), + VMSTATE_INT32_EQUAL(env.slb_nr, PowerPCCPU, NULL), VMSTATE_SLB_ARRAY(env.slb, PowerPCCPU, MAX_SLB_ENTRIES), VMSTATE_END_OF_LIST() } @@ -452,7 +495,7 @@ static const VMStateDescription vmstate_tlb6xx = { .minimum_version_id = 1, .needed = tlb6xx_needed, .fields = (VMStateField[]) { - VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU), + VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL), VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlb6, PowerPCCPU, env.nb_tlb, vmstate_tlb6xx_entry, @@ -510,7 +553,7 @@ static const VMStateDescription vmstate_tlbemb = { .minimum_version_id = 1, .needed = tlbemb_needed, .fields = (VMStateField[]) { - VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU), + VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL), VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbe, PowerPCCPU, env.nb_tlb, vmstate_tlbemb_entry, @@ -551,7 +594,7 @@ static const VMStateDescription vmstate_tlbmas = { .minimum_version_id = 1, .needed = tlbmas_needed, .fields = (VMStateField[]) { - VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU), + VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL), VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbm, PowerPCCPU, env.nb_tlb, vmstate_tlbmas_entry, @@ -560,6 +603,25 @@ static const VMStateDescription vmstate_tlbmas = { } }; +static bool compat_needed(void *opaque) +{ + PowerPCCPU *cpu = opaque; + + assert(!(cpu->compat_pvr && !cpu->vhyp)); + return !cpu->pre_2_10_migration && cpu->compat_pvr != 0; +} + +static const VMStateDescription vmstate_compat = { + .name = "cpu/compat", + .version_id = 1, + .minimum_version_id = 1, + .needed = compat_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT32(compat_pvr, PowerPCCPU), + VMSTATE_END_OF_LIST() + } +}; + const VMStateDescription vmstate_ppc_cpu = { .name = "cpu", .version_id = 5, @@ -613,6 +675,7 @@ const VMStateDescription vmstate_ppc_cpu = { &vmstate_tlb6xx, &vmstate_tlbemb, &vmstate_tlbmas, + &vmstate_compat, NULL } }; diff --git a/target/ppc/mmu-radix64.c b/target/ppc/mmu-radix64.c index de18c0b69e..69fde65276 100644 --- a/target/ppc/mmu-radix64.c +++ b/target/ppc/mmu-radix64.c @@ -255,5 +255,5 @@ int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx, tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK, prot, mmu_idx, 1UL << page_size); - return 1; + return 0; } diff --git a/target/ppc/translate_init.c b/target/ppc/translate_init.c index 56a0ab22cf..783bf98217 100644 --- a/target/ppc/translate_init.c +++ b/target/ppc/translate_init.c @@ -33,6 +33,7 @@ #include "hw/qdev-properties.h" #include "hw/ppc/ppc.h" #include 
"mmu-book3s-v3.h" +#include "sysemu/qtest.h" //#define PPC_DUMP_CPU //#define PPC_DEBUG_SPR @@ -8413,73 +8414,38 @@ POWERPC_FAMILY(POWER5P)(ObjectClass *oc, void *data) pcc->l1_icache_size = 0x10000; } -static void powerpc_get_compat(Object *obj, Visitor *v, const char *name, - void *opaque, Error **errp) -{ - char *value = (char *)""; - Property *prop = opaque; - uint32_t *max_compat = qdev_get_prop_ptr(DEVICE(obj), prop); - - switch (*max_compat) { - case CPU_POWERPC_LOGICAL_2_05: - value = (char *)"power6"; - break; - case CPU_POWERPC_LOGICAL_2_06: - value = (char *)"power7"; - break; - case CPU_POWERPC_LOGICAL_2_07: - value = (char *)"power8"; - break; - case 0: - break; - default: - error_report("Internal error: compat is set to %x", *max_compat); - abort(); - break; - } - - visit_type_str(v, name, &value, errp); -} - -static void powerpc_set_compat(Object *obj, Visitor *v, const char *name, - void *opaque, Error **errp) +/* + * The CPU used to have a "compat" property which set the + * compatibility mode PVR. However, this was conceptually broken - it + * only makes sense on the pseries machine type (otherwise the guest + * owns the PCR and can control the compatibility mode itself). It's + * been replaced with the 'max-cpu-compat' property on the pseries + * machine type. For backwards compatibility, pseries specially + * parses the -cpu parameter and converts old compat= parameters into + * the appropriate machine parameters. This stub implementation of + * the parameter catches any uses on explicitly created CPUs. + */ +static void getset_compat_deprecated(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) { - Error *error = NULL; - char *value = NULL; - Property *prop = opaque; - uint32_t *max_compat = qdev_get_prop_ptr(DEVICE(obj), prop); - - visit_type_str(v, name, &value, &error); - if (error) { - error_propagate(errp, error); - return; + if (!qtest_enabled()) { + error_report("CPU 'compat' property is deprecated and has no effect; " + "use max-cpu-compat machine property instead"); } - - if (strcmp(value, "power6") == 0) { - *max_compat = CPU_POWERPC_LOGICAL_2_05; - } else if (strcmp(value, "power7") == 0) { - *max_compat = CPU_POWERPC_LOGICAL_2_06; - } else if (strcmp(value, "power8") == 0) { - *max_compat = CPU_POWERPC_LOGICAL_2_07; - } else { - error_setg(errp, "Invalid compatibility mode \"%s\"", value); - } - - g_free(value); + visit_type_null(v, name, NULL); } -static PropertyInfo powerpc_compat_propinfo = { +static PropertyInfo ppc_compat_deprecated_propinfo = { .name = "str", - .description = "compatibility mode, power6/power7/power8", - .get = powerpc_get_compat, - .set = powerpc_set_compat, + .description = "compatibility mode (deprecated)", + .get = getset_compat_deprecated, + .set = getset_compat_deprecated, }; - -#define DEFINE_PROP_POWERPC_COMPAT(_n, _s, _f) \ - DEFINE_PROP(_n, _s, _f, powerpc_compat_propinfo, uint32_t) - static Property powerpc_servercpu_properties[] = { - DEFINE_PROP_POWERPC_COMPAT("compat", PowerPCCPU, max_compat), + { + .name = "compat", + .info = &ppc_compat_deprecated_propinfo, + }, DEFINE_PROP_END_OF_LIST(), }; @@ -9859,14 +9825,14 @@ static void ppc_cpu_realizefn(DeviceState *dev, Error **errp) error_append_hint(errp, "Adjust the number of cpus to %d " "or try to raise the number of threads per core\n", cpu->cpu_dt_id * smp_threads / max_smt); - return; + goto unrealize; } #endif if (tcg_enabled()) { if (ppc_fixup_cpu(cpu) != 0) { error_setg(errp, "Unable to emulate selected CPU with TCG"); - return; + goto 
unrealize; } } @@ -9875,14 +9841,14 @@ static void ppc_cpu_realizefn(DeviceState *dev, Error **errp) error_setg(errp, "CPU does not possess a BookE or 4xx MMU. " "Please use qemu-system-ppc or qemu-system-ppc64 instead " "or choose another CPU model."); - return; + goto unrealize; } #endif create_ppc_opcodes(cpu, &local_err); if (local_err != NULL) { error_propagate(errp, local_err); - return; + goto unrealize; } init_ppc_proc(cpu); @@ -10067,6 +10033,10 @@ static void ppc_cpu_realizefn(DeviceState *dev, Error **errp) fflush(stdout); } #endif + return; + +unrealize: + cpu_exec_unrealizefn(cs); } static void ppc_cpu_unrealizefn(DeviceState *dev, Error **errp) @@ -10640,6 +10610,8 @@ static gchar *ppc_gdb_arch_name(CPUState *cs) static Property ppc_cpu_properties[] = { DEFINE_PROP_BOOL("pre-2.8-migration", PowerPCCPU, pre_2_8_migration, false), + DEFINE_PROP_BOOL("pre-2.10-migration", PowerPCCPU, pre_2_10_migration, + false), DEFINE_PROP_END_OF_LIST(), }; diff --git a/tests/test-char.c b/tests/test-char.c index 9e361c8d09..87c724c5c2 100644 --- a/tests/test-char.c +++ b/tests/test-char.c @@ -53,7 +53,9 @@ static void fe_event(void *opaque, int event) FeHandler *h = opaque; h->last_event = event; - quit = true; + if (event != CHR_EVENT_BREAK) { + quit = true; + } } #ifdef CONFIG_HAS_GLIB_SUBPROCESS_TESTS @@ -517,7 +519,7 @@ static void char_file_test(void) file.in = fifo; file.has_in = true; - chr = qemu_chardev_new(NULL, TYPE_CHARDEV_FILE, &backend, + chr = qemu_chardev_new("label-file", TYPE_CHARDEV_FILE, &backend, &error_abort); qemu_chr_fe_init(&be, chr, &error_abort); @@ -527,6 +529,12 @@ static void char_file_test(void) fe_event, &fe, NULL, true); + g_assert_cmpint(fe.last_event, !=, CHR_EVENT_BREAK); + qmp_chardev_send_break("label-foo", NULL); + g_assert_cmpint(fe.last_event, !=, CHR_EVENT_BREAK); + qmp_chardev_send_break("label-file", NULL); + g_assert_cmpint(fe.last_event, ==, CHR_EVENT_BREAK); + main_loop(); close(fd); diff --git a/tests/test-hmp.c b/tests/test-hmp.c index 99e35ec15a..6dfa0c36e2 100644 --- a/tests/test-hmp.c +++ b/tests/test-hmp.c @@ -22,6 +22,7 @@ static int verbose; static const char *hmp_cmds[] = { "boot_set ndc", "chardev-add null,id=testchardev1", + "chardev-send-break testchardev2", "chardev-remove testchardev1", "commit all", "cpu-add 1", diff --git a/ui/cocoa.m b/ui/cocoa.m index 9e81285d86..93e56d0518 100644 --- a/ui/cocoa.m +++ b/ui/cocoa.m @@ -571,7 +571,7 @@ QemuCocoaView *cocoaView; // bitmask. if (qemu_console_is_graphic(NULL)) { - NSEventModifierFlags modifiers = [event modifierFlags]; + NSUInteger modifiers = [event modifierFlags]; if (!!(modifiers & NSEventModifierFlagCapsLock) != !!modifiers_state[Q_KEY_CODE_CAPS_LOCK]) { [self toggleStatefulModifier:Q_KEY_CODE_CAPS_LOCK]; diff --git a/vl.c b/vl.c index 59fea15488..36ff3f4345 100644 --- a/vl.c +++ b/vl.c @@ -188,7 +188,6 @@ bool boot_strict; uint8_t *boot_splash_filedata; size_t boot_splash_filedata_size; uint8_t qemu_extra_params_fw[2]; -int only_migratable; /* turn it off unless user states otherwise */ int icount_align_option; @@ -2969,6 +2968,25 @@ static int qemu_read_default_config_file(void) return 0; } +static void user_register_global_props(void) +{ + qemu_opts_foreach(qemu_find_opts("global"), + global_init_func, NULL, NULL); +} + +/* + * Note: we should see that these properties are actually having a + * priority: accel < machine < user. This means e.g. 
when user + * specifies something in "-global", it'll always be used with highest + * priority than either machine/accelerator compat properties. + */ +static void register_global_properties(MachineState *ms) +{ + accel_register_compat_props(ms->accelerator); + machine_register_compat_props(ms); + user_register_global_props(); +} + int main(int argc, char **argv, char **envp) { int i; @@ -3934,7 +3952,13 @@ int main(int argc, char **argv, char **envp) incoming = optarg; break; case QEMU_OPTION_only_migratable: - only_migratable = 1; + /* + * TODO: we can remove this option one day, and we + * should all use: + * + * "-global migration.only-migratable=true" + */ + migration_only_migratable_set(); break; case QEMU_OPTION_nodefaults: has_defaults = 0; @@ -4571,10 +4595,17 @@ int main(int argc, char **argv, char **envp) exit (i == 1 ? 1 : 0); } - machine_register_compat_props(current_machine); + /* + * Register all the global properties, including accel properties, + * machine properties, and user-specified ones. + */ + register_global_properties(current_machine); - qemu_opts_foreach(qemu_find_opts("global"), - global_init_func, NULL, NULL); + /* + * Migration object can only be created after global properties + * are applied correctly. + */ + migration_object_init(); /* This checkpoint is required by replay to separate prior clock reading from the other reads, because timer polling functions query |
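
A note on the VMSTATE_INT32_EQUAL changes above: the extra NULL argument appears to be the error-hint parameter that the *_EQUAL field macros gained in this development cycle, a string the destination can print when the equality check fails instead of a bare mismatch error. A minimal sketch of what a hinted field could look like, assuming that reading is right (the hint text itself is invented for illustration):

    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.slb_nr, PowerPCCPU,
                            "SLB size differs between source and destination"),
        VMSTATE_END_OF_LIST()
    },

Passing NULL, as these hunks do, simply keeps the old behaviour of reporting the mismatch without any extra hint.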
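
On the deprecated "compat" CPU property: as the new comment in translate_init.c explains, the knob now lives on the pseries machine as max-cpu-compat, and pseries still rewrites an old-style -cpu ...,compat=... into that machine property for backwards compatibility; only explicitly created CPUs hit the warning stub. In command-line terms the two spellings are roughly as follows (the option and property names are taken from the patch, the rest is an example):

    # old spelling, still converted by the pseries machine
    qemu-system-ppc64 -machine pseries -cpu POWER8,compat=power7

    # preferred spelling going forward
    qemu-system-ppc64 -machine pseries,max-cpu-compat=power7 -cpu POWER8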
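
The new pre-2.10-migration CPU property is only the switch that suppresses the cpu/compat subsection; nothing in these hunks flips it on. Presumably that happens through the same compat-property machinery that machine_register_compat_props() applies for older machine types, along the lines of the sketch below (the driver string and the array name are assumptions, not taken from this patch):

    /* Hypothetical compat entry for a pre-2.10 pseries machine type: keeps
     * the outgoing stream free of the new cpu/compat subsection so it stays
     * readable by an older destination. */
    static GlobalProperty ppc_cpu_pre_2_10_compat[] = {
        {
            .driver   = "powerpc64-cpu",       /* assumed CPU type name */
            .property = "pre-2.10-migration",
            .value    = "on",
        },
        { /* end of list */ },
    };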
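
The chardev-send-break command added in this series is exercised by tests/test-char.c above through qmp_chardev_send_break(); from a running instance the same thing is reachable from either monitor. A usage sketch, where serial0 is just a placeholder chardev id:

    QMP:  { "execute": "chardev-send-break", "arguments": { "id": "serial0" } }
    HMP:  chardev-send-break serial0

As the "label-foo" check in the test demonstrates, sending a break to an id that does not exist delivers no event to the frontend.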
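
Finally, on the vl.c reshuffle: the TODO in the -only-migratable case already names the long-term spelling, -global migration.only-migratable=true, and it works precisely because user -global settings are registered last in register_global_properties() and therefore override machine and accelerator compat defaults. Creating the migration object only after that registration, via migration_object_init(), is what lets the property actually land on it, so an invocation such as

    qemu-system-ppc64 -global migration.only-migratable=true ...

should end up equivalent to passing the legacy flag (the exact spelling of that flag is inferred from QEMU_OPTION_only_migratable, not shown in this hunk).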