Diffstat (limited to 'hw/i386/kvm')
-rw-r--r--  hw/i386/kvm/clock.c          6
-rw-r--r--  hw/i386/kvm/xen_evtchn.c    34
-rw-r--r--  hw/i386/kvm/xen_gnttab.c     4
-rw-r--r--  hw/i386/kvm/xen_overlay.c    4
-rw-r--r--  hw/i386/kvm/xen_xenstore.c   4
5 files changed, 26 insertions, 26 deletions
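
This diff makes two mechanical conversions across hw/i386/kvm: VMStateDescription field arrays gain a const qualifier (and subsection arrays become arrays of const pointers), and the big-lock helpers are switched from the old iothread-lock names to the BQL names. As a reference for the first conversion, below is a minimal sketch of the post-change VMStateDescription pattern; it is modeled directly on the clock.c hunks in this diff, but uses a hypothetical ExampleState type rather than any real device state.

#include "qemu/osdep.h"
#include "migration/vmstate.h"

typedef struct ExampleState {
    uint64_t clock;
    bool clock_is_reliable;
} ExampleState;

static const VMStateDescription example_subsection = {
    .name = "example/subsection",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {            /* was: (VMStateField[]) */
        VMSTATE_BOOL(clock_is_reliable, ExampleState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription example_vmsd = {
    .name = "example",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {            /* was: (VMStateField[]) */
        VMSTATE_UINT64(clock, ExampleState),
        VMSTATE_END_OF_LIST()
    },
    /* The subsection list also gains a const on the pointer elements. */
    .subsections = (const VMStateDescription * const []) {
        &example_subsection,
        NULL
    }
};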
diff --git a/hw/i386/kvm/clock.c b/hw/i386/kvm/clock.c
index e756b0aa43..40aa9a32c3 100644
--- a/hw/i386/kvm/clock.c
+++ b/hw/i386/kvm/clock.c
@@ -245,7 +245,7 @@ static const VMStateDescription kvmclock_reliable_get_clock = {
     .version_id = 1,
     .minimum_version_id = 1,
     .needed = kvmclock_clock_is_reliable_needed,
-    .fields = (VMStateField[]) {
+    .fields = (const VMStateField[]) {
         VMSTATE_BOOL(clock_is_reliable, KVMClockState),
         VMSTATE_END_OF_LIST()
     }
@@ -295,11 +295,11 @@ static const VMStateDescription kvmclock_vmsd = {
     .minimum_version_id = 1,
     .pre_load = kvmclock_pre_load,
     .pre_save = kvmclock_pre_save,
-    .fields = (VMStateField[]) {
+    .fields = (const VMStateField[]) {
         VMSTATE_UINT64(clock, KVMClockState),
         VMSTATE_END_OF_LIST()
     },
-    .subsections = (const VMStateDescription * []) {
+    .subsections = (const VMStateDescription * const []) {
         &kvmclock_reliable_get_clock,
         NULL
     }
diff --git a/hw/i386/kvm/xen_evtchn.c b/hw/i386/kvm/xen_evtchn.c
index 02b8cbf8df..0171ef6d59 100644
--- a/hw/i386/kvm/xen_evtchn.c
+++ b/hw/i386/kvm/xen_evtchn.c
@@ -240,7 +240,7 @@ static const VMStateDescription xen_evtchn_port_vmstate = {
     .name = "xen_evtchn_port",
     .version_id = 1,
     .minimum_version_id = 1,
-    .fields = (VMStateField[]) {
+    .fields = (const VMStateField[]) {
         VMSTATE_UINT32(vcpu, XenEvtchnPort),
         VMSTATE_UINT16(type, XenEvtchnPort),
         VMSTATE_UINT16(u.val, XenEvtchnPort),
@@ -255,7 +255,7 @@ static const VMStateDescription xen_evtchn_vmstate = {
     .needed = xen_evtchn_is_needed,
     .pre_load = xen_evtchn_pre_load,
     .post_load = xen_evtchn_post_load,
-    .fields = (VMStateField[]) {
+    .fields = (const VMStateField[]) {
         VMSTATE_UINT64(callback_param, XenEvtchnState),
         VMSTATE_UINT32(nr_ports, XenEvtchnState),
         VMSTATE_STRUCT_VARRAY_UINT32(port_table, XenEvtchnState, nr_ports, 1,
@@ -425,7 +425,7 @@ void xen_evtchn_set_callback_level(int level)
      * effect immediately. That just leaves interdomain loopback as the case
      * which uses the BH.
      */
-    if (!qemu_mutex_iothread_locked()) {
+    if (!bql_locked()) {
         qemu_bh_schedule(s->gsi_bh);
         return;
     }
@@ -459,7 +459,7 @@ int xen_evtchn_set_callback_param(uint64_t param)
      * We need the BQL because set_callback_pci_intx() may call into PCI code,
      * and because we may need to manipulate the old and new GSI levels.
      */
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
     qemu_mutex_lock(&s->port_lock);
 
     switch (type) {
@@ -1037,7 +1037,7 @@ static int close_port(XenEvtchnState *s, evtchn_port_t port,
     XenEvtchnPort *p = &s->port_table[port];
 
     /* Because it *might* be a PIRQ port */
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     switch (p->type) {
     case EVTCHNSTAT_closed:
@@ -1104,7 +1104,7 @@ int xen_evtchn_soft_reset(void)
         return -ENOTSUP;
     }
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     qemu_mutex_lock(&s->port_lock);
 
@@ -1127,7 +1127,7 @@ int xen_evtchn_reset_op(struct evtchn_reset *reset)
         return -ESRCH;
     }
 
-    QEMU_IOTHREAD_LOCK_GUARD();
+    BQL_LOCK_GUARD();
     return xen_evtchn_soft_reset();
 }
 
@@ -1145,7 +1145,7 @@ int xen_evtchn_close_op(struct evtchn_close *close)
         return -EINVAL;
     }
 
-    QEMU_IOTHREAD_LOCK_GUARD();
+    BQL_LOCK_GUARD();
     qemu_mutex_lock(&s->port_lock);
 
     ret = close_port(s, close->port, &flush_kvm_routes);
@@ -1272,7 +1272,7 @@ int xen_evtchn_bind_pirq_op(struct evtchn_bind_pirq *pirq)
         return -EINVAL;
     }
 
-    QEMU_IOTHREAD_LOCK_GUARD();
+    BQL_LOCK_GUARD();
 
     if (s->pirq[pirq->pirq].port) {
         return -EBUSY;
@@ -1601,7 +1601,7 @@ bool xen_evtchn_set_gsi(int gsi, int level)
     XenEvtchnState *s = xen_evtchn_singleton;
     int pirq;
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     if (!s || gsi < 0 || gsi >= IOAPIC_NUM_PINS) {
         return false;
@@ -1712,7 +1712,7 @@ void xen_evtchn_snoop_msi(PCIDevice *dev, bool is_msix, unsigned int vector,
         return;
     }
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     pirq = msi_pirq_target(addr, data);
 
@@ -1749,7 +1749,7 @@ int xen_evtchn_translate_pirq_msi(struct kvm_irq_routing_entry *route,
         return 1; /* Not a PIRQ */
     }
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     pirq = msi_pirq_target(address, data);
     if (!pirq || pirq >= s->nr_pirqs) {
@@ -1796,7 +1796,7 @@ bool xen_evtchn_deliver_pirq_msi(uint64_t address, uint32_t data)
         return false;
     }
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     pirq = msi_pirq_target(address, data);
     if (!pirq || pirq >= s->nr_pirqs) {
@@ -1824,7 +1824,7 @@ int xen_physdev_map_pirq(struct physdev_map_pirq *map)
         return -ENOTSUP;
     }
 
-    QEMU_IOTHREAD_LOCK_GUARD();
+    BQL_LOCK_GUARD();
     QEMU_LOCK_GUARD(&s->port_lock);
 
     if (map->domid != DOMID_SELF && map->domid != xen_domid) {
@@ -1884,7 +1884,7 @@ int xen_physdev_unmap_pirq(struct physdev_unmap_pirq *unmap)
         return -EINVAL;
     }
 
-    QEMU_IOTHREAD_LOCK_GUARD();
+    BQL_LOCK_GUARD();
     qemu_mutex_lock(&s->port_lock);
 
     if (!pirq_inuse(s, pirq)) {
@@ -1924,7 +1924,7 @@ int xen_physdev_eoi_pirq(struct physdev_eoi *eoi)
         return -ENOTSUP;
     }
 
-    QEMU_IOTHREAD_LOCK_GUARD();
+    BQL_LOCK_GUARD();
     QEMU_LOCK_GUARD(&s->port_lock);
 
     if (!pirq_inuse(s, pirq)) {
@@ -1956,7 +1956,7 @@ int xen_physdev_query_pirq(struct physdev_irq_status_query *query)
         return -ENOTSUP;
     }
 
-    QEMU_IOTHREAD_LOCK_GUARD();
+    BQL_LOCK_GUARD();
     QEMU_LOCK_GUARD(&s->port_lock);
 
     if (!pirq_inuse(s, pirq)) {
diff --git a/hw/i386/kvm/xen_gnttab.c b/hw/i386/kvm/xen_gnttab.c
index 0a24f53f20..245e4b15db 100644
--- a/hw/i386/kvm/xen_gnttab.c
+++ b/hw/i386/kvm/xen_gnttab.c
@@ -127,7 +127,7 @@ static const VMStateDescription xen_gnttab_vmstate = {
     .minimum_version_id = 1,
     .needed = xen_gnttab_is_needed,
     .post_load = xen_gnttab_post_load,
-    .fields = (VMStateField[]) {
+    .fields = (const VMStateField[]) {
         VMSTATE_UINT32(nr_frames, XenGnttabState),
         VMSTATE_VARRAY_UINT32(gnt_frame_gpas, XenGnttabState, nr_frames, 0,
                               vmstate_info_uint64, uint64_t),
@@ -176,7 +176,7 @@ int xen_gnttab_map_page(uint64_t idx, uint64_t gfn)
         return -EINVAL;
     }
 
-    QEMU_IOTHREAD_LOCK_GUARD();
+    BQL_LOCK_GUARD();
     QEMU_LOCK_GUARD(&s->gnt_lock);
 
     xen_overlay_do_map_page(&s->gnt_aliases[idx], gpa);
diff --git a/hw/i386/kvm/xen_overlay.c b/hw/i386/kvm/xen_overlay.c
index 39fda1b72c..c68e78ac5c 100644
--- a/hw/i386/kvm/xen_overlay.c
+++ b/hw/i386/kvm/xen_overlay.c
@@ -139,7 +139,7 @@ static const VMStateDescription xen_overlay_vmstate = {
     .needed = xen_overlay_is_needed,
     .pre_save = xen_overlay_pre_save,
     .post_load = xen_overlay_post_load,
-    .fields = (VMStateField[]) {
+    .fields = (const VMStateField[]) {
         VMSTATE_UINT64(shinfo_gpa, XenOverlayState),
         VMSTATE_BOOL(long_mode, XenOverlayState),
         VMSTATE_END_OF_LIST()
@@ -194,7 +194,7 @@ int xen_overlay_map_shinfo_page(uint64_t gpa)
         return -ENOENT;
     }
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     if (s->shinfo_gpa) {
         /* If removing shinfo page, turn the kernel magic off first */
diff --git a/hw/i386/kvm/xen_xenstore.c b/hw/i386/kvm/xen_xenstore.c
index 6e651960b3..1a9bc342b8 100644
--- a/hw/i386/kvm/xen_xenstore.c
+++ b/hw/i386/kvm/xen_xenstore.c
@@ -243,7 +243,7 @@ static const VMStateDescription xen_xenstore_vmstate = {
     .needed = xen_xenstore_is_needed,
     .pre_save = xen_xenstore_pre_save,
     .post_load = xen_xenstore_post_load,
-    .fields = (VMStateField[]) {
+    .fields = (const VMStateField[]) {
         VMSTATE_UINT8_ARRAY(req_data, XenXenstoreState,
                             sizeof_field(XenXenstoreState, req_data)),
         VMSTATE_UINT8_ARRAY(rsp_data, XenXenstoreState,
@@ -1341,7 +1341,7 @@ static void fire_watch_cb(void *opaque, const char *path, const char *token)
 {
     XenXenstoreState *s = opaque;
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     /*
      * If there's a response pending, we obviously can't scribble over
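
For the second conversion, here is a minimal sketch (not taken from the diff) of how the two renamed BQL primitives that appear throughout these hunks are used; the function names below are hypothetical, and only the renaming itself is what the patch changes:

#include "qemu/osdep.h"
#include "qemu/main-loop.h"

/*
 * The mapping applied by this diff is purely mechanical:
 *   qemu_mutex_iothread_locked()  ->  bql_locked()
 *   QEMU_IOTHREAD_LOCK_GUARD()    ->  BQL_LOCK_GUARD()
 */
static void example_op_needing_bql(void)
{
    /* Callers that already hold the BQL simply assert it. */
    assert(bql_locked());
    /* ... manipulate state protected by the BQL ... */
}

static int example_hypercall_entry(void)
{
    /* Entry points take the BQL for the rest of the scope. */
    BQL_LOCK_GUARD();
    example_op_needing_bql();
    return 0;
}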