 accel/kvm/kvm-all.c          | 2 --
 accel/tcg/cpu-exec.c         | 1 -
 accel/tcg/tcg-accel-ops-rr.c | 9 +++++++--
 accel/tcg/tcg-accel-ops.c    | 2 --
 accel/tcg/user-exec.c        | 1 +
 system/cpus.c                | 1 +
 target/i386/nvmm/nvmm-all.c  | 2 --
 target/i386/whpx/whpx-all.c  | 2 --
 8 files changed, 9 insertions(+), 11 deletions(-)
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index e4167d94b4..d13156bee8 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -3155,7 +3155,6 @@ int kvm_cpu_exec(CPUState *cpu)
     trace_kvm_cpu_exec();

     if (kvm_arch_process_async_events(cpu)) {
-        qatomic_set(&cpu->exit_request, 0);
         return EXCP_HLT;
     }

@@ -3345,7 +3344,6 @@ int kvm_cpu_exec(CPUState *cpu)
         vm_stop(RUN_STATE_INTERNAL_ERROR);
     }

-    qatomic_set(&cpu->exit_request, 0);
     return ret;
 }
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 3ae545e888..ad94f96b25 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -872,7 +872,6 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
      * The corresponding store-release is in cpu_exit.
      */
     if (unlikely(qatomic_load_acquire(&cpu->exit_request)) || icount_exit_request(cpu)) {
-        qatomic_set(&cpu->exit_request, 0);
         if (cpu->exception_index == -1) {
             cpu->exception_index = EXCP_INTERRUPT;
         }
diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c
index 813b313859..7dbdba7b51 100644
--- a/accel/tcg/tcg-accel-ops-rr.c
+++ b/accel/tcg/tcg-accel-ops-rr.c
@@ -293,8 +293,13 @@ static void *rr_cpu_thread_fn(void *arg)
         /* Does not need a memory barrier because a spurious wakeup is okay. */
         qatomic_set(&rr_current_cpu, NULL);

-        if (cpu && qatomic_read(&cpu->exit_request)) {
-            qatomic_set_mb(&cpu->exit_request, 0);
+        if (cpu) {
+            /*
+             * This could even reset exit_request for all CPUs, but in practice
+             * races between CPU exits and changes to "cpu" are so rare that
+             * there's no advantage in doing so.
+             */
+            qatomic_set(&cpu->exit_request, false);
         }

         if (icount_enabled() && all_cpu_threads_idle()) {
diff --git a/accel/tcg/tcg-accel-ops.c b/accel/tcg/tcg-accel-ops.c
index 1f662a9c74..3bd9800504 100644
--- a/accel/tcg/tcg-accel-ops.c
+++ b/accel/tcg/tcg-accel-ops.c
@@ -82,8 +82,6 @@ int tcg_cpu_exec(CPUState *cpu)
     ret = cpu_exec(cpu);
     cpu_exec_end(cpu);

-    qatomic_set_mb(&cpu->exit_request, 0);
-
     return ret;
 }
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index 65f5da6c50..916f18754f 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -54,6 +54,7 @@ void qemu_cpu_kick(CPUState *cpu)

 void qemu_process_cpu_events(CPUState *cpu)
 {
+    qatomic_set(&cpu->exit_request, false);
     process_queued_cpu_work(cpu);
 }
diff --git a/system/cpus.c b/system/cpus.c
index fd804e0732..aa7bfcf56e 100644
--- a/system/cpus.c
+++ b/system/cpus.c
@@ -463,6 +463,7 @@ void qemu_process_cpu_events(CPUState *cpu)
 {
     bool slept = false;

+    qatomic_set(&cpu->exit_request, false);
     while (cpu_thread_is_idle(cpu)) {
         if (!slept) {
             slept = true;
diff --git a/target/i386/nvmm/nvmm-all.c b/target/i386/nvmm/nvmm-all.c
index 7e36c42fbb..ed42425167 100644
--- a/target/i386/nvmm/nvmm-all.c
+++ b/target/i386/nvmm/nvmm-all.c
@@ -817,8 +817,6 @@ nvmm_vcpu_loop(CPUState *cpu)
     cpu_exec_end(cpu);
     bql_lock();

-    qatomic_set(&cpu->exit_request, false);
-
     return ret < 0;
 }
diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c
index 00fb7e2310..2a85168ed5 100644
--- a/target/i386/whpx/whpx-all.c
+++ b/target/i386/whpx/whpx-all.c
@@ -2050,8 +2050,6 @@ static int whpx_vcpu_run(CPUState *cpu)
         whpx_last_vcpu_stopping(cpu);
     }

-    qatomic_set(&cpu->exit_request, false);
-
     return ret < 0;
 }
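
Net effect: cpu->exit_request is now reset in exactly one common place, qemu_process_cpu_events() (the system-mode copy in system/cpus.c and the user-mode copy in accel/tcg/user-exec.c), instead of ad hoc in each accelerator's run loop; only the round-robin TCG thread keeps an extra clear, with a comment explaining why resetting just the current CPU is enough. The sketch below is illustrative only, not code from the patch: it shows the vCPU thread loop shape this converges on, with run_vcpu_once() as a hypothetical stand-in for the accelerator-specific body (kvm_cpu_exec(), tcg_cpu_exec(), whpx_vcpu_run(), nvmm_vcpu_loop()).

/*
 * Illustrative sketch, not code from the patch.  run_vcpu_once() is a
 * hypothetical placeholder for the accelerator-specific execution
 * function; the loop condition mirrors the existing vCPU thread
 * functions.
 */
static void vcpu_thread_loop_sketch(CPUState *cpu)
{
    do {
        if (cpu_can_run(cpu)) {
            /* May return because something set cpu->exit_request. */
            run_vcpu_once(cpu);
        }
        /*
         * Processes queued work and halt state and, after this patch,
         * also clears cpu->exit_request, so the accelerator code above
         * no longer needs its own qatomic_set(&cpu->exit_request, 0).
         */
        qemu_process_cpu_events(cpu);
    } while (!cpu->unplug || cpu_can_run(cpu));
}

Note that the TCG fast path keeps its own synchronization: cpu_handle_interrupt() still reads exit_request with qatomic_load_acquire(), paired with the store-release in cpu_exit(); the patch only removes the reset back to zero from that path.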