254 files changed, 5667 insertions, 7838 deletions
diff --git a/.gitlab-ci.d/opensbi.yml b/.gitlab-ci.d/opensbi.yml index 04ed5a3ea1..9a651465d8 100644 --- a/.gitlab-ci.d/opensbi.yml +++ b/.gitlab-ci.d/opensbi.yml @@ -42,9 +42,9 @@ docker-opensbi: extends: .opensbi_job_rules stage: containers - image: docker:19.03.1 + image: docker:stable services: - - docker:19.03.1-dind + - docker:stable-dind variables: GIT_DEPTH: 3 IMAGE_TAG: $CI_REGISTRY_IMAGE:opensbi-cross-build diff --git a/.gitlab-ci.d/opensbi/Dockerfile b/.gitlab-ci.d/opensbi/Dockerfile index 4ba8a4de86..5ccf4151f4 100644 --- a/.gitlab-ci.d/opensbi/Dockerfile +++ b/.gitlab-ci.d/opensbi/Dockerfile @@ -15,6 +15,7 @@ RUN apt update \ ca-certificates \ git \ make \ + python3 \ wget \ && \ \ diff --git a/MAINTAINERS b/MAINTAINERS index 76662969d7..5340de0515 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -999,12 +999,6 @@ S: Maintained F: hw/ssi/xlnx-versal-ospi.c F: include/hw/ssi/xlnx-versal-ospi.h -ARM ACPI Subsystem -M: Shannon Zhao <shannon.zhaosl@gmail.com> -L: qemu-arm@nongnu.org -S: Maintained -F: hw/arm/virt-acpi-build.c - STM32F100 M: Alexandre Iooss <erdnaxe@crans.org> L: qemu-arm@nongnu.org @@ -1892,6 +1886,18 @@ F: docs/specs/acpi_nvdimm.rst F: docs/specs/acpi_pci_hotplug.rst F: docs/specs/acpi_hw_reduced_hotplug.rst +ARM ACPI Subsystem +M: Shannon Zhao <shannon.zhaosl@gmail.com> +L: qemu-arm@nongnu.org +S: Maintained +F: hw/arm/virt-acpi-build.c + +RISC-V ACPI Subsystem +M: Sunil V L <sunilvl@ventanamicro.com> +L: qemu-riscv@nongnu.org +S: Maintained +F: hw/riscv/virt-acpi-build.c + ACPI/VIOT M: Jean-Philippe Brucker <jean-philippe@linaro.org> S: Supported @@ -2490,6 +2496,7 @@ Subsystems ---------- Overall Audio backends M: Gerd Hoffmann <kraxel@redhat.com> +M: Marc-André Lureau <marcandre.lureau@redhat.com> S: Odd Fixes F: audio/ X: audio/alsaaudio.c @@ -2637,6 +2644,7 @@ T: git https://gitlab.com/vsementsov/qemu.git block Compute Express Link M: Ben Widawsky <ben.widawsky@intel.com> M: Jonathan Cameron <jonathan.cameron@huawei.com> +R: Fan Ni <fan.ni@samsung.com> S: Supported F: hw/cxl/ F: hw/mem/cxl_type3.c @@ -2784,6 +2792,7 @@ F: docs/spice-port-fqdn.txt Graphics M: Gerd Hoffmann <kraxel@redhat.com> +M: Marc-André Lureau <marcandre.lureau@redhat.com> S: Odd Fixes F: ui/ F: include/ui/ diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index 008ae7a66d..e984a98dc4 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -1250,7 +1250,6 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx, desc->fulltlb[index] = *full; desc->fulltlb[index].xlat_section = iotlb - vaddr_page; desc->fulltlb[index].phys_addr = paddr_page; - desc->fulltlb[index].prot = prot; /* Now calculate the new entry */ tn.addend = addend - vaddr_page; @@ -1768,6 +1767,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, CPUTLBEntry *tlbe; target_ulong tlb_addr; void *hostaddr; + CPUTLBEntryFull *full; tcg_debug_assert(mmu_idx < NB_MMU_MODES); @@ -1806,17 +1806,26 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK; } - /* Let the guest notice RMW on a write-only page. */ - if ((prot & PAGE_READ) && - unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) { - tlb_fill(env_cpu(env), addr, size, - MMU_DATA_LOAD, mmu_idx, retaddr); + if (prot & PAGE_READ) { /* - * Since we don't support reads and writes to different addresses, - * and we do have the proper page loaded for write, this shouldn't - * ever return. But just in case, handle via stop-the-world. + * Let the guest notice RMW on a write-only page. 
+ * We have just verified that the page is writable. + * Subpage lookups may have left TLB_INVALID_MASK set, + * but addr_read will only be -1 if PAGE_READ was unset. */ - goto stop_the_world; + if (unlikely(tlbe->addr_read == -1)) { + tlb_fill(env_cpu(env), addr, size, + MMU_DATA_LOAD, mmu_idx, retaddr); + /* + * Since we don't support reads and writes to different + * addresses, and we do have the proper page loaded for + * write, this shouldn't ever return. But just in case, + * handle via stop-the-world. + */ + goto stop_the_world; + } + /* Collect TLB_WATCHPOINT for read. */ + tlb_addr |= tlbe->addr_read; } } else /* if (prot & PAGE_READ) */ { tlb_addr = tlbe->addr_read; @@ -1832,17 +1841,25 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, } /* Notice an IO access or a needs-MMU-lookup access */ - if (unlikely(tlb_addr & TLB_MMIO)) { + if (unlikely(tlb_addr & (TLB_MMIO | TLB_DISCARD_WRITE))) { /* There's really nothing that can be done to support this apart from stop-the-world. */ goto stop_the_world; } hostaddr = (void *)((uintptr_t)addr + tlbe->addend); + full = &env_tlb(env)->d[mmu_idx].fulltlb[index]; if (unlikely(tlb_addr & TLB_NOTDIRTY)) { - notdirty_write(env_cpu(env), addr, size, - &env_tlb(env)->d[mmu_idx].fulltlb[index], retaddr); + notdirty_write(env_cpu(env), addr, size, full, retaddr); + } + + if (unlikely(tlb_addr & TLB_WATCHPOINT)) { + QEMU_BUILD_BUG_ON(PAGE_READ != BP_MEM_READ); + QEMU_BUILD_BUG_ON(PAGE_WRITE != BP_MEM_WRITE); + /* therefore prot == watchpoint bits */ + cpu_check_watchpoint(env_cpu(env), addr, size, + full->attrs, prot, retaddr); } return hostaddr; diff --git a/accel/tcg/tcg-accel-ops.c b/accel/tcg/tcg-accel-ops.c index aeb1cbaf65..af35e0d092 100644 --- a/accel/tcg/tcg-accel-ops.c +++ b/accel/tcg/tcg-accel-ops.c @@ -31,6 +31,7 @@ #include "sysemu/cpu-timers.h" #include "qemu/main-loop.h" #include "qemu/guest-random.h" +#include "qemu/timer.h" #include "exec/exec-all.h" #include "exec/hwaddr.h" #include "exec/gdbstub.h" diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c index 4b5abc0f44..a5bea8f99c 100644 --- a/accel/tcg/translate-all.c +++ b/accel/tcg/translate-all.c @@ -51,6 +51,7 @@ #include "qemu/qemu-print.h" #include "qemu/main-loop.h" #include "qemu/cacheinfo.h" +#include "qemu/timer.h" #include "exec/log.h" #include "sysemu/cpus.h" #include "sysemu/cpu-timers.h" diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c index d0babfea88..7bda43ff61 100644 --- a/accel/tcg/translator.c +++ b/accel/tcg/translator.c @@ -18,19 +18,6 @@ #include "exec/plugin-gen.h" #include "exec/replay-core.h" -/* Pairs with tcg_clear_temp_count. - To be called by #TranslatorOps.{translate_insn,tb_stop} if - (1) the target is sufficiently clean to support reporting, - (2) as and when all temporaries are known to be consumed. - For most targets, (2) is at the end of translate_insn. */ -void translator_loop_temp_check(DisasContextBase *db) -{ - if (tcg_check_temp_count()) { - qemu_log("warning: TCG temporary leaks before " - TARGET_FMT_lx "\n", db->pc_next); - } -} - bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest) { /* Suppress goto_tb if requested. */ @@ -67,9 +54,6 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns, ops->init_disas_context(db, cpu); tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */ - /* Reset the temp count so that we can identify leaks */ - tcg_clear_temp_count(); - /* Start translating. 
*/ gen_tb_start(db->tb); ops->tb_start(db, cpu); diff --git a/audio/alsaaudio.c b/audio/alsaaudio.c index 714bfb6453..057571dd1e 100644 --- a/audio/alsaaudio.c +++ b/audio/alsaaudio.c @@ -222,11 +222,7 @@ static int alsa_poll_helper (snd_pcm_t *handle, struct pollhlp *hlp, int mask) return -1; } - pfds = audio_calloc ("alsa_poll_helper", count, sizeof (*pfds)); - if (!pfds) { - dolog ("Could not initialize poll mode\n"); - return -1; - } + pfds = g_new0(struct pollfd, count); err = snd_pcm_poll_descriptors (handle, pfds, count); if (err < 0) { @@ -917,28 +913,23 @@ static void *alsa_audio_init(Audiodev *dev) alsa_init_per_direction(aopts->in); alsa_init_per_direction(aopts->out); - /* - * need to define them, as otherwise alsa produces no sound - * doesn't set has_* so alsa_open can identify it wasn't set by the user - */ + /* don't set has_* so alsa_open can identify it wasn't set by the user */ if (!dev->u.alsa.out->has_period_length) { - /* 1024 frames assuming 44100Hz */ - dev->u.alsa.out->period_length = 1024 * 1000000 / 44100; + /* 256 frames assuming 44100Hz */ + dev->u.alsa.out->period_length = 5805; } if (!dev->u.alsa.out->has_buffer_length) { /* 4096 frames assuming 44100Hz */ - dev->u.alsa.out->buffer_length = 4096ll * 1000000 / 44100; + dev->u.alsa.out->buffer_length = 92880; } - /* - * OptsVisitor sets unspecified optional fields to zero, but do not depend - * on it... - */ if (!dev->u.alsa.in->has_period_length) { - dev->u.alsa.in->period_length = 0; + /* 256 frames assuming 44100Hz */ + dev->u.alsa.in->period_length = 5805; } if (!dev->u.alsa.in->has_buffer_length) { - dev->u.alsa.in->buffer_length = 0; + /* 4096 frames assuming 44100Hz */ + dev->u.alsa.in->buffer_length = 92880; } return dev; diff --git a/audio/audio.c b/audio/audio.c index 4290309d18..70b096713c 100644 --- a/audio/audio.c +++ b/audio/audio.c @@ -33,6 +33,7 @@ #include "qapi/qapi-visit-audio.h" #include "qapi/qapi-commands-audio.h" #include "qemu/cutils.h" +#include "qemu/log.h" #include "qemu/module.h" #include "qemu/help_option.h" #include "sysemu/sysemu.h" @@ -148,26 +149,6 @@ static inline int audio_bits_to_index (int bits) } } -void *audio_calloc (const char *funcname, int nmemb, size_t size) -{ - int cond; - size_t len; - - len = nmemb * size; - cond = !nmemb || !size; - cond |= nmemb < 0; - cond |= len < size; - - if (audio_bug ("audio_calloc", cond)) { - AUD_log (NULL, "%s passed invalid arguments to audio_calloc\n", - funcname); - AUD_log (NULL, "nmemb=%d size=%zu (len=%zu)\n", nmemb, size, len); - return NULL; - } - - return g_malloc0 (len); -} - void AUD_vlog (const char *cap, const char *fmt, va_list ap) { if (cap) { @@ -400,13 +381,6 @@ void audio_pcm_info_clear_buf (struct audio_pcm_info *info, void *buf, int len) /* * Capture */ -static void noop_conv (struct st_sample *dst, const void *src, int samples) -{ - (void) src; - (void) dst; - (void) samples; -} - static CaptureVoiceOut *audio_pcm_capture_find_specific(AudioState *s, struct audsettings *as) { @@ -504,15 +478,8 @@ static int audio_attach_capture (HWVoiceOut *hw) sw->info = hw->info; sw->empty = 1; sw->active = hw->enabled; - sw->conv = noop_conv; - sw->ratio = ((int64_t) hw_cap->info.freq << 32) / sw->info.freq; sw->vol = nominal_volume; sw->rate = st_rate_start (sw->info.freq, hw_cap->info.freq); - if (!sw->rate) { - dolog ("Could not start rate conversion for `%s'\n", SW_NAME (sw)); - g_free (sw); - return -1; - } QLIST_INSERT_HEAD (&hw_cap->sw_head, sw, entries); QLIST_INSERT_HEAD (&hw->cap_head, sc, entries); #ifdef DEBUG_CAPTURE 
@@ -547,8 +514,8 @@ static size_t audio_pcm_hw_find_min_in (HWVoiceIn *hw) static size_t audio_pcm_hw_get_live_in(HWVoiceIn *hw) { size_t live = hw->total_samples_captured - audio_pcm_hw_find_min_in (hw); - if (audio_bug(__func__, live > hw->conv_buf->size)) { - dolog("live=%zu hw->conv_buf->size=%zu\n", live, hw->conv_buf->size); + if (audio_bug(__func__, live > hw->conv_buf.size)) { + dolog("live=%zu hw->conv_buf.size=%zu\n", live, hw->conv_buf.size); return 0; } return live; @@ -557,13 +524,13 @@ static size_t audio_pcm_hw_get_live_in(HWVoiceIn *hw) static size_t audio_pcm_hw_conv_in(HWVoiceIn *hw, void *pcm_buf, size_t samples) { size_t conv = 0; - STSampleBuffer *conv_buf = hw->conv_buf; + STSampleBuffer *conv_buf = &hw->conv_buf; while (samples) { uint8_t *src = advance(pcm_buf, conv * hw->info.bytes_per_frame); size_t proc = MIN(samples, conv_buf->size - conv_buf->pos); - hw->conv(conv_buf->samples + conv_buf->pos, src, proc); + hw->conv(conv_buf->buffer + conv_buf->pos, src, proc); conv_buf->pos = (conv_buf->pos + proc) % conv_buf->size; samples -= proc; conv += proc; @@ -575,56 +542,65 @@ static size_t audio_pcm_hw_conv_in(HWVoiceIn *hw, void *pcm_buf, size_t samples) /* * Soft voice (capture) */ -static size_t audio_pcm_sw_read(SWVoiceIn *sw, void *buf, size_t size) +static void audio_pcm_sw_resample_in(SWVoiceIn *sw, + size_t frames_in_max, size_t frames_out_max, + size_t *total_in, size_t *total_out) +{ + HWVoiceIn *hw = sw->hw; + struct st_sample *src, *dst; + size_t live, rpos, frames_in, frames_out; + + live = hw->total_samples_captured - sw->total_hw_samples_acquired; + rpos = audio_ring_posb(hw->conv_buf.pos, live, hw->conv_buf.size); + + /* resample conv_buf from rpos to end of buffer */ + src = hw->conv_buf.buffer + rpos; + frames_in = MIN(frames_in_max, hw->conv_buf.size - rpos); + dst = sw->resample_buf.buffer; + frames_out = frames_out_max; + st_rate_flow(sw->rate, src, dst, &frames_in, &frames_out); + rpos += frames_in; + *total_in = frames_in; + *total_out = frames_out; + + /* resample conv_buf from start of buffer if there are input frames left */ + if (frames_in_max - frames_in && rpos == hw->conv_buf.size) { + src = hw->conv_buf.buffer; + frames_in = frames_in_max - frames_in; + dst += frames_out; + frames_out = frames_out_max - frames_out; + st_rate_flow(sw->rate, src, dst, &frames_in, &frames_out); + *total_in += frames_in; + *total_out += frames_out; + } +} + +static size_t audio_pcm_sw_read(SWVoiceIn *sw, void *buf, size_t buf_len) { HWVoiceIn *hw = sw->hw; - size_t samples, live, ret = 0, swlim, isamp, osamp, rpos, total = 0; - struct st_sample *src, *dst = sw->buf; + size_t live, frames_out_max, total_in, total_out; live = hw->total_samples_captured - sw->total_hw_samples_acquired; if (!live) { return 0; } - if (audio_bug(__func__, live > hw->conv_buf->size)) { - dolog("live_in=%zu hw->conv_buf->size=%zu\n", live, hw->conv_buf->size); + if (audio_bug(__func__, live > hw->conv_buf.size)) { + dolog("live_in=%zu hw->conv_buf.size=%zu\n", live, hw->conv_buf.size); return 0; } - rpos = audio_ring_posb(hw->conv_buf->pos, live, hw->conv_buf->size); - - samples = size / sw->info.bytes_per_frame; - - swlim = (live * sw->ratio) >> 32; - swlim = MIN (swlim, samples); - - while (swlim) { - src = hw->conv_buf->samples + rpos; - if (hw->conv_buf->pos > rpos) { - isamp = hw->conv_buf->pos - rpos; - } else { - isamp = hw->conv_buf->size - rpos; - } - - if (!isamp) { - break; - } - osamp = swlim; + frames_out_max = MIN(buf_len / sw->info.bytes_per_frame, + 
sw->resample_buf.size); - st_rate_flow (sw->rate, src, dst, &isamp, &osamp); - swlim -= osamp; - rpos = (rpos + isamp) % hw->conv_buf->size; - dst += osamp; - ret += osamp; - total += isamp; - } + audio_pcm_sw_resample_in(sw, live, frames_out_max, &total_in, &total_out); if (!hw->pcm_ops->volume_in) { - mixeng_volume (sw->buf, ret, &sw->vol); + mixeng_volume(sw->resample_buf.buffer, total_out, &sw->vol); } + sw->clip(buf, sw->resample_buf.buffer, total_out); - sw->clip (buf, sw->buf, ret); - sw->total_hw_samples_acquired += total; - return ret * sw->info.bytes_per_frame; + sw->total_hw_samples_acquired += total_in; + return total_out * sw->info.bytes_per_frame; } /* @@ -660,8 +636,8 @@ static size_t audio_pcm_hw_get_live_out (HWVoiceOut *hw, int *nb_live) if (nb_live1) { size_t live = smin; - if (audio_bug(__func__, live > hw->mix_buf->size)) { - dolog("live=%zu hw->mix_buf->size=%zu\n", live, hw->mix_buf->size); + if (audio_bug(__func__, live > hw->mix_buf.size)) { + dolog("live=%zu hw->mix_buf.size=%zu\n", live, hw->mix_buf.size); return 0; } return live; @@ -678,17 +654,17 @@ static size_t audio_pcm_hw_get_free(HWVoiceOut *hw) static void audio_pcm_hw_clip_out(HWVoiceOut *hw, void *pcm_buf, size_t len) { size_t clipped = 0; - size_t pos = hw->mix_buf->pos; + size_t pos = hw->mix_buf.pos; while (len) { - st_sample *src = hw->mix_buf->samples + pos; + st_sample *src = hw->mix_buf.buffer + pos; uint8_t *dst = advance(pcm_buf, clipped * hw->info.bytes_per_frame); - size_t samples_till_end_of_buf = hw->mix_buf->size - pos; + size_t samples_till_end_of_buf = hw->mix_buf.size - pos; size_t samples_to_clip = MIN(len, samples_till_end_of_buf); hw->clip(dst, src, samples_to_clip); - pos = (pos + samples_to_clip) % hw->mix_buf->size; + pos = (pos + samples_to_clip) % hw->mix_buf.size; len -= samples_to_clip; clipped += samples_to_clip; } @@ -697,84 +673,113 @@ static void audio_pcm_hw_clip_out(HWVoiceOut *hw, void *pcm_buf, size_t len) /* * Soft voice (playback) */ -static size_t audio_pcm_sw_write(SWVoiceOut *sw, void *buf, size_t size) +static void audio_pcm_sw_resample_out(SWVoiceOut *sw, + size_t frames_in_max, size_t frames_out_max, + size_t *total_in, size_t *total_out) { - size_t hwsamples, samples, isamp, osamp, wpos, live, dead, left, blck; - size_t hw_free; - size_t ret = 0, pos = 0, total = 0; + HWVoiceOut *hw = sw->hw; + struct st_sample *src, *dst; + size_t live, wpos, frames_in, frames_out; - if (!sw) { - return size; + live = sw->total_hw_samples_mixed; + wpos = (hw->mix_buf.pos + live) % hw->mix_buf.size; + + /* write to mix_buf from wpos to end of buffer */ + src = sw->resample_buf.buffer; + frames_in = frames_in_max; + dst = hw->mix_buf.buffer + wpos; + frames_out = MIN(frames_out_max, hw->mix_buf.size - wpos); + st_rate_flow_mix(sw->rate, src, dst, &frames_in, &frames_out); + wpos += frames_out; + *total_in = frames_in; + *total_out = frames_out; + + /* write to mix_buf from start of buffer if there are input frames left */ + if (frames_in_max - frames_in > 0 && wpos == hw->mix_buf.size) { + src += frames_in; + frames_in = frames_in_max - frames_in; + dst = hw->mix_buf.buffer; + frames_out = frames_out_max - frames_out; + st_rate_flow_mix(sw->rate, src, dst, &frames_in, &frames_out); + *total_in += frames_in; + *total_out += frames_out; } +} - hwsamples = sw->hw->mix_buf->size; +static size_t audio_pcm_sw_write(SWVoiceOut *sw, void *buf, size_t buf_len) +{ + HWVoiceOut *hw = sw->hw; + size_t live, dead, hw_free, sw_max, fe_max; + size_t frames_in_max, frames_out_max, total_in, 
total_out; live = sw->total_hw_samples_mixed; - if (audio_bug(__func__, live > hwsamples)) { - dolog("live=%zu hw->mix_buf->size=%zu\n", live, hwsamples); + if (audio_bug(__func__, live > hw->mix_buf.size)) { + dolog("live=%zu hw->mix_buf.size=%zu\n", live, hw->mix_buf.size); return 0; } - if (live == hwsamples) { + if (live == hw->mix_buf.size) { #ifdef DEBUG_OUT dolog ("%s is full %zu\n", sw->name, live); #endif return 0; } - wpos = (sw->hw->mix_buf->pos + live) % hwsamples; - - dead = hwsamples - live; - hw_free = audio_pcm_hw_get_free(sw->hw); + dead = hw->mix_buf.size - live; + hw_free = audio_pcm_hw_get_free(hw); hw_free = hw_free > live ? hw_free - live : 0; - samples = ((int64_t)MIN(dead, hw_free) << 32) / sw->ratio; - samples = MIN(samples, size / sw->info.bytes_per_frame); - if (samples) { - sw->conv(sw->buf, buf, samples); + frames_out_max = MIN(dead, hw_free); + sw_max = st_rate_frames_in(sw->rate, frames_out_max); + fe_max = MIN(buf_len / sw->info.bytes_per_frame + sw->resample_buf.pos, + sw->resample_buf.size); + frames_in_max = MIN(sw_max, fe_max); + + if (!frames_in_max) { + return 0; + } + if (frames_in_max > sw->resample_buf.pos) { + sw->conv(sw->resample_buf.buffer + sw->resample_buf.pos, + buf, frames_in_max - sw->resample_buf.pos); if (!sw->hw->pcm_ops->volume_out) { - mixeng_volume(sw->buf, samples, &sw->vol); + mixeng_volume(sw->resample_buf.buffer + sw->resample_buf.pos, + frames_in_max - sw->resample_buf.pos, &sw->vol); } } - while (samples) { - dead = hwsamples - live; - left = hwsamples - wpos; - blck = MIN (dead, left); - if (!blck) { - break; - } - isamp = samples; - osamp = blck; - st_rate_flow_mix ( - sw->rate, - sw->buf + pos, - sw->hw->mix_buf->samples + wpos, - &isamp, - &osamp - ); - ret += isamp; - samples -= isamp; - pos += isamp; - live += osamp; - wpos = (wpos + osamp) % hwsamples; - total += osamp; - } - - sw->total_hw_samples_mixed += total; + audio_pcm_sw_resample_out(sw, frames_in_max, frames_out_max, + &total_in, &total_out); + + sw->total_hw_samples_mixed += total_out; sw->empty = sw->total_hw_samples_mixed == 0; + /* + * Upsampling may leave one audio frame in the resample buffer. Decrement + * total_in by one if there was a leftover frame from the previous resample + * pass in the resample buffer. Increment total_in by one if the current + * resample pass left one frame in the resample buffer. 
+ */ + if (frames_in_max - total_in == 1) { + /* copy one leftover audio frame to the beginning of the buffer */ + *sw->resample_buf.buffer = *(sw->resample_buf.buffer + total_in); + total_in += 1 - sw->resample_buf.pos; + sw->resample_buf.pos = 1; + } else if (total_in >= sw->resample_buf.pos) { + total_in -= sw->resample_buf.pos; + sw->resample_buf.pos = 0; + } + #ifdef DEBUG_OUT dolog ( - "%s: write size %zu ret %zu total sw %zu\n", - SW_NAME (sw), - size / sw->info.bytes_per_frame, - ret, + "%s: write size %zu written %zu total mixed %zu\n", + SW_NAME(sw), + buf_len / sw->info.bytes_per_frame, + total_in, sw->total_hw_samples_mixed ); #endif - return ret * sw->info.bytes_per_frame; + return total_in * sw->info.bytes_per_frame; } #ifdef DEBUG_AUDIO @@ -992,18 +997,6 @@ void AUD_set_active_in (SWVoiceIn *sw, int on) } } -/** - * audio_frontend_frames_in() - returns the number of frames the resampling - * code generates from frames_in frames - * - * @sw: audio recording frontend - * @frames_in: number of frames - */ -static size_t audio_frontend_frames_in(SWVoiceIn *sw, size_t frames_in) -{ - return (int64_t)frames_in * sw->ratio >> 32; -} - static size_t audio_get_avail (SWVoiceIn *sw) { size_t live; @@ -1013,33 +1006,21 @@ static size_t audio_get_avail (SWVoiceIn *sw) } live = sw->hw->total_samples_captured - sw->total_hw_samples_acquired; - if (audio_bug(__func__, live > sw->hw->conv_buf->size)) { - dolog("live=%zu sw->hw->conv_buf->size=%zu\n", live, - sw->hw->conv_buf->size); + if (audio_bug(__func__, live > sw->hw->conv_buf.size)) { + dolog("live=%zu sw->hw->conv_buf.size=%zu\n", live, + sw->hw->conv_buf.size); return 0; } ldebug ( - "%s: get_avail live %zu frontend frames %zu\n", + "%s: get_avail live %zu frontend frames %u\n", SW_NAME (sw), - live, audio_frontend_frames_in(sw, live) + live, st_rate_frames_out(sw->rate, live) ); return live; } -/** - * audio_frontend_frames_out() - returns the number of frames needed to - * get frames_out frames after resampling - * - * @sw: audio playback frontend - * @frames_out: number of frames - */ -static size_t audio_frontend_frames_out(SWVoiceOut *sw, size_t frames_out) -{ - return ((int64_t)frames_out << 32) / sw->ratio; -} - static size_t audio_get_free(SWVoiceOut *sw) { size_t live, dead; @@ -1050,17 +1031,17 @@ static size_t audio_get_free(SWVoiceOut *sw) live = sw->total_hw_samples_mixed; - if (audio_bug(__func__, live > sw->hw->mix_buf->size)) { - dolog("live=%zu sw->hw->mix_buf->size=%zu\n", live, - sw->hw->mix_buf->size); + if (audio_bug(__func__, live > sw->hw->mix_buf.size)) { + dolog("live=%zu sw->hw->mix_buf.size=%zu\n", live, + sw->hw->mix_buf.size); return 0; } - dead = sw->hw->mix_buf->size - live; + dead = sw->hw->mix_buf.size - live; #ifdef DEBUG_OUT - dolog("%s: get_free live %zu dead %zu frontend frames %zu\n", - SW_NAME(sw), live, dead, audio_frontend_frames_out(sw, dead)); + dolog("%s: get_free live %zu dead %zu frontend frames %u\n", + SW_NAME(sw), live, dead, st_rate_frames_in(sw->rate, dead)); #endif return dead; @@ -1076,32 +1057,40 @@ static void audio_capture_mix_and_clear(HWVoiceOut *hw, size_t rpos, for (sc = hw->cap_head.lh_first; sc; sc = sc->entries.le_next) { SWVoiceOut *sw = &sc->sw; - int rpos2 = rpos; + size_t rpos2 = rpos; n = samples; while (n) { - size_t till_end_of_hw = hw->mix_buf->size - rpos2; - size_t to_write = MIN(till_end_of_hw, n); - size_t bytes = to_write * hw->info.bytes_per_frame; - size_t written; - - sw->buf = hw->mix_buf->samples + rpos2; - written = audio_pcm_sw_write (sw, NULL, 
bytes); - if (written - bytes) { - dolog("Could not mix %zu bytes into a capture " + size_t till_end_of_hw = hw->mix_buf.size - rpos2; + size_t to_read = MIN(till_end_of_hw, n); + size_t live, frames_in, frames_out; + + sw->resample_buf.buffer = hw->mix_buf.buffer + rpos2; + sw->resample_buf.size = to_read; + live = sw->total_hw_samples_mixed; + + audio_pcm_sw_resample_out(sw, + to_read, sw->hw->mix_buf.size - live, + &frames_in, &frames_out); + + sw->total_hw_samples_mixed += frames_out; + sw->empty = sw->total_hw_samples_mixed == 0; + + if (to_read - frames_in) { + dolog("Could not mix %zu frames into a capture " "buffer, mixed %zu\n", - bytes, written); + to_read, frames_in); break; } - n -= to_write; - rpos2 = (rpos2 + to_write) % hw->mix_buf->size; + n -= to_read; + rpos2 = (rpos2 + to_read) % hw->mix_buf.size; } } } - n = MIN(samples, hw->mix_buf->size - rpos); - mixeng_clear(hw->mix_buf->samples + rpos, n); - mixeng_clear(hw->mix_buf->samples, samples - n); + n = MIN(samples, hw->mix_buf.size - rpos); + mixeng_clear(hw->mix_buf.buffer + rpos, n); + mixeng_clear(hw->mix_buf.buffer, samples - n); } static size_t audio_pcm_hw_run_out(HWVoiceOut *hw, size_t live) @@ -1127,7 +1116,7 @@ static size_t audio_pcm_hw_run_out(HWVoiceOut *hw, size_t live) live -= proc; clipped += proc; - hw->mix_buf->pos = (hw->mix_buf->pos + proc) % hw->mix_buf->size; + hw->mix_buf.pos = (hw->mix_buf.pos + proc) % hw->mix_buf.size; if (proc == 0 || proc < decr) { break; @@ -1181,12 +1170,14 @@ static void audio_run_out (AudioState *s) size_t free; if (hw_free > sw->total_hw_samples_mixed) { - free = audio_frontend_frames_out(sw, + free = st_rate_frames_in(sw->rate, MIN(sw_free, hw_free - sw->total_hw_samples_mixed)); } else { free = 0; } - if (free > 0) { + if (free > sw->resample_buf.pos) { + free = MIN(free, sw->resample_buf.size) + - sw->resample_buf.pos; sw->callback.fn(sw->callback.opaque, free * sw->info.bytes_per_frame); } @@ -1198,8 +1189,8 @@ static void audio_run_out (AudioState *s) live = 0; } - if (audio_bug(__func__, live > hw->mix_buf->size)) { - dolog("live=%zu hw->mix_buf->size=%zu\n", live, hw->mix_buf->size); + if (audio_bug(__func__, live > hw->mix_buf.size)) { + dolog("live=%zu hw->mix_buf.size=%zu\n", live, hw->mix_buf.size); continue; } @@ -1227,13 +1218,13 @@ static void audio_run_out (AudioState *s) continue; } - prev_rpos = hw->mix_buf->pos; + prev_rpos = hw->mix_buf.pos; played = audio_pcm_hw_run_out(hw, live); replay_audio_out(&played); - if (audio_bug(__func__, hw->mix_buf->pos >= hw->mix_buf->size)) { - dolog("hw->mix_buf->pos=%zu hw->mix_buf->size=%zu played=%zu\n", - hw->mix_buf->pos, hw->mix_buf->size, played); - hw->mix_buf->pos = 0; + if (audio_bug(__func__, hw->mix_buf.pos >= hw->mix_buf.size)) { + dolog("hw->mix_buf.pos=%zu hw->mix_buf.size=%zu played=%zu\n", + hw->mix_buf.pos, hw->mix_buf.size, played); + hw->mix_buf.pos = 0; } #ifdef DEBUG_OUT @@ -1314,10 +1305,10 @@ static void audio_run_in (AudioState *s) if (replay_mode != REPLAY_MODE_PLAY) { captured = audio_pcm_hw_run_in( - hw, hw->conv_buf->size - audio_pcm_hw_get_live_in(hw)); + hw, hw->conv_buf.size - audio_pcm_hw_get_live_in(hw)); } - replay_audio_in(&captured, hw->conv_buf->samples, &hw->conv_buf->pos, - hw->conv_buf->size); + replay_audio_in(&captured, hw->conv_buf.buffer, &hw->conv_buf.pos, + hw->conv_buf.size); min = audio_pcm_hw_find_min_in (hw); hw->total_samples_captured += captured - min; @@ -1330,8 +1321,9 @@ static void audio_run_in (AudioState *s) size_t sw_avail = audio_get_avail(sw); size_t avail; - 
avail = audio_frontend_frames_in(sw, sw_avail); + avail = st_rate_frames_out(sw->rate, sw_avail); if (avail > 0) { + avail = MIN(avail, sw->resample_buf.size); sw->callback.fn(sw->callback.opaque, avail * sw->info.bytes_per_frame); } @@ -1350,14 +1342,14 @@ static void audio_run_capture (AudioState *s) SWVoiceOut *sw; captured = live = audio_pcm_hw_get_live_out (hw, NULL); - rpos = hw->mix_buf->pos; + rpos = hw->mix_buf.pos; while (live) { - size_t left = hw->mix_buf->size - rpos; + size_t left = hw->mix_buf.size - rpos; size_t to_capture = MIN(live, left); struct st_sample *src; struct capture_callback *cb; - src = hw->mix_buf->samples + rpos; + src = hw->mix_buf.buffer + rpos; hw->clip (cap->buf, src, to_capture); mixeng_clear (src, to_capture); @@ -1365,10 +1357,10 @@ static void audio_run_capture (AudioState *s) cb->ops.capture (cb->opaque, cap->buf, to_capture * hw->info.bytes_per_frame); } - rpos = (rpos + to_capture) % hw->mix_buf->size; + rpos = (rpos + to_capture) % hw->mix_buf.size; live -= to_capture; } - hw->mix_buf->pos = rpos; + hw->mix_buf.pos = rpos; for (sw = hw->sw_head.lh_first; sw; sw = sw->entries.le_next) { if (!sw->active && sw->empty) { @@ -1927,7 +1919,7 @@ CaptureVoiceOut *AUD_add_capture( audio_pcm_init_info (&hw->info, as); - cap->buf = g_malloc0_n(hw->mix_buf->size, hw->info.bytes_per_frame); + cap->buf = g_malloc0_n(hw->mix_buf.size, hw->info.bytes_per_frame); if (hw->info.is_float) { hw->clip = mixeng_clip_float[hw->info.nchannels == 2]; @@ -1979,7 +1971,7 @@ void AUD_del_capture (CaptureVoiceOut *cap, void *cb_opaque) sw = sw1; } QLIST_REMOVE (cap, entries); - g_free (cap->hw.mix_buf); + g_free(cap->hw.mix_buf.buffer); g_free (cap->buf); g_free (cap); } diff --git a/audio/audio_int.h b/audio/audio_int.h index e87ce014a0..d51d63f08d 100644 --- a/audio/audio_int.h +++ b/audio/audio_int.h @@ -58,7 +58,7 @@ typedef struct SWVoiceCap SWVoiceCap; typedef struct STSampleBuffer { size_t pos, size; - st_sample samples[]; + st_sample *buffer; } STSampleBuffer; typedef struct HWVoiceOut { @@ -71,7 +71,7 @@ typedef struct HWVoiceOut { f_sample *clip; uint64_t ts_helper; - STSampleBuffer *mix_buf; + STSampleBuffer mix_buf; void *buf_emul; size_t pos_emul, pending_emul, size_emul; @@ -93,7 +93,7 @@ typedef struct HWVoiceIn { size_t total_samples_captured; uint64_t ts_helper; - STSampleBuffer *conv_buf; + STSampleBuffer conv_buf; void *buf_emul; size_t pos_emul, pending_emul, size_emul; @@ -108,8 +108,7 @@ struct SWVoiceOut { AudioState *s; struct audio_pcm_info info; t_sample *conv; - int64_t ratio; - struct st_sample *buf; + STSampleBuffer resample_buf; void *rate; size_t total_hw_samples_mixed; int active; @@ -126,10 +125,9 @@ struct SWVoiceIn { AudioState *s; int active; struct audio_pcm_info info; - int64_t ratio; void *rate; size_t total_hw_samples_acquired; - struct st_sample *buf; + STSampleBuffer resample_buf; f_sample *clip; HWVoiceIn *hw; char *name; @@ -151,8 +149,8 @@ struct audio_driver { int can_be_default; int max_voices_out; int max_voices_in; - int voice_size_out; - int voice_size_in; + size_t voice_size_out; + size_t voice_size_in; QLIST_ENTRY(audio_driver) next; }; @@ -251,7 +249,6 @@ void audio_pcm_init_info (struct audio_pcm_info *info, struct audsettings *as); void audio_pcm_info_clear_buf (struct audio_pcm_info *info, void *buf, int len); int audio_bug (const char *funcname, int cond); -void *audio_calloc (const char *funcname, int nmemb, size_t size); void audio_run(AudioState *s, const char *msg); @@ -294,9 +291,6 @@ static inline size_t 
audio_ring_posb(size_t pos, size_t dist, size_t len) #define ldebug(fmt, ...) (void)0 #endif -#define AUDIO_STRINGIFY_(n) #n -#define AUDIO_STRINGIFY(n) AUDIO_STRINGIFY_(n) - typedef struct AudiodevListEntry { Audiodev *dev; QSIMPLEQ_ENTRY(AudiodevListEntry) next; diff --git a/audio/audio_template.h b/audio/audio_template.h index 42b4712acb..e42326c20d 100644 --- a/audio/audio_template.h +++ b/audio/audio_template.h @@ -40,7 +40,7 @@ static void glue(audio_init_nb_voices_, TYPE)(AudioState *s, struct audio_driver *drv) { int max_voices = glue (drv->max_voices_, TYPE); - int voice_size = glue (drv->voice_size_, TYPE); + size_t voice_size = glue(drv->voice_size_, TYPE); if (glue (s->nb_hw_voices_, TYPE) > max_voices) { if (!max_voices) { @@ -63,16 +63,17 @@ static void glue(audio_init_nb_voices_, TYPE)(AudioState *s, } if (audio_bug(__func__, voice_size && !max_voices)) { - dolog ("drv=`%s' voice_size=%d max_voices=0\n", - drv->name, voice_size); + dolog("drv=`%s' voice_size=%zu max_voices=0\n", + drv->name, voice_size); } } static void glue (audio_pcm_hw_free_resources_, TYPE) (HW *hw) { g_free(hw->buf_emul); - g_free (HWBUF); - HWBUF = NULL; + g_free(HWBUF.buffer); + HWBUF.buffer = NULL; + HWBUF.size = 0; } static void glue(audio_pcm_hw_alloc_resources_, TYPE)(HW *hw) @@ -83,56 +84,67 @@ static void glue(audio_pcm_hw_alloc_resources_, TYPE)(HW *hw) dolog("Attempted to allocate empty buffer\n"); } - HWBUF = g_malloc0(sizeof(STSampleBuffer) + sizeof(st_sample) * samples); - HWBUF->size = samples; + HWBUF.buffer = g_new0(st_sample, samples); + HWBUF.size = samples; + HWBUF.pos = 0; } else { - HWBUF = NULL; + HWBUF.buffer = NULL; + HWBUF.size = 0; } } static void glue (audio_pcm_sw_free_resources_, TYPE) (SW *sw) { - g_free (sw->buf); + g_free(sw->resample_buf.buffer); + sw->resample_buf.buffer = NULL; + sw->resample_buf.size = 0; if (sw->rate) { st_rate_stop (sw->rate); } - - sw->buf = NULL; sw->rate = NULL; } static int glue (audio_pcm_sw_alloc_resources_, TYPE) (SW *sw) { - int samples; + HW *hw = sw->hw; + uint64_t samples; if (!glue(audio_get_pdo_, TYPE)(sw->s->dev)->mixing_engine) { return 0; } -#ifdef DAC - samples = ((int64_t) sw->HWBUF->size << 32) / sw->ratio; -#else - samples = (int64_t)sw->HWBUF->size * sw->ratio >> 32; -#endif + samples = muldiv64(HWBUF.size, sw->info.freq, hw->info.freq); + if (samples == 0) { + uint64_t f_fe_min; + uint64_t f_be = (uint32_t)hw->info.freq; - sw->buf = audio_calloc(__func__, samples, sizeof(struct st_sample)); - if (!sw->buf) { - dolog ("Could not allocate buffer for `%s' (%d samples)\n", - SW_NAME (sw), samples); + /* f_fe_min = ceil(1 [frames] * f_be [Hz] / size_be [frames]) */ + f_fe_min = (f_be + HWBUF.size - 1) / HWBUF.size; + qemu_log_mask(LOG_UNIMP, + AUDIO_CAP ": The guest selected a " NAME " sample rate" + " of %d Hz for %s. Only sample rates >= %" PRIu64 " Hz" + " are supported.\n", + sw->info.freq, sw->name, f_fe_min); return -1; } + /* + * Allocate one additional audio frame that is needed for upsampling + * if the resample buffer size is small. For large buffer sizes take + * care of overflows and truncation. + */ + samples = samples < SIZE_MAX ? 
samples + 1 : SIZE_MAX; + sw->resample_buf.buffer = g_new0(st_sample, samples); + sw->resample_buf.size = samples; + sw->resample_buf.pos = 0; + #ifdef DAC - sw->rate = st_rate_start (sw->info.freq, sw->hw->info.freq); + sw->rate = st_rate_start(sw->info.freq, hw->info.freq); #else - sw->rate = st_rate_start (sw->hw->info.freq, sw->info.freq); + sw->rate = st_rate_start(hw->info.freq, sw->info.freq); #endif - if (!sw->rate) { - g_free (sw->buf); - sw->buf = NULL; - return -1; - } + return 0; } @@ -149,11 +161,8 @@ static int glue (audio_pcm_sw_init_, TYPE) ( sw->hw = hw; sw->active = 0; #ifdef DAC - sw->ratio = ((int64_t) sw->hw->info.freq << 32) / sw->info.freq; sw->total_hw_samples_mixed = 0; sw->empty = 1; -#else - sw->ratio = ((int64_t) sw->info.freq << 32) / sw->hw->info.freq; #endif if (sw->info.is_float) { @@ -264,13 +273,11 @@ static HW *glue(audio_pcm_hw_add_new_, TYPE)(AudioState *s, return NULL; } - hw = audio_calloc(__func__, 1, glue(drv->voice_size_, TYPE)); - if (!hw) { - dolog ("Can not allocate voice `%s' size %d\n", - drv->name, glue (drv->voice_size_, TYPE)); - return NULL; - } - + /* + * Since glue(s->nb_hw_voices_, TYPE) is != 0, glue(drv->voice_size_, TYPE) + * is guaranteed to be != 0. See the audio_init_nb_voices_* functions. + */ + hw = g_malloc0(glue(drv->voice_size_, TYPE)); hw->s = s; hw->pcm_ops = drv->pcm_ops; @@ -418,33 +425,28 @@ static SW *glue(audio_pcm_create_voice_pair_, TYPE)( hw_as = *as; } - sw = audio_calloc(__func__, 1, sizeof(*sw)); - if (!sw) { - dolog ("Could not allocate soft voice `%s' (%zu bytes)\n", - sw_name ? sw_name : "unknown", sizeof (*sw)); - goto err1; - } + sw = g_new0(SW, 1); sw->s = s; hw = glue(audio_pcm_hw_add_, TYPE)(s, &hw_as); if (!hw) { - goto err2; + dolog("Could not create a backend for voice `%s'\n", sw_name); + goto err1; } glue (audio_pcm_hw_add_sw_, TYPE) (hw, sw); if (glue (audio_pcm_sw_init_, TYPE) (sw, hw, sw_name, as)) { - goto err3; + goto err2; } return sw; -err3: +err2: glue (audio_pcm_hw_del_sw_, TYPE) (sw); glue (audio_pcm_hw_gc_, TYPE) (&hw); -err2: - g_free (sw); err1: + g_free(sw); return NULL; } @@ -515,8 +517,8 @@ SW *glue (AUD_open_, TYPE) ( HW *hw = sw->hw; if (!hw) { - dolog ("Internal logic error voice `%s' has no hardware store\n", - SW_NAME (sw)); + dolog("Internal logic error: voice `%s' has no backend\n", + SW_NAME(sw)); goto fail; } @@ -527,7 +529,6 @@ SW *glue (AUD_open_, TYPE) ( } else { sw = glue(audio_pcm_create_voice_pair_, TYPE)(s, name, as); if (!sw) { - dolog ("Failed to create voice `%s'\n", name); return NULL; } } diff --git a/audio/mixeng.c b/audio/mixeng.c index 100a306d6f..69f6549224 100644 --- a/audio/mixeng.c +++ b/audio/mixeng.c @@ -414,12 +414,7 @@ struct rate { */ void *st_rate_start (int inrate, int outrate) { - struct rate *rate = audio_calloc(__func__, 1, sizeof(*rate)); - - if (!rate) { - dolog ("Could not allocate resampler (%zu bytes)\n", sizeof (*rate)); - return NULL; - } + struct rate *rate = g_new0(struct rate, 1); rate->opos = 0; @@ -445,6 +440,86 @@ void st_rate_stop (void *opaque) g_free (opaque); } +/** + * st_rate_frames_out() - returns the number of frames the resampling code + * generates from frames_in frames + * + * @opaque: pointer to struct rate + * @frames_in: number of frames + * + * When upsampling, there may be more than one correct result. In this case, + * the function returns the maximum number of output frames the resampling + * code can generate. 
+ */ +uint32_t st_rate_frames_out(void *opaque, uint32_t frames_in) +{ + struct rate *rate = opaque; + uint64_t opos_end, opos_delta; + uint32_t ipos_end; + uint32_t frames_out; + + if (rate->opos_inc == 1ULL << 32) { + return frames_in; + } + + /* no output frame without at least one input frame */ + if (!frames_in) { + return 0; + } + + /* last frame read was at rate->ipos - 1 */ + ipos_end = rate->ipos - 1 + frames_in; + opos_end = (uint64_t)ipos_end << 32; + + /* last frame written was at rate->opos - rate->opos_inc */ + if (opos_end + rate->opos_inc <= rate->opos) { + return 0; + } + opos_delta = opos_end - rate->opos + rate->opos_inc; + frames_out = opos_delta / rate->opos_inc; + + return opos_delta % rate->opos_inc ? frames_out : frames_out - 1; +} + +/** + * st_rate_frames_in() - returns the number of frames needed to + * get frames_out frames after resampling + * + * @opaque: pointer to struct rate + * @frames_out: number of frames + * + * When downsampling, there may be more than one correct result. In this + * case, the function returns the maximum number of input frames needed. + */ +uint32_t st_rate_frames_in(void *opaque, uint32_t frames_out) +{ + struct rate *rate = opaque; + uint64_t opos_start, opos_end; + uint32_t ipos_start, ipos_end; + + if (rate->opos_inc == 1ULL << 32) { + return frames_out; + } + + if (frames_out) { + opos_start = rate->opos; + ipos_start = rate->ipos; + } else { + uint64_t offset; + + /* add offset = ceil(opos_inc) to opos and ipos to avoid an underflow */ + offset = (rate->opos_inc + (1ULL << 32) - 1) & ~((1ULL << 32) - 1); + opos_start = rate->opos + offset; + ipos_start = rate->ipos + (offset >> 32); + } + /* last frame written was at opos_start - rate->opos_inc */ + opos_end = opos_start - rate->opos_inc + rate->opos_inc * frames_out; + ipos_end = (opos_end >> 32) + 1; + + /* last frame read was at ipos_start - 1 */ + return ipos_end + 1 > ipos_start ? ipos_end + 1 - ipos_start : 0; +} + void mixeng_clear (struct st_sample *buf, int len) { memset (buf, 0, len * sizeof (struct st_sample)); diff --git a/audio/mixeng.h b/audio/mixeng.h index 2dcd6df245..f9de7cffeb 100644 --- a/audio/mixeng.h +++ b/audio/mixeng.h @@ -52,6 +52,8 @@ void st_rate_flow(void *opaque, st_sample *ibuf, st_sample *obuf, void st_rate_flow_mix(void *opaque, st_sample *ibuf, st_sample *obuf, size_t *isamp, size_t *osamp); void st_rate_stop (void *opaque); +uint32_t st_rate_frames_out(void *opaque, uint32_t frames_in); +uint32_t st_rate_frames_in(void *opaque, uint32_t frames_out); void mixeng_clear (struct st_sample *buf, int len); void mixeng_volume (struct st_sample *buf, int len, struct mixeng_volume *vol); diff --git a/audio/rate_template.h b/audio/rate_template.h index b432719ebb..6648f0d2e5 100644 --- a/audio/rate_template.h +++ b/audio/rate_template.h @@ -40,8 +40,6 @@ void NAME (void *opaque, struct st_sample *ibuf, struct st_sample *obuf, int64_t t; #endif - ilast = rate->ilast; - istart = ibuf; iend = ibuf + *isamp; @@ -59,15 +57,17 @@ void NAME (void *opaque, struct st_sample *ibuf, struct st_sample *obuf, return; } - while (obuf < oend) { + /* without input samples, there's nothing to do */ + if (ibuf >= iend) { + *osamp = 0; + return; + } - /* Safety catch to make sure we have input samples. 
*/ - if (ibuf >= iend) { - break; - } + ilast = rate->ilast; - /* read as many input samples so that ipos > opos */ + while (true) { + /* read as many input samples so that ipos > opos */ while (rate->ipos <= (rate->opos >> 32)) { ilast = *ibuf++; rate->ipos++; @@ -78,6 +78,11 @@ void NAME (void *opaque, struct st_sample *ibuf, struct st_sample *obuf, } } + /* make sure that the next output sample can be written */ + if (obuf >= oend) { + break; + } + icur = *ibuf; /* wrap ipos and opos around long before they overflow */ diff --git a/backends/vhost-user.c b/backends/vhost-user.c index 0596223ac4..94c6a82d52 100644 --- a/backends/vhost-user.c +++ b/backends/vhost-user.c @@ -20,12 +20,6 @@ #include "io/channel-command.h" #include "hw/virtio/virtio-bus.h" -static bool -ioeventfd_enabled(void) -{ - return kvm_enabled() && kvm_eventfds_enabled(); -} - int vhost_user_backend_dev_init(VhostUserBackend *b, VirtIODevice *vdev, unsigned nvqs, Error **errp) @@ -34,11 +28,6 @@ vhost_user_backend_dev_init(VhostUserBackend *b, VirtIODevice *vdev, assert(!b->vdev && vdev); - if (!ioeventfd_enabled()) { - error_setg(errp, "vhost initialization failed: requires kvm"); - return -1; - } - if (!vhost_user_init(&b->vhost_user, &b->chr, errp)) { return -1; } diff --git a/chardev/char-socket.c b/chardev/char-socket.c index c2265436ac..8c58532171 100644 --- a/chardev/char-socket.c +++ b/chardev/char-socket.c @@ -1065,6 +1065,7 @@ static void char_socket_finalize(Object *obj) qio_net_listener_set_client_func_full(s->listener, NULL, NULL, NULL, chr->gcontext); object_unref(OBJECT(s->listener)); + s->listener = NULL; } if (s->tls_creds) { object_unref(OBJECT(s->tls_creds)); diff --git a/configs/targets/aarch64-linux-user.mak b/configs/targets/aarch64-linux-user.mak index db552f1839..ba8bc5fe3f 100644 --- a/configs/targets/aarch64-linux-user.mak +++ b/configs/targets/aarch64-linux-user.mak @@ -1,6 +1,6 @@ TARGET_ARCH=aarch64 TARGET_BASE_ARCH=arm -TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml +TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/aarch64-pauth.xml TARGET_HAS_BFLT=y CONFIG_SEMIHOSTING=y CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y diff --git a/configs/targets/aarch64-softmmu.mak b/configs/targets/aarch64-softmmu.mak index d489e6da83..b4338e9568 100644 --- a/configs/targets/aarch64-softmmu.mak +++ b/configs/targets/aarch64-softmmu.mak @@ -1,5 +1,5 @@ TARGET_ARCH=aarch64 TARGET_BASE_ARCH=arm TARGET_SUPPORTS_MTTCG=y -TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-vfp-sysregs.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml gdb-xml/arm-m-profile-mve.xml +TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-vfp-sysregs.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml gdb-xml/arm-m-profile-mve.xml gdb-xml/aarch64-pauth.xml TARGET_NEED_FDT=y diff --git a/configs/targets/aarch64_be-linux-user.mak b/configs/targets/aarch64_be-linux-user.mak index dc78044fb1..acb5620cdb 100644 --- a/configs/targets/aarch64_be-linux-user.mak +++ b/configs/targets/aarch64_be-linux-user.mak @@ -1,7 +1,7 @@ TARGET_ARCH=aarch64 TARGET_BASE_ARCH=arm TARGET_BIG_ENDIAN=y -TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml +TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/aarch64-pauth.xml TARGET_HAS_BFLT=y CONFIG_SEMIHOSTING=y CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y diff --git 
a/configure b/configure index 50a0b80b27..219ff13748 100755 --- a/configure +++ b/configure @@ -1230,7 +1230,10 @@ add_to warn_flags -Wendif-labels add_to warn_flags -Wexpansion-to-defined add_to warn_flags -Wimplicit-fallthrough=2 add_to warn_flags -Wmissing-format-attribute -add_to warn_flags -Wthread-safety + +if test "$targetos" != "darwin"; then + add_to warn_flags -Wthread-safety +fi nowarn_flags= add_to nowarn_flags -Wno-initializer-overrides diff --git a/disas/riscv.c b/disas/riscv.c index ddda687c13..54455aaaa8 100644 --- a/disas/riscv.c +++ b/disas/riscv.c @@ -1645,7 +1645,7 @@ const rv_opcode_data opcode_data[] = { { "max", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, { "maxu", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, { "clzw", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, - { "clzw", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "ctzw", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, { "cpopw", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, { "slli.uw", rv_codec_i_sh5, rv_fmt_rd_rs1_imm, NULL, 0, 0, 0 }, { "add.uw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst index 561c416574..f3f451b77f 100644 --- a/docs/devel/tcg-ops.rst +++ b/docs/devel/tcg-ops.rst @@ -951,10 +951,6 @@ Recommended coding rules for best performance often modified, e.g. the integer registers and the condition codes. TCG will be able to use host registers to store them. -- Free temporaries when they are no longer used (``tcg_temp_free``). - Since ``tcg_const_x`` also creates a temporary, you should free it - after it is used. - - Don't hesitate to use helpers for complicated or seldom used guest instructions. There is little performance advantage in using TCG to implement guest instructions taking more than about twenty TCG diff --git a/docs/interop/vhost-user.rst b/docs/interop/vhost-user.rst index 3f18ab424e..8a5924ea75 100644 --- a/docs/interop/vhost-user.rst +++ b/docs/interop/vhost-user.rst @@ -315,7 +315,7 @@ in the ancillary data: * ``VHOST_USER_SET_VRING_KICK`` * ``VHOST_USER_SET_VRING_CALL`` * ``VHOST_USER_SET_VRING_ERR`` -* ``VHOST_USER_SET_SLAVE_REQ_FD`` +* ``VHOST_USER_SET_BACKEND_REQ_FD`` (previous name ``VHOST_USER_SET_SLAVE_REQ_FD``) * ``VHOST_USER_SET_INFLIGHT_FD`` (if ``VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD``) If *front-end* is unable to send the full message or receives a wrong @@ -516,7 +516,7 @@ expected to reply with a zero payload, non-zero otherwise. The back-end relies on the back-end communication channel (see :ref:`Back-end communication <backend_communication>` section below) to send IOTLB miss -and access failure events, by sending ``VHOST_USER_SLAVE_IOTLB_MSG`` +and access failure events, by sending ``VHOST_USER_BACKEND_IOTLB_MSG`` requests to the front-end with a ``struct vhost_iotlb_msg`` as payload. For miss events, the iotlb payload has to be filled with the miss message type (1), the I/O virtual address and the permissions @@ -540,15 +540,15 @@ Back-end communication ---------------------- An optional communication channel is provided if the back-end declares -``VHOST_USER_PROTOCOL_F_SLAVE_REQ`` protocol feature, to allow the +``VHOST_USER_PROTOCOL_F_BACKEND_REQ`` protocol feature, to allow the back-end to make requests to the front-end. -The fd is provided via ``VHOST_USER_SET_SLAVE_REQ_FD`` ancillary data. +The fd is provided via ``VHOST_USER_SET_BACKEND_REQ_FD`` ancillary data. 
-A back-end may then send ``VHOST_USER_SLAVE_*`` messages to the front-end +A back-end may then send ``VHOST_USER_BACKEND_*`` messages to the front-end using this fd communication channel. -If ``VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD`` protocol feature is +If ``VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD`` protocol feature is negotiated, back-end can send file descriptors (at most 8 descriptors in each message) to front-end via ancillary data using this fd communication channel. @@ -835,7 +835,7 @@ Note that due to the fact that too many messages on the sockets can cause the sending application(s) to block, it is not advised to use this feature unless absolutely necessary. It is also considered an error to negotiate this feature without also negotiating -``VHOST_USER_PROTOCOL_F_SLAVE_REQ`` and ``VHOST_USER_PROTOCOL_F_REPLY_ACK``, +``VHOST_USER_PROTOCOL_F_BACKEND_REQ`` and ``VHOST_USER_PROTOCOL_F_REPLY_ACK``, the former is necessary for getting a message channel from the back-end to the front-end, while the latter needs to be used with the in-band notification messages to block until they are processed, both to avoid @@ -855,12 +855,12 @@ Protocol features #define VHOST_USER_PROTOCOL_F_RARP 2 #define VHOST_USER_PROTOCOL_F_REPLY_ACK 3 #define VHOST_USER_PROTOCOL_F_MTU 4 - #define VHOST_USER_PROTOCOL_F_SLAVE_REQ 5 + #define VHOST_USER_PROTOCOL_F_BACKEND_REQ 5 #define VHOST_USER_PROTOCOL_F_CROSS_ENDIAN 6 #define VHOST_USER_PROTOCOL_F_CRYPTO_SESSION 7 #define VHOST_USER_PROTOCOL_F_PAGEFAULT 8 #define VHOST_USER_PROTOCOL_F_CONFIG 9 - #define VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD 10 + #define VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD 10 #define VHOST_USER_PROTOCOL_F_HOST_NOTIFIER 11 #define VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD 12 #define VHOST_USER_PROTOCOL_F_RESET_DEVICE 13 @@ -1059,8 +1059,8 @@ Front-end message types in the ancillary data. This signals that polling will be used instead of waiting for the call. Note that if the protocol features ``VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS`` and - ``VHOST_USER_PROTOCOL_F_SLAVE_REQ`` have been negotiated this message - isn't necessary as the ``VHOST_USER_SLAVE_VRING_CALL`` message can be + ``VHOST_USER_PROTOCOL_F_BACKEND_REQ`` have been negotiated this message + isn't necessary as the ``VHOST_USER_BACKEND_VRING_CALL`` message can be used, it may however still be used to set an event file descriptor or to enable polling. @@ -1077,8 +1077,8 @@ Front-end message types invalid FD flag. This flag is set when there is no file descriptor in the ancillary data. Note that if the protocol features ``VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS`` and - ``VHOST_USER_PROTOCOL_F_SLAVE_REQ`` have been negotiated this message - isn't necessary as the ``VHOST_USER_SLAVE_VRING_ERR`` message can be + ``VHOST_USER_PROTOCOL_F_BACKEND_REQ`` have been negotiated this message + isn't necessary as the ``VHOST_USER_BACKEND_VRING_ERR`` message can be used, it may however still be used to set an event file descriptor (which will be preferred over the message). @@ -1139,7 +1139,7 @@ Front-end message types respond with zero in case the specified MTU is valid, or non-zero otherwise. 
-``VHOST_USER_SET_SLAVE_REQ_FD`` +``VHOST_USER_SET_BACKEND_REQ_FD`` (previous name ``VHOST_USER_SET_SLAVE_REQ_FD``) :id: 21 :equivalent ioctl: N/A :request payload: N/A @@ -1150,7 +1150,7 @@ Front-end message types This request should be sent only when ``VHOST_USER_F_PROTOCOL_FEATURES`` has been negotiated, and protocol - feature bit ``VHOST_USER_PROTOCOL_F_SLAVE_REQ`` bit is present in + feature bit ``VHOST_USER_PROTOCOL_F_BACKEND_REQ`` bit is present in ``VHOST_USER_GET_PROTOCOL_FEATURES``. If ``VHOST_USER_PROTOCOL_F_REPLY_ACK`` is negotiated, the back-end must respond with zero for success, non-zero otherwise. @@ -1429,7 +1429,7 @@ Back-end message types For this type of message, the request is sent by the back-end and the reply is sent by the front-end. -``VHOST_USER_SLAVE_IOTLB_MSG`` +``VHOST_USER_BACKEND_IOTLB_MSG`` (previous name ``VHOST_USER_SLAVE_IOTLB_MSG``) :id: 1 :equivalent ioctl: N/A (equivalent to ``VHOST_IOTLB_MSG`` message type) :request payload: ``struct vhost_iotlb_msg`` @@ -1444,7 +1444,7 @@ is sent by the front-end. ``VIRTIO_F_IOMMU_PLATFORM`` feature has been successfully negotiated. -``VHOST_USER_SLAVE_CONFIG_CHANGE_MSG`` +``VHOST_USER_BACKEND_CONFIG_CHANGE_MSG`` (previous name ``VHOST_USER_SLAVE_CONFIG_CHANGE_MSG``) :id: 2 :equivalent ioctl: N/A :request payload: N/A @@ -1459,7 +1459,7 @@ is sent by the front-end. ``VHOST_USER_NEED_REPLY`` flag, the front-end must respond with zero when operation is successfully completed, or non-zero otherwise. -``VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG`` +``VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG`` (previous name ``VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG``) :id: 3 :equivalent ioctl: N/A :request payload: vring area description @@ -1482,7 +1482,7 @@ is sent by the front-end. ``VHOST_USER_PROTOCOL_F_HOST_NOTIFIER`` protocol feature has been successfully negotiated. -``VHOST_USER_SLAVE_VRING_CALL`` +``VHOST_USER_BACKEND_VRING_CALL`` (previous name ``VHOST_USER_SLAVE_VRING_CALL``) :id: 4 :equivalent ioctl: N/A :request payload: vring state description @@ -1496,7 +1496,7 @@ is sent by the front-end. The state.num field is currently reserved and must be set to 0. -``VHOST_USER_SLAVE_VRING_ERR`` +``VHOST_USER_BACKEND_VRING_ERR`` (previous name ``VHOST_USER_SLAVE_VRING_ERR``) :id: 5 :equivalent ioctl: N/A :request payload: vring state description diff --git a/docs/system/arm/aspeed.rst b/docs/system/arm/aspeed.rst index 6c5b05128e..d4e293e7f9 100644 --- a/docs/system/arm/aspeed.rst +++ b/docs/system/arm/aspeed.rst @@ -24,6 +24,8 @@ AST2500 SoC based machines : - ``sonorapass-bmc`` OCP SonoraPass BMC - ``fp5280g2-bmc`` Inspur FP5280G2 BMC - ``g220a-bmc`` Bytedance G220A BMC +- ``yosemitev2-bmc`` Facebook YosemiteV2 BMC +- ``tiogapass-bmc`` Facebook Tiogapass BMC AST2600 SoC based machines : diff --git a/docs/system/loongarch/loongson3.rst b/docs/system/loongarch/virt.rst index 489ea20f8f..c37268b404 100644 --- a/docs/system/loongarch/loongson3.rst +++ b/docs/system/loongarch/virt.rst @@ -19,14 +19,14 @@ The ``virt`` machine supports: - Fw_cfg device - PCI/PCIe devices - Memory device -- CPU device. Type: la464-loongarch-cpu. +- CPU device. Type: la464. CPU and machine Type -------------------- The ``qemu-system-loongarch64`` provides emulation for virt machine. You can specify the machine type ``virt`` and -cpu type ``la464-loongarch-cpu``. +cpu type ``la464``. Boot options ------------ @@ -35,95 +35,74 @@ We can boot the LoongArch virt machine by specifying the uefi bios, initrd, and linux kernel. 
And those source codes and binary files can be accessed by following steps. -(1) booting command: +(1) Build qemu-system-loongarch64: .. code-block:: bash - $ qemu-system-loongarch64 -machine virt -m 4G -cpu la464-loongarch-cpu \ - -smp 1 -bios QEMU_EFI.fd -kernel vmlinuz.efi -initrd initrd.img \ - -append "root=/dev/ram rdinit=/sbin/init console=ttyS0,115200" \ - --nographic - -Note: The running speed may be a little slow, as the performance of our -qemu and uefi bios is not perfect, and it is being fixed. - -(2) cross compiler tools: - -.. code-block:: bash - - wget https://github.com/loongson/build-tools/releases/download/ \ - 2022.05.29/loongarch64-clfs-5.0-cross-tools-gcc-full.tar.xz - - tar -vxf loongarch64-clfs-5.0-cross-tools-gcc-full.tar.xz - -(3) qemu compile configure option: - -.. code-block:: bash - - ./configure --disable-rdma --disable-pvrdma --prefix=usr \ + ./configure --disable-rdma --disable-pvrdma --prefix=/usr \ --target-list="loongarch64-softmmu" \ --disable-libiscsi --disable-libnfs --disable-libpmem \ --disable-glusterfs --enable-libusb --enable-usb-redir \ --disable-opengl --disable-xen --enable-spice \ --enable-debug --disable-capstone --disable-kvm \ --enable-profiler - make + make -j8 -(4) uefi bios source code and compile method: +(2) Set cross tools: .. code-block:: bash - git clone https://github.com/loongson/edk2-LoongarchVirt.git - - cd edk2-LoongarchVirt - - git submodule update --init - - export PATH=$YOUR_COMPILER_PATH/bin:$PATH - - export WORKSPACE=`pwd` + wget https://github.com/loongson/build-tools/releases/download/2022.09.06/loongarch64-clfs-6.3-cross-tools-gcc-glibc.tar.xz - export PACKAGES_PATH=$WORKSPACE/edk2-LoongarchVirt + tar -vxf loongarch64-clfs-6.3-cross-tools-gcc-glibc.tar.xz -C /opt - export GCC5_LOONGARCH64_PREFIX=loongarch64-unknown-linux-gnu- + export PATH=/opt/cross-tools/bin:$PATH + export LD_LIBRARY_PATH=/opt/cross-tools/lib:$LD_LIBRARY_PATH + export LD_LIBRARY_PATH=/opt/cross-tools/loongarch64-unknown-linux-gnu/lib/:$LD_LIBRARY_PATH - edk2-LoongarchVirt/edksetup.sh +Note: You need get the latest cross-tools at https://github.com/loongson/build-tools - make -C edk2-LoongarchVirt/BaseTools +(3) Build BIOS: - build --buildtarget=DEBUG --tagname=GCC5 --arch=LOONGARCH64 --platform=OvmfPkg/LoongArchQemu/Loongson.dsc + See: https://github.com/tianocore/edk2-platforms/tree/master/Platform/Loongson/LoongArchQemuPkg#readme - build --buildtarget=RELEASE --tagname=GCC5 --arch=LOONGARCH64 --platform=OvmfPkg/LoongArchQemu/Loongson.dsc +Note: To build the release version of the bios, set --buildtarget=RELEASE, + the bios file path: Build/LoongArchQemu/RELEASE_GCC5/FV/QEMU_EFI.fd -The efi binary file path: - - Build/LoongArchQemu/DEBUG_GCC5/FV/QEMU_EFI.fd - - Build/LoongArchQemu/RELEASE_GCC5/FV/QEMU_EFI.fd - -(5) linux kernel source code and compile method: +(4) Build kernel: .. 
code-block:: bash git clone https://github.com/loongson/linux.git - export PATH=$YOUR_COMPILER_PATH/bin:$PATH - - export LD_LIBRARY_PATH=$YOUR_COMPILER_PATH/lib:$LD_LIBRARY_PATH + cd linux - export LD_LIBRARY_PATH=$YOUR_COMPILER_PATH/loongarch64-unknown-linux-gnu/lib/:$LD_LIBRARY_PATH + git checkout loongarch-next make ARCH=loongarch CROSS_COMPILE=loongarch64-unknown-linux-gnu- loongson3_defconfig - make ARCH=loongarch CROSS_COMPILE=loongarch64-unknown-linux-gnu- - - make ARCH=loongarch CROSS_COMPILE=loongarch64-unknown-linux-gnu- install - - make ARCH=loongarch CROSS_COMPILE=loongarch64-unknown-linux-gnu- modules_install + make ARCH=loongarch CROSS_COMPILE=loongarch64-unknown-linux-gnu- -j32 Note: The branch of linux source code is loongarch-next. + the kernel file: arch/loongarch/boot/vmlinuz.efi -(6) initrd file: +(5) Get initrd: You can use busybox tool and the linux modules to make a initrd file. Or you can access the binary files: https://github.com/yangxiaojuan-loongson/qemu-binary + +.. code-block:: bash + + git clone https://github.com/yangxiaojuan-loongson/qemu-binary + +Note: the initrd file is ramdisk + +(6) Booting LoongArch: + +.. code-block:: bash + + $ ./build/qemu-system-loongarch64 -machine virt -m 4G -cpu la464 \ + -smp 1 -bios QEMU_EFI.fd -kernel vmlinuz.efi -initrd ramdisk \ + -serial stdio -monitor telnet:localhost:4495,server,nowait \ + -append "root=/dev/ram rdinit=/sbin/init console=ttyS0,115200" \ + --nographic diff --git a/gdb-xml/aarch64-pauth.xml b/gdb-xml/aarch64-pauth.xml new file mode 100644 index 0000000000..24af5f903c --- /dev/null +++ b/gdb-xml/aarch64-pauth.xml @@ -0,0 +1,15 @@ +<?xml version="1.0"?> +<!-- Copyright (C) 2018-2022 Free Software Foundation, Inc. + + Copying and distribution of this file, with or without modification, + are permitted in any medium without royalty provided the copyright + notice and this notice are preserved. 
--> + +<!DOCTYPE feature SYSTEM "gdb-target.dtd"> +<feature name="org.gnu.gdb.aarch64.pauth"> + <reg name="pauth_dmask" bitsize="64"/> + <reg name="pauth_cmask" bitsize="64"/> + <reg name="pauth_dmask_high" bitsize="64"/> + <reg name="pauth_cmask_high" bitsize="64"/> +</feature> + diff --git a/hw/arm/allwinner-h3.c b/hw/arm/allwinner-h3.c index bfce3c8d92..69d0ad6f50 100644 --- a/hw/arm/allwinner-h3.c +++ b/hw/arm/allwinner-h3.c @@ -54,6 +54,8 @@ const hwaddr allwinner_h3_memmap[] = { [AW_H3_DEV_UART2] = 0x01c28800, [AW_H3_DEV_UART3] = 0x01c28c00, [AW_H3_DEV_TWI0] = 0x01c2ac00, + [AW_H3_DEV_TWI1] = 0x01c2b000, + [AW_H3_DEV_TWI2] = 0x01c2b400, [AW_H3_DEV_EMAC] = 0x01c30000, [AW_H3_DEV_DRAMCOM] = 0x01c62000, [AW_H3_DEV_DRAMCTL] = 0x01c63000, @@ -64,6 +66,7 @@ const hwaddr allwinner_h3_memmap[] = { [AW_H3_DEV_GIC_VCPU] = 0x01c86000, [AW_H3_DEV_RTC] = 0x01f00000, [AW_H3_DEV_CPUCFG] = 0x01f01c00, + [AW_H3_DEV_R_TWI] = 0x01f02400, [AW_H3_DEV_SDRAM] = 0x40000000 }; @@ -107,8 +110,6 @@ struct AwH3Unimplemented { { "uart1", 0x01c28400, 1 * KiB }, { "uart2", 0x01c28800, 1 * KiB }, { "uart3", 0x01c28c00, 1 * KiB }, - { "twi1", 0x01c2b000, 1 * KiB }, - { "twi2", 0x01c2b400, 1 * KiB }, { "scr", 0x01c2c400, 1 * KiB }, { "gpu", 0x01c40000, 64 * KiB }, { "hstmr", 0x01c60000, 4 * KiB }, @@ -123,7 +124,6 @@ struct AwH3Unimplemented { { "r_prcm", 0x01f01400, 1 * KiB }, { "r_twd", 0x01f01800, 1 * KiB }, { "r_cir-rx", 0x01f02000, 1 * KiB }, - { "r_twi", 0x01f02400, 1 * KiB }, { "r_uart", 0x01f02800, 1 * KiB }, { "r_pio", 0x01f02c00, 1 * KiB }, { "r_pwm", 0x01f03800, 1 * KiB }, @@ -151,8 +151,11 @@ enum { AW_H3_GIC_SPI_UART2 = 2, AW_H3_GIC_SPI_UART3 = 3, AW_H3_GIC_SPI_TWI0 = 6, + AW_H3_GIC_SPI_TWI1 = 7, + AW_H3_GIC_SPI_TWI2 = 8, AW_H3_GIC_SPI_TIMER0 = 18, AW_H3_GIC_SPI_TIMER1 = 19, + AW_H3_GIC_SPI_R_TWI = 44, AW_H3_GIC_SPI_MMC0 = 60, AW_H3_GIC_SPI_EHCI0 = 72, AW_H3_GIC_SPI_OHCI0 = 73, @@ -227,7 +230,10 @@ static void allwinner_h3_init(Object *obj) object_initialize_child(obj, "rtc", &s->rtc, TYPE_AW_RTC_SUN6I); - object_initialize_child(obj, "twi0", &s->i2c0, TYPE_AW_I2C); + object_initialize_child(obj, "twi0", &s->i2c0, TYPE_AW_I2C_SUN6I); + object_initialize_child(obj, "twi1", &s->i2c1, TYPE_AW_I2C_SUN6I); + object_initialize_child(obj, "twi2", &s->i2c2, TYPE_AW_I2C_SUN6I); + object_initialize_child(obj, "r_twi", &s->r_twi, TYPE_AW_I2C_SUN6I); } static void allwinner_h3_realize(DeviceState *dev, Error **errp) @@ -432,6 +438,21 @@ static void allwinner_h3_realize(DeviceState *dev, Error **errp) sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c0), 0, qdev_get_gpio_in(DEVICE(&s->gic), AW_H3_GIC_SPI_TWI0)); + sysbus_realize(SYS_BUS_DEVICE(&s->i2c1), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c1), 0, s->memmap[AW_H3_DEV_TWI1]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c1), 0, + qdev_get_gpio_in(DEVICE(&s->gic), AW_H3_GIC_SPI_TWI1)); + + sysbus_realize(SYS_BUS_DEVICE(&s->i2c2), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c2), 0, s->memmap[AW_H3_DEV_TWI2]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c2), 0, + qdev_get_gpio_in(DEVICE(&s->gic), AW_H3_GIC_SPI_TWI2)); + + sysbus_realize(SYS_BUS_DEVICE(&s->r_twi), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->r_twi), 0, s->memmap[AW_H3_DEV_R_TWI]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->r_twi), 0, + qdev_get_gpio_in(DEVICE(&s->gic), AW_H3_GIC_SPI_R_TWI)); + /* Unimplemented devices */ for (i = 0; i < ARRAY_SIZE(unimplemented); i++) { create_unimplemented_device(unimplemented[i].device_name, diff --git a/hw/arm/aspeed.c b/hw/arm/aspeed.c index 
27dda58338..86601cb1a5 100644 --- a/hw/arm/aspeed.c +++ b/hw/arm/aspeed.c @@ -241,12 +241,9 @@ static void aspeed_reset_secondary(ARMCPU *cpu, cpu_set_pc(cs, info->smp_loader_start); } -#define FIRMWARE_ADDR 0x0 - -static void write_boot_rom(DriveInfo *dinfo, hwaddr addr, size_t rom_size, +static void write_boot_rom(BlockBackend *blk, hwaddr addr, size_t rom_size, Error **errp) { - BlockBackend *blk = blk_by_legacy_dinfo(dinfo); g_autofree void *storage = NULL; int64_t size; @@ -272,6 +269,22 @@ static void write_boot_rom(DriveInfo *dinfo, hwaddr addr, size_t rom_size, rom_add_blob_fixed("aspeed.boot_rom", storage, rom_size, addr); } +/* + * Create a ROM and copy the flash contents at the expected address + * (0x0). Boots faster than execute-in-place. + */ +static void aspeed_install_boot_rom(AspeedSoCState *soc, BlockBackend *blk, + uint64_t rom_size) +{ + MemoryRegion *boot_rom = g_new(MemoryRegion, 1); + + memory_region_init_rom(boot_rom, NULL, "aspeed.boot_rom", rom_size, + &error_abort); + memory_region_add_subregion_overlap(&soc->spi_boot_container, 0, + boot_rom, 1); + write_boot_rom(blk, ASPEED_SOC_SPI_BOOT_ADDR, rom_size, &error_abort); +} + void aspeed_board_init_flashes(AspeedSMCState *s, const char *flashtype, unsigned int count, int unit0) { @@ -293,7 +306,7 @@ void aspeed_board_init_flashes(AspeedSMCState *s, const char *flashtype, qdev_realize_and_unref(dev, BUS(s->spi), &error_fatal); cs_line = qdev_get_gpio_in_named(dev, SSI_GPIO_CS, 0); - sysbus_connect_irq(SYS_BUS_DEVICE(s), i + 1, cs_line); + qdev_connect_gpio_out_named(DEVICE(s), "cs", i, cs_line); } } @@ -332,7 +345,6 @@ static void aspeed_machine_init(MachineState *machine) AspeedMachineState *bmc = ASPEED_MACHINE(machine); AspeedMachineClass *amc = ASPEED_MACHINE_GET_CLASS(machine); AspeedSoCClass *sc; - DriveInfo *drive0 = drive_get(IF_MTD, 0, 0); int i; NICInfo *nd = &nd_table[0]; @@ -382,32 +394,6 @@ static void aspeed_machine_init(MachineState *machine) bmc->spi_model ? bmc->spi_model : amc->spi_model, 1, amc->num_cs); - /* Install first FMC flash content as a boot rom. */ - if (drive0) { - AspeedSMCFlash *fl = &bmc->soc.fmc.flashes[0]; - MemoryRegion *boot_rom = g_new(MemoryRegion, 1); - uint64_t size = memory_region_size(&fl->mmio); - - /* - * create a ROM region using the default mapping window size of - * the flash module. The window size is 64MB for the AST2400 - * SoC and 128MB for the AST2500 SoC, which is twice as big as - * needed by the flash modules of the Aspeed machines. 
- */ - if (ASPEED_MACHINE(machine)->mmio_exec) { - memory_region_init_alias(boot_rom, NULL, "aspeed.boot_rom", - &fl->mmio, 0, size); - memory_region_add_subregion(get_system_memory(), FIRMWARE_ADDR, - boot_rom); - } else { - memory_region_init_rom(boot_rom, NULL, "aspeed.boot_rom", - size, &error_abort); - memory_region_add_subregion(get_system_memory(), FIRMWARE_ADDR, - boot_rom); - write_boot_rom(drive0, FIRMWARE_ADDR, size, &error_abort); - } - } - if (machine->kernel_filename && sc->num_cpus > 1) { /* With no u-boot we must set up a boot stub for the secondary CPU */ MemoryRegion *smpboot = g_new(MemoryRegion, 1); @@ -438,6 +424,16 @@ static void aspeed_machine_init(MachineState *machine) drive_get(IF_SD, 0, bmc->soc.sdhci.num_slots)); } + if (!bmc->mmio_exec) { + DriveInfo *mtd0 = drive_get(IF_MTD, 0, 0); + + if (mtd0) { + uint64_t rom_size = memory_region_size(&bmc->soc.spi_boot); + aspeed_install_boot_rom(&bmc->soc, blk_by_legacy_dinfo(mtd0), + rom_size); + } + } + arm_load_kernel(ARM_CPU(first_cpu), machine, &aspeed_board_binfo); } @@ -521,6 +517,15 @@ static void ast2600_evb_i2c_init(AspeedMachineState *bmc) TYPE_TMP105, 0x4d); } +static void yosemitev2_bmc_i2c_init(AspeedMachineState *bmc) +{ + AspeedSoCState *soc = &bmc->soc; + + at24c_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 4), 0x51, 128 * KiB); + at24c_eeprom_init_rom(aspeed_i2c_get_bus(&soc->i2c, 8), 0x51, 128 * KiB, + yosemitev2_bmc_fruid, yosemitev2_bmc_fruid_len); +} + static void romulus_bmc_i2c_init(AspeedMachineState *bmc) { AspeedSoCState *soc = &bmc->soc; @@ -530,6 +535,15 @@ static void romulus_bmc_i2c_init(AspeedMachineState *bmc) i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 11), "ds1338", 0x32); } +static void tiogapass_bmc_i2c_init(AspeedMachineState *bmc) +{ + AspeedSoCState *soc = &bmc->soc; + + at24c_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 4), 0x54, 128 * KiB); + at24c_eeprom_init_rom(aspeed_i2c_get_bus(&soc->i2c, 6), 0x54, 128 * KiB, + tiogapass_bmc_fruid, tiogapass_bmc_fruid_len); +} + static void create_pca9552(AspeedSoCState *soc, int bus_id, int addr) { i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, bus_id), @@ -840,42 +854,46 @@ static void fuji_bmc_i2c_init(AspeedMachineState *bmc) i2c_slave_create_simple(i2c[17], TYPE_LM75, 0x4c); i2c_slave_create_simple(i2c[17], TYPE_LM75, 0x4d); - at24c_eeprom_init(i2c[19], 0x52, 64 * KiB); - at24c_eeprom_init(i2c[20], 0x50, 2 * KiB); - at24c_eeprom_init(i2c[22], 0x52, 2 * KiB); + /* + * EEPROM 24c64 size is 64Kbits or 8 Kbytes + * 24c02 size is 2Kbits or 256 bytes + */ + at24c_eeprom_init(i2c[19], 0x52, 8 * KiB); + at24c_eeprom_init(i2c[20], 0x50, 256); + at24c_eeprom_init(i2c[22], 0x52, 256); i2c_slave_create_simple(i2c[3], TYPE_LM75, 0x48); i2c_slave_create_simple(i2c[3], TYPE_LM75, 0x49); i2c_slave_create_simple(i2c[3], TYPE_LM75, 0x4a); i2c_slave_create_simple(i2c[3], TYPE_TMP422, 0x4c); - at24c_eeprom_init(i2c[8], 0x51, 64 * KiB); + at24c_eeprom_init(i2c[8], 0x51, 8 * KiB); i2c_slave_create_simple(i2c[8], TYPE_LM75, 0x4a); i2c_slave_create_simple(i2c[50], TYPE_LM75, 0x4c); - at24c_eeprom_init(i2c[50], 0x52, 64 * KiB); + at24c_eeprom_init(i2c[50], 0x52, 8 * KiB); i2c_slave_create_simple(i2c[51], TYPE_TMP75, 0x48); i2c_slave_create_simple(i2c[52], TYPE_TMP75, 0x49); i2c_slave_create_simple(i2c[59], TYPE_TMP75, 0x48); i2c_slave_create_simple(i2c[60], TYPE_TMP75, 0x49); - at24c_eeprom_init(i2c[65], 0x53, 64 * KiB); + at24c_eeprom_init(i2c[65], 0x53, 8 * KiB); i2c_slave_create_simple(i2c[66], TYPE_TMP75, 0x49); i2c_slave_create_simple(i2c[66], 
TYPE_TMP75, 0x48); - at24c_eeprom_init(i2c[68], 0x52, 64 * KiB); - at24c_eeprom_init(i2c[69], 0x52, 64 * KiB); - at24c_eeprom_init(i2c[70], 0x52, 64 * KiB); - at24c_eeprom_init(i2c[71], 0x52, 64 * KiB); + at24c_eeprom_init(i2c[68], 0x52, 8 * KiB); + at24c_eeprom_init(i2c[69], 0x52, 8 * KiB); + at24c_eeprom_init(i2c[70], 0x52, 8 * KiB); + at24c_eeprom_init(i2c[71], 0x52, 8 * KiB); - at24c_eeprom_init(i2c[73], 0x53, 64 * KiB); + at24c_eeprom_init(i2c[73], 0x53, 8 * KiB); i2c_slave_create_simple(i2c[74], TYPE_TMP75, 0x49); i2c_slave_create_simple(i2c[74], TYPE_TMP75, 0x48); - at24c_eeprom_init(i2c[76], 0x52, 64 * KiB); - at24c_eeprom_init(i2c[77], 0x52, 64 * KiB); - at24c_eeprom_init(i2c[78], 0x52, 64 * KiB); - at24c_eeprom_init(i2c[79], 0x52, 64 * KiB); - at24c_eeprom_init(i2c[28], 0x50, 2 * KiB); + at24c_eeprom_init(i2c[76], 0x52, 8 * KiB); + at24c_eeprom_init(i2c[77], 0x52, 8 * KiB); + at24c_eeprom_init(i2c[78], 0x52, 8 * KiB); + at24c_eeprom_init(i2c[79], 0x52, 8 * KiB); + at24c_eeprom_init(i2c[28], 0x50, 256); for (int i = 0; i < 8; i++) { at24c_eeprom_init(i2c[81 + i * 8], 0x56, 64 * KiB); @@ -1174,6 +1192,24 @@ static void aspeed_machine_ast2500_evb_class_init(ObjectClass *oc, void *data) aspeed_soc_num_cpus(amc->soc_name); }; +static void aspeed_machine_yosemitev2_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); + + mc->desc = "Facebook YosemiteV2 BMC (ARM1176)"; + amc->soc_name = "ast2500-a1"; + amc->hw_strap1 = AST2500_EVB_HW_STRAP1; + amc->hw_strap2 = 0; + amc->fmc_model = "n25q256a"; + amc->spi_model = "mx25l25635e"; + amc->num_cs = 2; + amc->i2c_init = yosemitev2_bmc_i2c_init; + mc->default_ram_size = 512 * MiB; + mc->default_cpus = mc->min_cpus = mc->max_cpus = + aspeed_soc_num_cpus(amc->soc_name); +}; + static void aspeed_machine_romulus_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -1191,6 +1227,25 @@ static void aspeed_machine_romulus_class_init(ObjectClass *oc, void *data) aspeed_soc_num_cpus(amc->soc_name); }; +static void aspeed_machine_tiogapass_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); + + mc->desc = "Facebook Tiogapass BMC (ARM1176)"; + amc->soc_name = "ast2500-a1"; + amc->hw_strap1 = AST2500_EVB_HW_STRAP1; + amc->hw_strap2 = 0; + amc->fmc_model = "n25q256a"; + amc->spi_model = "mx25l25635e"; + amc->num_cs = 2; + amc->i2c_init = tiogapass_bmc_i2c_init; + mc->default_ram_size = 1 * GiB; + mc->default_cpus = mc->min_cpus = mc->max_cpus = + aspeed_soc_num_cpus(amc->soc_name); + aspeed_soc_num_cpus(amc->soc_name); +}; + static void aspeed_machine_sonorapass_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -1563,10 +1618,18 @@ static const TypeInfo aspeed_machine_types[] = { .parent = TYPE_ASPEED_MACHINE, .class_init = aspeed_machine_ast2600_evb_class_init, }, { + .name = MACHINE_TYPE_NAME("yosemitev2-bmc"), + .parent = TYPE_ASPEED_MACHINE, + .class_init = aspeed_machine_yosemitev2_class_init, + }, { .name = MACHINE_TYPE_NAME("tacoma-bmc"), .parent = TYPE_ASPEED_MACHINE, .class_init = aspeed_machine_tacoma_class_init, }, { + .name = MACHINE_TYPE_NAME("tiogapass-bmc"), + .parent = TYPE_ASPEED_MACHINE, + .class_init = aspeed_machine_tiogapass_class_init, + }, { .name = MACHINE_TYPE_NAME("g220a-bmc"), .parent = TYPE_ASPEED_MACHINE, .class_init = aspeed_machine_g220a_class_init, diff --git a/hw/arm/aspeed_ast2600.c 
b/hw/arm/aspeed_ast2600.c index bb2769df04..1bf1246148 100644 --- a/hw/arm/aspeed_ast2600.c +++ b/hw/arm/aspeed_ast2600.c @@ -21,6 +21,7 @@ #define ASPEED_SOC_DPMCU_SIZE 0x00040000 static const hwaddr aspeed_soc_ast2600_memmap[] = { + [ASPEED_DEV_SPI_BOOT] = ASPEED_SOC_SPI_BOOT_ADDR, [ASPEED_DEV_SRAM] = 0x10000000, [ASPEED_DEV_DPMCU] = 0x18000000, /* 0x16000000 0x17FFFFFF : AHB BUS do LPC Bus bridge */ @@ -282,6 +283,12 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) qemu_irq irq; g_autofree char *sram_name = NULL; + /* Default boot region (SPI memory or ROMs) */ + memory_region_init(&s->spi_boot_container, OBJECT(s), + "aspeed.spi_boot_container", 0x10000000); + memory_region_add_subregion(s->memory, sc->memmap[ASPEED_DEV_SPI_BOOT], + &s->spi_boot_container); + /* IO space */ aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->iomem), "aspeed.io", sc->memmap[ASPEED_DEV_IOMEM], @@ -431,6 +438,12 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) sysbus_connect_irq(SYS_BUS_DEVICE(&s->fmc), 0, aspeed_soc_get_irq(s, ASPEED_DEV_FMC)); + /* Set up an alias on the FMC CE0 region (boot default) */ + MemoryRegion *fmc0_mmio = &s->fmc.flashes[0].mmio; + memory_region_init_alias(&s->spi_boot, OBJECT(s), "aspeed.spi_boot", + fmc0_mmio, 0, memory_region_size(fmc0_mmio)); + memory_region_add_subregion(&s->spi_boot_container, 0x0, &s->spi_boot); + /* SPI */ for (i = 0; i < sc->spis_num; i++) { object_property_set_link(OBJECT(&s->spi[i]), "dram", diff --git a/hw/arm/aspeed_eeprom.c b/hw/arm/aspeed_eeprom.c index 04463acc9d..2fb2d5dbb7 100644 --- a/hw/arm/aspeed_eeprom.c +++ b/hw/arm/aspeed_eeprom.c @@ -6,6 +6,27 @@ #include "aspeed_eeprom.h" +/* Tiogapass BMC FRU */ +const uint8_t tiogapass_bmc_fruid[] = { + 0x01, 0x00, 0x00, 0x01, 0x0d, 0x00, 0x00, 0xf1, 0x01, 0x0c, 0x00, 0x36, + 0xe6, 0xd0, 0xc6, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xd2, 0x42, 0x4d, + 0x43, 0x20, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x20, 0x4d, 0x6f, + 0x64, 0x75, 0x6c, 0x65, 0xcd, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, + 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xce, 0x58, 0x58, 0x58, 0x58, 0x58, + 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xc3, 0x31, 0x2e, + 0x30, 0xc9, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xd2, + 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, + 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xc1, 0x39, 0x01, 0x0c, 0x00, 0xc6, + 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xd2, 0x54, 0x69, 0x6f, 0x67, 0x61, + 0x20, 0x50, 0x61, 0x73, 0x73, 0x20, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, + 0x32, 0xce, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, + 0x58, 0x58, 0x58, 0x58, 0xc4, 0x58, 0x58, 0x58, 0x32, 0xcd, 0x58, 0x58, + 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xc7, + 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xc3, 0x31, 0x2e, 0x30, 0xc9, + 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xc8, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x20, 0x41, 0xc1, 0x45, +}; + const uint8_t fby35_nic_fruid[] = { 0x01, 0x00, 0x00, 0x01, 0x0f, 0x20, 0x00, 0xcf, 0x01, 0x0e, 0x19, 0xd7, 0x5e, 0xcf, 0xc8, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xdd, @@ -77,6 +98,30 @@ const uint8_t fby35_bmc_fruid[] = { 0x6e, 0x66, 0x69, 0x67, 0x20, 0x41, 0xc1, 0x45, }; +/* Yosemite V2 BMC FRU */ +const uint8_t yosemitev2_bmc_fruid[] = { + 0x01, 0x00, 0x00, 0x01, 0x0d, 0x00, 0x00, 0xf1, 0x01, 0x0c, 0x00, 0x36, + 0xe6, 0xd0, 0xc6, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xd2, 0x42, 0x4d, + 0x43, 0x20, 0x53, 0x74, 0x6f, 0x72, 
0x61, 0x67, 0x65, 0x20, 0x4d, 0x6f, + 0x64, 0x75, 0x6c, 0x65, 0xcd, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, + 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xce, 0x58, 0x58, 0x58, 0x58, 0x58, + 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xc3, 0x31, 0x2e, + 0x30, 0xc9, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xd2, + 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, + 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xc1, 0x39, 0x01, 0x0c, 0x00, 0xc6, + 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xd2, 0x59, 0x6f, 0x73, 0x65, 0x6d, + 0x69, 0x74, 0x65, 0x20, 0x56, 0x32, 0x2e, 0x30, 0x20, 0x45, 0x56, 0x54, + 0x32, 0xce, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, + 0x58, 0x58, 0x58, 0x58, 0xc4, 0x45, 0x56, 0x54, 0x32, 0xcd, 0x58, 0x58, + 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xc7, + 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xc3, 0x31, 0x2e, 0x30, 0xc9, + 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0xc8, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x20, 0x41, 0xc1, 0x45, +}; + +const size_t tiogapass_bmc_fruid_len = sizeof(tiogapass_bmc_fruid); const size_t fby35_nic_fruid_len = sizeof(fby35_nic_fruid); const size_t fby35_bb_fruid_len = sizeof(fby35_bb_fruid); const size_t fby35_bmc_fruid_len = sizeof(fby35_bmc_fruid); + +const size_t yosemitev2_bmc_fruid_len = sizeof(yosemitev2_bmc_fruid); diff --git a/hw/arm/aspeed_eeprom.h b/hw/arm/aspeed_eeprom.h index a0f848fa6e..86db6f0479 100644 --- a/hw/arm/aspeed_eeprom.h +++ b/hw/arm/aspeed_eeprom.h @@ -9,6 +9,9 @@ #include "qemu/osdep.h" +extern const uint8_t tiogapass_bmc_fruid[]; +extern const size_t tiogapass_bmc_fruid_len; + extern const uint8_t fby35_nic_fruid[]; extern const uint8_t fby35_bb_fruid[]; extern const uint8_t fby35_bmc_fruid[]; @@ -16,4 +19,7 @@ extern const size_t fby35_nic_fruid_len; extern const size_t fby35_bb_fruid_len; extern const size_t fby35_bmc_fruid_len; +extern const uint8_t yosemitev2_bmc_fruid[]; +extern const size_t yosemitev2_bmc_fruid_len; + #endif diff --git a/hw/arm/aspeed_soc.c b/hw/arm/aspeed_soc.c index e884d6badc..bf22258de9 100644 --- a/hw/arm/aspeed_soc.c +++ b/hw/arm/aspeed_soc.c @@ -25,6 +25,7 @@ #define ASPEED_SOC_IOMEM_SIZE 0x00200000 static const hwaddr aspeed_soc_ast2400_memmap[] = { + [ASPEED_DEV_SPI_BOOT] = ASPEED_SOC_SPI_BOOT_ADDR, [ASPEED_DEV_IOMEM] = 0x1E600000, [ASPEED_DEV_FMC] = 0x1E620000, [ASPEED_DEV_SPI1] = 0x1E630000, @@ -59,6 +60,7 @@ static const hwaddr aspeed_soc_ast2400_memmap[] = { }; static const hwaddr aspeed_soc_ast2500_memmap[] = { + [ASPEED_DEV_SPI_BOOT] = ASPEED_SOC_SPI_BOOT_ADDR, [ASPEED_DEV_IOMEM] = 0x1E600000, [ASPEED_DEV_FMC] = 0x1E620000, [ASPEED_DEV_SPI1] = 0x1E630000, @@ -245,6 +247,12 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp) Error *err = NULL; g_autofree char *sram_name = NULL; + /* Default boot region (SPI memory or ROMs) */ + memory_region_init(&s->spi_boot_container, OBJECT(s), + "aspeed.spi_boot_container", 0x10000000); + memory_region_add_subregion(s->memory, sc->memmap[ASPEED_DEV_SPI_BOOT], + &s->spi_boot_container); + /* IO space */ aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->iomem), "aspeed.io", sc->memmap[ASPEED_DEV_IOMEM], @@ -354,6 +362,12 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp) sysbus_connect_irq(SYS_BUS_DEVICE(&s->fmc), 0, aspeed_soc_get_irq(s, ASPEED_DEV_FMC)); + /* Set up an alias on the FMC CE0 region (boot default) */ + MemoryRegion *fmc0_mmio = &s->fmc.flashes[0].mmio; + memory_region_init_alias(&s->spi_boot, OBJECT(s), 
"aspeed.spi_boot", + fmc0_mmio, 0, memory_region_size(fmc0_mmio)); + memory_region_add_subregion(&s->spi_boot_container, 0x0, &s->spi_boot); + /* SPI */ for (i = 0; i < sc->spis_num; i++) { if (!sysbus_realize(SYS_BUS_DEVICE(&s->spi[i]), errp)) { diff --git a/hw/arm/boot.c b/hw/arm/boot.c index 1e021c4a34..50e5141116 100644 --- a/hw/arm/boot.c +++ b/hw/arm/boot.c @@ -926,6 +926,12 @@ static uint64_t load_aarch64_image(const char *filename, hwaddr mem_base, return -1; } size = len; + + /* Unpack the image if it is a EFI zboot image */ + if (unpack_efi_zboot_image(&buffer, &size) < 0) { + g_free(buffer); + return -1; + } } /* check the arm64 magic header value -- very old kernels may not have it */ diff --git a/hw/arm/fby35.c b/hw/arm/fby35.c index 90c04bbc33..f4600c290b 100644 --- a/hw/arm/fby35.c +++ b/hw/arm/fby35.c @@ -100,13 +100,7 @@ static void fby35_bmc_init(Fby35State *s) MemoryRegion *boot_rom = g_new(MemoryRegion, 1); uint64_t size = memory_region_size(&fl->mmio); - if (s->mmio_exec) { - memory_region_init_alias(boot_rom, NULL, "aspeed.boot_rom", - &fl->mmio, 0, size); - memory_region_add_subregion(&s->bmc_memory, FBY35_BMC_FIRMWARE_ADDR, - boot_rom); - } else { - + if (!s->mmio_exec) { memory_region_init_rom(boot_rom, NULL, "aspeed.boot_rom", size, &error_abort); memory_region_add_subregion(&s->bmc_memory, FBY35_BMC_FIRMWARE_ADDR, diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c index 0a5a60ca1e..e7f1c1f219 100644 --- a/hw/arm/smmu-common.c +++ b/hw/arm/smmu-common.c @@ -467,20 +467,6 @@ IOMMUMemoryRegion *smmu_iommu_mr(SMMUState *s, uint32_t sid) return NULL; } -/* Unmap the whole notifier's range */ -static void smmu_unmap_notifier_range(IOMMUNotifier *n) -{ - IOMMUTLBEvent event; - - event.type = IOMMU_NOTIFIER_UNMAP; - event.entry.target_as = &address_space_memory; - event.entry.iova = n->start; - event.entry.perm = IOMMU_NONE; - event.entry.addr_mask = n->end - n->start; - - memory_region_notify_iommu_one(n, &event); -} - /* Unmap all notifiers attached to @mr */ static void smmu_inv_notifiers_mr(IOMMUMemoryRegion *mr) { @@ -488,7 +474,7 @@ static void smmu_inv_notifiers_mr(IOMMUMemoryRegion *mr) trace_smmu_inv_notifiers_mr(mr->parent_obj.name); IOMMU_NOTIFIER_FOREACH(n, mr) { - smmu_unmap_notifier_range(n); + memory_region_unmap_iommu_notifier_range(n); } } diff --git a/hw/core/loader.c b/hw/core/loader.c index 173f8f67f6..cd53235fed 100644 --- a/hw/core/loader.c +++ b/hw/core/loader.c @@ -857,6 +857,97 @@ ssize_t load_image_gzipped(const char *filename, hwaddr addr, uint64_t max_sz) return bytes; } +/* The PE/COFF MS-DOS stub magic number */ +#define EFI_PE_MSDOS_MAGIC "MZ" + +/* + * The Linux header magic number for a EFI PE/COFF + * image targetting an unspecified architecture. + */ +#define EFI_PE_LINUX_MAGIC "\xcd\x23\x82\x81" + +/* + * Bootable Linux kernel images may be packaged as EFI zboot images, which are + * self-decompressing executables when loaded via EFI. The compressed payload + * can also be extracted from the image and decompressed by a non-EFI loader. + * + * The de facto specification for this format is at the following URL: + * + * https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/firmware/efi/libstub/zboot-header.S + * + * This definition is based on Linux upstream commit 29636a5ce87beba. 
+ */ +struct linux_efi_zboot_header { + uint8_t msdos_magic[2]; /* PE/COFF 'MZ' magic number */ + uint8_t reserved0[2]; + uint8_t zimg[4]; /* "zimg" for Linux EFI zboot images */ + uint32_t payload_offset; /* LE offset to compressed payload */ + uint32_t payload_size; /* LE size of the compressed payload */ + uint8_t reserved1[8]; + char compression_type[32]; /* Compression type, NUL terminated */ + uint8_t linux_magic[4]; /* Linux header magic */ + uint32_t pe_header_offset; /* LE offset to the PE header */ +}; + +/* + * Check whether *buffer points to a Linux EFI zboot image in memory. + * + * If it does, attempt to decompress it to a new buffer, and free the old one. + * If any of this fails, return an error to the caller. + * + * If the image is not a Linux EFI zboot image, do nothing and return success. + */ +ssize_t unpack_efi_zboot_image(uint8_t **buffer, int *size) +{ + const struct linux_efi_zboot_header *header; + uint8_t *data = NULL; + int ploff, plsize; + ssize_t bytes; + + /* ignore if this is too small to be a EFI zboot image */ + if (*size < sizeof(*header)) { + return 0; + } + + header = (struct linux_efi_zboot_header *)*buffer; + + /* ignore if this is not a Linux EFI zboot image */ + if (memcmp(&header->msdos_magic, EFI_PE_MSDOS_MAGIC, 2) != 0 || + memcmp(&header->zimg, "zimg", 4) != 0 || + memcmp(&header->linux_magic, EFI_PE_LINUX_MAGIC, 4) != 0) { + return 0; + } + + if (strcmp(header->compression_type, "gzip") != 0) { + fprintf(stderr, + "unable to handle EFI zboot image with \"%.*s\" compression\n", + (int)sizeof(header->compression_type) - 1, + header->compression_type); + return -1; + } + + ploff = ldl_le_p(&header->payload_offset); + plsize = ldl_le_p(&header->payload_size); + + if (ploff < 0 || plsize < 0 || ploff + plsize > *size) { + fprintf(stderr, "unable to handle corrupt EFI zboot image\n"); + return -1; + } + + data = g_malloc(LOAD_IMAGE_MAX_GUNZIP_BYTES); + bytes = gunzip(data, LOAD_IMAGE_MAX_GUNZIP_BYTES, *buffer + ploff, plsize); + if (bytes < 0) { + fprintf(stderr, "failed to decompress EFI zboot image\n"); + g_free(data); + return -1; + } + + g_free(*buffer); + *buffer = g_realloc(data, bytes); + *size = bytes; + return bytes; +} + /* * Functions for reboot-persistent memory regions. * - used for vga bios and option roms. 
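
[Editor's note] The new unpack_efi_zboot_image() helper above keys on three header fields before it attempts any decompression: the MS-DOS "MZ" magic, the "zimg" marker, and the architecture-independent Linux EFI magic. Below is a minimal standalone sketch of that detection step; it is hypothetical checker code, not part of the patch, and it assumes a little-endian host (the QEMU code reads the offsets with ldl_le_p() to stay portable).

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Mirrors the packed linux_efi_zboot_header layout added in hw/core/loader.c */
struct zboot_header {
    uint8_t  msdos_magic[2];        /* "MZ" */
    uint8_t  reserved0[2];
    uint8_t  zimg[4];               /* "zimg" for Linux EFI zboot images */
    uint32_t payload_offset;        /* little-endian */
    uint32_t payload_size;          /* little-endian */
    uint8_t  reserved1[8];
    char     compression_type[32];  /* NUL terminated, e.g. "gzip" */
    uint8_t  linux_magic[4];        /* 0xcd 0x23 0x82 0x81 */
    uint32_t pe_header_offset;
} __attribute__((packed));

int main(int argc, char **argv)
{
    struct zboot_header hdr;
    FILE *f = argc > 1 ? fopen(argv[1], "rb") : NULL;

    if (!f || fread(&hdr, sizeof(hdr), 1, f) != 1) {
        fprintf(stderr, "usage: %s <kernel-image>\n", argv[0]);
        return 1;
    }
    if (memcmp(hdr.msdos_magic, "MZ", 2) == 0 &&
        memcmp(hdr.zimg, "zimg", 4) == 0 &&
        memcmp(hdr.linux_magic, "\xcd\x23\x82\x81", 4) == 0) {
        /* On a little-endian host the raw fields can be printed directly. */
        printf("EFI zboot image: %.31s payload at offset 0x%x, %u bytes\n",
               hdr.compression_type, (unsigned)hdr.payload_offset,
               (unsigned)hdr.payload_size);
    } else {
        printf("not an EFI zboot image\n");
    }
    fclose(f);
    return 0;
}

When the magics match, load_aarch64_image() in hw/arm/boot.c (patched above) hands the whole buffer to unpack_efi_zboot_image(), which swaps in the gunzipped payload before the usual arm64 magic header check runs.
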
diff --git a/hw/core/machine.c b/hw/core/machine.c index f29e700ee4..1cf6822e06 100644 --- a/hw/core/machine.c +++ b/hw/core/machine.c @@ -47,6 +47,8 @@ const size_t hw_compat_7_2_len = G_N_ELEMENTS(hw_compat_7_2); GlobalProperty hw_compat_7_1[] = { { "virtio-device", "queue_reset", "false" }, { "virtio-rng-pci", "vectors", "0" }, + { "virtio-rng-pci-transitional", "vectors", "0" }, + { "virtio-rng-pci-non-transitional", "vectors", "0" }, }; const size_t hw_compat_7_1_len = G_N_ELEMENTS(hw_compat_7_1); diff --git a/hw/cxl/cxl-device-utils.c b/hw/cxl/cxl-device-utils.c index 83ce7a8270..4c5e88aaf5 100644 --- a/hw/cxl/cxl-device-utils.c +++ b/hw/cxl/cxl-device-utils.c @@ -267,5 +267,5 @@ void cxl_device_register_init_common(CXLDeviceState *cxl_dstate) cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000); memdev_reg_init_common(cxl_dstate); - assert(cxl_initialize_mailbox(cxl_dstate) == 0); + cxl_initialize_mailbox(cxl_dstate); } diff --git a/hw/cxl/cxl-mailbox-utils.c b/hw/cxl/cxl-mailbox-utils.c index bc1bb18844..206e04a4b8 100644 --- a/hw/cxl/cxl-mailbox-utils.c +++ b/hw/cxl/cxl-mailbox-utils.c @@ -12,8 +12,11 @@ #include "hw/pci/pci.h" #include "qemu/cutils.h" #include "qemu/log.h" +#include "qemu/units.h" #include "qemu/uuid.h" +#define CXL_CAPACITY_MULTIPLIER (256 * MiB) + /* * How to add a new command, example. The command set FOO, with cmd BAR. * 1. Add the command set and cmd to the enum. @@ -138,7 +141,7 @@ static ret_code cmd_firmware_update_get_info(struct cxl_cmd *cmd, } QEMU_PACKED *fw_info; QEMU_BUILD_BUG_ON(sizeof(*fw_info) != 0x50); - if (cxl_dstate->pmem_size < (256 << 20)) { + if (cxl_dstate->pmem_size < CXL_CAPACITY_MULTIPLIER) { return CXL_MBOX_INTERNAL_ERROR; } @@ -190,7 +193,11 @@ static ret_code cmd_timestamp_set(struct cxl_cmd *cmd, return CXL_MBOX_SUCCESS; } -static QemuUUID cel_uuid; +/* CXL 3.0 8.2.9.5.2.1 Command Effects Log (CEL) */ +static const QemuUUID cel_uuid = { + .data = UUID(0x0da9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79, + 0x96, 0xb1, 0x62, 0x3b, 0x3f, 0x17) +}; /* 8.2.9.4.1 */ static ret_code cmd_logs_get_supported(struct cxl_cmd *cmd, @@ -283,7 +290,7 @@ static ret_code cmd_identify_memory_device(struct cxl_cmd *cmd, CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d); uint64_t size = cxl_dstate->pmem_size; - if (!QEMU_IS_ALIGNED(size, 256 << 20)) { + if (!QEMU_IS_ALIGNED(size, CXL_CAPACITY_MULTIPLIER)) { return CXL_MBOX_INTERNAL_ERROR; } @@ -293,8 +300,8 @@ static ret_code cmd_identify_memory_device(struct cxl_cmd *cmd, /* PMEM only */ snprintf(id->fw_revision, 0x10, "BWFW VERSION %02d", 0); - id->total_capacity = size / (256 << 20); - id->persistent_capacity = size / (256 << 20); + id->total_capacity = size / CXL_CAPACITY_MULTIPLIER; + id->persistent_capacity = size / CXL_CAPACITY_MULTIPLIER; id->lsa_size = cvc->get_lsa_size(ct3d); *len = sizeof(*id); @@ -314,14 +321,14 @@ static ret_code cmd_ccls_get_partition_info(struct cxl_cmd *cmd, QEMU_BUILD_BUG_ON(sizeof(*part_info) != 0x20); uint64_t size = cxl_dstate->pmem_size; - if (!QEMU_IS_ALIGNED(size, 256 << 20)) { + if (!QEMU_IS_ALIGNED(size, CXL_CAPACITY_MULTIPLIER)) { return CXL_MBOX_INTERNAL_ERROR; } /* PMEM only */ part_info->active_vmem = 0; part_info->next_vmem = 0; - part_info->active_pmem = size / (256 << 20); + part_info->active_pmem = size / CXL_CAPACITY_MULTIPLIER; part_info->next_pmem = 0; *len = sizeof(*part_info); @@ -455,11 +462,8 @@ void cxl_process_mailbox(CXLDeviceState *cxl_dstate) DOORBELL, 0); } -int cxl_initialize_mailbox(CXLDeviceState *cxl_dstate) +void 
cxl_initialize_mailbox(CXLDeviceState *cxl_dstate) { - /* CXL 2.0: Table 169 Get Supported Logs Log Entry */ - const char *cel_uuidstr = "0da9c0b5-bf41-4b78-8f79-96b1623b3f17"; - for (int set = 0; set < 256; set++) { for (int cmd = 0; cmd < 256; cmd++) { if (cxl_cmd_set[set][cmd].handler) { @@ -473,6 +477,4 @@ int cxl_initialize_mailbox(CXLDeviceState *cxl_dstate) } } } - - return qemu_uuid_parse(cel_uuidstr, &cel_uuid); } diff --git a/hw/i2c/allwinner-i2c.c b/hw/i2c/allwinner-i2c.c index a435965836..f24c3ac6f0 100644 --- a/hw/i2c/allwinner-i2c.c +++ b/hw/i2c/allwinner-i2c.c @@ -357,10 +357,16 @@ static void allwinner_i2c_write(void *opaque, hwaddr offset, s->stat = STAT_FROM_STA(STAT_IDLE); s->cntr &= ~TWI_CNTR_M_STP; } - if ((s->cntr & TWI_CNTR_INT_FLAG) == 0) { - /* Interrupt flag cleared */ + + if (!s->irq_clear_inverted && !(s->cntr & TWI_CNTR_INT_FLAG)) { + /* Write 0 to clear this flag */ + qemu_irq_lower(s->irq); + } else if (s->irq_clear_inverted && (s->cntr & TWI_CNTR_INT_FLAG)) { + /* Write 1 to clear this flag */ + s->cntr &= ~TWI_CNTR_INT_FLAG; qemu_irq_lower(s->irq); } + if ((s->cntr & TWI_CNTR_A_ACK) == 0) { if (STAT_TO_STA(s->stat) == STAT_M_DATA_RX_ACK) { s->stat = STAT_FROM_STA(STAT_M_DATA_RX_NACK); @@ -451,9 +457,25 @@ static const TypeInfo allwinner_i2c_type_info = { .class_init = allwinner_i2c_class_init, }; +static void allwinner_i2c_sun6i_init(Object *obj) +{ + AWI2CState *s = AW_I2C(obj); + + s->irq_clear_inverted = true; +} + +static const TypeInfo allwinner_i2c_sun6i_type_info = { + .name = TYPE_AW_I2C_SUN6I, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AWI2CState), + .instance_init = allwinner_i2c_sun6i_init, + .class_init = allwinner_i2c_class_init, +}; + static void allwinner_i2c_register_types(void) { type_register_static(&allwinner_i2c_type_info); + type_register_static(&allwinner_i2c_sun6i_type_info); } type_init(allwinner_i2c_register_types) diff --git a/hw/i2c/aspeed_i2c.c b/hw/i2c/aspeed_i2c.c index c166fd20fa..1f071a3811 100644 --- a/hw/i2c/aspeed_i2c.c +++ b/hw/i2c/aspeed_i2c.c @@ -550,6 +550,8 @@ static void aspeed_i2c_bus_handle_cmd(AspeedI2CBus *bus, uint64_t value) } SHARED_ARRAY_FIELD_DP32(bus->regs, reg_cmd, M_STOP_CMD, 0); aspeed_i2c_set_state(bus, I2CD_IDLE); + + i2c_schedule_pending_master(bus->bus); } if (aspeed_i2c_bus_pkt_mode_en(bus)) { diff --git a/hw/i2c/core.c b/hw/i2c/core.c index d4ba8146bf..bed594fe59 100644 --- a/hw/i2c/core.c +++ b/hw/i2c/core.c @@ -185,22 +185,39 @@ int i2c_start_transfer(I2CBus *bus, uint8_t address, bool is_recv) void i2c_bus_master(I2CBus *bus, QEMUBH *bh) { - if (i2c_bus_busy(bus)) { - I2CPendingMaster *node = g_new(struct I2CPendingMaster, 1); - node->bh = bh; + I2CPendingMaster *node = g_new(struct I2CPendingMaster, 1); + node->bh = bh; + + QSIMPLEQ_INSERT_TAIL(&bus->pending_masters, node, entry); +} + +void i2c_schedule_pending_master(I2CBus *bus) +{ + I2CPendingMaster *node; - QSIMPLEQ_INSERT_TAIL(&bus->pending_masters, node, entry); + if (i2c_bus_busy(bus)) { + /* someone is already controlling the bus; wait for it to release it */ + return; + } + if (QSIMPLEQ_EMPTY(&bus->pending_masters)) { return; } - bus->bh = bh; + node = QSIMPLEQ_FIRST(&bus->pending_masters); + bus->bh = node->bh; + + QSIMPLEQ_REMOVE_HEAD(&bus->pending_masters, entry); + g_free(node); + qemu_bh_schedule(bus->bh); } void i2c_bus_release(I2CBus *bus) { bus->bh = NULL; + + i2c_schedule_pending_master(bus); } int i2c_start_recv(I2CBus *bus, uint8_t address) @@ -234,16 +251,6 @@ void i2c_end_transfer(I2CBus *bus) 
g_free(node); } bus->broadcast = false; - - if (!QSIMPLEQ_EMPTY(&bus->pending_masters)) { - I2CPendingMaster *node = QSIMPLEQ_FIRST(&bus->pending_masters); - bus->bh = node->bh; - - QSIMPLEQ_REMOVE_HEAD(&bus->pending_masters, entry); - g_free(node); - - qemu_bh_schedule(bus->bh); - } } int i2c_send(I2CBus *bus, uint8_t data) diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c index d27921fd8f..b19fb4259e 100644 --- a/hw/i386/acpi-build.c +++ b/hw/i386/acpi-build.c @@ -1514,7 +1514,6 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, aml_append(pkg, aml_eisaid("PNP0A03")); aml_append(dev, aml_name_decl("_CID", pkg)); aml_append(dev, aml_name_decl("_ADR", aml_int(0))); - aml_append(dev, aml_name_decl("_UID", aml_int(bus_num))); build_cxl_osc_method(dev); } else if (pci_bus_is_express(bus)) { aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A08"))); diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c index 98a5c304a7..faade7def8 100644 --- a/hw/i386/intel_iommu.c +++ b/hw/i386/intel_iommu.c @@ -1530,13 +1530,17 @@ static int vtd_sync_shadow_page_table_range(VTDAddressSpace *vtd_as, return vtd_page_walk(s, ce, addr, addr + size, &info, vtd_as->pasid); } -static int vtd_sync_shadow_page_table(VTDAddressSpace *vtd_as) +static int vtd_address_space_sync(VTDAddressSpace *vtd_as) { int ret; VTDContextEntry ce; IOMMUNotifier *n; - if (!(vtd_as->iommu.iommu_notify_flags & IOMMU_NOTIFIER_IOTLB_EVENTS)) { + /* If no MAP notifier registered, we simply invalidate all the cache */ + if (!vtd_as_has_map_notifier(vtd_as)) { + IOMMU_NOTIFIER_FOREACH(n, &vtd_as->iommu) { + memory_region_unmap_iommu_notifier_range(n); + } return 0; } @@ -2000,7 +2004,7 @@ static void vtd_iommu_replay_all(IntelIOMMUState *s) VTDAddressSpace *vtd_as; QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) { - vtd_sync_shadow_page_table(vtd_as); + vtd_address_space_sync(vtd_as); } } @@ -2082,7 +2086,7 @@ static void vtd_context_device_invalidate(IntelIOMMUState *s, * framework will skip MAP notifications if that * happened. 
*/ - vtd_sync_shadow_page_table(vtd_as); + vtd_address_space_sync(vtd_as); } } } @@ -2140,7 +2144,7 @@ static void vtd_iotlb_domain_invalidate(IntelIOMMUState *s, uint16_t domain_id) if (!vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus), vtd_as->devfn, &ce) && domain_id == vtd_get_domain_id(s, &ce, vtd_as->pasid)) { - vtd_sync_shadow_page_table(vtd_as); + vtd_address_space_sync(vtd_as); } } } @@ -3179,6 +3183,7 @@ static int vtd_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu, { VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu); IntelIOMMUState *s = vtd_as->iommu_state; + X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s); /* TODO: add support for VFIO and vhost users */ if (s->snoop_control) { @@ -3186,6 +3191,20 @@ static int vtd_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu, "Snoop Control with vhost or VFIO is not supported"); return -ENOTSUP; } + if (!s->caching_mode && (new & IOMMU_NOTIFIER_MAP)) { + error_setg_errno(errp, ENOTSUP, + "device %02x.%02x.%x requires caching mode", + pci_bus_num(vtd_as->bus), PCI_SLOT(vtd_as->devfn), + PCI_FUNC(vtd_as->devfn)); + return -ENOTSUP; + } + if (!x86_iommu->dt_supported && (new & IOMMU_NOTIFIER_DEVIOTLB_UNMAP)) { + error_setg_errno(errp, ENOTSUP, + "device %02x.%02x.%x requires device IOTLB mode", + pci_bus_num(vtd_as->bus), PCI_SLOT(vtd_as->devfn), + PCI_FUNC(vtd_as->devfn)); + return -ENOTSUP; + } /* Update per-address-space notifier flags */ vtd_as->notifier_flags = new; @@ -3831,7 +3850,7 @@ static void vtd_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n) .domain_id = vtd_get_domain_id(s, &ce, vtd_as->pasid), }; - vtd_page_walk(s, &ce, 0, ~0ULL, &info, vtd_as->pasid); + vtd_page_walk(s, &ce, n->start, n->end, &info, vtd_as->pasid); } } else { trace_vtd_replay_ce_invalid(bus_n, PCI_SLOT(vtd_as->devfn), diff --git a/hw/i386/microvm.c b/hw/i386/microvm.c index 68c22016d2..3d606a20b4 100644 --- a/hw/i386/microvm.c +++ b/hw/i386/microvm.c @@ -328,7 +328,7 @@ static void microvm_memory_init(MicrovmMachineState *mms) rom_set_fw(fw_cfg); if (machine->kernel_filename != NULL) { - x86_load_linux(x86ms, fw_cfg, 0, true, false); + x86_load_linux(x86ms, fw_cfg, 0, true); } if (mms->option_roms) { @@ -376,8 +376,7 @@ static void microvm_fix_kernel_cmdline(MachineState *machine) MicrovmMachineState *mms = MICROVM_MACHINE(machine); BusState *bus; BusChild *kid; - char *cmdline, *existing_cmdline; - size_t len; + char *cmdline; /* * Find MMIO transports with attached devices, and add them to the kernel @@ -386,8 +385,7 @@ static void microvm_fix_kernel_cmdline(MachineState *machine) * Yes, this is a hack, but one that heavily improves the UX without * introducing any significant issues. 
*/ - existing_cmdline = fw_cfg_read_bytes_ptr(x86ms->fw_cfg, FW_CFG_CMDLINE_DATA); - cmdline = g_strdup(existing_cmdline); + cmdline = g_strdup(machine->kernel_cmdline); bus = sysbus_get_default(); QTAILQ_FOREACH(kid, &bus->children, sibling) { DeviceState *dev = kid->child; @@ -411,12 +409,9 @@ static void microvm_fix_kernel_cmdline(MachineState *machine) } } - len = strlen(cmdline); - if (len > VIRTIO_CMDLINE_TOTAL_MAX_LEN + strlen(existing_cmdline)) { - fprintf(stderr, "qemu: virtio mmio cmdline too large, skipping\n"); - } else { - memcpy(existing_cmdline, cmdline, len + 1); - } + fw_cfg_modify_i32(x86ms->fw_cfg, FW_CFG_CMDLINE_SIZE, strlen(cmdline) + 1); + fw_cfg_modify_string(x86ms->fw_cfg, FW_CFG_CMDLINE_DATA, cmdline); + g_free(cmdline); } diff --git a/hw/i386/pc.c b/hw/i386/pc.c index fd17ce7a94..7bebea57e3 100644 --- a/hw/i386/pc.c +++ b/hw/i386/pc.c @@ -810,7 +810,7 @@ void xen_load_linux(PCMachineState *pcms) rom_set_fw(fw_cfg); x86_load_linux(x86ms, fw_cfg, pcmc->acpi_data_size, - pcmc->pvh_enabled, pcmc->legacy_no_rng_seed); + pcmc->pvh_enabled); for (i = 0; i < nb_option_roms; i++) { assert(!strcmp(option_rom[i].name, "linuxboot.bin") || !strcmp(option_rom[i].name, "linuxboot_dma.bin") || @@ -1130,7 +1130,7 @@ void pc_memory_init(PCMachineState *pcms, if (linux_boot) { x86_load_linux(x86ms, fw_cfg, pcmc->acpi_data_size, - pcmc->pvh_enabled, pcmc->legacy_no_rng_seed); + pcmc->pvh_enabled); } for (i = 0; i < nb_option_roms; i++) { diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c index 126b6c11df..4bf15f9c1f 100644 --- a/hw/i386/pc_piix.c +++ b/hw/i386/pc_piix.c @@ -422,6 +422,7 @@ static void pc_xen_hvm_init(MachineState *machine) } pc_xen_hvm_init_pci(machine); + xen_igd_reserve_slot(pcms->bus); pci_create_simple(pcms->bus, -1, "xen-platform"); } #endif @@ -477,9 +478,7 @@ DEFINE_I440FX_MACHINE(v7_2, "pc-i440fx-7.2", NULL, static void pc_i440fx_7_1_machine_options(MachineClass *m) { - PCMachineClass *pcmc = PC_MACHINE_CLASS(m); pc_i440fx_7_2_machine_options(m); - pcmc->legacy_no_rng_seed = true; compat_props_add(m->compat_props, hw_compat_7_1, hw_compat_7_1_len); compat_props_add(m->compat_props, pc_compat_7_1, pc_compat_7_1_len); } diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c index 09004f3f1f..797ba347fd 100644 --- a/hw/i386/pc_q35.c +++ b/hw/i386/pc_q35.c @@ -395,9 +395,7 @@ DEFINE_Q35_MACHINE(v7_2, "pc-q35-7.2", NULL, static void pc_q35_7_1_machine_options(MachineClass *m) { - PCMachineClass *pcmc = PC_MACHINE_CLASS(m); pc_q35_7_2_machine_options(m); - pcmc->legacy_no_rng_seed = true; compat_props_add(m->compat_props, hw_compat_7_1, hw_compat_7_1_len); compat_props_add(m->compat_props, pc_compat_7_1, pc_compat_7_1_len); } diff --git a/hw/i386/x86.c b/hw/i386/x86.c index a56b10b2fb..a88a126123 100644 --- a/hw/i386/x86.c +++ b/hw/i386/x86.c @@ -26,7 +26,6 @@ #include "qemu/cutils.h" #include "qemu/units.h" #include "qemu/datadir.h" -#include "qemu/guest-random.h" #include "qapi/error.h" #include "qapi/qapi-visit-common.h" #include "qapi/clone-visitor.h" @@ -36,7 +35,6 @@ #include "sysemu/whpx.h" #include "sysemu/numa.h" #include "sysemu/replay.h" -#include "sysemu/reset.h" #include "sysemu/sysemu.h" #include "sysemu/cpu-timers.h" #include "sysemu/xen.h" @@ -49,7 +47,6 @@ #include "hw/intc/i8259.h" #include "hw/rtc/mc146818rtc.h" #include "target/i386/sev.h" -#include "hw/i386/microvm.h" #include "hw/acpi/cpu_hotplug.h" #include "hw/irq.h" @@ -675,12 +672,12 @@ DeviceState *ioapic_init_secondary(GSIState *gsi_state) return dev; } -typedef struct SetupData { +struct 
setup_data { uint64_t next; uint32_t type; uint32_t len; uint8_t data[]; -} __attribute__((packed)) SetupData; +} __attribute__((packed)); /* @@ -787,35 +784,10 @@ static bool load_elfboot(const char *kernel_filename, return true; } -typedef struct SetupDataFixup { - void *pos; - hwaddr orig_val, new_val; - uint32_t addr; -} SetupDataFixup; - -static void fixup_setup_data(void *opaque) -{ - SetupDataFixup *fixup = opaque; - stq_p(fixup->pos, fixup->new_val); -} - -static void reset_setup_data(void *opaque) -{ - SetupDataFixup *fixup = opaque; - stq_p(fixup->pos, fixup->orig_val); -} - -static void reset_rng_seed(void *opaque) -{ - SetupData *setup_data = opaque; - qemu_guest_getrandom_nofail(setup_data->data, le32_to_cpu(setup_data->len)); -} - void x86_load_linux(X86MachineState *x86ms, FWCfgState *fw_cfg, int acpi_data_size, - bool pvh_enabled, - bool legacy_no_rng_seed) + bool pvh_enabled) { bool linuxboot_dma_enabled = X86_MACHINE_GET_CLASS(x86ms)->fwcfg_dma_enabled; uint16_t protocol; @@ -823,26 +795,19 @@ void x86_load_linux(X86MachineState *x86ms, int dtb_size, setup_data_offset; uint32_t initrd_max; uint8_t header[8192], *setup, *kernel; - hwaddr real_addr, prot_addr, cmdline_addr, initrd_addr = 0, first_setup_data = 0; + hwaddr real_addr, prot_addr, cmdline_addr, initrd_addr = 0; FILE *f; char *vmode; MachineState *machine = MACHINE(x86ms); - SetupData *setup_data; + struct setup_data *setup_data; const char *kernel_filename = machine->kernel_filename; const char *initrd_filename = machine->initrd_filename; const char *dtb_filename = machine->dtb; - char *kernel_cmdline; + const char *kernel_cmdline = machine->kernel_cmdline; SevKernelLoaderContext sev_load_ctx = {}; - enum { RNG_SEED_LENGTH = 32 }; - /* - * Add the NUL terminator, some padding for the microvm cmdline fiddling - * hack, and then align to 16 bytes as a paranoia measure - */ - cmdline_size = (strlen(machine->kernel_cmdline) + 1 + - VIRTIO_CMDLINE_TOTAL_MAX_LEN + 16) & ~15; - /* Make a copy, since we might append arbitrary bytes to it later. 
*/ - kernel_cmdline = g_strndup(machine->kernel_cmdline, cmdline_size); + /* Align to 16 bytes as a paranoia measure */ + cmdline_size = (strlen(kernel_cmdline) + 16) & ~15; /* load the kernel header */ f = fopen(kernel_filename, "rb"); @@ -983,6 +948,12 @@ void x86_load_linux(X86MachineState *x86ms, initrd_max = x86ms->below_4g_mem_size - acpi_data_size - 1; } + fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_ADDR, cmdline_addr); + fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, strlen(kernel_cmdline) + 1); + fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA, kernel_cmdline); + sev_load_ctx.cmdline_data = (char *)kernel_cmdline; + sev_load_ctx.cmdline_size = strlen(kernel_cmdline) + 1; + if (protocol >= 0x202) { stl_p(header + 0x228, cmdline_addr); } else { @@ -1109,45 +1080,20 @@ void x86_load_linux(X86MachineState *x86ms, exit(1); } - setup_data_offset = cmdline_size; - cmdline_size += sizeof(SetupData) + dtb_size; - kernel_cmdline = g_realloc(kernel_cmdline, cmdline_size); - setup_data = (void *)kernel_cmdline + setup_data_offset; - setup_data->next = cpu_to_le64(first_setup_data); - first_setup_data = cmdline_addr + setup_data_offset; + setup_data_offset = QEMU_ALIGN_UP(kernel_size, 16); + kernel_size = setup_data_offset + sizeof(struct setup_data) + dtb_size; + kernel = g_realloc(kernel, kernel_size); + + stq_p(header + 0x250, prot_addr + setup_data_offset); + + setup_data = (struct setup_data *)(kernel + setup_data_offset); + setup_data->next = 0; setup_data->type = cpu_to_le32(SETUP_DTB); setup_data->len = cpu_to_le32(dtb_size); - load_image_size(dtb_filename, setup_data->data, dtb_size); - } - if (!legacy_no_rng_seed && protocol >= 0x209) { - setup_data_offset = cmdline_size; - cmdline_size += sizeof(SetupData) + RNG_SEED_LENGTH; - kernel_cmdline = g_realloc(kernel_cmdline, cmdline_size); - setup_data = (void *)kernel_cmdline + setup_data_offset; - setup_data->next = cpu_to_le64(first_setup_data); - first_setup_data = cmdline_addr + setup_data_offset; - setup_data->type = cpu_to_le32(SETUP_RNG_SEED); - setup_data->len = cpu_to_le32(RNG_SEED_LENGTH); - qemu_guest_getrandom_nofail(setup_data->data, RNG_SEED_LENGTH); - qemu_register_reset_nosnapshotload(reset_rng_seed, setup_data); - fw_cfg_add_bytes_callback(fw_cfg, FW_CFG_KERNEL_DATA, reset_rng_seed, NULL, - setup_data, kernel, kernel_size, true); - } else { - fw_cfg_add_bytes(fw_cfg, FW_CFG_KERNEL_DATA, kernel, kernel_size); + load_image_size(dtb_filename, setup_data->data, dtb_size); } - fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_ADDR, cmdline_addr); - fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, cmdline_size); - fw_cfg_add_bytes(fw_cfg, FW_CFG_CMDLINE_DATA, kernel_cmdline, cmdline_size); - sev_load_ctx.cmdline_data = (char *)kernel_cmdline; - sev_load_ctx.cmdline_size = cmdline_size; - - fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, prot_addr); - fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size); - sev_load_ctx.kernel_data = (char *)kernel; - sev_load_ctx.kernel_size = kernel_size; - /* * If we're starting an encrypted VM, it will be OVMF based, which uses the * efi stub for booting and doesn't require any values to be placed in the @@ -1155,21 +1101,17 @@ void x86_load_linux(X86MachineState *x86ms, * kernel on the other side of the fw_cfg interface matches the hash of the * file the user passed in. */ - if (!sev_enabled() && first_setup_data) { - SetupDataFixup *fixup = g_malloc(sizeof(*fixup)); - + if (!sev_enabled()) { memcpy(setup, header, MIN(sizeof(header), setup_size)); - /* Offset 0x250 is a pointer to the first setup_data link. 
*/ - fixup->pos = setup + 0x250; - fixup->orig_val = ldq_p(fixup->pos); - fixup->new_val = first_setup_data; - fixup->addr = cpu_to_le32(real_addr); - fw_cfg_add_bytes_callback(fw_cfg, FW_CFG_SETUP_ADDR, fixup_setup_data, NULL, - fixup, &fixup->addr, sizeof(fixup->addr), true); - qemu_register_reset(reset_setup_data, fixup); - } else { - fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_ADDR, real_addr); } + + fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, prot_addr); + fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size); + fw_cfg_add_bytes(fw_cfg, FW_CFG_KERNEL_DATA, kernel, kernel_size); + sev_load_ctx.kernel_data = (char *)kernel; + sev_load_ctx.kernel_size = kernel_size; + + fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_ADDR, real_addr); fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_SIZE, setup_size); fw_cfg_add_bytes(fw_cfg, FW_CFG_SETUP_DATA, setup, setup_size); sev_load_ctx.setup_data = (char *)setup; diff --git a/hw/intc/riscv_aclint.c b/hw/intc/riscv_aclint.c index eee04643cb..b466a6abaf 100644 --- a/hw/intc/riscv_aclint.c +++ b/hw/intc/riscv_aclint.c @@ -130,7 +130,7 @@ static uint64_t riscv_aclint_mtimer_read(void *opaque, hwaddr addr, addr < (mtimer->timecmp_base + (mtimer->num_harts << 3))) { size_t hartid = mtimer->hartid_base + ((addr - mtimer->timecmp_base) >> 3); - CPUState *cpu = qemu_get_cpu(hartid); + CPUState *cpu = cpu_by_arch_id(hartid); CPURISCVState *env = cpu ? cpu->env_ptr : NULL; if (!env) { qemu_log_mask(LOG_GUEST_ERROR, @@ -173,7 +173,7 @@ static void riscv_aclint_mtimer_write(void *opaque, hwaddr addr, addr < (mtimer->timecmp_base + (mtimer->num_harts << 3))) { size_t hartid = mtimer->hartid_base + ((addr - mtimer->timecmp_base) >> 3); - CPUState *cpu = qemu_get_cpu(hartid); + CPUState *cpu = cpu_by_arch_id(hartid); CPURISCVState *env = cpu ? cpu->env_ptr : NULL; if (!env) { qemu_log_mask(LOG_GUEST_ERROR, @@ -231,7 +231,7 @@ static void riscv_aclint_mtimer_write(void *opaque, hwaddr addr, /* Check if timer interrupt is triggered for each hart. */ for (i = 0; i < mtimer->num_harts; i++) { - CPUState *cpu = qemu_get_cpu(mtimer->hartid_base + i); + CPUState *cpu = cpu_by_arch_id(mtimer->hartid_base + i); CPURISCVState *env = cpu ? cpu->env_ptr : NULL; if (!env) { continue; @@ -292,7 +292,7 @@ static void riscv_aclint_mtimer_realize(DeviceState *dev, Error **errp) s->timecmp = g_new0(uint64_t, s->num_harts); /* Claim timer interrupt bits */ for (i = 0; i < s->num_harts; i++) { - RISCVCPU *cpu = RISCV_CPU(qemu_get_cpu(s->hartid_base + i)); + RISCVCPU *cpu = RISCV_CPU(cpu_by_arch_id(s->hartid_base + i)); if (riscv_cpu_claim_interrupts(cpu, MIP_MTIP) < 0) { error_report("MTIP already claimed"); exit(1); @@ -372,7 +372,7 @@ DeviceState *riscv_aclint_mtimer_create(hwaddr addr, hwaddr size, sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr); for (i = 0; i < num_harts; i++) { - CPUState *cpu = qemu_get_cpu(hartid_base + i); + CPUState *cpu = cpu_by_arch_id(hartid_base + i); RISCVCPU *rvcpu = RISCV_CPU(cpu); CPURISCVState *env = cpu ? cpu->env_ptr : NULL; riscv_aclint_mtimer_callback *cb = @@ -407,7 +407,7 @@ static uint64_t riscv_aclint_swi_read(void *opaque, hwaddr addr, if (addr < (swi->num_harts << 2)) { size_t hartid = swi->hartid_base + (addr >> 2); - CPUState *cpu = qemu_get_cpu(hartid); + CPUState *cpu = cpu_by_arch_id(hartid); CPURISCVState *env = cpu ? 
cpu->env_ptr : NULL; if (!env) { qemu_log_mask(LOG_GUEST_ERROR, @@ -430,7 +430,7 @@ static void riscv_aclint_swi_write(void *opaque, hwaddr addr, uint64_t value, if (addr < (swi->num_harts << 2)) { size_t hartid = swi->hartid_base + (addr >> 2); - CPUState *cpu = qemu_get_cpu(hartid); + CPUState *cpu = cpu_by_arch_id(hartid); CPURISCVState *env = cpu ? cpu->env_ptr : NULL; if (!env) { qemu_log_mask(LOG_GUEST_ERROR, @@ -545,7 +545,7 @@ DeviceState *riscv_aclint_swi_create(hwaddr addr, uint32_t hartid_base, sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr); for (i = 0; i < num_harts; i++) { - CPUState *cpu = qemu_get_cpu(hartid_base + i); + CPUState *cpu = cpu_by_arch_id(hartid_base + i); RISCVCPU *rvcpu = RISCV_CPU(cpu); qdev_connect_gpio_out(dev, i, diff --git a/hw/intc/riscv_aplic.c b/hw/intc/riscv_aplic.c index cfd007e629..cd7efc4ad4 100644 --- a/hw/intc/riscv_aplic.c +++ b/hw/intc/riscv_aplic.c @@ -833,7 +833,7 @@ static void riscv_aplic_realize(DeviceState *dev, Error **errp) /* Claim the CPU interrupt to be triggered by this APLIC */ for (i = 0; i < aplic->num_harts; i++) { - RISCVCPU *cpu = RISCV_CPU(qemu_get_cpu(aplic->hartid_base + i)); + RISCVCPU *cpu = RISCV_CPU(cpu_by_arch_id(aplic->hartid_base + i)); if (riscv_cpu_claim_interrupts(cpu, (aplic->mmode) ? MIP_MEIP : MIP_SEIP) < 0) { error_report("%s already claimed", @@ -966,7 +966,7 @@ DeviceState *riscv_aplic_create(hwaddr addr, hwaddr size, if (!msimode) { for (i = 0; i < num_harts; i++) { - CPUState *cpu = qemu_get_cpu(hartid_base + i); + CPUState *cpu = cpu_by_arch_id(hartid_base + i); qdev_connect_gpio_out_named(dev, NULL, i, qdev_get_gpio_in(DEVICE(cpu), diff --git a/hw/intc/riscv_imsic.c b/hw/intc/riscv_imsic.c index 4d4d5b50ca..fea3385b51 100644 --- a/hw/intc/riscv_imsic.c +++ b/hw/intc/riscv_imsic.c @@ -316,8 +316,8 @@ static const MemoryRegionOps riscv_imsic_ops = { static void riscv_imsic_realize(DeviceState *dev, Error **errp) { RISCVIMSICState *imsic = RISCV_IMSIC(dev); - RISCVCPU *rcpu = RISCV_CPU(qemu_get_cpu(imsic->hartid)); - CPUState *cpu = qemu_get_cpu(imsic->hartid); + RISCVCPU *rcpu = RISCV_CPU(cpu_by_arch_id(imsic->hartid)); + CPUState *cpu = cpu_by_arch_id(imsic->hartid); CPURISCVState *env = cpu ? 
cpu->env_ptr : NULL; imsic->num_eistate = imsic->num_pages * imsic->num_irqs; @@ -413,7 +413,7 @@ DeviceState *riscv_imsic_create(hwaddr addr, uint32_t hartid, bool mmode, uint32_t num_pages, uint32_t num_ids) { DeviceState *dev = qdev_new(TYPE_RISCV_IMSIC); - CPUState *cpu = qemu_get_cpu(hartid); + CPUState *cpu = cpu_by_arch_id(hartid); uint32_t i; assert(!(addr & (IMSIC_MMIO_PAGE_SZ - 1))); diff --git a/hw/loongarch/acpi-build.c b/hw/loongarch/acpi-build.c index f551296a0e..6cb2472d33 100644 --- a/hw/loongarch/acpi-build.c +++ b/hw/loongarch/acpi-build.c @@ -260,6 +260,7 @@ build_la_ged_aml(Aml *dsdt, MachineState *machine) AML_SYSTEM_MEMORY, VIRT_GED_MEM_ADDR); } + acpi_dsdt_add_power_button(dsdt); } static void build_pci_device_aml(Aml *scope, LoongArchMachineState *lams) @@ -271,7 +272,7 @@ static void build_pci_device_aml(Aml *scope, LoongArchMachineState *lams) .pio.size = VIRT_PCI_IO_SIZE, .ecam.base = VIRT_PCI_CFG_BASE, .ecam.size = VIRT_PCI_CFG_SIZE, - .irq = PCH_PIC_IRQ_OFFSET + VIRT_DEVICE_IRQS, + .irq = VIRT_GSI_BASE + VIRT_DEVICE_IRQS, .bus = lams->pci_bus, }; diff --git a/hw/loongarch/virt.c b/hw/loongarch/virt.c index 66be925068..38ef7cc49f 100644 --- a/hw/loongarch/virt.c +++ b/hw/loongarch/virt.c @@ -316,6 +316,14 @@ static void virt_machine_done(Notifier *notifier, void *data) loongarch_acpi_setup(lams); } +static void virt_powerdown_req(Notifier *notifier, void *opaque) +{ + LoongArchMachineState *s = container_of(notifier, + LoongArchMachineState, powerdown_notifier); + + acpi_send_event(s->acpi_ged, ACPI_POWER_DOWN_STATUS); +} + struct memmap_entry { uint64_t address; uint64_t length; @@ -432,7 +440,7 @@ static DeviceState *create_acpi_ged(DeviceState *pch_pic, LoongArchMachineState sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, VIRT_GED_REG_ADDR); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, - qdev_get_gpio_in(pch_pic, VIRT_SCI_IRQ - PCH_PIC_IRQ_OFFSET)); + qdev_get_gpio_in(pch_pic, VIRT_SCI_IRQ - VIRT_GSI_BASE)); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); return dev; } @@ -452,7 +460,7 @@ static DeviceState *create_platform_bus(DeviceState *pch_pic) sysbus = SYS_BUS_DEVICE(dev); for (i = 0; i < VIRT_PLATFORM_BUS_NUM_IRQS; i++) { - irq = VIRT_PLATFORM_BUS_IRQ - PCH_PIC_IRQ_OFFSET + i; + irq = VIRT_PLATFORM_BUS_IRQ - VIRT_GSI_BASE + i; sysbus_connect_irq(sysbus, i, qdev_get_gpio_in(pch_pic, irq)); } @@ -509,7 +517,7 @@ static void loongarch_devices_init(DeviceState *pch_pic, LoongArchMachineState * serial_mm_init(get_system_memory(), VIRT_UART_BASE, 0, qdev_get_gpio_in(pch_pic, - VIRT_UART_IRQ - PCH_PIC_IRQ_OFFSET), + VIRT_UART_IRQ - VIRT_GSI_BASE), 115200, serial_hd(0), DEVICE_LITTLE_ENDIAN); fdt_add_uart_node(lams); @@ -531,7 +539,7 @@ static void loongarch_devices_init(DeviceState *pch_pic, LoongArchMachineState * create_unimplemented_device("pci-dma-cfg", 0x1001041c, 0x4); sysbus_create_simple("ls7a_rtc", VIRT_RTC_REG_BASE, qdev_get_gpio_in(pch_pic, - VIRT_RTC_IRQ - PCH_PIC_IRQ_OFFSET)); + VIRT_RTC_IRQ - VIRT_GSI_BASE)); fdt_add_rtc_node(lams); pm_mem = g_new(MemoryRegion, 1); @@ -859,6 +867,10 @@ static void loongarch_init(MachineState *machine) VIRT_PLATFORM_BUS_IRQ); lams->machine_done.notify = virt_machine_done; qemu_add_machine_init_done_notifier(&lams->machine_done); + /* connect powerdown request */ + lams->powerdown_notifier.notify = virt_powerdown_req; + qemu_register_powerdown_notifier(&lams->powerdown_notifier); + fdt_add_pcie_node(lams); /* * Since lowmem region starts from 0 and Linux kernel legacy start address diff --git 
a/hw/mem/cxl_type3.c b/hw/mem/cxl_type3.c index dae4fd89ca..217a5e639b 100644 --- a/hw/mem/cxl_type3.c +++ b/hw/mem/cxl_type3.c @@ -401,14 +401,13 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp) MemoryRegion *mr = ®s->component_registers; uint8_t *pci_conf = pci_dev->config; unsigned short msix_num = 1; - int i; + int i, rc; if (!cxl_setup_memory(ct3d, errp)) { return; } pci_config_set_prog_interface(pci_conf, 0x10); - pci_config_set_class(pci_conf, PCI_CLASS_MEMORY_CXL); pcie_endpoint_cap_init(pci_dev, 0x80); if (ct3d->sn != UI64_NULL) { @@ -438,7 +437,10 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp) &ct3d->cxl_dstate.device_registers); /* MSI(-X) Initailization */ - msix_init_exclusive_bar(pci_dev, msix_num, 4, NULL); + rc = msix_init_exclusive_bar(pci_dev, msix_num, 4, NULL); + if (rc) { + goto err_address_space_free; + } for (i = 0; i < msix_num; i++) { msix_vector_use(pci_dev, i); } @@ -450,6 +452,11 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp) cxl_cstate->cdat.free_cdat_table = ct3_free_cdat_table; cxl_cstate->cdat.private = ct3d; cxl_doe_cdat_init(cxl_cstate, errp); + return; + +err_address_space_free: + address_space_destroy(&ct3d->hostmem_as); + return; } static void ct3_exit(PCIDevice *pci_dev) @@ -619,7 +626,7 @@ static void ct3_class_init(ObjectClass *oc, void *data) pc->realize = ct3_realize; pc->exit = ct3_exit; - pc->class_id = PCI_CLASS_STORAGE_EXPRESS; + pc->class_id = PCI_CLASS_MEMORY_CXL; pc->vendor_id = PCI_VENDOR_ID_INTEL; pc->device_id = 0xd93; /* LVF for now */ pc->revision = 1; diff --git a/hw/misc/i2c-echo.c b/hw/misc/i2c-echo.c new file mode 100644 index 0000000000..5705ab5d73 --- /dev/null +++ b/hw/misc/i2c-echo.c @@ -0,0 +1,156 @@ +#include "qemu/osdep.h" +#include "qemu/timer.h" +#include "qemu/main-loop.h" +#include "block/aio.h" +#include "hw/i2c/i2c.h" + +#define TYPE_I2C_ECHO "i2c-echo" +OBJECT_DECLARE_SIMPLE_TYPE(I2CEchoState, I2C_ECHO) + +enum i2c_echo_state { + I2C_ECHO_STATE_IDLE, + I2C_ECHO_STATE_START_SEND, + I2C_ECHO_STATE_ACK, +}; + +typedef struct I2CEchoState { + I2CSlave parent_obj; + + I2CBus *bus; + + enum i2c_echo_state state; + QEMUBH *bh; + + unsigned int pos; + uint8_t data[3]; +} I2CEchoState; + +static void i2c_echo_bh(void *opaque) +{ + I2CEchoState *state = opaque; + + switch (state->state) { + case I2C_ECHO_STATE_IDLE: + return; + + case I2C_ECHO_STATE_START_SEND: + if (i2c_start_send_async(state->bus, state->data[0])) { + goto release_bus; + } + + state->pos++; + state->state = I2C_ECHO_STATE_ACK; + return; + + case I2C_ECHO_STATE_ACK: + if (state->pos > 2) { + break; + } + + if (i2c_send_async(state->bus, state->data[state->pos++])) { + break; + } + + return; + } + + + i2c_end_transfer(state->bus); +release_bus: + i2c_bus_release(state->bus); + + state->state = I2C_ECHO_STATE_IDLE; +} + +static int i2c_echo_event(I2CSlave *s, enum i2c_event event) +{ + I2CEchoState *state = I2C_ECHO(s); + + switch (event) { + case I2C_START_RECV: + state->pos = 0; + + break; + + case I2C_START_SEND: + state->pos = 0; + + break; + + case I2C_FINISH: + state->pos = 0; + state->state = I2C_ECHO_STATE_START_SEND; + i2c_bus_master(state->bus, state->bh); + + break; + + case I2C_NACK: + break; + + default: + return -1; + } + + return 0; +} + +static uint8_t i2c_echo_recv(I2CSlave *s) +{ + I2CEchoState *state = I2C_ECHO(s); + + if (state->pos > 2) { + return 0xff; + } + + return state->data[state->pos++]; +} + +static int i2c_echo_send(I2CSlave *s, uint8_t data) +{ + I2CEchoState *state = I2C_ECHO(s); + + if 
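ct3_realize() now checks the return value of msix_init_exclusive_bar() and tears down the host memory address space it created earlier instead of continuing. The shape of that unwind, sketched standalone with stand-in setup functions (not QEMU's API):

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for two setup steps that can fail. */
static int setup_address_space(char **as) { *as = malloc(16); return *as ? 0 : -1; }
static int setup_msix(void)               { return -1; /* pretend MSI-X init failed */ }

static int realize_sketch(void)
{
    char *as = NULL;

    if (setup_address_space(&as)) {
        return -1;                        /* nothing to unwind yet */
    }
    if (setup_msix()) {
        goto err_address_space_free;      /* unwind only what already succeeded */
    }
    return 0;                             /* on success the resources stay live */

err_address_space_free:
    free(as);
    return -1;
}

int main(void)
{
    printf("realize: %s\n", realize_sketch() ? "failed (cleaned up)" : "ok");
    return 0;
}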
(state->pos > 2) { + return -1; + } + + state->data[state->pos++] = data; + + return 0; +} + +static void i2c_echo_realize(DeviceState *dev, Error **errp) +{ + I2CEchoState *state = I2C_ECHO(dev); + BusState *bus = qdev_get_parent_bus(dev); + + state->bus = I2C_BUS(bus); + state->bh = qemu_bh_new(i2c_echo_bh, state); + + return; +} + +static void i2c_echo_class_init(ObjectClass *oc, void *data) +{ + I2CSlaveClass *sc = I2C_SLAVE_CLASS(oc); + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = i2c_echo_realize; + + sc->event = i2c_echo_event; + sc->recv = i2c_echo_recv; + sc->send = i2c_echo_send; +} + +static const TypeInfo i2c_echo = { + .name = TYPE_I2C_ECHO, + .parent = TYPE_I2C_SLAVE, + .instance_size = sizeof(I2CEchoState), + .class_init = i2c_echo_class_init, +}; + +static void register_types(void) +{ + type_register_static(&i2c_echo); +} + +type_init(register_types); diff --git a/hw/misc/meson.build b/hw/misc/meson.build index fe869b98ca..a40245ad44 100644 --- a/hw/misc/meson.build +++ b/hw/misc/meson.build @@ -128,6 +128,8 @@ softmmu_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('nrf51_rng.c')) softmmu_ss.add(when: 'CONFIG_GRLIB', if_true: files('grlib_ahb_apb_pnp.c')) +softmmu_ss.add(when: 'CONFIG_I2C', if_true: files('i2c-echo.c')) + specific_ss.add(when: 'CONFIG_AVR_POWER', if_true: files('avr_power.c')) specific_ss.add(when: 'CONFIG_MAC_VIA', if_true: files('mac_via.c')) diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c index 3ae909041a..09d5c7a664 100644 --- a/hw/net/virtio-net.c +++ b/hw/net/virtio-net.c @@ -820,6 +820,21 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features, features |= (1ULL << VIRTIO_NET_F_MTU); } + /* + * Since GUEST_ANNOUNCE is emulated the feature bit could be set without + * enabled. This happens in the vDPA case. + * + * Make sure the feature set is not incoherent, as the driver could refuse + * to start. + * + * TODO: QEMU is able to emulate a CVQ just for guest_announce purposes, + * helping guest to notify the new location with vDPA devices that does not + * support it. 
+ */ + if (!virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_CTRL_VQ)) { + virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ANNOUNCE); + } + return features; } diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c index f25cc2c235..49c1210fce 100644 --- a/hw/nvme/ctrl.c +++ b/hw/nvme/ctrl.c @@ -238,6 +238,8 @@ static const bool nvme_feature_support[NVME_FID_MAX] = { [NVME_TIMESTAMP] = true, [NVME_HOST_BEHAVIOR_SUPPORT] = true, [NVME_COMMAND_SET_PROFILE] = true, + [NVME_FDP_MODE] = true, + [NVME_FDP_EVENTS] = true, }; static const uint32_t nvme_feature_cap[NVME_FID_MAX] = { @@ -249,6 +251,8 @@ static const uint32_t nvme_feature_cap[NVME_FID_MAX] = { [NVME_TIMESTAMP] = NVME_FEAT_CAP_CHANGE, [NVME_HOST_BEHAVIOR_SUPPORT] = NVME_FEAT_CAP_CHANGE, [NVME_COMMAND_SET_PROFILE] = NVME_FEAT_CAP_CHANGE, + [NVME_FDP_MODE] = NVME_FEAT_CAP_CHANGE, + [NVME_FDP_EVENTS] = NVME_FEAT_CAP_CHANGE | NVME_FEAT_CAP_NS, }; static const uint32_t nvme_cse_acs[256] = { @@ -266,6 +270,8 @@ static const uint32_t nvme_cse_acs[256] = { [NVME_ADM_CMD_VIRT_MNGMT] = NVME_CMD_EFF_CSUPP, [NVME_ADM_CMD_DBBUF_CONFIG] = NVME_CMD_EFF_CSUPP, [NVME_ADM_CMD_FORMAT_NVM] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, + [NVME_ADM_CMD_DIRECTIVE_RECV] = NVME_CMD_EFF_CSUPP, + [NVME_ADM_CMD_DIRECTIVE_SEND] = NVME_CMD_EFF_CSUPP, }; static const uint32_t nvme_cse_iocs_none[256]; @@ -279,6 +285,8 @@ static const uint32_t nvme_cse_iocs_nvm[256] = { [NVME_CMD_VERIFY] = NVME_CMD_EFF_CSUPP, [NVME_CMD_COPY] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, [NVME_CMD_COMPARE] = NVME_CMD_EFF_CSUPP, + [NVME_CMD_IO_MGMT_RECV] = NVME_CMD_EFF_CSUPP, + [NVME_CMD_IO_MGMT_SEND] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC, }; static const uint32_t nvme_cse_iocs_zoned[256] = { @@ -297,12 +305,66 @@ static const uint32_t nvme_cse_iocs_zoned[256] = { static void nvme_process_sq(void *opaque); static void nvme_ctrl_reset(NvmeCtrl *n, NvmeResetType rst); +static inline uint64_t nvme_get_timestamp(const NvmeCtrl *n); static uint16_t nvme_sqid(NvmeRequest *req) { return le16_to_cpu(req->sq->sqid); } +static inline uint16_t nvme_make_pid(NvmeNamespace *ns, uint16_t rg, + uint16_t ph) +{ + uint16_t rgif = ns->endgrp->fdp.rgif; + + if (!rgif) { + return ph; + } + + return (rg << (16 - rgif)) | ph; +} + +static inline bool nvme_ph_valid(NvmeNamespace *ns, uint16_t ph) +{ + return ph < ns->fdp.nphs; +} + +static inline bool nvme_rg_valid(NvmeEnduranceGroup *endgrp, uint16_t rg) +{ + return rg < endgrp->fdp.nrg; +} + +static inline uint16_t nvme_pid2ph(NvmeNamespace *ns, uint16_t pid) +{ + uint16_t rgif = ns->endgrp->fdp.rgif; + + if (!rgif) { + return pid; + } + + return pid & ((1 << (15 - rgif)) - 1); +} + +static inline uint16_t nvme_pid2rg(NvmeNamespace *ns, uint16_t pid) +{ + uint16_t rgif = ns->endgrp->fdp.rgif; + + if (!rgif) { + return 0; + } + + return pid >> (16 - rgif); +} + +static inline bool nvme_parse_pid(NvmeNamespace *ns, uint16_t pid, + uint16_t *ph, uint16_t *rg) +{ + *rg = nvme_pid2rg(ns, pid); + *ph = nvme_pid2ph(ns, pid); + + return nvme_ph_valid(ns, *ph) && nvme_rg_valid(ns->endgrp, *rg); +} + static void nvme_assign_zone_state(NvmeNamespace *ns, NvmeZone *zone, NvmeZoneState state) { @@ -376,6 +438,69 @@ static uint16_t nvme_aor_check(NvmeNamespace *ns, uint32_t act, uint32_t opn) return nvme_zns_check_resources(ns, act, opn, 0); } +static NvmeFdpEvent *nvme_fdp_alloc_event(NvmeCtrl *n, NvmeFdpEventBuffer *ebuf) +{ + NvmeFdpEvent *ret = NULL; + bool is_full = ebuf->next == ebuf->start && ebuf->nelems; + + ret = &ebuf->events[ebuf->next++]; + if 
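The new PID helpers pack a reclaim group index into the top rgif bits of the 16-bit FDP placement identifier and keep the placement handle in the low bits; rgif == 0 means the whole PID is the handle. A simplified, self-contained sketch of that layout (it omits the validity checks the controller code performs on the handle and group):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t make_pid(uint16_t rg, uint16_t ph, unsigned rgif)
{
    if (!rgif) {
        return ph;                                   /* no group bits reserved */
    }
    return (uint16_t)((rg << (16 - rgif)) | ph);     /* group in the top bits */
}

static uint16_t pid_to_rg(uint16_t pid, unsigned rgif)
{
    return rgif ? (uint16_t)(pid >> (16 - rgif)) : 0;
}

static uint16_t pid_to_ph(uint16_t pid, unsigned rgif)
{
    return rgif ? (uint16_t)(pid & ((1u << (16 - rgif)) - 1)) : pid;
}

int main(void)
{
    uint16_t pid = make_pid(3, 5, 4);                /* 4 bits reserved for the group */
    assert(pid_to_rg(pid, 4) == 3 && pid_to_ph(pid, 4) == 5);
    printf("pid=0x%04x rg=%u ph=%u\n", pid, pid_to_rg(pid, 4), pid_to_ph(pid, 4));
    return 0;
}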
(unlikely(ebuf->next == NVME_FDP_MAX_EVENTS)) { + ebuf->next = 0; + } + if (is_full) { + ebuf->start = ebuf->next; + } else { + ebuf->nelems++; + } + + memset(ret, 0, sizeof(NvmeFdpEvent)); + ret->timestamp = nvme_get_timestamp(n); + + return ret; +} + +static inline int log_event(NvmeRuHandle *ruh, uint8_t event_type) +{ + return (ruh->event_filter >> nvme_fdp_evf_shifts[event_type]) & 0x1; +} + +static bool nvme_update_ruh(NvmeCtrl *n, NvmeNamespace *ns, uint16_t pid) +{ + NvmeEnduranceGroup *endgrp = ns->endgrp; + NvmeRuHandle *ruh; + NvmeReclaimUnit *ru; + NvmeFdpEvent *e = NULL; + uint16_t ph, rg, ruhid; + + if (!nvme_parse_pid(ns, pid, &ph, &rg)) { + return false; + } + + ruhid = ns->fdp.phs[ph]; + + ruh = &endgrp->fdp.ruhs[ruhid]; + ru = &ruh->rus[rg]; + + if (ru->ruamw) { + if (log_event(ruh, FDP_EVT_RU_NOT_FULLY_WRITTEN)) { + e = nvme_fdp_alloc_event(n, &endgrp->fdp.host_events); + e->type = FDP_EVT_RU_NOT_FULLY_WRITTEN; + e->flags = FDPEF_PIV | FDPEF_NSIDV | FDPEF_LV; + e->pid = cpu_to_le16(pid); + e->nsid = cpu_to_le32(ns->params.nsid); + e->rgid = cpu_to_le16(rg); + e->ruhid = cpu_to_le16(ruhid); + } + + /* log (eventual) GC overhead of prematurely swapping the RU */ + nvme_fdp_stat_inc(&endgrp->fdp.mbmw, nvme_l2b(ns, ru->ruamw)); + } + + ru->ruamw = ruh->ruamw; + + return true; +} + static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr) { hwaddr hi, lo; @@ -3320,6 +3445,41 @@ invalid: return status | NVME_DNR; } +static void nvme_do_write_fdp(NvmeCtrl *n, NvmeRequest *req, uint64_t slba, + uint32_t nlb) +{ + NvmeNamespace *ns = req->ns; + NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd; + uint64_t data_size = nvme_l2b(ns, nlb); + uint32_t dw12 = le32_to_cpu(req->cmd.cdw12); + uint8_t dtype = (dw12 >> 20) & 0xf; + uint16_t pid = le16_to_cpu(rw->dspec); + uint16_t ph, rg, ruhid; + NvmeReclaimUnit *ru; + + if (dtype != NVME_DIRECTIVE_DATA_PLACEMENT || + !nvme_parse_pid(ns, pid, &ph, &rg)) { + ph = 0; + rg = 0; + } + + ruhid = ns->fdp.phs[ph]; + ru = &ns->endgrp->fdp.ruhs[ruhid].rus[rg]; + + nvme_fdp_stat_inc(&ns->endgrp->fdp.hbmw, data_size); + nvme_fdp_stat_inc(&ns->endgrp->fdp.mbmw, data_size); + + while (nlb) { + if (nlb < ru->ruamw) { + ru->ruamw -= nlb; + break; + } + + nlb -= ru->ruamw; + nvme_update_ruh(n, ns, pid); + } +} + static uint16_t nvme_do_write(NvmeCtrl *n, NvmeRequest *req, bool append, bool wrz) { @@ -3429,6 +3589,8 @@ static uint16_t nvme_do_write(NvmeCtrl *n, NvmeRequest *req, bool append, if (!(zone->d.za & NVME_ZA_ZRWA_VALID)) { zone->w_ptr += nlb; } + } else if (ns->endgrp && ns->endgrp->fdp.enabled) { + nvme_do_write_fdp(n, req, slba, nlb); } data_offset = nvme_l2b(ns, slba); @@ -4086,6 +4248,126 @@ static uint16_t nvme_zone_mgmt_recv(NvmeCtrl *n, NvmeRequest *req) return status; } +static uint16_t nvme_io_mgmt_recv_ruhs(NvmeCtrl *n, NvmeRequest *req, + size_t len) +{ + NvmeNamespace *ns = req->ns; + NvmeEnduranceGroup *endgrp; + NvmeRuhStatus *hdr; + NvmeRuhStatusDescr *ruhsd; + unsigned int nruhsd; + uint16_t rg, ph, *ruhid; + size_t trans_len; + g_autofree uint8_t *buf = NULL; + + if (!n->subsys) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + if (ns->params.nsid == 0 || ns->params.nsid == 0xffffffff) { + return NVME_INVALID_NSID | NVME_DNR; + } + + if (!n->subsys->endgrp.fdp.enabled) { + return NVME_FDP_DISABLED | NVME_DNR; + } + + endgrp = ns->endgrp; + + nruhsd = ns->fdp.nphs * endgrp->fdp.nrg; + trans_len = sizeof(NvmeRuhStatus) + nruhsd * sizeof(NvmeRuhStatusDescr); + buf = g_malloc(trans_len); + + trans_len = MIN(trans_len, len); + + hdr = 
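nvme_fdp_alloc_event() treats the per-endurance-group event buffer as a fixed-size ring that overwrites the oldest entry once full. A stripped-down sketch of the same bookkeeping, using plain ints instead of NvmeFdpEvent entries:

#include <stdio.h>

#define RING_MAX 4

/* Fixed-size event ring that overwrites the oldest entry when full. */
struct ring {
    int events[RING_MAX];
    unsigned nelems, start, next;
};

static int *ring_alloc(struct ring *r)
{
    int full = (r->next == r->start) && r->nelems;
    int *slot = &r->events[r->next++];

    if (r->next == RING_MAX) {
        r->next = 0;                /* wrap the producer index */
    }
    if (full) {
        r->start = r->next;         /* drop the oldest event */
    } else {
        r->nelems++;
    }
    return slot;
}

int main(void)
{
    struct ring r = { { 0 }, 0, 0, 0 };
    for (int i = 0; i < 6; i++) {
        *ring_alloc(&r) = i;        /* events 0 and 1 get overwritten */
    }
    printf("nelems=%u start=%u next=%u\n", r.nelems, r.start, r.next);
    return 0;
}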
(NvmeRuhStatus *)buf; + ruhsd = (NvmeRuhStatusDescr *)(buf + sizeof(NvmeRuhStatus)); + + hdr->nruhsd = cpu_to_le16(nruhsd); + + ruhid = ns->fdp.phs; + + for (ph = 0; ph < ns->fdp.nphs; ph++, ruhid++) { + NvmeRuHandle *ruh = &endgrp->fdp.ruhs[*ruhid]; + + for (rg = 0; rg < endgrp->fdp.nrg; rg++, ruhsd++) { + uint16_t pid = nvme_make_pid(ns, rg, ph); + + ruhsd->pid = cpu_to_le16(pid); + ruhsd->ruhid = *ruhid; + ruhsd->earutr = 0; + ruhsd->ruamw = cpu_to_le64(ruh->rus[rg].ruamw); + } + } + + return nvme_c2h(n, buf, trans_len, req); +} + +static uint16_t nvme_io_mgmt_recv(NvmeCtrl *n, NvmeRequest *req) +{ + NvmeCmd *cmd = &req->cmd; + uint32_t cdw10 = le32_to_cpu(cmd->cdw10); + uint32_t numd = le32_to_cpu(cmd->cdw11); + uint8_t mo = (cdw10 & 0xff); + size_t len = (numd + 1) << 2; + + switch (mo) { + case NVME_IOMR_MO_NOP: + return 0; + case NVME_IOMR_MO_RUH_STATUS: + return nvme_io_mgmt_recv_ruhs(n, req, len); + default: + return NVME_INVALID_FIELD | NVME_DNR; + }; +} + +static uint16_t nvme_io_mgmt_send_ruh_update(NvmeCtrl *n, NvmeRequest *req) +{ + NvmeCmd *cmd = &req->cmd; + NvmeNamespace *ns = req->ns; + uint32_t cdw10 = le32_to_cpu(cmd->cdw10); + uint16_t ret = NVME_SUCCESS; + uint32_t npid = (cdw10 >> 1) + 1; + unsigned int i = 0; + g_autofree uint16_t *pids = NULL; + uint32_t maxnpid = n->subsys->endgrp.fdp.nrg * n->subsys->endgrp.fdp.nruh; + + if (unlikely(npid >= MIN(NVME_FDP_MAXPIDS, maxnpid))) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + pids = g_new(uint16_t, npid); + + ret = nvme_h2c(n, pids, npid * sizeof(uint16_t), req); + if (ret) { + return ret; + } + + for (; i < npid; i++) { + if (!nvme_update_ruh(n, ns, pids[i])) { + return NVME_INVALID_FIELD | NVME_DNR; + } + } + + return ret; +} + +static uint16_t nvme_io_mgmt_send(NvmeCtrl *n, NvmeRequest *req) +{ + NvmeCmd *cmd = &req->cmd; + uint32_t cdw10 = le32_to_cpu(cmd->cdw10); + uint8_t mo = (cdw10 & 0xff); + + switch (mo) { + case NVME_IOMS_MO_NOP: + return 0; + case NVME_IOMS_MO_RUH_UPDATE: + return nvme_io_mgmt_send_ruh_update(n, req); + default: + return NVME_INVALID_FIELD | NVME_DNR; + }; +} + static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req) { NvmeNamespace *ns; @@ -4162,6 +4444,10 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req) return nvme_zone_mgmt_send(n, req); case NVME_CMD_ZONE_MGMT_RECV: return nvme_zone_mgmt_recv(n, req); + case NVME_CMD_IO_MGMT_RECV: + return nvme_io_mgmt_recv(n, req); + case NVME_CMD_IO_MGMT_SEND: + return nvme_io_mgmt_send(n, req); default: assert(false); } @@ -4386,8 +4672,8 @@ static void nvme_set_blk_stats(NvmeNamespace *ns, struct nvme_stats *stats) { BlockAcctStats *s = blk_get_stats(ns->blkconf.blk); - stats->units_read += s->nr_bytes[BLOCK_ACCT_READ] >> BDRV_SECTOR_BITS; - stats->units_written += s->nr_bytes[BLOCK_ACCT_WRITE] >> BDRV_SECTOR_BITS; + stats->units_read += s->nr_bytes[BLOCK_ACCT_READ]; + stats->units_written += s->nr_bytes[BLOCK_ACCT_WRITE]; stats->read_commands += s->nr_ops[BLOCK_ACCT_READ]; stats->write_commands += s->nr_ops[BLOCK_ACCT_WRITE]; } @@ -4401,6 +4687,7 @@ static uint16_t nvme_smart_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len, uint32_t trans_len; NvmeNamespace *ns; time_t current_ms; + uint64_t u_read, u_written; if (off >= sizeof(smart)) { return NVME_INVALID_FIELD | NVME_DNR; @@ -4427,10 +4714,11 @@ static uint16_t nvme_smart_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len, trans_len = MIN(sizeof(smart) - off, buf_len); smart.critical_warning = n->smart_critical_warning; - smart.data_units_read[0] = 
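nvme_set_blk_stats() now accumulates raw byte counts, and the SMART log converts them to "data units" (thousands of 512-byte units, rounded up) only when the log page is built, as the following hunk shows. The arithmetic in isolation:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define SECTOR_BITS 9   /* 512-byte sectors, as in BDRV_SECTOR_BITS */

/* NVMe SMART data units are thousands of 512-byte units, rounded up.
 * The stats are kept in bytes and converted only when the log is read. */
static uint64_t bytes_to_data_units(uint64_t bytes)
{
    uint64_t sectors = bytes >> SECTOR_BITS;
    return (sectors + 999) / 1000;          /* DIV_ROUND_UP(sectors, 1000) */
}

int main(void)
{
    /* 1 GiB read -> 2097152 sectors -> 2098 data units */
    printf("%" PRIu64 "\n", bytes_to_data_units(1ULL << 30));
    return 0;
}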
cpu_to_le64(DIV_ROUND_UP(stats.units_read, - 1000)); - smart.data_units_written[0] = cpu_to_le64(DIV_ROUND_UP(stats.units_written, - 1000)); + u_read = DIV_ROUND_UP(stats.units_read >> BDRV_SECTOR_BITS, 1000); + u_written = DIV_ROUND_UP(stats.units_written >> BDRV_SECTOR_BITS, 1000); + + smart.data_units_read[0] = cpu_to_le64(u_read); + smart.data_units_written[0] = cpu_to_le64(u_written); smart.host_read_commands[0] = cpu_to_le64(stats.read_commands); smart.host_write_commands[0] = cpu_to_le64(stats.write_commands); @@ -4452,6 +4740,48 @@ static uint16_t nvme_smart_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len, return nvme_c2h(n, (uint8_t *) &smart + off, trans_len, req); } +static uint16_t nvme_endgrp_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len, + uint64_t off, NvmeRequest *req) +{ + uint32_t dw11 = le32_to_cpu(req->cmd.cdw11); + uint16_t endgrpid = (dw11 >> 16) & 0xffff; + struct nvme_stats stats = {}; + NvmeEndGrpLog info = {}; + int i; + + if (!n->subsys || endgrpid != 0x1) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + if (off >= sizeof(info)) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + for (i = 1; i <= NVME_MAX_NAMESPACES; i++) { + NvmeNamespace *ns = nvme_subsys_ns(n->subsys, i); + if (!ns) { + continue; + } + + nvme_set_blk_stats(ns, &stats); + } + + info.data_units_read[0] = + cpu_to_le64(DIV_ROUND_UP(stats.units_read / 1000000000, 1000000000)); + info.data_units_written[0] = + cpu_to_le64(DIV_ROUND_UP(stats.units_written / 1000000000, 1000000000)); + info.media_units_written[0] = + cpu_to_le64(DIV_ROUND_UP(stats.units_written / 1000000000, 1000000000)); + + info.host_read_commands[0] = cpu_to_le64(stats.read_commands); + info.host_write_commands[0] = cpu_to_le64(stats.write_commands); + + buf_len = MIN(sizeof(info) - off, buf_len); + + return nvme_c2h(n, (uint8_t *)&info + off, buf_len, req); +} + + static uint16_t nvme_fw_log_info(NvmeCtrl *n, uint32_t buf_len, uint64_t off, NvmeRequest *req) { @@ -4577,6 +4907,207 @@ static uint16_t nvme_cmd_effects(NvmeCtrl *n, uint8_t csi, uint32_t buf_len, return nvme_c2h(n, ((uint8_t *)&log) + off, trans_len, req); } +static size_t sizeof_fdp_conf_descr(size_t nruh, size_t vss) +{ + size_t entry_siz = sizeof(NvmeFdpDescrHdr) + nruh * sizeof(NvmeRuhDescr) + + vss; + return ROUND_UP(entry_siz, 8); +} + +static uint16_t nvme_fdp_confs(NvmeCtrl *n, uint32_t endgrpid, uint32_t buf_len, + uint64_t off, NvmeRequest *req) +{ + uint32_t log_size, trans_len; + g_autofree uint8_t *buf = NULL; + NvmeFdpDescrHdr *hdr; + NvmeRuhDescr *ruhd; + NvmeEnduranceGroup *endgrp; + NvmeFdpConfsHdr *log; + size_t nruh, fdp_descr_size; + int i; + + if (endgrpid != 1 || !n->subsys) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + endgrp = &n->subsys->endgrp; + + if (endgrp->fdp.enabled) { + nruh = endgrp->fdp.nruh; + } else { + nruh = 1; + } + + fdp_descr_size = sizeof_fdp_conf_descr(nruh, FDPVSS); + log_size = sizeof(NvmeFdpConfsHdr) + fdp_descr_size; + + if (off >= log_size) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + trans_len = MIN(log_size - off, buf_len); + + buf = g_malloc0(log_size); + log = (NvmeFdpConfsHdr *)buf; + hdr = (NvmeFdpDescrHdr *)(log + 1); + ruhd = (NvmeRuhDescr *)(buf + sizeof(*log) + sizeof(*hdr)); + + log->num_confs = cpu_to_le16(0); + log->size = cpu_to_le32(log_size); + + hdr->descr_size = cpu_to_le16(fdp_descr_size); + if (endgrp->fdp.enabled) { + hdr->fdpa = FIELD_DP8(hdr->fdpa, FDPA, VALID, 1); + hdr->fdpa = FIELD_DP8(hdr->fdpa, FDPA, RGIF, endgrp->fdp.rgif); + hdr->nrg = cpu_to_le16(endgrp->fdp.nrg); + hdr->nruh = 
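Each FDP configuration descriptor in the log page is a fixed header, one descriptor per reclaim unit handle, plus vendor-specific bytes, padded to an 8-byte boundary, which is what sizeof_fdp_conf_descr() computes. A sketch with illustrative sizes (the real structure sizes come from the NVMe headers, not these numbers):

#include <stddef.h>
#include <stdio.h>

#define ROUND_UP(x, a) (((x) + (a) - 1) / (a) * (a))

/* Descriptor size = header + one RUH descriptor per handle + vendor bytes,
 * rounded up to 8 bytes so descriptors stay aligned inside the log page. */
static size_t fdp_conf_descr_size(size_t hdr, size_t ruh_descr,
                                  size_t nruh, size_t vss)
{
    return ROUND_UP(hdr + nruh * ruh_descr + vss, 8);
}

int main(void)
{
    printf("%zu\n", fdp_conf_descr_size(20, 4, 3, 0));  /* 32, already aligned */
    return 0;
}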
cpu_to_le16(endgrp->fdp.nruh); + hdr->maxpids = cpu_to_le16(NVME_FDP_MAXPIDS - 1); + hdr->nnss = cpu_to_le32(NVME_MAX_NAMESPACES); + hdr->runs = cpu_to_le64(endgrp->fdp.runs); + + for (i = 0; i < nruh; i++) { + ruhd->ruht = NVME_RUHT_INITIALLY_ISOLATED; + ruhd++; + } + } else { + /* 1 bit for RUH in PIF -> 2 RUHs max. */ + hdr->nrg = cpu_to_le16(1); + hdr->nruh = cpu_to_le16(1); + hdr->maxpids = cpu_to_le16(NVME_FDP_MAXPIDS - 1); + hdr->nnss = cpu_to_le32(1); + hdr->runs = cpu_to_le64(96 * MiB); + + ruhd->ruht = NVME_RUHT_INITIALLY_ISOLATED; + } + + return nvme_c2h(n, (uint8_t *)buf + off, trans_len, req); +} + +static uint16_t nvme_fdp_ruh_usage(NvmeCtrl *n, uint32_t endgrpid, + uint32_t dw10, uint32_t dw12, + uint32_t buf_len, uint64_t off, + NvmeRequest *req) +{ + NvmeRuHandle *ruh; + NvmeRuhuLog *hdr; + NvmeRuhuDescr *ruhud; + NvmeEnduranceGroup *endgrp; + g_autofree uint8_t *buf = NULL; + uint32_t log_size, trans_len; + uint16_t i; + + if (endgrpid != 1 || !n->subsys) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + endgrp = &n->subsys->endgrp; + + if (!endgrp->fdp.enabled) { + return NVME_FDP_DISABLED | NVME_DNR; + } + + log_size = sizeof(NvmeRuhuLog) + endgrp->fdp.nruh * sizeof(NvmeRuhuDescr); + + if (off >= log_size) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + trans_len = MIN(log_size - off, buf_len); + + buf = g_malloc0(log_size); + hdr = (NvmeRuhuLog *)buf; + ruhud = (NvmeRuhuDescr *)(hdr + 1); + + ruh = endgrp->fdp.ruhs; + hdr->nruh = cpu_to_le16(endgrp->fdp.nruh); + + for (i = 0; i < endgrp->fdp.nruh; i++, ruhud++, ruh++) { + ruhud->ruha = ruh->ruha; + } + + return nvme_c2h(n, (uint8_t *)buf + off, trans_len, req); +} + +static uint16_t nvme_fdp_stats(NvmeCtrl *n, uint32_t endgrpid, uint32_t buf_len, + uint64_t off, NvmeRequest *req) +{ + NvmeEnduranceGroup *endgrp; + NvmeFdpStatsLog log = {}; + uint32_t trans_len; + + if (off >= sizeof(NvmeFdpStatsLog)) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + if (endgrpid != 1 || !n->subsys) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + if (!n->subsys->endgrp.fdp.enabled) { + return NVME_FDP_DISABLED | NVME_DNR; + } + + endgrp = &n->subsys->endgrp; + + trans_len = MIN(sizeof(log) - off, buf_len); + + /* spec value is 128 bit, we only use 64 bit */ + log.hbmw[0] = cpu_to_le64(endgrp->fdp.hbmw); + log.mbmw[0] = cpu_to_le64(endgrp->fdp.mbmw); + log.mbe[0] = cpu_to_le64(endgrp->fdp.mbe); + + return nvme_c2h(n, (uint8_t *)&log + off, trans_len, req); +} + +static uint16_t nvme_fdp_events(NvmeCtrl *n, uint32_t endgrpid, + uint32_t buf_len, uint64_t off, + NvmeRequest *req) +{ + NvmeEnduranceGroup *endgrp; + NvmeCmd *cmd = &req->cmd; + bool host_events = (cmd->cdw10 >> 8) & 0x1; + uint32_t log_size, trans_len; + NvmeFdpEventBuffer *ebuf; + g_autofree NvmeFdpEventsLog *elog = NULL; + NvmeFdpEvent *event; + + if (endgrpid != 1 || !n->subsys) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + endgrp = &n->subsys->endgrp; + + if (!endgrp->fdp.enabled) { + return NVME_FDP_DISABLED | NVME_DNR; + } + + if (host_events) { + ebuf = &endgrp->fdp.host_events; + } else { + ebuf = &endgrp->fdp.ctrl_events; + } + + log_size = sizeof(NvmeFdpEventsLog) + ebuf->nelems * sizeof(NvmeFdpEvent); + trans_len = MIN(log_size - off, buf_len); + elog = g_malloc0(log_size); + elog->num_events = cpu_to_le32(ebuf->nelems); + event = (NvmeFdpEvent *)(elog + 1); + + if (ebuf->nelems && ebuf->start == ebuf->next) { + unsigned int nelems = (NVME_FDP_MAX_EVENTS - ebuf->start); + /* wrap over, copy [start;NVME_FDP_MAX_EVENTS[ and [0; next[ */ + memcpy(event, 
&ebuf->events[ebuf->start], + sizeof(NvmeFdpEvent) * nelems); + memcpy(event + nelems, ebuf->events, + sizeof(NvmeFdpEvent) * ebuf->next); + } else if (ebuf->start < ebuf->next) { + memcpy(event, &ebuf->events[ebuf->start], + sizeof(NvmeFdpEvent) * (ebuf->next - ebuf->start)); + } + + return nvme_c2h(n, (uint8_t *)elog + off, trans_len, req); +} + static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req) { NvmeCmd *cmd = &req->cmd; @@ -4589,13 +5120,14 @@ static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req) uint8_t lsp = (dw10 >> 8) & 0xf; uint8_t rae = (dw10 >> 15) & 0x1; uint8_t csi = le32_to_cpu(cmd->cdw14) >> 24; - uint32_t numdl, numdu; + uint32_t numdl, numdu, lspi; uint64_t off, lpol, lpou; size_t len; uint16_t status; numdl = (dw10 >> 16); numdu = (dw11 & 0xffff); + lspi = (dw11 >> 16); lpol = dw12; lpou = dw13; @@ -4624,6 +5156,16 @@ static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req) return nvme_changed_nslist(n, rae, len, off, req); case NVME_LOG_CMD_EFFECTS: return nvme_cmd_effects(n, csi, len, off, req); + case NVME_LOG_ENDGRP: + return nvme_endgrp_info(n, rae, len, off, req); + case NVME_LOG_FDP_CONFS: + return nvme_fdp_confs(n, lspi, len, off, req); + case NVME_LOG_FDP_RUH_USAGE: + return nvme_fdp_ruh_usage(n, lspi, dw10, dw12, len, off, req); + case NVME_LOG_FDP_STATS: + return nvme_fdp_stats(n, lspi, len, off, req); + case NVME_LOG_FDP_EVENTS: + return nvme_fdp_events(n, lspi, len, off, req); default: trace_pci_nvme_err_invalid_log_page(nvme_cid(req), lid); return NVME_INVALID_FIELD | NVME_DNR; @@ -5210,6 +5752,84 @@ static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeRequest *req) return nvme_c2h(n, (uint8_t *)×tamp, sizeof(timestamp), req); } +static int nvme_get_feature_fdp(NvmeCtrl *n, uint32_t endgrpid, + uint32_t *result) +{ + *result = 0; + + if (!n->subsys || !n->subsys->endgrp.fdp.enabled) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + *result = FIELD_DP16(0, FEAT_FDP, FDPE, 1); + *result = FIELD_DP16(*result, FEAT_FDP, CONF_NDX, 0); + + return NVME_SUCCESS; +} + +static uint16_t nvme_get_feature_fdp_events(NvmeCtrl *n, NvmeNamespace *ns, + NvmeRequest *req, uint32_t *result) +{ + NvmeCmd *cmd = &req->cmd; + uint32_t cdw11 = le32_to_cpu(cmd->cdw11); + uint16_t ph = cdw11 & 0xffff; + uint8_t noet = (cdw11 >> 16) & 0xff; + uint16_t ruhid, ret; + uint32_t nentries = 0; + uint8_t s_events_ndx = 0; + size_t s_events_siz = sizeof(NvmeFdpEventDescr) * noet; + g_autofree NvmeFdpEventDescr *s_events = g_malloc0(s_events_siz); + NvmeRuHandle *ruh; + NvmeFdpEventDescr *s_event; + + if (!n->subsys || !n->subsys->endgrp.fdp.enabled) { + return NVME_FDP_DISABLED | NVME_DNR; + } + + if (!nvme_ph_valid(ns, ph)) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + ruhid = ns->fdp.phs[ph]; + ruh = &n->subsys->endgrp.fdp.ruhs[ruhid]; + + assert(ruh); + + if (unlikely(noet == 0)) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + for (uint8_t event_type = 0; event_type < FDP_EVT_MAX; event_type++) { + uint8_t shift = nvme_fdp_evf_shifts[event_type]; + if (!shift && event_type) { + /* + * only first entry (event_type == 0) has a shift value of 0 + * other entries are simply unpopulated. 
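The FDP events log page copies the ring buffer into a flat buffer before returning it to the host: two memcpy() calls when the ring has wrapped, one when it is contiguous. The same linearisation, sketched standalone:

#include <stdio.h>
#include <string.h>

#define RING_MAX 4

/* Copy a possibly-wrapped ring [start, start+nelems) into a flat buffer. */
static void ring_linearize(const int *ring, unsigned start, unsigned next,
                           unsigned nelems, int *out)
{
    if (nelems && start == next) {          /* full ring, wraps around */
        unsigned tail = RING_MAX - start;
        memcpy(out, &ring[start], tail * sizeof(*ring));
        memcpy(out + tail, ring, next * sizeof(*ring));
    } else if (start < next) {              /* contiguous */
        memcpy(out, &ring[start], (next - start) * sizeof(*ring));
    }
}

int main(void)
{
    int ring[RING_MAX] = { 4, 5, 2, 3 };    /* oldest entry is at index 2 */
    int out[RING_MAX];

    ring_linearize(ring, 2, 2, RING_MAX, out);
    for (int i = 0; i < RING_MAX; i++) {
        printf("%d ", out[i]);              /* 2 3 4 5 */
    }
    printf("\n");
    return 0;
}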
+ */ + continue; + } + + nentries++; + + s_event = &s_events[s_events_ndx]; + s_event->evt = event_type; + s_event->evta = (ruh->event_filter >> shift) & 0x1; + + /* break if all `noet` entries are filled */ + if ((++s_events_ndx) == noet) { + break; + } + } + + ret = nvme_c2h(n, s_events, s_events_siz, req); + if (ret) { + return ret; + } + + *result = nentries; + return NVME_SUCCESS; +} + static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeRequest *req) { NvmeCmd *cmd = &req->cmd; @@ -5222,6 +5842,7 @@ static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeRequest *req) uint16_t iv; NvmeNamespace *ns; int i; + uint16_t endgrpid = 0, ret = NVME_SUCCESS; static const uint32_t nvme_feature_default[NVME_FID_MAX] = { [NVME_ARBITRATION] = NVME_ARB_AB_NOLIMIT, @@ -5319,6 +5940,33 @@ static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeRequest *req) case NVME_HOST_BEHAVIOR_SUPPORT: return nvme_c2h(n, (uint8_t *)&n->features.hbs, sizeof(n->features.hbs), req); + case NVME_FDP_MODE: + endgrpid = dw11 & 0xff; + + if (endgrpid != 0x1) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + ret = nvme_get_feature_fdp(n, endgrpid, &result); + if (ret) { + return ret; + } + goto out; + case NVME_FDP_EVENTS: + if (!nvme_nsid_valid(n, nsid)) { + return NVME_INVALID_NSID | NVME_DNR; + } + + ns = nvme_ns(n, nsid); + if (unlikely(!ns)) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + ret = nvme_get_feature_fdp_events(n, ns, req, &result); + if (ret) { + return ret; + } + goto out; default: break; } @@ -5352,6 +6000,20 @@ defaults: result |= NVME_INTVC_NOCOALESCING; } break; + case NVME_FDP_MODE: + endgrpid = dw11 & 0xff; + + if (endgrpid != 0x1) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + ret = nvme_get_feature_fdp(n, endgrpid, &result); + if (ret) { + return ret; + } + goto out; + + break; default: result = nvme_feature_default[fid]; break; @@ -5359,7 +6021,7 @@ defaults: out: req->cqe.result = cpu_to_le32(result); - return NVME_SUCCESS; + return ret; } static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeRequest *req) @@ -5377,6 +6039,51 @@ static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeRequest *req) return NVME_SUCCESS; } +static uint16_t nvme_set_feature_fdp_events(NvmeCtrl *n, NvmeNamespace *ns, + NvmeRequest *req) +{ + NvmeCmd *cmd = &req->cmd; + uint32_t cdw11 = le32_to_cpu(cmd->cdw11); + uint16_t ph = cdw11 & 0xffff; + uint8_t noet = (cdw11 >> 16) & 0xff; + uint16_t ret, ruhid; + uint8_t enable = le32_to_cpu(cmd->cdw12) & 0x1; + uint8_t event_mask = 0; + unsigned int i; + g_autofree uint8_t *events = g_malloc0(noet); + NvmeRuHandle *ruh = NULL; + + assert(ns); + + if (!n->subsys || !n->subsys->endgrp.fdp.enabled) { + return NVME_FDP_DISABLED | NVME_DNR; + } + + if (!nvme_ph_valid(ns, ph)) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + ruhid = ns->fdp.phs[ph]; + ruh = &n->subsys->endgrp.fdp.ruhs[ruhid]; + + ret = nvme_h2c(n, events, noet, req); + if (ret) { + return ret; + } + + for (i = 0; i < noet; i++) { + event_mask |= (1 << nvme_fdp_evf_shifts[events[i]]); + } + + if (enable) { + ruh->event_filter |= event_mask; + } else { + ruh->event_filter = ruh->event_filter & ~event_mask; + } + + return NVME_SUCCESS; +} + static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req) { NvmeNamespace *ns = NULL; @@ -5536,6 +6243,11 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req) return NVME_CMD_SET_CMB_REJECTED | NVME_DNR; } break; + case NVME_FDP_MODE: + /* spec: abort with cmd seq err if there's one or more NS' in endgrp */ + return NVME_CMD_SEQ_ERROR | NVME_DNR; + case 
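nvme_set_feature_fdp_events() turns the host-supplied list of event types into a bitmask via the shift table and then ORs it into, or clears it from, the reclaim unit handle's event filter. A sketch of that mask handling:

#include <stdint.h>
#include <stdio.h>

/* Toggle a set of event types in a per-handle filter word.  Each event type
 * maps to one bit position (the shift table in the real code); enabling ORs
 * the mask in, disabling clears it. */
static uint64_t update_event_filter(uint64_t filter, const uint8_t *shifts,
                                    unsigned n, int enable)
{
    uint64_t mask = 0;

    for (unsigned i = 0; i < n; i++) {
        mask |= 1ULL << shifts[i];
    }
    return enable ? (filter | mask) : (filter & ~mask);
}

int main(void)
{
    const uint8_t ev[] = { 0, 2 };               /* two example bit positions */
    uint64_t f = update_event_filter(0, ev, 2, 1);
    printf("0x%llx\n", (unsigned long long)f);   /* 0x5 */
    f = update_event_filter(f, ev, 1, 0);        /* disable bit 0 again */
    printf("0x%llx\n", (unsigned long long)f);   /* 0x4 */
    return 0;
}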
NVME_FDP_EVENTS: + return nvme_set_feature_fdp_events(n, ns, req); default: return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR; } @@ -6104,6 +6816,61 @@ static uint16_t nvme_dbbuf_config(NvmeCtrl *n, const NvmeRequest *req) return NVME_SUCCESS; } +static uint16_t nvme_directive_send(NvmeCtrl *n, NvmeRequest *req) +{ + return NVME_INVALID_FIELD | NVME_DNR; +} + +static uint16_t nvme_directive_receive(NvmeCtrl *n, NvmeRequest *req) +{ + NvmeNamespace *ns; + uint32_t dw10 = le32_to_cpu(req->cmd.cdw10); + uint32_t dw11 = le32_to_cpu(req->cmd.cdw11); + uint32_t nsid = le32_to_cpu(req->cmd.nsid); + uint8_t doper, dtype; + uint32_t numd, trans_len; + NvmeDirectiveIdentify id = { + .supported = 1 << NVME_DIRECTIVE_IDENTIFY, + .enabled = 1 << NVME_DIRECTIVE_IDENTIFY, + }; + + numd = dw10 + 1; + doper = dw11 & 0xff; + dtype = (dw11 >> 8) & 0xff; + + trans_len = MIN(sizeof(NvmeDirectiveIdentify), numd << 2); + + if (nsid == NVME_NSID_BROADCAST || dtype != NVME_DIRECTIVE_IDENTIFY || + doper != NVME_DIRECTIVE_RETURN_PARAMS) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + ns = nvme_ns(n, nsid); + if (!ns) { + return NVME_INVALID_FIELD | NVME_DNR; + } + + switch (dtype) { + case NVME_DIRECTIVE_IDENTIFY: + switch (doper) { + case NVME_DIRECTIVE_RETURN_PARAMS: + if (ns->endgrp->fdp.enabled) { + id.supported |= 1 << NVME_DIRECTIVE_DATA_PLACEMENT; + id.enabled |= 1 << NVME_DIRECTIVE_DATA_PLACEMENT; + id.persistent |= 1 << NVME_DIRECTIVE_DATA_PLACEMENT; + } + + return nvme_c2h(n, (uint8_t *)&id, trans_len, req); + + default: + return NVME_INVALID_FIELD | NVME_DNR; + } + + default: + return NVME_INVALID_FIELD; + } +} + static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req) { trace_pci_nvme_admin_cmd(nvme_cid(req), nvme_sqid(req), req->cmd.opcode, @@ -6152,6 +6919,10 @@ static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req) return nvme_dbbuf_config(n, req); case NVME_ADM_CMD_FORMAT_NVM: return nvme_format(n, req); + case NVME_ADM_CMD_DIRECTIVE_SEND: + return nvme_directive_send(n, req); + case NVME_ADM_CMD_DIRECTIVE_RECV: + return nvme_directive_receive(n, req); default: assert(false); } @@ -7380,6 +8151,7 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev) uint8_t *pci_conf = pci_dev->config; uint64_t cap = ldq_le_p(&n->bar.cap); NvmeSecCtrlEntry *sctrl = nvme_sctrl(n); + uint32_t ctratt; id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID)); id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID)); @@ -7390,7 +8162,7 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev) id->cntlid = cpu_to_le16(n->cntlid); id->oaes = cpu_to_le32(NVME_OAES_NS_ATTR); - id->ctratt |= cpu_to_le32(NVME_CTRATT_ELBAS); + ctratt = NVME_CTRATT_ELBAS; id->rab = 6; @@ -7407,7 +8179,8 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev) id->mdts = n->params.mdts; id->ver = cpu_to_le32(NVME_SPEC_VER); id->oacs = - cpu_to_le16(NVME_OACS_NS_MGMT | NVME_OACS_FORMAT | NVME_OACS_DBBUF); + cpu_to_le16(NVME_OACS_NS_MGMT | NVME_OACS_FORMAT | NVME_OACS_DBBUF | + NVME_OACS_DIRECTIVES); id->cntrltype = 0x1; /* @@ -7457,8 +8230,17 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev) if (n->subsys) { id->cmic |= NVME_CMIC_MULTI_CTRL; + ctratt |= NVME_CTRATT_ENDGRPS; + + id->endgidmax = cpu_to_le16(0x1); + + if (n->subsys->endgrp.fdp.enabled) { + ctratt |= NVME_CTRATT_FDPS; + } } + id->ctratt = cpu_to_le32(ctratt); + NVME_CAP_SET_MQES(cap, 0x7ff); NVME_CAP_SET_CQR(cap, 1); NVME_CAP_SET_TO(cap, 0xf); diff --git a/hw/nvme/ns.c b/hw/nvme/ns.c index 62a1f97be0..cfac960dcf 100644 --- 
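nvme_directive_receive() only implements the Identify directive's "return parameters" operation: Identify itself is always reported as supported and enabled, and the Data Placement directive is added when the endurance group has FDP enabled. A sketch of building such a response; the directive type numbers below are assumed for illustration only:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical directive type numbers, for illustration only. */
enum { DIR_IDENTIFY = 0, DIR_DATA_PLACEMENT = 2 };

struct directive_id {
    uint8_t supported;   /* one bit per directive type */
    uint8_t enabled;
    uint8_t persistent;
};

static struct directive_id directive_return_params(int fdp_enabled)
{
    struct directive_id id = {
        .supported = 1u << DIR_IDENTIFY,
        .enabled   = 1u << DIR_IDENTIFY,
    };

    if (fdp_enabled) {
        id.supported  |= 1u << DIR_DATA_PLACEMENT;
        id.enabled    |= 1u << DIR_DATA_PLACEMENT;
        id.persistent |= 1u << DIR_DATA_PLACEMENT;
    }
    return id;
}

int main(void)
{
    struct directive_id id = directive_return_params(1);
    printf("supported=0x%x enabled=0x%x\n", id.supported, id.enabled);
    return 0;
}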
a/hw/nvme/ns.c +++ b/hw/nvme/ns.c @@ -14,8 +14,10 @@ #include "qemu/osdep.h" #include "qemu/units.h" +#include "qemu/cutils.h" #include "qemu/error-report.h" #include "qapi/error.h" +#include "qemu/bitops.h" #include "sysemu/sysemu.h" #include "sysemu/block-backend.h" @@ -377,6 +379,130 @@ static void nvme_zoned_ns_shutdown(NvmeNamespace *ns) assert(ns->nr_open_zones == 0); } +static NvmeRuHandle *nvme_find_ruh_by_attr(NvmeEnduranceGroup *endgrp, + uint8_t ruha, uint16_t *ruhid) +{ + for (uint16_t i = 0; i < endgrp->fdp.nruh; i++) { + NvmeRuHandle *ruh = &endgrp->fdp.ruhs[i]; + + if (ruh->ruha == ruha) { + *ruhid = i; + return ruh; + } + } + + return NULL; +} + +static bool nvme_ns_init_fdp(NvmeNamespace *ns, Error **errp) +{ + NvmeEnduranceGroup *endgrp = ns->endgrp; + NvmeRuHandle *ruh; + uint8_t lbafi = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas); + unsigned int *ruhid, *ruhids; + char *r, *p, *token; + uint16_t *ph; + + if (!ns->params.fdp.ruhs) { + ns->fdp.nphs = 1; + ph = ns->fdp.phs = g_new(uint16_t, 1); + + ruh = nvme_find_ruh_by_attr(endgrp, NVME_RUHA_CTRL, ph); + if (!ruh) { + ruh = nvme_find_ruh_by_attr(endgrp, NVME_RUHA_UNUSED, ph); + if (!ruh) { + error_setg(errp, "no unused reclaim unit handles left"); + return false; + } + + ruh->ruha = NVME_RUHA_CTRL; + ruh->lbafi = lbafi; + ruh->ruamw = endgrp->fdp.runs >> ns->lbaf.ds; + + for (uint16_t rg = 0; rg < endgrp->fdp.nrg; rg++) { + ruh->rus[rg].ruamw = ruh->ruamw; + } + } else if (ruh->lbafi != lbafi) { + error_setg(errp, "lba format index of controller assigned " + "reclaim unit handle does not match namespace lba " + "format index"); + return false; + } + + return true; + } + + ruhid = ruhids = g_new0(unsigned int, endgrp->fdp.nruh); + r = p = strdup(ns->params.fdp.ruhs); + + /* parse the placement handle identifiers */ + while ((token = qemu_strsep(&p, ";")) != NULL) { + ns->fdp.nphs += 1; + if (ns->fdp.nphs > NVME_FDP_MAXPIDS || + ns->fdp.nphs == endgrp->fdp.nruh) { + error_setg(errp, "too many placement handles"); + free(r); + return false; + } + + if (qemu_strtoui(token, NULL, 0, ruhid++) < 0) { + error_setg(errp, "cannot parse reclaim unit handle identifier"); + free(r); + return false; + } + } + + free(r); + + ph = ns->fdp.phs = g_new(uint16_t, ns->fdp.nphs); + + ruhid = ruhids; + + /* verify the identifiers */ + for (unsigned int i = 0; i < ns->fdp.nphs; i++, ruhid++, ph++) { + if (*ruhid >= endgrp->fdp.nruh) { + error_setg(errp, "invalid reclaim unit handle identifier"); + return false; + } + + ruh = &endgrp->fdp.ruhs[*ruhid]; + + switch (ruh->ruha) { + case NVME_RUHA_UNUSED: + ruh->ruha = NVME_RUHA_HOST; + ruh->lbafi = lbafi; + ruh->ruamw = endgrp->fdp.runs >> ns->lbaf.ds; + + for (uint16_t rg = 0; rg < endgrp->fdp.nrg; rg++) { + ruh->rus[rg].ruamw = ruh->ruamw; + } + + break; + + case NVME_RUHA_HOST: + if (ruh->lbafi != lbafi) { + error_setg(errp, "lba format index of host assigned" + "reclaim unit handle does not match namespace " + "lba format index"); + return false; + } + + break; + + case NVME_RUHA_CTRL: + error_setg(errp, "reclaim unit handle is controller assigned"); + return false; + + default: + abort(); + } + + *ph = *ruhid; + } + + return true; +} + static int nvme_ns_check_constraints(NvmeNamespace *ns, Error **errp) { unsigned int pi_size; @@ -417,6 +543,11 @@ static int nvme_ns_check_constraints(NvmeNamespace *ns, Error **errp) return -1; } + if (ns->params.zoned && ns->endgrp && ns->endgrp->fdp.enabled) { + error_setg(errp, "cannot be a zoned- in an FDP configuration"); + return -1; + } + if 
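nvme_ns_init_fdp() parses the new fdp.ruhs namespace property, a semicolon-separated list of reclaim unit handle identifiers, with qemu_strsep() and qemu_strtoui(), then validates each identifier against the endurance group. A plain-libc analogue of just the parsing step (the validation is left out here):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Parse a list such as "0;1;4" into an array of handle identifiers.
 * Returns the number parsed, or -1 if there are too many entries. */
static int parse_ruh_list(const char *prop, unsigned *out, int max)
{
    char *dup = strdup(prop), *tok;
    int n = 0;

    for (tok = strtok(dup, ";"); tok; tok = strtok(NULL, ";")) {
        if (n == max) {
            n = -1;                    /* too many placement handles */
            break;
        }
        out[n++] = (unsigned)strtoul(tok, NULL, 0);
    }
    free(dup);
    return n;
}

int main(void)
{
    unsigned ids[8];
    int n = parse_ruh_list("0;1;4", ids, 8);

    for (int i = 0; i < n; i++) {
        printf("ph %d -> ruh %u\n", i, ids[i]);
    }
    return 0;
}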
(ns->params.zoned) { if (ns->params.max_active_zones) { if (ns->params.max_open_zones > ns->params.max_active_zones) { @@ -502,6 +633,12 @@ int nvme_ns_setup(NvmeNamespace *ns, Error **errp) nvme_ns_init_zoned(ns); } + if (ns->endgrp && ns->endgrp->fdp.enabled) { + if (!nvme_ns_init_fdp(ns, errp)) { + return -1; + } + } + return 0; } @@ -525,6 +662,10 @@ void nvme_ns_cleanup(NvmeNamespace *ns) g_free(ns->zone_array); g_free(ns->zd_extensions); } + + if (ns->endgrp && ns->endgrp->fdp.enabled) { + g_free(ns->fdp.phs); + } } static void nvme_ns_unrealize(DeviceState *dev) @@ -561,6 +702,8 @@ static void nvme_ns_realize(DeviceState *dev, Error **errp) if (!qdev_set_parent_bus(dev, &subsys->bus.parent_bus, errp)) { return; } + ns->subsys = subsys; + ns->endgrp = &subsys->endgrp; } if (nvme_ns_setup(ns, errp)) { @@ -591,6 +734,8 @@ static void nvme_ns_realize(DeviceState *dev, Error **errp) if (subsys) { subsys->namespaces[nsid] = ns; + ns->id_ns.endgid = cpu_to_le16(0x1); + if (ns->params.detached) { return; } @@ -606,6 +751,7 @@ static void nvme_ns_realize(DeviceState *dev, Error **errp) return; } + } nvme_attach_ns(n, ns); @@ -644,6 +790,7 @@ static Property nvme_ns_props[] = { DEFINE_PROP_SIZE("zoned.zrwafg", NvmeNamespace, params.zrwafg, -1), DEFINE_PROP_BOOL("eui64-default", NvmeNamespace, params.eui64_default, false), + DEFINE_PROP_STRING("fdp.ruhs", NvmeNamespace, params.fdp.ruhs), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/nvme/nvme.h b/hw/nvme/nvme.h index 16da27a69b..209e8f5b4c 100644 --- a/hw/nvme/nvme.h +++ b/hw/nvme/nvme.h @@ -27,6 +27,8 @@ #define NVME_MAX_CONTROLLERS 256 #define NVME_MAX_NAMESPACES 256 #define NVME_EUI64_DEFAULT ((uint64_t)0x5254000000000000) +#define NVME_FDP_MAX_EVENTS 63 +#define NVME_FDP_MAXPIDS 128 QEMU_BUILD_BUG_ON(NVME_MAX_NAMESPACES > NVME_NSID_BROADCAST - 1); @@ -45,17 +47,68 @@ typedef struct NvmeBus { OBJECT_CHECK(NvmeSubsystem, (obj), TYPE_NVME_SUBSYS) #define SUBSYS_SLOT_RSVD (void *)0xFFFF +typedef struct NvmeReclaimUnit { + uint64_t ruamw; +} NvmeReclaimUnit; + +typedef struct NvmeRuHandle { + uint8_t ruht; + uint8_t ruha; + uint64_t event_filter; + uint8_t lbafi; + uint64_t ruamw; + + /* reclaim units indexed by reclaim group */ + NvmeReclaimUnit *rus; +} NvmeRuHandle; + +typedef struct NvmeFdpEventBuffer { + NvmeFdpEvent events[NVME_FDP_MAX_EVENTS]; + unsigned int nelems; + unsigned int start; + unsigned int next; +} NvmeFdpEventBuffer; + +typedef struct NvmeEnduranceGroup { + uint8_t event_conf; + + struct { + NvmeFdpEventBuffer host_events, ctrl_events; + + uint16_t nruh; + uint16_t nrg; + uint8_t rgif; + uint64_t runs; + + uint64_t hbmw; + uint64_t mbmw; + uint64_t mbe; + + bool enabled; + + NvmeRuHandle *ruhs; + } fdp; +} NvmeEnduranceGroup; + typedef struct NvmeSubsystem { DeviceState parent_obj; NvmeBus bus; uint8_t subnqn[256]; char *serial; - NvmeCtrl *ctrls[NVME_MAX_CONTROLLERS]; - NvmeNamespace *namespaces[NVME_MAX_NAMESPACES + 1]; + NvmeCtrl *ctrls[NVME_MAX_CONTROLLERS]; + NvmeNamespace *namespaces[NVME_MAX_NAMESPACES + 1]; + NvmeEnduranceGroup endgrp; struct { char *nqn; + + struct { + bool enabled; + uint64_t runs; + uint16_t nruh; + uint32_t nrg; + } fdp; } params; } NvmeSubsystem; @@ -96,6 +149,21 @@ typedef struct NvmeZone { QTAILQ_ENTRY(NvmeZone) entry; } NvmeZone; +#define FDP_EVT_MAX 0xff +#define NVME_FDP_MAX_NS_RUHS 32u +#define FDPVSS 0 + +static const uint8_t nvme_fdp_evf_shifts[FDP_EVT_MAX] = { + /* Host events */ + [FDP_EVT_RU_NOT_FULLY_WRITTEN] = 0, + [FDP_EVT_RU_ATL_EXCEEDED] = 1, + [FDP_EVT_CTRL_RESET_RUH] = 2, + 
[FDP_EVT_INVALID_PID] = 3, + /* CTRL events */ + [FDP_EVT_MEDIA_REALLOC] = 32, + [FDP_EVT_RUH_IMPLICIT_RU_CHANGE] = 33, +}; + typedef struct NvmeNamespaceParams { bool detached; bool shared; @@ -125,6 +193,10 @@ typedef struct NvmeNamespaceParams { uint32_t numzrwa; uint64_t zrwas; uint64_t zrwafg; + + struct { + char *ruhs; + } fdp; } NvmeNamespaceParams; typedef struct NvmeNamespace { @@ -167,10 +239,18 @@ typedef struct NvmeNamespace { int32_t nr_active_zones; NvmeNamespaceParams params; + NvmeSubsystem *subsys; + NvmeEnduranceGroup *endgrp; struct { uint32_t err_rec; } features; + + struct { + uint16_t nphs; + /* reclaim unit handle identifiers indexed by placement handle */ + uint16_t *phs; + } fdp; } NvmeNamespace; static inline uint32_t nvme_nsid(NvmeNamespace *ns) @@ -274,6 +354,12 @@ static inline void nvme_aor_dec_active(NvmeNamespace *ns) assert(ns->nr_active_zones >= 0); } +static inline void nvme_fdp_stat_inc(uint64_t *a, uint64_t b) +{ + uint64_t ret = *a + b; + *a = ret < *a ? UINT64_MAX : ret; +} + void nvme_ns_init_format(NvmeNamespace *ns); int nvme_ns_setup(NvmeNamespace *ns, Error **errp); void nvme_ns_drain(NvmeNamespace *ns); @@ -340,7 +426,9 @@ static inline const char *nvme_adm_opc_str(uint8_t opc) case NVME_ADM_CMD_GET_FEATURES: return "NVME_ADM_CMD_GET_FEATURES"; case NVME_ADM_CMD_ASYNC_EV_REQ: return "NVME_ADM_CMD_ASYNC_EV_REQ"; case NVME_ADM_CMD_NS_ATTACHMENT: return "NVME_ADM_CMD_NS_ATTACHMENT"; + case NVME_ADM_CMD_DIRECTIVE_SEND: return "NVME_ADM_CMD_DIRECTIVE_SEND"; case NVME_ADM_CMD_VIRT_MNGMT: return "NVME_ADM_CMD_VIRT_MNGMT"; + case NVME_ADM_CMD_DIRECTIVE_RECV: return "NVME_ADM_CMD_DIRECTIVE_RECV"; case NVME_ADM_CMD_DBBUF_CONFIG: return "NVME_ADM_CMD_DBBUF_CONFIG"; case NVME_ADM_CMD_FORMAT_NVM: return "NVME_ADM_CMD_FORMAT_NVM"; default: return "NVME_ADM_CMD_UNKNOWN"; diff --git a/hw/nvme/subsys.c b/hw/nvme/subsys.c index 9d2643678b..24ddec860e 100644 --- a/hw/nvme/subsys.c +++ b/hw/nvme/subsys.c @@ -7,10 +7,13 @@ */ #include "qemu/osdep.h" +#include "qemu/units.h" #include "qapi/error.h" #include "nvme.h" +#define NVME_DEFAULT_RU_SIZE (96 * MiB) + static int nvme_subsys_reserve_cntlids(NvmeCtrl *n, int start, int num) { NvmeSubsystem *subsys = n->subsys; @@ -109,13 +112,95 @@ void nvme_subsys_unregister_ctrl(NvmeSubsystem *subsys, NvmeCtrl *n) n->cntlid = -1; } -static void nvme_subsys_setup(NvmeSubsystem *subsys) +static bool nvme_calc_rgif(uint16_t nruh, uint16_t nrg, uint8_t *rgif) +{ + uint16_t val; + unsigned int i; + + if (unlikely(nrg == 1)) { + /* PIDRG_NORGI scenario, all of pid is used for PHID */ + *rgif = 0; + return true; + } + + val = nrg; + i = 0; + while (val) { + val >>= 1; + i++; + } + *rgif = i; + + /* ensure remaining bits suffice to represent number of phids in a RG */ + if (unlikely((UINT16_MAX >> i) < nruh)) { + *rgif = 0; + return false; + } + + return true; +} + +static bool nvme_subsys_setup_fdp(NvmeSubsystem *subsys, Error **errp) +{ + NvmeEnduranceGroup *endgrp = &subsys->endgrp; + + if (!subsys->params.fdp.runs) { + error_setg(errp, "fdp.runs must be non-zero"); + return false; + } + + endgrp->fdp.runs = subsys->params.fdp.runs; + + if (!subsys->params.fdp.nrg) { + error_setg(errp, "fdp.nrg must be non-zero"); + return false; + } + + endgrp->fdp.nrg = subsys->params.fdp.nrg; + + if (!subsys->params.fdp.nruh) { + error_setg(errp, "fdp.nruh must be non-zero"); + return false; + } + + endgrp->fdp.nruh = subsys->params.fdp.nruh; + + if (!nvme_calc_rgif(endgrp->fdp.nruh, endgrp->fdp.nrg, &endgrp->fdp.rgif)) { + error_setg(errp, + 
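nvme_calc_rgif() derives how many PID bits to reserve for the reclaim group: it counts the bits needed to store the reclaim group count and then checks that the remaining bits can still represent every placement handle. The same calculation, standalone:

#include <stdint.h>
#include <stdio.h>

/* Returns 1 on success and stores the reclaim group identifier format
 * (number of PID bits used for the group) in *rgif; 0 if the handles no
 * longer fit in the leftover bits. */
static int calc_rgif(uint16_t nruh, uint16_t nrg, uint8_t *rgif)
{
    unsigned bits = 0;
    uint16_t val = nrg;

    if (nrg == 1) {
        *rgif = 0;                      /* whole PID is the placement handle */
        return 1;
    }
    while (val) {                       /* bit length of nrg */
        val >>= 1;
        bits++;
    }
    if ((UINT16_MAX >> bits) < nruh) {
        *rgif = 0;                      /* not enough bits left for the handles */
        return 0;
    }
    *rgif = (uint8_t)bits;
    return 1;
}

int main(void)
{
    uint8_t rgif;
    printf("nrg=16 nruh=8: ok=%d rgif=%u\n", calc_rgif(8, 16, &rgif), rgif);
    return 0;
}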
"cannot derive a valid rgif (nruh %"PRIu16" nrg %"PRIu32")", + endgrp->fdp.nruh, endgrp->fdp.nrg); + return false; + } + + endgrp->fdp.ruhs = g_new(NvmeRuHandle, endgrp->fdp.nruh); + + for (uint16_t ruhid = 0; ruhid < endgrp->fdp.nruh; ruhid++) { + endgrp->fdp.ruhs[ruhid] = (NvmeRuHandle) { + .ruht = NVME_RUHT_INITIALLY_ISOLATED, + .ruha = NVME_RUHA_UNUSED, + }; + + endgrp->fdp.ruhs[ruhid].rus = g_new(NvmeReclaimUnit, endgrp->fdp.nrg); + } + + endgrp->fdp.enabled = true; + + return true; +} + +static bool nvme_subsys_setup(NvmeSubsystem *subsys, Error **errp) { const char *nqn = subsys->params.nqn ? subsys->params.nqn : subsys->parent_obj.id; snprintf((char *)subsys->subnqn, sizeof(subsys->subnqn), "nqn.2019-08.org.qemu:%s", nqn); + + if (subsys->params.fdp.enabled && !nvme_subsys_setup_fdp(subsys, errp)) { + return false; + } + + return true; } static void nvme_subsys_realize(DeviceState *dev, Error **errp) @@ -124,11 +209,16 @@ static void nvme_subsys_realize(DeviceState *dev, Error **errp) qbus_init(&subsys->bus, sizeof(NvmeBus), TYPE_NVME_BUS, dev, dev->id); - nvme_subsys_setup(subsys); + nvme_subsys_setup(subsys, errp); } static Property nvme_subsystem_props[] = { DEFINE_PROP_STRING("nqn", NvmeSubsystem, params.nqn), + DEFINE_PROP_BOOL("fdp", NvmeSubsystem, params.fdp.enabled, false), + DEFINE_PROP_SIZE("fdp.runs", NvmeSubsystem, params.fdp.runs, + NVME_DEFAULT_RU_SIZE), + DEFINE_PROP_UINT32("fdp.nrg", NvmeSubsystem, params.fdp.nrg, 1), + DEFINE_PROP_UINT16("fdp.nruh", NvmeSubsystem, params.fdp.nruh, 0), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/nvme/trace-events b/hw/nvme/trace-events index b16f2260b4..7f7837e1a2 100644 --- a/hw/nvme/trace-events +++ b/hw/nvme/trace-events @@ -117,6 +117,7 @@ pci_nvme_clear_ns_reset(uint32_t state, uint64_t slba) "zone state=%"PRIu32", sl pci_nvme_zoned_zrwa_implicit_flush(uint64_t zslba, uint32_t nlb) "zslba 0x%"PRIx64" nlb %"PRIu32"" pci_nvme_pci_reset(void) "PCI Function Level Reset" pci_nvme_virt_mngmt(uint16_t cid, uint16_t act, uint16_t cntlid, const char* rt, uint16_t nr) "cid %"PRIu16", act=0x%"PRIx16", ctrlid=%"PRIu16" %s nr=%"PRIu16"" +pci_nvme_fdp_ruh_change(uint16_t rgid, uint16_t ruhid) "change RU on RUH rgid=%"PRIu16", ruhid=%"PRIu16"" # error conditions pci_nvme_err_mdts(size_t len) "len %zu" diff --git a/hw/nvram/eeprom_at24c.c b/hw/nvram/eeprom_at24c.c index 3328c32814..613c4929e3 100644 --- a/hw/nvram/eeprom_at24c.c +++ b/hw/nvram/eeprom_at24c.c @@ -41,6 +41,13 @@ struct EEPROMState { uint16_t cur; /* total size in bytes */ uint32_t rsize; + /* + * address byte number + * for 24c01, 24c02 size <= 256 byte, use only 1 byte + * otherwise size > 256, use 2 byte + */ + uint8_t asize; + bool writable; /* cells changed since last START? 
*/ bool changed; @@ -91,7 +98,11 @@ uint8_t at24c_eeprom_recv(I2CSlave *s) EEPROMState *ee = AT24C_EE(s); uint8_t ret; - if (ee->haveaddr == 1) { + /* + * If got the byte address but not completely with address size + * will return the invalid value + */ + if (ee->haveaddr > 0 && ee->haveaddr < ee->asize) { return 0xff; } @@ -108,11 +119,11 @@ int at24c_eeprom_send(I2CSlave *s, uint8_t data) { EEPROMState *ee = AT24C_EE(s); - if (ee->haveaddr < 2) { + if (ee->haveaddr < ee->asize) { ee->cur <<= 8; ee->cur |= data; ee->haveaddr++; - if (ee->haveaddr == 2) { + if (ee->haveaddr == ee->asize) { ee->cur %= ee->rsize; DPRINTK("Set pointer %04x\n", ee->cur); } @@ -199,6 +210,18 @@ static void at24c_eeprom_realize(DeviceState *dev, Error **errp) } DPRINTK("Reset read backing file\n"); } + + /* + * If address size didn't define with property set + * value is 0 as default, setting it by Rom size detecting. + */ + if (ee->asize == 0) { + if (ee->rsize <= 256) { + ee->asize = 1; + } else { + ee->asize = 2; + } + } } static @@ -213,6 +236,7 @@ void at24c_eeprom_reset(DeviceState *state) static Property at24c_eeprom_props[] = { DEFINE_PROP_UINT32("rom-size", EEPROMState, rsize, 0), + DEFINE_PROP_UINT8("address-size", EEPROMState, asize, 0), DEFINE_PROP_BOOL("writable", EEPROMState, writable, true), DEFINE_PROP_DRIVE("drive", EEPROMState, blk), DEFINE_PROP_END_OF_LIST() diff --git a/hw/nvram/fw_cfg.c b/hw/nvram/fw_cfg.c index 432754eda4..29a5bef1d5 100644 --- a/hw/nvram/fw_cfg.c +++ b/hw/nvram/fw_cfg.c @@ -693,12 +693,12 @@ static const VMStateDescription vmstate_fw_cfg = { } }; -void fw_cfg_add_bytes_callback(FWCfgState *s, uint16_t key, - FWCfgCallback select_cb, - FWCfgWriteCallback write_cb, - void *callback_opaque, - void *data, size_t len, - bool read_only) +static void fw_cfg_add_bytes_callback(FWCfgState *s, uint16_t key, + FWCfgCallback select_cb, + FWCfgWriteCallback write_cb, + void *callback_opaque, + void *data, size_t len, + bool read_only) { int arch = !!(key & FW_CFG_ARCH_LOCAL); @@ -741,15 +741,6 @@ void fw_cfg_add_bytes(FWCfgState *s, uint16_t key, void *data, size_t len) fw_cfg_add_bytes_callback(s, key, NULL, NULL, NULL, data, len, true); } -void *fw_cfg_read_bytes_ptr(FWCfgState *s, uint16_t key) -{ - int arch = !!(key & FW_CFG_ARCH_LOCAL); - - key &= FW_CFG_ENTRY_MASK; - assert(key < fw_cfg_max_entry(s)); - return s->entries[arch][key].data; -} - void fw_cfg_add_string(FWCfgState *s, uint16_t key, const char *value) { size_t sz = strlen(value) + 1; diff --git a/hw/pci-bridge/cxl_downstream.c b/hw/pci-bridge/cxl_downstream.c index 3d4e6b59cd..54f507318f 100644 --- a/hw/pci-bridge/cxl_downstream.c +++ b/hw/pci-bridge/cxl_downstream.c @@ -15,7 +15,7 @@ #include "hw/pci/pcie_port.h" #include "qapi/error.h" -typedef struct CXLDownStreamPort { +typedef struct CXLDownstreamPort { /*< private >*/ PCIESlot parent_obj; diff --git a/hw/pci-host/pnv_phb.c b/hw/pci-host/pnv_phb.c index c62b08538a..82332d7a05 100644 --- a/hw/pci-host/pnv_phb.c +++ b/hw/pci-host/pnv_phb.c @@ -62,6 +62,15 @@ static bool pnv_parent_fixup(Object *parent, BusState *parent_bus, return true; } +static Object *pnv_phb_user_get_parent(PnvChip *chip, PnvPHB *phb, Error **errp) +{ + if (phb->version == 3) { + return OBJECT(pnv_chip_add_phb(chip, phb)); + } else { + return OBJECT(pnv_pec_add_phb(chip, phb, errp)); + } +} + /* * User created devices won't have the initial setup that default * devices have. 
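The at24c changes make the number of address bytes configurable (address-size property) and default it from the ROM size: one byte for parts of 256 bytes or less, two bytes otherwise. Incoming bytes after START are shifted into the word pointer until asize bytes have arrived. A condensed sketch of that address phase, with simplified types:

#include <stdint.h>
#include <stdio.h>

struct eeprom {
    uint32_t rsize;     /* total size in bytes */
    uint8_t  asize;     /* number of address bytes (1 or 2) */
    uint16_t cur;       /* current word address */
    uint8_t  haveaddr;  /* address bytes received so far */
};

static void eeprom_init(struct eeprom *ee, uint32_t rsize)
{
    ee->rsize = rsize;
    ee->asize = rsize <= 256 ? 1 : 2;   /* default derived from the ROM size */
    ee->cur = 0;
    ee->haveaddr = 0;
}

static void eeprom_send(struct eeprom *ee, uint8_t byte)
{
    if (ee->haveaddr < ee->asize) {
        ee->cur = (uint16_t)((ee->cur << 8) | byte);
        if (++ee->haveaddr == ee->asize) {
            ee->cur %= ee->rsize;       /* wrap inside the array */
        }
        return;
    }
    /* ...data bytes would be written at ee->cur from here on... */
}

int main(void)
{
    struct eeprom ee;

    eeprom_init(&ee, 8192);             /* 24C64-style part: 2 address bytes */
    eeprom_send(&ee, 0x12);
    eeprom_send(&ee, 0x34);
    printf("pointer = 0x%04x\n", ee.cur);
    return 0;
}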
This setup consists of assigning a parent device @@ -79,7 +88,7 @@ static bool pnv_phb_user_device_init(PnvPHB *phb, Error **errp) return false; } - parent = pnv_chip_add_phb(chip, phb, errp); + parent = pnv_phb_user_get_parent(chip, phb, errp); if (!parent) { return false; } diff --git a/hw/pci-host/pnv_phb4_pec.c b/hw/pci-host/pnv_phb4_pec.c index 43267a428f..3b2850f7a3 100644 --- a/hw/pci-host/pnv_phb4_pec.c +++ b/hw/pci-host/pnv_phb4_pec.c @@ -112,9 +112,50 @@ static const MemoryRegionOps pnv_pec_pci_xscom_ops = { .endianness = DEVICE_BIG_ENDIAN, }; -static void pnv_pec_default_phb_realize(PnvPhb4PecState *pec, - int stack_no, - Error **errp) +PnvPhb4PecState *pnv_pec_add_phb(PnvChip *chip, PnvPHB *phb, Error **errp) +{ + PnvPhb4PecState *pecs = NULL; + int chip_id = phb->chip_id; + int index = phb->phb_id; + int i, j; + + if (phb->version == 4) { + Pnv9Chip *chip9 = PNV9_CHIP(chip); + + pecs = chip9->pecs; + } else if (phb->version == 5) { + Pnv10Chip *chip10 = PNV10_CHIP(chip); + + pecs = chip10->pecs; + } else { + g_assert_not_reached(); + } + + for (i = 0; i < chip->num_pecs; i++) { + /* + * For each PEC, check the amount of phbs it supports + * and see if the given phb4 index matches an index. + */ + PnvPhb4PecState *pec = &pecs[i]; + + for (j = 0; j < pec->num_phbs; j++) { + if (index == pnv_phb4_pec_get_phb_id(pec, j)) { + pec->phbs[j] = phb; + phb->pec = pec; + return pec; + } + } + } + error_setg(errp, + "pnv-phb4 chip-id %d index %d didn't match any existing PEC", + chip_id, index); + + return NULL; +} + +static PnvPHB *pnv_pec_default_phb_realize(PnvPhb4PecState *pec, + int stack_no, + Error **errp) { PnvPHB *phb = PNV_PHB(qdev_new(TYPE_PNV_PHB)); int phb_id = pnv_phb4_pec_get_phb_id(pec, stack_no); @@ -128,8 +169,9 @@ static void pnv_pec_default_phb_realize(PnvPhb4PecState *pec, &error_fatal); if (!sysbus_realize(SYS_BUS_DEVICE(phb), errp)) { - return; + return NULL; } + return phb; } static void pnv_pec_realize(DeviceState *dev, Error **errp) @@ -148,8 +190,9 @@ static void pnv_pec_realize(DeviceState *dev, Error **errp) /* Create PHBs if running with defaults */ if (defaults_enabled()) { + g_assert(pec->num_phbs <= MAX_PHBS_PER_PEC); for (i = 0; i < pec->num_phbs; i++) { - pnv_pec_default_phb_realize(pec, i, errp); + pec->phbs[i] = pnv_pec_default_phb_realize(pec, i, errp); } } @@ -197,9 +240,12 @@ static int pnv_pec_dt_xscom(PnvXScomInterface *dev, void *fdt, pecc->compat_size))); for (i = 0; i < pec->num_phbs; i++) { - int phb_id = pnv_phb4_pec_get_phb_id(pec, i); int stk_offset; + if (!pec->phbs[i]) { + continue; + } + name = g_strdup_printf("stack@%x", i); stk_offset = fdt_add_subnode(fdt, offset, name); _FDT(stk_offset); @@ -207,7 +253,8 @@ static int pnv_pec_dt_xscom(PnvXScomInterface *dev, void *fdt, _FDT((fdt_setprop(fdt, stk_offset, "compatible", pecc->stk_compat, pecc->stk_compat_size))); _FDT((fdt_setprop_cell(fdt, stk_offset, "reg", i))); - _FDT((fdt_setprop_cell(fdt, stk_offset, "ibm,phb-index", phb_id))); + _FDT((fdt_setprop_cell(fdt, stk_offset, "ibm,phb-index", + pec->phbs[i]->phb_id))); } return 0; diff --git a/hw/pci/pci.c b/hw/pci/pci.c index 10c980b9f5..034fe49e9a 100644 --- a/hw/pci/pci.c +++ b/hw/pci/pci.c @@ -282,9 +282,13 @@ static void pci_change_irq_level(PCIDevice *pci_dev, int irq_num, int change) { PCIBus *bus; for (;;) { + int dev_irq = irq_num; bus = pci_get_bus(pci_dev); assert(bus->map_irq); irq_num = bus->map_irq(pci_dev, irq_num); + trace_pci_route_irq(dev_irq, DEVICE(pci_dev)->canonical_path, irq_num, + pci_bus_is_root(bus) ? 
"root-complex" + : DEVICE(bus->parent_dev)->canonical_path); if (bus->set_irq) break; pci_dev = bus->parent_dev; @@ -1617,8 +1621,12 @@ PCIINTxRoute pci_device_route_intx_to_irq(PCIDevice *dev, int pin) PCIBus *bus; do { + int dev_irq = pin; bus = pci_get_bus(dev); pin = bus->map_irq(dev, pin); + trace_pci_route_irq(dev_irq, DEVICE(dev)->canonical_path, pin, + pci_bus_is_root(bus) ? "root-complex" + : DEVICE(bus->parent_dev)->canonical_path); dev = bus->parent_dev; } while (dev); diff --git a/hw/pci/pcie.c b/hw/pci/pcie.c index 924fdabd15..b8c24cf45f 100644 --- a/hw/pci/pcie.c +++ b/hw/pci/pcie.c @@ -39,6 +39,11 @@ #define PCIE_DEV_PRINTF(dev, fmt, ...) \ PCIE_DPRINTF("%s:%x "fmt, (dev)->name, (dev)->devfn, ## __VA_ARGS__) +static bool pcie_sltctl_powered_off(uint16_t sltctl) +{ + return (sltctl & PCI_EXP_SLTCTL_PCC) == PCI_EXP_SLTCTL_PWR_OFF + && (sltctl & PCI_EXP_SLTCTL_PIC) == PCI_EXP_SLTCTL_PWR_IND_OFF; +} /*************************************************************************** * pci express capability helper functions @@ -373,8 +378,8 @@ void pcie_cap_slot_enable_power(PCIDevice *dev) uint32_t sltcap = pci_get_long(exp_cap + PCI_EXP_SLTCAP); if (sltcap & PCI_EXP_SLTCAP_PCP) { - pci_set_word_by_mask(exp_cap + PCI_EXP_SLTCTL, - PCI_EXP_SLTCTL_PCC, PCI_EXP_SLTCTL_PWR_ON); + pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTCTL, + PCI_EXP_SLTCTL_PCC); } } @@ -395,6 +400,7 @@ static void pcie_cap_update_power(PCIDevice *hotplug_dev) if (sltcap & PCI_EXP_SLTCAP_PCP) { power = (sltctl & PCI_EXP_SLTCTL_PCC) == PCI_EXP_SLTCTL_PWR_ON; + /* Don't we need to check also (sltctl & PCI_EXP_SLTCTL_PIC) ? */ } pci_for_each_device(sec_bus, pci_bus_num(sec_bus), @@ -579,8 +585,7 @@ void pcie_cap_slot_unplug_request_cb(HotplugHandler *hotplug_dev, return; } - if (((sltctl & PCI_EXP_SLTCTL_PIC) == PCI_EXP_SLTCTL_PWR_IND_OFF) && - ((sltctl & PCI_EXP_SLTCTL_PCC) == PCI_EXP_SLTCTL_PWR_OFF)) { + if (pcie_sltctl_powered_off(sltctl)) { /* slot is powered off -> unplug without round-trip to the guest */ pcie_cap_slot_do_unplug(hotplug_pdev); hotplug_event_notify(hotplug_pdev); @@ -634,8 +639,8 @@ void pcie_cap_slot_init(PCIDevice *dev, PCIESlot *s) PCI_EXP_SLTCTL_PIC | PCI_EXP_SLTCTL_AIC); pci_word_test_and_set_mask(dev->config + pos + PCI_EXP_SLTCTL, - PCI_EXP_SLTCTL_PIC_OFF | - PCI_EXP_SLTCTL_AIC_OFF); + PCI_EXP_SLTCTL_PWR_IND_OFF | + PCI_EXP_SLTCTL_ATTN_IND_OFF); pci_word_test_and_set_mask(dev->wmask + pos + PCI_EXP_SLTCTL, PCI_EXP_SLTCTL_PIC | PCI_EXP_SLTCTL_AIC | @@ -679,7 +684,8 @@ void pcie_cap_slot_reset(PCIDevice *dev) PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE); pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTCTL, - PCI_EXP_SLTCTL_AIC_OFF); + PCI_EXP_SLTCTL_PWR_IND_OFF | + PCI_EXP_SLTCTL_ATTN_IND_OFF); if (dev->cap_present & QEMU_PCIE_SLTCAP_PCP) { /* Downstream ports enforce device number 0. */ @@ -694,7 +700,8 @@ void pcie_cap_slot_reset(PCIDevice *dev) PCI_EXP_SLTCTL_PCC); } - pic = populated ? PCI_EXP_SLTCTL_PIC_ON : PCI_EXP_SLTCTL_PIC_OFF; + pic = populated ? + PCI_EXP_SLTCTL_PWR_IND_ON : PCI_EXP_SLTCTL_PWR_IND_OFF; pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTCTL, pic); } @@ -769,10 +776,9 @@ void pcie_cap_slot_write_config(PCIDevice *dev, * this is a work around for guests that overwrite * control of powered off slots before powering them on. 
*/ - if ((sltsta & PCI_EXP_SLTSTA_PDS) && (val & PCI_EXP_SLTCTL_PCC) && - (val & PCI_EXP_SLTCTL_PIC_OFF) == PCI_EXP_SLTCTL_PIC_OFF && - (!(old_slt_ctl & PCI_EXP_SLTCTL_PCC) || - (old_slt_ctl & PCI_EXP_SLTCTL_PIC_OFF) != PCI_EXP_SLTCTL_PIC_OFF)) { + if ((sltsta & PCI_EXP_SLTSTA_PDS) && pcie_sltctl_powered_off(val) && + !pcie_sltctl_powered_off(old_slt_ctl)) + { pcie_cap_slot_do_unplug(dev); } pcie_cap_update_power(dev); diff --git a/hw/pci/shpc.c b/hw/pci/shpc.c index fca7f6691a..e7bc7192f1 100644 --- a/hw/pci/shpc.c +++ b/hw/pci/shpc.c @@ -123,10 +123,13 @@ #define SHPC_PCI_TO_IDX(pci_slot) ((pci_slot) - 1) #define SHPC_IDX_TO_PHYSICAL(slot) ((slot) + 1) -static uint16_t shpc_get_status(SHPCDevice *shpc, int slot, uint16_t msk) +static uint8_t shpc_get_status(SHPCDevice *shpc, int slot, uint16_t msk) { uint8_t *status = shpc->config + SHPC_SLOT_STATUS(slot); - return (pci_get_word(status) & msk) >> ctz32(msk); + uint16_t result = (pci_get_word(status) & msk) >> ctz32(msk); + + assert(result <= UINT8_MAX); + return result; } static void shpc_set_status(SHPCDevice *shpc, @@ -223,6 +226,7 @@ void shpc_reset(PCIDevice *d) SHPC_SLOT_STATUS_PRSNT_MASK); shpc_set_status(shpc, i, SHPC_LED_OFF, SHPC_SLOT_PWR_LED_MASK); } + shpc_set_status(shpc, i, SHPC_LED_OFF, SHPC_SLOT_ATTN_LED_MASK); shpc_set_status(shpc, i, 0, SHPC_SLOT_STATUS_66); } shpc_set_sec_bus_speed(shpc, SHPC_SEC_BUS_33); @@ -254,60 +258,66 @@ static void shpc_free_devices_in_slot(SHPCDevice *shpc, int slot) } } -static void shpc_slot_command(SHPCDevice *shpc, uint8_t target, +static bool shpc_slot_is_off(uint8_t state, uint8_t power, uint8_t attn) +{ + return state == SHPC_STATE_DISABLED && power == SHPC_LED_OFF; +} + +static void shpc_slot_command(PCIDevice *d, uint8_t target, uint8_t state, uint8_t power, uint8_t attn) { - uint8_t current_state; + SHPCDevice *shpc = d->shpc; int slot = SHPC_LOGICAL_TO_IDX(target); + uint8_t old_state = shpc_get_status(shpc, slot, SHPC_SLOT_STATE_MASK); + uint8_t old_power = shpc_get_status(shpc, slot, SHPC_SLOT_PWR_LED_MASK); + uint8_t old_attn = shpc_get_status(shpc, slot, SHPC_SLOT_ATTN_LED_MASK); + if (target < SHPC_CMD_TRGT_MIN || slot >= shpc->nslots) { shpc_invalid_command(shpc); return; } - current_state = shpc_get_status(shpc, slot, SHPC_SLOT_STATE_MASK); - if (current_state == SHPC_STATE_ENABLED && state == SHPC_STATE_PWRONLY) { + + if (old_state == SHPC_STATE_ENABLED && state == SHPC_STATE_PWRONLY) { shpc_invalid_command(shpc); return; } - switch (power) { - case SHPC_LED_NO: - break; - default: + if (power == SHPC_LED_NO) { + power = old_power; + } else { /* TODO: send event to monitor */ shpc_set_status(shpc, slot, power, SHPC_SLOT_PWR_LED_MASK); } - switch (attn) { - case SHPC_LED_NO: - break; - default: + + if (attn == SHPC_LED_NO) { + attn = old_attn; + } else { /* TODO: send event to monitor */ shpc_set_status(shpc, slot, attn, SHPC_SLOT_ATTN_LED_MASK); } - if ((current_state == SHPC_STATE_DISABLED && state == SHPC_STATE_PWRONLY) || - (current_state == SHPC_STATE_DISABLED && state == SHPC_STATE_ENABLED)) { - shpc_set_status(shpc, slot, state, SHPC_SLOT_STATE_MASK); - } else if ((current_state == SHPC_STATE_ENABLED || - current_state == SHPC_STATE_PWRONLY) && - state == SHPC_STATE_DISABLED) { + if (state == SHPC_STATE_NO) { + state = old_state; + } else { shpc_set_status(shpc, slot, state, SHPC_SLOT_STATE_MASK); - power = shpc_get_status(shpc, slot, SHPC_SLOT_PWR_LED_MASK); - /* TODO: track what monitor requested. */ - /* Look at LED to figure out whether it's ok to remove the device. 
*/ - if (power == SHPC_LED_OFF) { - shpc_free_devices_in_slot(shpc, slot); - shpc_set_status(shpc, slot, 1, SHPC_SLOT_STATUS_MRL_OPEN); - shpc_set_status(shpc, slot, SHPC_SLOT_STATUS_PRSNT_EMPTY, - SHPC_SLOT_STATUS_PRSNT_MASK); - shpc->config[SHPC_SLOT_EVENT_LATCH(slot)] |= - SHPC_SLOT_EVENT_MRL | - SHPC_SLOT_EVENT_PRESENCE; - } + } + + if (!shpc_slot_is_off(old_state, old_power, old_attn) && + shpc_slot_is_off(state, power, attn)) + { + shpc_free_devices_in_slot(shpc, slot); + shpc_set_status(shpc, slot, 1, SHPC_SLOT_STATUS_MRL_OPEN); + shpc_set_status(shpc, slot, SHPC_SLOT_STATUS_PRSNT_EMPTY, + SHPC_SLOT_STATUS_PRSNT_MASK); + shpc->config[SHPC_SLOT_EVENT_LATCH(slot)] |= + SHPC_SLOT_EVENT_MRL | + SHPC_SLOT_EVENT_PRESENCE; } } -static void shpc_command(SHPCDevice *shpc) +static void shpc_command(PCIDevice *d) { + SHPCDevice *shpc = d->shpc; uint8_t code = pci_get_byte(shpc->config + SHPC_CMD_CODE); uint8_t speed; uint8_t target; @@ -328,7 +338,7 @@ static void shpc_command(SHPCDevice *shpc) state = (code & SHPC_SLOT_STATE_MASK) >> SHPC_SLOT_STATE_SHIFT; power = (code & SHPC_SLOT_PWR_LED_MASK) >> SHPC_SLOT_PWR_LED_SHIFT; attn = (code & SHPC_SLOT_ATTN_LED_MASK) >> SHPC_SLOT_ATTN_LED_SHIFT; - shpc_slot_command(shpc, target, state, power, attn); + shpc_slot_command(d, target, state, power, attn); break; case 0x40 ... 0x47: speed = code & SHPC_SEC_BUS_MASK; @@ -346,10 +356,10 @@ static void shpc_command(SHPCDevice *shpc) } for (i = 0; i < shpc->nslots; ++i) { if (!(shpc_get_status(shpc, i, SHPC_SLOT_STATUS_MRL_OPEN))) { - shpc_slot_command(shpc, i + SHPC_CMD_TRGT_MIN, + shpc_slot_command(d, i + SHPC_CMD_TRGT_MIN, SHPC_STATE_PWRONLY, SHPC_LED_ON, SHPC_LED_NO); } else { - shpc_slot_command(shpc, i + SHPC_CMD_TRGT_MIN, + shpc_slot_command(d, i + SHPC_CMD_TRGT_MIN, SHPC_STATE_NO, SHPC_LED_OFF, SHPC_LED_NO); } } @@ -367,10 +377,10 @@ static void shpc_command(SHPCDevice *shpc) } for (i = 0; i < shpc->nslots; ++i) { if (!(shpc_get_status(shpc, i, SHPC_SLOT_STATUS_MRL_OPEN))) { - shpc_slot_command(shpc, i + SHPC_CMD_TRGT_MIN, + shpc_slot_command(d, i + SHPC_CMD_TRGT_MIN, SHPC_STATE_ENABLED, SHPC_LED_ON, SHPC_LED_NO); } else { - shpc_slot_command(shpc, i + SHPC_CMD_TRGT_MIN, + shpc_slot_command(d, i + SHPC_CMD_TRGT_MIN, SHPC_STATE_NO, SHPC_LED_OFF, SHPC_LED_NO); } } @@ -402,7 +412,7 @@ static void shpc_write(PCIDevice *d, unsigned addr, uint64_t val, int l) shpc->config[a] &= ~(val & w1cmask); /* W1C: Write 1 to Clear */ } if (ranges_overlap(addr, l, SHPC_CMD_CODE, 2)) { - shpc_command(shpc); + shpc_command(d); } shpc_interrupt_update(d); } @@ -486,8 +496,9 @@ static const MemoryRegionOps shpc_mmio_ops = { .max_access_size = 4, }, }; -static void shpc_device_plug_common(PCIDevice *affected_dev, int *slot, - SHPCDevice *shpc, Error **errp) + +static bool shpc_device_get_slot(PCIDevice *affected_dev, int *slot, + SHPCDevice *shpc, Error **errp) { int pci_slot = PCI_SLOT(affected_dev->devfn); *slot = SHPC_PCI_TO_IDX(pci_slot); @@ -497,21 +508,20 @@ static void shpc_device_plug_common(PCIDevice *affected_dev, int *slot, "controller. 
Valid slots are between %d and %d.", pci_slot, SHPC_IDX_TO_PCI(0), SHPC_IDX_TO_PCI(shpc->nslots) - 1); - return; + return false; } + + return true; } void shpc_device_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { - Error *local_err = NULL; PCIDevice *pci_hotplug_dev = PCI_DEVICE(hotplug_dev); SHPCDevice *shpc = pci_hotplug_dev->shpc; int slot; - shpc_device_plug_common(PCI_DEVICE(dev), &slot, shpc, &local_err); - if (local_err) { - error_propagate(errp, local_err); + if (!shpc_device_get_slot(PCI_DEVICE(dev), &slot, shpc, errp)) { return; } @@ -553,16 +563,13 @@ void shpc_device_unplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, void shpc_device_unplug_request_cb(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { - Error *local_err = NULL; PCIDevice *pci_hotplug_dev = PCI_DEVICE(hotplug_dev); SHPCDevice *shpc = pci_hotplug_dev->shpc; uint8_t state; uint8_t led; int slot; - shpc_device_plug_common(PCI_DEVICE(dev), &slot, shpc, &local_err); - if (local_err) { - error_propagate(errp, local_err); + if (!shpc_device_get_slot(PCI_DEVICE(dev), &slot, shpc, errp)) { return; } diff --git a/hw/pci/trace-events b/hw/pci/trace-events index aaf46bc92d..42430869ce 100644 --- a/hw/pci/trace-events +++ b/hw/pci/trace-events @@ -3,6 +3,7 @@ # pci.c pci_update_mappings_del(const char *dev, uint32_t bus, uint32_t slot, uint32_t func, int bar, uint64_t addr, uint64_t size) "%s %02x:%02x.%x %d,0x%"PRIx64"+0x%"PRIx64 pci_update_mappings_add(const char *dev, uint32_t bus, uint32_t slot, uint32_t func, int bar, uint64_t addr, uint64_t size) "%s %02x:%02x.%x %d,0x%"PRIx64"+0x%"PRIx64 +pci_route_irq(int dev_irq, const char *dev_path, int parent_irq, const char *parent_path) "IRQ %d @%s -> IRQ %d @%s" # pci_host.c pci_cfg_read(const char *dev, uint32_t bus, uint32_t slot, uint32_t func, unsigned offs, unsigned val) "%s %02x:%02x.%x @0x%x -> 0x%x" diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c index 44b1fbbc93..11cb48af2f 100644 --- a/hw/ppc/pnv.c +++ b/hw/ppc/pnv.c @@ -284,73 +284,19 @@ static void pnv_dt_icp(PnvChip *chip, void *fdt, uint32_t pir, g_free(reg); } -static PnvPhb4PecState *pnv_phb4_get_pec(PnvChip *chip, PnvPHB4 *phb, - Error **errp) -{ - PnvPHB *phb_base = phb->phb_base; - PnvPhb4PecState *pecs = NULL; - int chip_id = phb->chip_id; - int index = phb->phb_id; - int i, j; - - if (phb_base->version == 4) { - Pnv9Chip *chip9 = PNV9_CHIP(chip); - - pecs = chip9->pecs; - } else if (phb_base->version == 5) { - Pnv10Chip *chip10 = PNV10_CHIP(chip); - - pecs = chip10->pecs; - } else { - g_assert_not_reached(); - } - - for (i = 0; i < chip->num_pecs; i++) { - /* - * For each PEC, check the amount of phbs it supports - * and see if the given phb4 index matches an index. - */ - PnvPhb4PecState *pec = &pecs[i]; - - for (j = 0; j < pec->num_phbs; j++) { - if (index == pnv_phb4_pec_get_phb_id(pec, j)) { - return pec; - } - } - } - error_setg(errp, - "pnv-phb4 chip-id %d index %d didn't match any existing PEC", - chip_id, index); - - return NULL; -} - /* - * Adds a PnvPHB to the chip. Returns the parent obj of the - * PHB which varies with each version (phb version 3 is parented - * by the chip, version 4 and 5 are parented by the PEC - * device). - * - * TODO: for version 3 we're still parenting the PHB with the - * chip. We should parent with a (so far not implemented) - * PHB3 PEC device. + * Adds a PnvPHB to the chip on P8. 
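+ * (PHB4/PHB5 instances are attached to their PEC instead, see the new
+ * pnv_pec_add_phb() helper in pnv_phb4_pec.c.)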
+ * Implemented here, like for defaults PHBs */ -Object *pnv_chip_add_phb(PnvChip *chip, PnvPHB *phb, Error **errp) +PnvChip *pnv_chip_add_phb(PnvChip *chip, PnvPHB *phb) { - if (phb->version == 3) { - Pnv8Chip *chip8 = PNV8_CHIP(chip); - - phb->chip = chip; - - chip8->phbs[chip8->num_phbs] = phb; - chip8->num_phbs++; - - return OBJECT(chip); - } + Pnv8Chip *chip8 = PNV8_CHIP(chip); - phb->pec = pnv_phb4_get_pec(chip, PNV_PHB4(phb->backend), errp); + phb->chip = chip; - return OBJECT(phb->pec); + chip8->phbs[chip8->num_phbs] = phb; + chip8->num_phbs++; + return chip; } static void pnv_chip_power8_dt_populate(PnvChip *chip, void *fdt) diff --git a/hw/riscv/Kconfig b/hw/riscv/Kconfig index 4550b3b938..6528ebfa3a 100644 --- a/hw/riscv/Kconfig +++ b/hw/riscv/Kconfig @@ -44,6 +44,7 @@ config RISCV_VIRT select VIRTIO_MMIO select FW_CFG_DMA select PLATFORM_BUS + select ACPI config SHAKTI_C bool diff --git a/hw/riscv/meson.build b/hw/riscv/meson.build index ab6cae57ea..2f7ee81be3 100644 --- a/hw/riscv/meson.build +++ b/hw/riscv/meson.build @@ -9,5 +9,6 @@ riscv_ss.add(when: 'CONFIG_SIFIVE_E', if_true: files('sifive_e.c')) riscv_ss.add(when: 'CONFIG_SIFIVE_U', if_true: files('sifive_u.c')) riscv_ss.add(when: 'CONFIG_SPIKE', if_true: files('spike.c')) riscv_ss.add(when: 'CONFIG_MICROCHIP_PFSOC', if_true: files('microchip_pfsoc.c')) +riscv_ss.add(when: 'CONFIG_ACPI', if_true: files('virt-acpi-build.c')) hw_arch += {'riscv': riscv_ss} diff --git a/hw/riscv/sifive_u.c b/hw/riscv/sifive_u.c index ad3bb35b34..35a335b8d0 100644 --- a/hw/riscv/sifive_u.c +++ b/hw/riscv/sifive_u.c @@ -99,7 +99,7 @@ static void create_fdt(SiFiveUState *s, const MemMapEntry *memmap, MachineState *ms = MACHINE(s); uint64_t mem_size = ms->ram_size; void *fdt; - int cpu, fdt_size; + int cpu; uint32_t *cells; char *nodename; uint32_t plic_phandle, prci_phandle, gpio_phandle, phandle = 1; @@ -112,18 +112,10 @@ static void create_fdt(SiFiveUState *s, const MemMapEntry *memmap, "sifive,plic-1.0.0", "riscv,plic0" }; - if (ms->dtb) { - fdt = ms->fdt = load_device_tree(ms->dtb, &fdt_size); - if (!fdt) { - error_report("load_device_tree() failed"); - exit(1); - } - } else { - fdt = ms->fdt = create_device_tree(&fdt_size); - if (!fdt) { - error_report("create_device_tree() failed"); - exit(1); - } + fdt = ms->fdt = create_device_tree(&s->fdt_size); + if (!fdt) { + error_report("create_device_tree() failed"); + exit(1); } qemu_fdt_setprop_string(fdt, "/", "model", "SiFive HiFive Unleashed A00"); @@ -560,8 +552,16 @@ static void sifive_u_machine_init(MachineState *machine) qdev_connect_gpio_out(DEVICE(&(s->soc.gpio)), 10, qemu_allocate_irq(sifive_u_machine_reset, NULL, 0)); - /* create device tree */ - create_fdt(s, memmap, riscv_is_32bit(&s->soc.u_cpus)); + /* load/create device tree */ + if (machine->dtb) { + machine->fdt = load_device_tree(machine->dtb, &s->fdt_size); + if (!machine->fdt) { + error_report("load_device_tree() failed"); + exit(1); + } + } else { + create_fdt(s, memmap, riscv_is_32bit(&s->soc.u_cpus)); + } if (s->start_in_flash) { /* diff --git a/hw/riscv/virt-acpi-build.c b/hw/riscv/virt-acpi-build.c new file mode 100644 index 0000000000..82da0a238c --- /dev/null +++ b/hw/riscv/virt-acpi-build.c @@ -0,0 +1,416 @@ +/* + * Support for generating ACPI tables and passing them to Guests + * + * RISC-V virt ACPI generation + * + * Copyright (C) 2008-2010 Kevin O'Connor <kevin@koconnor.net> + * Copyright (C) 2006 Fabrice Bellard + * Copyright (C) 2013 Red Hat Inc + * Copyright (c) 2015 HUAWEI TECHNOLOGIES CO.,LTD. 
+ * Copyright (C) 2021-2023 Ventana Micro Systems Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "hw/acpi/acpi-defs.h" +#include "hw/acpi/acpi.h" +#include "hw/acpi/aml-build.h" +#include "hw/acpi/utils.h" +#include "qapi/error.h" +#include "sysemu/reset.h" +#include "migration/vmstate.h" +#include "hw/riscv/virt.h" +#include "hw/riscv/numa.h" +#include "hw/intc/riscv_aclint.h" + +#define ACPI_BUILD_TABLE_SIZE 0x20000 + +typedef struct AcpiBuildState { + /* Copy of table in RAM (for patching) */ + MemoryRegion *table_mr; + MemoryRegion *rsdp_mr; + MemoryRegion *linker_mr; + /* Is table patched? */ + bool patched; +} AcpiBuildState; + +static void acpi_align_size(GArray *blob, unsigned align) +{ + /* + * Align size to multiple of given size. This reduces the chance + * we need to change size in the future (breaking cross version migration). + */ + g_array_set_size(blob, ROUND_UP(acpi_data_len(blob), align)); +} + +static void riscv_acpi_madt_add_rintc(uint32_t uid, + const CPUArchIdList *arch_ids, + GArray *entry) +{ + uint64_t hart_id = arch_ids->cpus[uid].arch_id; + + build_append_int_noprefix(entry, 0x18, 1); /* Type */ + build_append_int_noprefix(entry, 20, 1); /* Length */ + build_append_int_noprefix(entry, 1, 1); /* Version */ + build_append_int_noprefix(entry, 0, 1); /* Reserved */ + build_append_int_noprefix(entry, 0x1, 4); /* Flags */ + build_append_int_noprefix(entry, hart_id, 8); /* Hart ID */ + build_append_int_noprefix(entry, uid, 4); /* ACPI Processor UID */ +} + +static void acpi_dsdt_add_cpus(Aml *scope, RISCVVirtState *s) +{ + MachineClass *mc = MACHINE_GET_CLASS(s); + MachineState *ms = MACHINE(s); + const CPUArchIdList *arch_ids = mc->possible_cpu_arch_ids(ms); + + for (int i = 0; i < arch_ids->len; i++) { + Aml *dev; + GArray *madt_buf = g_array_new(0, 1, 1); + + dev = aml_device("C%.03X", i); + aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0007"))); + aml_append(dev, aml_name_decl("_UID", + aml_int(arch_ids->cpus[i].arch_id))); + + /* build _MAT object */ + riscv_acpi_madt_add_rintc(i, arch_ids, madt_buf); + aml_append(dev, aml_name_decl("_MAT", + aml_buffer(madt_buf->len, + (uint8_t *)madt_buf->data))); + g_array_free(madt_buf, true); + + aml_append(scope, dev); + } +} + +static void acpi_dsdt_add_fw_cfg(Aml *scope, const MemMapEntry *fw_cfg_memmap) +{ + Aml *dev = aml_device("FWCF"); + aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0002"))); + + /* device present, functioning, decoding, not shown in UI */ + aml_append(dev, aml_name_decl("_STA", aml_int(0xB))); + aml_append(dev, aml_name_decl("_CCA", aml_int(1))); + + Aml *crs = aml_resource_template(); + aml_append(crs, aml_memory32_fixed(fw_cfg_memmap->base, + fw_cfg_memmap->size, AML_READ_WRITE)); + aml_append(dev, aml_name_decl("_CRS", crs)); + aml_append(scope, dev); +} + +/* RHCT Node[N] starts at offset 56 */ +#define RHCT_NODE_ARRAY_OFFSET 56 + +/* + * 
ACPI spec, Revision 6.5+ + * 5.2.36 RISC-V Hart Capabilities Table (RHCT) + * REF: https://github.com/riscv-non-isa/riscv-acpi/issues/16 + * https://drive.google.com/file/d/1nP3nFiH4jkPMp6COOxP6123DCZKR-tia/view + */ +static void build_rhct(GArray *table_data, + BIOSLinker *linker, + RISCVVirtState *s) +{ + MachineClass *mc = MACHINE_GET_CLASS(s); + MachineState *ms = MACHINE(s); + const CPUArchIdList *arch_ids = mc->possible_cpu_arch_ids(ms); + size_t len, aligned_len; + uint32_t isa_offset, num_rhct_nodes; + RISCVCPU *cpu; + char *isa; + + AcpiTable table = { .sig = "RHCT", .rev = 1, .oem_id = s->oem_id, + .oem_table_id = s->oem_table_id }; + + acpi_table_begin(&table, table_data); + + build_append_int_noprefix(table_data, 0x0, 4); /* Reserved */ + + /* Time Base Frequency */ + build_append_int_noprefix(table_data, + RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, 8); + + /* ISA + N hart info */ + num_rhct_nodes = 1 + ms->smp.cpus; + + /* Number of RHCT nodes*/ + build_append_int_noprefix(table_data, num_rhct_nodes, 4); + + /* Offset to the RHCT node array */ + build_append_int_noprefix(table_data, RHCT_NODE_ARRAY_OFFSET, 4); + + /* ISA String Node */ + isa_offset = table_data->len - table.table_offset; + build_append_int_noprefix(table_data, 0, 2); /* Type 0 */ + + cpu = &s->soc[0].harts[0]; + isa = riscv_isa_string(cpu); + len = 8 + strlen(isa) + 1; + aligned_len = (len % 2) ? (len + 1) : len; + + build_append_int_noprefix(table_data, aligned_len, 2); /* Length */ + build_append_int_noprefix(table_data, 0x1, 2); /* Revision */ + + /* ISA string length including NUL */ + build_append_int_noprefix(table_data, strlen(isa) + 1, 2); + g_array_append_vals(table_data, isa, strlen(isa) + 1); /* ISA string */ + + if (aligned_len != len) { + build_append_int_noprefix(table_data, 0x0, 1); /* Optional Padding */ + } + + /* Hart Info Node */ + for (int i = 0; i < arch_ids->len; i++) { + build_append_int_noprefix(table_data, 0xFFFF, 2); /* Type */ + build_append_int_noprefix(table_data, 16, 2); /* Length */ + build_append_int_noprefix(table_data, 0x1, 2); /* Revision */ + build_append_int_noprefix(table_data, 1, 2); /* Number of offsets */ + build_append_int_noprefix(table_data, i, 4); /* ACPI Processor UID */ + build_append_int_noprefix(table_data, isa_offset, 4); /* Offsets[0] */ + } + + acpi_table_end(linker, &table); +} + +/* FADT */ +static void build_fadt_rev6(GArray *table_data, + BIOSLinker *linker, + RISCVVirtState *s, + unsigned dsdt_tbl_offset) +{ + AcpiFadtData fadt = { + .rev = 6, + .minor_ver = 5, + .flags = 1 << ACPI_FADT_F_HW_REDUCED_ACPI, + .xdsdt_tbl_offset = &dsdt_tbl_offset, + }; + + build_fadt(table_data, linker, &fadt, s->oem_id, s->oem_table_id); +} + +/* DSDT */ +static void build_dsdt(GArray *table_data, + BIOSLinker *linker, + RISCVVirtState *s) +{ + Aml *scope, *dsdt; + const MemMapEntry *memmap = s->memmap; + AcpiTable table = { .sig = "DSDT", .rev = 2, .oem_id = s->oem_id, + .oem_table_id = s->oem_table_id }; + + + acpi_table_begin(&table, table_data); + dsdt = init_aml_allocator(); + + /* + * When booting the VM with UEFI, UEFI takes ownership of the RTC hardware. + * While UEFI can use libfdt to disable the RTC device node in the DTB that + * it passes to the OS, it cannot modify AML. Therefore, we won't generate + * the RTC ACPI device at all when using UEFI. 
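+ * For now the \_SB scope below only describes the CPUs and the fw_cfg
+ * device.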
+ */ + scope = aml_scope("\\_SB"); + acpi_dsdt_add_cpus(scope, s); + + acpi_dsdt_add_fw_cfg(scope, &memmap[VIRT_FW_CFG]); + + aml_append(dsdt, scope); + + /* copy AML table into ACPI tables blob and patch header there */ + g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len); + + acpi_table_end(linker, &table); + free_aml_allocator(); +} + +/* + * ACPI spec, Revision 6.5+ + * 5.2.12 Multiple APIC Description Table (MADT) + * REF: https://github.com/riscv-non-isa/riscv-acpi/issues/15 + * https://drive.google.com/file/d/1R6k4MshhN3WTT-hwqAquu5nX6xSEqK2l/view + */ +static void build_madt(GArray *table_data, + BIOSLinker *linker, + RISCVVirtState *s) +{ + MachineClass *mc = MACHINE_GET_CLASS(s); + MachineState *ms = MACHINE(s); + const CPUArchIdList *arch_ids = mc->possible_cpu_arch_ids(ms); + + AcpiTable table = { .sig = "APIC", .rev = 6, .oem_id = s->oem_id, + .oem_table_id = s->oem_table_id }; + + acpi_table_begin(&table, table_data); + /* Local Interrupt Controller Address */ + build_append_int_noprefix(table_data, 0, 4); + build_append_int_noprefix(table_data, 0, 4); /* MADT Flags */ + + /* RISC-V Local INTC structures per HART */ + for (int i = 0; i < arch_ids->len; i++) { + riscv_acpi_madt_add_rintc(i, arch_ids, table_data); + } + + acpi_table_end(linker, &table); +} + +static void virt_acpi_build(RISCVVirtState *s, AcpiBuildTables *tables) +{ + GArray *table_offsets; + unsigned dsdt, xsdt; + GArray *tables_blob = tables->table_data; + + table_offsets = g_array_new(false, true, + sizeof(uint32_t)); + + bios_linker_loader_alloc(tables->linker, + ACPI_BUILD_TABLE_FILE, tables_blob, + 64, false); + + /* DSDT is pointed to by FADT */ + dsdt = tables_blob->len; + build_dsdt(tables_blob, tables->linker, s); + + /* FADT and others pointed to by XSDT */ + acpi_add_table(table_offsets, tables_blob); + build_fadt_rev6(tables_blob, tables->linker, s, dsdt); + + acpi_add_table(table_offsets, tables_blob); + build_madt(tables_blob, tables->linker, s); + + acpi_add_table(table_offsets, tables_blob); + build_rhct(tables_blob, tables->linker, s); + + /* XSDT is pointed to by RSDP */ + xsdt = tables_blob->len; + build_xsdt(tables_blob, tables->linker, table_offsets, s->oem_id, + s->oem_table_id); + + /* RSDP is in FSEG memory, so allocate it separately */ + { + AcpiRsdpData rsdp_data = { + .revision = 2, + .oem_id = s->oem_id, + .xsdt_tbl_offset = &xsdt, + .rsdt_tbl_offset = NULL, + }; + build_rsdp(tables->rsdp, tables->linker, &rsdp_data); + } + + /* + * The align size is 128, warn if 64k is not enough therefore + * the align size could be resized. + */ + if (tables_blob->len > ACPI_BUILD_TABLE_SIZE / 2) { + warn_report("ACPI table size %u exceeds %d bytes," + " migration may not work", + tables_blob->len, ACPI_BUILD_TABLE_SIZE / 2); + error_printf("Try removing some objects."); + } + + acpi_align_size(tables_blob, ACPI_BUILD_TABLE_SIZE); + + /* Clean up memory that's no longer used */ + g_array_free(table_offsets, true); +} + +static void acpi_ram_update(MemoryRegion *mr, GArray *data) +{ + uint32_t size = acpi_data_len(data); + + /* + * Make sure RAM size is correct - in case it got changed + * e.g. by migration + */ + memory_region_ram_resize(mr, size, &error_abort); + + memcpy(memory_region_get_ram_ptr(mr), data->data, size); + memory_region_set_dirty(mr, 0, size); +} + +static void virt_acpi_build_update(void *build_opaque) +{ + AcpiBuildState *build_state = build_opaque; + AcpiBuildTables tables; + + /* No state to update or already patched? Nothing to do. 
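+     * virt_acpi_setup() registers this callback for the table, linker and
+     * RSDP blobs, and virt_acpi_build_reset() clears "patched" again, so the
+     * tables are rebuilt at most once per reset.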
*/ + if (!build_state || build_state->patched) { + return; + } + + build_state->patched = true; + + acpi_build_tables_init(&tables); + + virt_acpi_build(RISCV_VIRT_MACHINE(qdev_get_machine()), &tables); + + acpi_ram_update(build_state->table_mr, tables.table_data); + acpi_ram_update(build_state->rsdp_mr, tables.rsdp); + acpi_ram_update(build_state->linker_mr, tables.linker->cmd_blob); + + acpi_build_tables_cleanup(&tables, true); +} + +static void virt_acpi_build_reset(void *build_opaque) +{ + AcpiBuildState *build_state = build_opaque; + build_state->patched = false; +} + +static const VMStateDescription vmstate_virt_acpi_build = { + .name = "virt_acpi_build", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_BOOL(patched, AcpiBuildState), + VMSTATE_END_OF_LIST() + }, +}; + +void virt_acpi_setup(RISCVVirtState *s) +{ + AcpiBuildTables tables; + AcpiBuildState *build_state; + + build_state = g_malloc0(sizeof *build_state); + + acpi_build_tables_init(&tables); + virt_acpi_build(s, &tables); + + /* Now expose it all to Guest */ + build_state->table_mr = acpi_add_rom_blob(virt_acpi_build_update, + build_state, tables.table_data, + ACPI_BUILD_TABLE_FILE); + assert(build_state->table_mr != NULL); + + build_state->linker_mr = acpi_add_rom_blob(virt_acpi_build_update, + build_state, + tables.linker->cmd_blob, + ACPI_BUILD_LOADER_FILE); + + build_state->rsdp_mr = acpi_add_rom_blob(virt_acpi_build_update, + build_state, tables.rsdp, + ACPI_BUILD_RSDP_FILE); + + qemu_register_reset(virt_acpi_build_reset, build_state); + virt_acpi_build_reset(build_state); + vmstate_register(NULL, 0, &vmstate_virt_acpi_build, build_state); + + /* + * Clean up tables but don't free the memory: we track it + * in build_state. + */ + acpi_build_tables_cleanup(&tables, false); +} diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c index 86c4adc0c9..4e3efbee16 100644 --- a/hw/riscv/virt.c +++ b/hw/riscv/virt.c @@ -49,6 +49,8 @@ #include "hw/pci/pci.h" #include "hw/pci-host/gpex.h" #include "hw/display/ramfb.h" +#include "hw/acpi/aml-build.h" +#include "qapi/qapi-visit-common.h" /* * The virt machine physical address space used by some of the devices @@ -228,26 +230,41 @@ static void create_fdt_socket_cpus(RISCVVirtState *s, int socket, int cpu; uint32_t cpu_phandle; MachineState *ms = MACHINE(s); - char *name, *cpu_name, *core_name, *intc_name; + char *name, *cpu_name, *core_name, *intc_name, *sv_name; bool is_32_bit = riscv_is_32bit(&s->soc[0]); + uint8_t satp_mode_max; for (cpu = s->soc[socket].num_harts - 1; cpu >= 0; cpu--) { + RISCVCPU *cpu_ptr = &s->soc[socket].harts[cpu]; + cpu_phandle = (*phandle)++; cpu_name = g_strdup_printf("/cpus/cpu@%d", s->soc[socket].hartid_base + cpu); qemu_fdt_add_subnode(ms->fdt, cpu_name); - if (riscv_feature(&s->soc[socket].harts[cpu].env, - RISCV_FEATURE_MMU)) { - qemu_fdt_setprop_string(ms->fdt, cpu_name, "mmu-type", - (is_32_bit) ? 
"riscv,sv32" : "riscv,sv48"); - } else { - qemu_fdt_setprop_string(ms->fdt, cpu_name, "mmu-type", - "riscv,none"); - } - name = riscv_isa_string(&s->soc[socket].harts[cpu]); + + satp_mode_max = satp_mode_max_from_map( + s->soc[socket].harts[cpu].cfg.satp_mode.map); + sv_name = g_strdup_printf("riscv,%s", + satp_mode_str(satp_mode_max, is_32_bit)); + qemu_fdt_setprop_string(ms->fdt, cpu_name, "mmu-type", sv_name); + g_free(sv_name); + + + name = riscv_isa_string(cpu_ptr); qemu_fdt_setprop_string(ms->fdt, cpu_name, "riscv,isa", name); g_free(name); + + if (cpu_ptr->cfg.ext_icbom) { + qemu_fdt_setprop_cell(ms->fdt, cpu_name, "riscv,cbom-block-size", + cpu_ptr->cfg.cbom_blocksize); + } + + if (cpu_ptr->cfg.ext_icboz) { + qemu_fdt_setprop_cell(ms->fdt, cpu_name, "riscv,cboz-block-size", + cpu_ptr->cfg.cboz_blocksize); + } + qemu_fdt_setprop_string(ms->fdt, cpu_name, "compatible", "riscv"); qemu_fdt_setprop_string(ms->fdt, cpu_name, "status", "okay"); qemu_fdt_setprop_cell(ms->fdt, cpu_name, "reg", @@ -1008,18 +1025,10 @@ static void create_fdt(RISCVVirtState *s, const MemMapEntry *memmap) uint32_t irq_pcie_phandle = 1, irq_virtio_phandle = 1; uint8_t rng_seed[32]; - if (ms->dtb) { - ms->fdt = load_device_tree(ms->dtb, &s->fdt_size); - if (!ms->fdt) { - error_report("load_device_tree() failed"); - exit(1); - } - } else { - ms->fdt = create_device_tree(&s->fdt_size); - if (!ms->fdt) { - error_report("create_device_tree() failed"); - exit(1); - } + ms->fdt = create_device_tree(&s->fdt_size); + if (!ms->fdt) { + error_report("create_device_tree() failed"); + exit(1); } qemu_fdt_setprop_string(ms->fdt, "/", "model", "riscv-virtio,qemu"); @@ -1314,6 +1323,10 @@ static void virt_machine_done(Notifier *notifier, void *data) if (kvm_enabled()) { riscv_setup_direct_kernel(kernel_entry, fdt_load_addr); } + + if (virt_is_acpi_enabled(s)) { + virt_acpi_setup(s); + } } static void virt_machine_init(MachineState *machine) @@ -1449,6 +1462,8 @@ static void virt_machine_init(MachineState *machine) ROUND_UP(virt_high_pcie_memmap.base, virt_high_pcie_memmap.size); } + s->memmap = virt_memmap; + /* register system main memory (actual RAM) */ memory_region_add_subregion(system_memory, memmap[VIRT_DRAM].base, machine->ram); @@ -1504,8 +1519,16 @@ static void virt_machine_init(MachineState *machine) } virt_flash_map(s, system_memory); - /* create device tree */ - create_fdt(s, memmap); + /* load/create device tree */ + if (machine->dtb) { + machine->fdt = load_device_tree(machine->dtb, &s->fdt_size); + if (!machine->fdt) { + error_report("load_device_tree() failed"); + exit(1); + } + } else { + create_fdt(s, memmap); + } s->machine_done.notify = virt_machine_done; qemu_add_machine_init_done_notifier(&s->machine_done); @@ -1513,6 +1536,11 @@ static void virt_machine_init(MachineState *machine) static void virt_machine_instance_init(Object *obj) { + RISCVVirtState *s = RISCV_VIRT_MACHINE(obj); + + s->oem_id = g_strndup(ACPI_BUILD_APPNAME6, 6); + s->oem_table_id = g_strndup(ACPI_BUILD_APPNAME8, 8); + s->acpi = ON_OFF_AUTO_AUTO; } static char *virt_get_aia_guests(Object *obj, Error **errp) @@ -1587,6 +1615,28 @@ static void virt_set_aclint(Object *obj, bool value, Error **errp) s->have_aclint = value; } +bool virt_is_acpi_enabled(RISCVVirtState *s) +{ + return s->acpi != ON_OFF_AUTO_OFF; +} + +static void virt_get_acpi(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + RISCVVirtState *s = RISCV_VIRT_MACHINE(obj); + OnOffAuto acpi = s->acpi; + + visit_type_OnOffAuto(v, name, &acpi, errp); +} + 
+static void virt_set_acpi(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + RISCVVirtState *s = RISCV_VIRT_MACHINE(obj); + + visit_type_OnOffAuto(v, name, &s->acpi, errp); +} + static HotplugHandler *virt_machine_get_hotplug_handler(MachineState *machine, DeviceState *dev) { @@ -1658,6 +1708,11 @@ static void virt_machine_class_init(ObjectClass *oc, void *data) sprintf(str, "Set number of guest MMIO pages for AIA IMSIC. Valid value " "should be between 0 and %d.", VIRT_IRQCHIP_MAX_GUESTS); object_class_property_set_description(oc, "aia-guests", str); + object_class_property_add(oc, "acpi", "OnOffAuto", + virt_get_acpi, virt_set_acpi, + NULL, NULL); + object_class_property_set_description(oc, "acpi", + "Enable ACPI"); } static const TypeInfo virt_machine_typeinfo = { diff --git a/hw/smbios/smbios.c b/hw/smbios/smbios.c index 4869566cf5..d2007e70fb 100644 --- a/hw/smbios/smbios.c +++ b/hw/smbios/smbios.c @@ -750,14 +750,16 @@ static void smbios_build_type_4_table(MachineState *ms, unsigned instance) t->core_count = (ms->smp.cores > 255) ? 0xFF : ms->smp.cores; t->core_enabled = t->core_count; - t->core_count2 = t->core_enabled2 = cpu_to_le16(ms->smp.cores); - t->thread_count = (ms->smp.threads > 255) ? 0xFF : ms->smp.threads; - t->thread_count2 = cpu_to_le16(ms->smp.threads); t->processor_characteristics = cpu_to_le16(0x02); /* Unknown */ t->processor_family2 = cpu_to_le16(0x01); /* Other */ + if (tbl_len == SMBIOS_TYPE_4_LEN_V30) { + t->core_count2 = t->core_enabled2 = cpu_to_le16(ms->smp.cores); + t->thread_count2 = cpu_to_le16(ms->smp.threads); + } + SMBIOS_BUILD_TABLE_POST; smbios_type4_count++; } diff --git a/hw/ssi/aspeed_smc.c b/hw/ssi/aspeed_smc.c index 22df4be528..7281169322 100644 --- a/hw/ssi/aspeed_smc.c +++ b/hw/ssi/aspeed_smc.c @@ -1134,10 +1134,7 @@ static void aspeed_smc_realize(DeviceState *dev, Error **errp) /* Setup cs_lines for peripherals */ s->cs_lines = g_new0(qemu_irq, asc->cs_num_max); - - for (i = 0; i < asc->cs_num_max; ++i) { - sysbus_init_irq(sbd, &s->cs_lines[i]); - } + qdev_init_gpio_out_named(DEVICE(s), s->cs_lines, "cs", asc->cs_num_max); /* The memory region for the controller registers */ memory_region_init_io(&s->mmio, OBJECT(s), &aspeed_smc_ops, s, diff --git a/hw/timer/hpet.c b/hw/timer/hpet.c index 214d6a0501..6998094233 100644 --- a/hw/timer/hpet.c +++ b/hw/timer/hpet.c @@ -353,6 +353,16 @@ static const VMStateDescription vmstate_hpet = { } }; +static void hpet_arm(HPETTimer *t, uint64_t ticks) +{ + if (ticks < ns_to_ticks(INT64_MAX / 2)) { + timer_mod(t->qemu_timer, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + ticks_to_ns(ticks)); + } else { + timer_del(t->qemu_timer); + } +} + /* * timer expiration callback */ @@ -375,13 +385,11 @@ static void hpet_timer(void *opaque) } } diff = hpet_calculate_diff(t, cur_tick); - timer_mod(t->qemu_timer, - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (int64_t)ticks_to_ns(diff)); + hpet_arm(t, diff); } else if (t->config & HPET_TN_32BIT && !timer_is_periodic(t)) { if (t->wrap_flag) { diff = hpet_calculate_diff(t, cur_tick); - timer_mod(t->qemu_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + - (int64_t)ticks_to_ns(diff)); + hpet_arm(t, diff); t->wrap_flag = 0; } } @@ -408,8 +416,7 @@ static void hpet_set_timer(HPETTimer *t) t->wrap_flag = 1; } } - timer_mod(t->qemu_timer, - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (int64_t)ticks_to_ns(diff)); + hpet_arm(t, diff); } static void hpet_del_timer(HPETTimer *t) diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c index 
4307296358..515ccf870d 100644 --- a/hw/virtio/vhost-shadow-virtqueue.c +++ b/hw/virtio/vhost-shadow-virtqueue.c @@ -522,7 +522,7 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq, size_t vhost_svq_poll(VhostShadowVirtqueue *svq) { int64_t start_us = g_get_monotonic_time(); - uint32_t len; + uint32_t len = 0; do { if (vhost_svq_more_used(svq)) { diff --git a/hw/virtio/vhost-user-gpio.c b/hw/virtio/vhost-user-gpio.c index fe3da32c74..d6927b610a 100644 --- a/hw/virtio/vhost-user-gpio.c +++ b/hw/virtio/vhost-user-gpio.c @@ -16,6 +16,7 @@ #include "trace.h" #define REALIZE_CONNECTION_RETRIES 3 +#define VHOST_NVQS 2 /* Features required from VirtIO */ static const int feature_bits[] = { @@ -208,8 +209,7 @@ static void do_vhost_user_cleanup(VirtIODevice *vdev, VHostUserGPIO *gpio) { virtio_delete_queue(gpio->command_vq); virtio_delete_queue(gpio->interrupt_vq); - g_free(gpio->vhost_dev.vqs); - gpio->vhost_dev.vqs = NULL; + g_free(gpio->vhost_vqs); virtio_cleanup(vdev); vhost_user_cleanup(&gpio->vhost_user); } @@ -229,6 +229,9 @@ static int vu_gpio_connect(DeviceState *dev, Error **errp) vhost_dev_set_config_notifier(vhost_dev, &gpio_ops); gpio->vhost_user.supports_config = true; + gpio->vhost_dev.nvqs = VHOST_NVQS; + gpio->vhost_dev.vqs = gpio->vhost_vqs; + ret = vhost_dev_init(vhost_dev, &gpio->vhost_user, VHOST_BACKEND_TYPE_USER, 0, errp); if (ret < 0) { @@ -347,10 +350,9 @@ static void vu_gpio_device_realize(DeviceState *dev, Error **errp) virtio_init(vdev, VIRTIO_ID_GPIO, sizeof(gpio->config)); - gpio->vhost_dev.nvqs = 2; gpio->command_vq = virtio_add_queue(vdev, 256, vu_gpio_handle_output); gpio->interrupt_vq = virtio_add_queue(vdev, 256, vu_gpio_handle_output); - gpio->vhost_dev.vqs = g_new0(struct vhost_virtqueue, gpio->vhost_dev.nvqs); + gpio->vhost_vqs = g_new0(struct vhost_virtqueue, VHOST_NVQS); gpio->connected = false; diff --git a/hw/virtio/vhost-user-i2c.c b/hw/virtio/vhost-user-i2c.c index dc5c828ba6..60eaf0d95b 100644 --- a/hw/virtio/vhost-user-i2c.c +++ b/hw/virtio/vhost-user-i2c.c @@ -143,8 +143,6 @@ static void do_vhost_user_cleanup(VirtIODevice *vdev, VHostUserI2C *i2c) vhost_user_cleanup(&i2c->vhost_user); virtio_delete_queue(i2c->vq); virtio_cleanup(vdev); - g_free(i2c->vhost_dev.vqs); - i2c->vhost_dev.vqs = NULL; } static int vu_i2c_connect(DeviceState *dev) @@ -228,6 +226,7 @@ static void vu_i2c_device_realize(DeviceState *dev, Error **errp) ret = vhost_dev_init(&i2c->vhost_dev, &i2c->vhost_user, VHOST_BACKEND_TYPE_USER, 0, errp); if (ret < 0) { + g_free(i2c->vhost_dev.vqs); do_vhost_user_cleanup(vdev, i2c); } @@ -239,10 +238,12 @@ static void vu_i2c_device_unrealize(DeviceState *dev) { VirtIODevice *vdev = VIRTIO_DEVICE(dev); VHostUserI2C *i2c = VHOST_USER_I2C(dev); + struct vhost_virtqueue *vhost_vqs = i2c->vhost_dev.vqs; /* This will stop vhost backend if appropriate. 
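 * The vqs array is stashed in vhost_vqs above because vhost_dev_cleanup()
 * clears the vhost_dev fields, so the array has to be freed separately
 * afterwards (vhost-user-rng below follows the same pattern).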
*/ vu_i2c_set_status(vdev, 0); vhost_dev_cleanup(&i2c->vhost_dev); + g_free(vhost_vqs); do_vhost_user_cleanup(vdev, i2c); } diff --git a/hw/virtio/vhost-user-rng.c b/hw/virtio/vhost-user-rng.c index 201a39e220..efc54cd3fb 100644 --- a/hw/virtio/vhost-user-rng.c +++ b/hw/virtio/vhost-user-rng.c @@ -229,6 +229,7 @@ static void vu_rng_device_realize(DeviceState *dev, Error **errp) return; vhost_dev_init_failed: + g_free(rng->vhost_dev.vqs); virtio_delete_queue(rng->req_vq); virtio_add_queue_failed: virtio_cleanup(vdev); @@ -239,12 +240,12 @@ static void vu_rng_device_unrealize(DeviceState *dev) { VirtIODevice *vdev = VIRTIO_DEVICE(dev); VHostUserRNG *rng = VHOST_USER_RNG(dev); + struct vhost_virtqueue *vhost_vqs = rng->vhost_dev.vqs; vu_rng_set_status(vdev, 0); vhost_dev_cleanup(&rng->vhost_dev); - g_free(rng->vhost_dev.vqs); - rng->vhost_dev.vqs = NULL; + g_free(vhost_vqs); virtio_delete_queue(rng->req_vq); virtio_cleanup(vdev); vhost_user_cleanup(&rng->vhost_user); diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c index e68daa35d4..8968541514 100644 --- a/hw/virtio/vhost-user.c +++ b/hw/virtio/vhost-user.c @@ -40,7 +40,7 @@ #define VHOST_MEMORY_BASELINE_NREGIONS 8 #define VHOST_USER_F_PROTOCOL_FEATURES 30 -#define VHOST_USER_SLAVE_MAX_FDS 8 +#define VHOST_USER_BACKEND_MAX_FDS 8 /* * Set maximum number of RAM slots supported to @@ -71,12 +71,12 @@ enum VhostUserProtocolFeature { VHOST_USER_PROTOCOL_F_RARP = 2, VHOST_USER_PROTOCOL_F_REPLY_ACK = 3, VHOST_USER_PROTOCOL_F_NET_MTU = 4, - VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5, + VHOST_USER_PROTOCOL_F_BACKEND_REQ = 5, VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6, VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7, VHOST_USER_PROTOCOL_F_PAGEFAULT = 8, VHOST_USER_PROTOCOL_F_CONFIG = 9, - VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10, + VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD = 10, VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12, VHOST_USER_PROTOCOL_F_RESET_DEVICE = 13, @@ -110,7 +110,7 @@ typedef enum VhostUserRequest { VHOST_USER_SET_VRING_ENABLE = 18, VHOST_USER_SEND_RARP = 19, VHOST_USER_NET_SET_MTU = 20, - VHOST_USER_SET_SLAVE_REQ_FD = 21, + VHOST_USER_SET_BACKEND_REQ_FD = 21, VHOST_USER_IOTLB_MSG = 22, VHOST_USER_SET_VRING_ENDIAN = 23, VHOST_USER_GET_CONFIG = 24, @@ -134,11 +134,11 @@ typedef enum VhostUserRequest { } VhostUserRequest; typedef enum VhostUserSlaveRequest { - VHOST_USER_SLAVE_NONE = 0, - VHOST_USER_SLAVE_IOTLB_MSG = 1, - VHOST_USER_SLAVE_CONFIG_CHANGE_MSG = 2, - VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG = 3, - VHOST_USER_SLAVE_MAX + VHOST_USER_BACKEND_NONE = 0, + VHOST_USER_BACKEND_IOTLB_MSG = 1, + VHOST_USER_BACKEND_CONFIG_CHANGE_MSG = 2, + VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG = 3, + VHOST_USER_BACKEND_MAX } VhostUserSlaveRequest; typedef struct VhostUserMemoryRegion { @@ -1638,13 +1638,13 @@ static gboolean slave_read(QIOChannel *ioc, GIOCondition condition, } switch (hdr.request) { - case VHOST_USER_SLAVE_IOTLB_MSG: + case VHOST_USER_BACKEND_IOTLB_MSG: ret = vhost_backend_handle_iotlb_msg(dev, &payload.iotlb); break; - case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG : + case VHOST_USER_BACKEND_CONFIG_CHANGE_MSG: ret = vhost_user_slave_handle_config_change(dev); break; - case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG: + case VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG: ret = vhost_user_slave_handle_vring_host_notifier(dev, &payload.area, fd ? 
fd[0] : -1); break; @@ -1696,7 +1696,7 @@ fdcleanup: static int vhost_setup_slave_channel(struct vhost_dev *dev) { VhostUserMsg msg = { - .hdr.request = VHOST_USER_SET_SLAVE_REQ_FD, + .hdr.request = VHOST_USER_SET_BACKEND_REQ_FD, .hdr.flags = VHOST_USER_VERSION, }; struct vhost_user *u = dev->opaque; @@ -1707,7 +1707,7 @@ static int vhost_setup_slave_channel(struct vhost_dev *dev) QIOChannel *ioc; if (!virtio_has_feature(dev->protocol_features, - VHOST_USER_PROTOCOL_F_SLAVE_REQ)) { + VHOST_USER_PROTOCOL_F_BACKEND_REQ)) { return 0; } @@ -2065,7 +2065,7 @@ static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque, if (virtio_has_feature(features, VIRTIO_F_IOMMU_PLATFORM) && !(virtio_has_feature(dev->protocol_features, - VHOST_USER_PROTOCOL_F_SLAVE_REQ) && + VHOST_USER_PROTOCOL_F_BACKEND_REQ) && virtio_has_feature(dev->protocol_features, VHOST_USER_PROTOCOL_F_REPLY_ACK))) { error_setg(errp, "IOMMU support requires reply-ack and " diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c index 542e003101..df3a1e92ac 100644 --- a/hw/virtio/vhost-vdpa.c +++ b/hw/virtio/vhost-vdpa.c @@ -689,26 +689,11 @@ static int vhost_vdpa_get_device_id(struct vhost_dev *dev, return ret; } -static void vhost_vdpa_reset_svq(struct vhost_vdpa *v) -{ - if (!v->shadow_vqs_enabled) { - return; - } - - for (unsigned i = 0; i < v->shadow_vqs->len; ++i) { - VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i); - vhost_svq_stop(svq); - } -} - static int vhost_vdpa_reset_device(struct vhost_dev *dev) { - struct vhost_vdpa *v = dev->opaque; int ret; uint8_t status = 0; - vhost_vdpa_reset_svq(v); - ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status); trace_vhost_vdpa_reset_device(dev, status); return ret; @@ -1100,6 +1085,8 @@ static void vhost_vdpa_svqs_stop(struct vhost_dev *dev) for (unsigned i = 0; i < v->shadow_vqs->len; ++i) { VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i); + + vhost_svq_stop(svq); vhost_vdpa_svq_unmap_rings(dev, svq); event_notifier_cleanup(&svq->hdev_kick); diff --git a/hw/virtio/virtio-qmp.c b/hw/virtio/virtio-qmp.c index e4d4bece2d..b70148aba9 100644 --- a/hw/virtio/virtio-qmp.c +++ b/hw/virtio/virtio-qmp.c @@ -42,12 +42,12 @@ enum VhostUserProtocolFeature { VHOST_USER_PROTOCOL_F_RARP = 2, VHOST_USER_PROTOCOL_F_REPLY_ACK = 3, VHOST_USER_PROTOCOL_F_NET_MTU = 4, - VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5, + VHOST_USER_PROTOCOL_F_BACKEND_REQ = 5, VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6, VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7, VHOST_USER_PROTOCOL_F_PAGEFAULT = 8, VHOST_USER_PROTOCOL_F_CONFIG = 9, - VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10, + VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD = 10, VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12, VHOST_USER_PROTOCOL_F_RESET_DEVICE = 13, @@ -101,8 +101,8 @@ static const qmp_virtio_feature_map_t vhost_user_protocol_map[] = { "supported"), FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_NET_MTU, \ "VHOST_USER_PROTOCOL_F_NET_MTU: Expose host MTU to guest supported"), - FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_SLAVE_REQ, \ - "VHOST_USER_PROTOCOL_F_SLAVE_REQ: Socket fd for back-end initiated " + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_BACKEND_REQ, \ + "VHOST_USER_PROTOCOL_F_BACKEND_REQ: Socket fd for back-end initiated " "requests supported"), FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_CROSS_ENDIAN, \ "VHOST_USER_PROTOCOL_F_CROSS_ENDIAN: Endianness of VQs for legacy " @@ -116,8 +116,8 @@ static const qmp_virtio_feature_map_t vhost_user_protocol_map[] = { FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_CONFIG, \ 
"VHOST_USER_PROTOCOL_F_CONFIG: Vhost-user messaging for virtio " "device configuration space supported"), - FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD, \ - "VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD: Slave fd communication " + FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD, \ + "VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD: Slave fd communication " "channel supported"), FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_HOST_NOTIFIER, \ "VHOST_USER_PROTOCOL_F_HOST_NOTIFIER: Host notifiers for specified " diff --git a/hw/xen/xen_pt.c b/hw/xen/xen_pt.c index 8db0532632..85c93cffcf 100644 --- a/hw/xen/xen_pt.c +++ b/hw/xen/xen_pt.c @@ -57,6 +57,7 @@ #include <sys/ioctl.h> #include "hw/pci/pci.h" +#include "hw/pci/pci_bus.h" #include "hw/qdev-properties.h" #include "hw/qdev-properties-system.h" #include "hw/xen/xen.h" @@ -780,15 +781,6 @@ static void xen_pt_realize(PCIDevice *d, Error **errp) s->hostaddr.bus, s->hostaddr.slot, s->hostaddr.function, s->dev.devfn); - xen_host_pci_device_get(&s->real_device, - s->hostaddr.domain, s->hostaddr.bus, - s->hostaddr.slot, s->hostaddr.function, - errp); - if (*errp) { - error_append_hint(errp, "Failed to \"open\" the real pci device"); - return; - } - s->is_virtfn = s->real_device.is_virtfn; if (s->is_virtfn) { XEN_PT_LOG(d, "%04x:%02x:%02x.%d is a SR-IOV Virtual Function\n", @@ -803,8 +795,10 @@ static void xen_pt_realize(PCIDevice *d, Error **errp) s->io_listener = xen_pt_io_listener; /* Setup VGA bios for passthrough GFX */ - if ((s->real_device.domain == 0) && (s->real_device.bus == 0) && - (s->real_device.dev == 2) && (s->real_device.func == 0)) { + if ((s->real_device.domain == XEN_PCI_IGD_DOMAIN) && + (s->real_device.bus == XEN_PCI_IGD_BUS) && + (s->real_device.dev == XEN_PCI_IGD_DEV) && + (s->real_device.func == XEN_PCI_IGD_FN)) { if (!is_igd_vga_passthrough(&s->real_device)) { error_setg(errp, "Need to enable igd-passthru if you're trying" " to passthrough IGD GFX"); @@ -950,11 +944,58 @@ static void xen_pci_passthrough_instance_init(Object *obj) PCI_DEVICE(obj)->cap_present |= QEMU_PCI_CAP_EXPRESS; } +void xen_igd_reserve_slot(PCIBus *pci_bus) +{ + if (!xen_igd_gfx_pt_enabled()) { + return; + } + + XEN_PT_LOG(0, "Reserving PCI slot 2 for IGD\n"); + pci_bus->slot_reserved_mask |= XEN_PCI_IGD_SLOT_MASK; +} + +static void xen_igd_clear_slot(DeviceState *qdev, Error **errp) +{ + ERRP_GUARD(); + PCIDevice *pci_dev = (PCIDevice *)qdev; + XenPCIPassthroughState *s = XEN_PT_DEVICE(pci_dev); + XenPTDeviceClass *xpdc = XEN_PT_DEVICE_GET_CLASS(s); + PCIBus *pci_bus = pci_get_bus(pci_dev); + + xen_host_pci_device_get(&s->real_device, + s->hostaddr.domain, s->hostaddr.bus, + s->hostaddr.slot, s->hostaddr.function, + errp); + if (*errp) { + error_append_hint(errp, "Failed to \"open\" the real pci device"); + return; + } + + if (!(pci_bus->slot_reserved_mask & XEN_PCI_IGD_SLOT_MASK)) { + xpdc->pci_qdev_realize(qdev, errp); + return; + } + + if (is_igd_vga_passthrough(&s->real_device) && + s->real_device.domain == XEN_PCI_IGD_DOMAIN && + s->real_device.bus == XEN_PCI_IGD_BUS && + s->real_device.dev == XEN_PCI_IGD_DEV && + s->real_device.func == XEN_PCI_IGD_FN && + s->real_device.vendor_id == PCI_VENDOR_ID_INTEL) { + pci_bus->slot_reserved_mask &= ~XEN_PCI_IGD_SLOT_MASK; + XEN_PT_LOG(pci_dev, "Intel IGD found, using slot 2\n"); + } + xpdc->pci_qdev_realize(qdev, errp); +} + static void xen_pci_passthrough_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + XenPTDeviceClass *xpdc = 
XEN_PT_DEVICE_CLASS(klass); + xpdc->pci_qdev_realize = dc->realize; + dc->realize = xen_igd_clear_slot; k->realize = xen_pt_realize; k->exit = xen_pt_unregister_device; k->config_read = xen_pt_pci_read_config; @@ -977,6 +1018,7 @@ static const TypeInfo xen_pci_passthrough_info = { .instance_size = sizeof(XenPCIPassthroughState), .instance_finalize = xen_pci_passthrough_finalize, .class_init = xen_pci_passthrough_class_init, + .class_size = sizeof(XenPTDeviceClass), .instance_init = xen_pci_passthrough_instance_init, .interfaces = (InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, diff --git a/hw/xen/xen_pt.h b/hw/xen/xen_pt.h index cf10fc7bbf..e184699740 100644 --- a/hw/xen/xen_pt.h +++ b/hw/xen/xen_pt.h @@ -40,7 +40,20 @@ typedef struct XenPTReg XenPTReg; #define TYPE_XEN_PT_DEVICE "xen-pci-passthrough" OBJECT_DECLARE_SIMPLE_TYPE(XenPCIPassthroughState, XEN_PT_DEVICE) +#define XEN_PT_DEVICE_CLASS(klass) \ + OBJECT_CLASS_CHECK(XenPTDeviceClass, klass, TYPE_XEN_PT_DEVICE) +#define XEN_PT_DEVICE_GET_CLASS(obj) \ + OBJECT_GET_CLASS(XenPTDeviceClass, obj, TYPE_XEN_PT_DEVICE) + +typedef void (*XenPTQdevRealize)(DeviceState *qdev, Error **errp); + +typedef struct XenPTDeviceClass { + PCIDeviceClass parent_class; + XenPTQdevRealize pci_qdev_realize; +} XenPTDeviceClass; + uint32_t igd_read_opregion(XenPCIPassthroughState *s); +void xen_igd_reserve_slot(PCIBus *pci_bus); void igd_write_opregion(XenPCIPassthroughState *s, uint32_t val); void xen_igd_passthrough_isa_bridge_create(XenPCIPassthroughState *s, XenHostPCIDevice *dev); @@ -75,6 +88,13 @@ typedef int (*xen_pt_conf_byte_read) #define XEN_PCI_INTEL_OPREGION 0xfc +#define XEN_PCI_IGD_DOMAIN 0 +#define XEN_PCI_IGD_BUS 0 +#define XEN_PCI_IGD_DEV 2 +#define XEN_PCI_IGD_FN 0 +#define XEN_PCI_IGD_SLOT_MASK \ + (1UL << PCI_SLOT(PCI_DEVFN(XEN_PCI_IGD_DEV, XEN_PCI_IGD_FN))) + typedef enum { XEN_PT_GRP_TYPE_HARDWIRED = 0, /* 0 Hardwired reg group */ XEN_PT_GRP_TYPE_EMU, /* emul reg group */ diff --git a/hw/xen/xen_pt_config_init.c b/hw/xen/xen_pt_config_init.c index cde898b744..8b9b554352 100644 --- a/hw/xen/xen_pt_config_init.c +++ b/hw/xen/xen_pt_config_init.c @@ -1924,7 +1924,7 @@ static void xen_pt_config_reg_init(XenPCIPassthroughState *s, if (reg->init) { uint32_t host_mask, size_mask; unsigned int offset; - uint32_t val; + uint32_t val = 0; /* initialize emulate register */ rc = reg->init(s, reg_entry->reg, diff --git a/hw/xen/xen_pt_stub.c b/hw/xen/xen_pt_stub.c index 2d8cac8d54..5c108446a8 100644 --- a/hw/xen/xen_pt_stub.c +++ b/hw/xen/xen_pt_stub.c @@ -20,3 +20,7 @@ void xen_igd_gfx_pt_set(bool value, Error **errp) error_setg(errp, "Xen PCI passthrough support not built in"); } } + +void xen_igd_reserve_slot(PCIBus *pci_bus) +{ +} diff --git a/include/block/nvme.h b/include/block/nvme.h index 8027b7126b..bb231d0b9a 100644 --- a/include/block/nvme.h +++ b/include/block/nvme.h @@ -1,6 +1,8 @@ #ifndef BLOCK_NVME_H #define BLOCK_NVME_H +#include "hw/registerfields.h" + typedef struct QEMU_PACKED NvmeBar { uint64_t cap; uint32_t vs; @@ -58,6 +60,24 @@ enum NvmeBarRegs { NVME_REG_PMRMSCU = offsetof(NvmeBar, pmrmscu), }; +typedef struct QEMU_PACKED NvmeEndGrpLog { + uint8_t critical_warning; + uint8_t rsvd[2]; + uint8_t avail_spare; + uint8_t avail_spare_thres; + uint8_t percet_used; + uint8_t rsvd1[26]; + uint64_t end_estimate[2]; + uint64_t data_units_read[2]; + uint64_t data_units_written[2]; + uint64_t media_units_written[2]; + uint64_t host_read_commands[2]; + uint64_t host_write_commands[2]; + uint64_t media_integrity_errors[2]; + 
uint64_t no_err_info_log_entries[2]; + uint8_t rsvd2[352]; +} NvmeEndGrpLog; + enum NvmeCapShift { CAP_MQES_SHIFT = 0, CAP_CQR_SHIFT = 16, @@ -595,7 +615,9 @@ enum NvmeAdminCommands { NVME_ADM_CMD_ACTIVATE_FW = 0x10, NVME_ADM_CMD_DOWNLOAD_FW = 0x11, NVME_ADM_CMD_NS_ATTACHMENT = 0x15, + NVME_ADM_CMD_DIRECTIVE_SEND = 0x19, NVME_ADM_CMD_VIRT_MNGMT = 0x1c, + NVME_ADM_CMD_DIRECTIVE_RECV = 0x1a, NVME_ADM_CMD_DBBUF_CONFIG = 0x7c, NVME_ADM_CMD_FORMAT_NVM = 0x80, NVME_ADM_CMD_SECURITY_SEND = 0x81, @@ -611,7 +633,9 @@ enum NvmeIoCommands { NVME_CMD_WRITE_ZEROES = 0x08, NVME_CMD_DSM = 0x09, NVME_CMD_VERIFY = 0x0c, + NVME_CMD_IO_MGMT_RECV = 0x12, NVME_CMD_COPY = 0x19, + NVME_CMD_IO_MGMT_SEND = 0x1d, NVME_CMD_ZONE_MGMT_SEND = 0x79, NVME_CMD_ZONE_MGMT_RECV = 0x7a, NVME_CMD_ZONE_APPEND = 0x7d, @@ -704,7 +728,9 @@ typedef struct QEMU_PACKED NvmeRwCmd { uint64_t slba; uint16_t nlb; uint16_t control; - uint32_t dsmgmt; + uint8_t dsmgmt; + uint8_t rsvd; + uint16_t dspec; uint32_t reftag; uint16_t apptag; uint16_t appmask; @@ -875,6 +901,8 @@ enum NvmeStatusCodes { NVME_INVALID_PRP_OFFSET = 0x0013, NVME_CMD_SET_CMB_REJECTED = 0x002b, NVME_INVALID_CMD_SET = 0x002c, + NVME_FDP_DISABLED = 0x0029, + NVME_INVALID_PHID_LIST = 0x002a, NVME_LBA_RANGE = 0x0080, NVME_CAP_EXCEEDED = 0x0081, NVME_NS_NOT_READY = 0x0082, @@ -1005,11 +1033,16 @@ enum { }; enum NvmeLogIdentifier { - NVME_LOG_ERROR_INFO = 0x01, - NVME_LOG_SMART_INFO = 0x02, - NVME_LOG_FW_SLOT_INFO = 0x03, - NVME_LOG_CHANGED_NSLIST = 0x04, - NVME_LOG_CMD_EFFECTS = 0x05, + NVME_LOG_ERROR_INFO = 0x01, + NVME_LOG_SMART_INFO = 0x02, + NVME_LOG_FW_SLOT_INFO = 0x03, + NVME_LOG_CHANGED_NSLIST = 0x04, + NVME_LOG_CMD_EFFECTS = 0x05, + NVME_LOG_ENDGRP = 0x09, + NVME_LOG_FDP_CONFS = 0x20, + NVME_LOG_FDP_RUH_USAGE = 0x21, + NVME_LOG_FDP_STATS = 0x22, + NVME_LOG_FDP_EVENTS = 0x23, }; typedef struct QEMU_PACKED NvmePSD { @@ -1091,7 +1124,10 @@ typedef struct QEMU_PACKED NvmeIdCtrl { uint16_t mntmt; uint16_t mxtmt; uint32_t sanicap; - uint8_t rsvd332[180]; + uint8_t rsvd332[6]; + uint16_t nsetidmax; + uint16_t endgidmax; + uint8_t rsvd342[170]; uint8_t sqes; uint8_t cqes; uint16_t maxcmd; @@ -1134,15 +1170,18 @@ enum NvmeIdCtrlOaes { }; enum NvmeIdCtrlCtratt { + NVME_CTRATT_ENDGRPS = 1 << 4, NVME_CTRATT_ELBAS = 1 << 15, + NVME_CTRATT_FDPS = 1 << 19, }; enum NvmeIdCtrlOacs { - NVME_OACS_SECURITY = 1 << 0, - NVME_OACS_FORMAT = 1 << 1, - NVME_OACS_FW = 1 << 2, - NVME_OACS_NS_MGMT = 1 << 3, - NVME_OACS_DBBUF = 1 << 8, + NVME_OACS_SECURITY = 1 << 0, + NVME_OACS_FORMAT = 1 << 1, + NVME_OACS_FW = 1 << 2, + NVME_OACS_NS_MGMT = 1 << 3, + NVME_OACS_DIRECTIVES = 1 << 5, + NVME_OACS_DBBUF = 1 << 8, }; enum NvmeIdCtrlOncs { @@ -1227,6 +1266,7 @@ enum NvmeNsAttachmentOperation { #define NVME_AEC_SMART(aec) (aec & 0xff) #define NVME_AEC_NS_ATTR(aec) ((aec >> 8) & 0x1) #define NVME_AEC_FW_ACTIVATION(aec) ((aec >> 9) & 0x1) +#define NVME_AEC_ENDGRP_NOTICE(aec) ((aec >> 14) & 0x1) #define NVME_ERR_REC_TLER(err_rec) (err_rec & 0xffff) #define NVME_ERR_REC_DULBE(err_rec) (err_rec & 0x10000) @@ -1246,6 +1286,8 @@ enum NvmeFeatureIds { NVME_TIMESTAMP = 0xe, NVME_HOST_BEHAVIOR_SUPPORT = 0x16, NVME_COMMAND_SET_PROFILE = 0x19, + NVME_FDP_MODE = 0x1d, + NVME_FDP_EVENTS = 0x1e, NVME_SOFTWARE_PROGRESS_MARKER = 0x80, NVME_FID_MAX = 0x100, }; @@ -1338,7 +1380,10 @@ typedef struct QEMU_PACKED NvmeIdNs { uint16_t mssrl; uint32_t mcl; uint8_t msrc; - uint8_t rsvd81[23]; + uint8_t rsvd81[18]; + uint8_t nsattr; + uint16_t nvmsetid; + uint16_t endgid; uint8_t nguid[16]; uint64_t eui64; NvmeLBAF 
lbaf[NVME_MAX_NLBAF]; @@ -1617,6 +1662,169 @@ typedef enum NvmeVirtualResourceType { NVME_VIRT_RES_INTERRUPT = 0x01, } NvmeVirtualResourceType; +typedef struct NvmeDirectiveIdentify { + uint8_t supported; + uint8_t unused1[31]; + uint8_t enabled; + uint8_t unused33[31]; + uint8_t persistent; + uint8_t unused65[31]; + uint8_t rsvd64[4000]; +} NvmeDirectiveIdentify; + +enum NvmeDirectiveTypes { + NVME_DIRECTIVE_IDENTIFY = 0x0, + NVME_DIRECTIVE_DATA_PLACEMENT = 0x2, +}; + +enum NvmeDirectiveOperations { + NVME_DIRECTIVE_RETURN_PARAMS = 0x1, +}; + +typedef struct QEMU_PACKED NvmeFdpConfsHdr { + uint16_t num_confs; + uint8_t version; + uint8_t rsvd3; + uint32_t size; + uint8_t rsvd8[8]; +} NvmeFdpConfsHdr; + +REG8(FDPA, 0x0) + FIELD(FDPA, RGIF, 0, 4) + FIELD(FDPA, VWC, 4, 1) + FIELD(FDPA, VALID, 7, 1); + +typedef struct QEMU_PACKED NvmeFdpDescrHdr { + uint16_t descr_size; + uint8_t fdpa; + uint8_t vss; + uint32_t nrg; + uint16_t nruh; + uint16_t maxpids; + uint32_t nnss; + uint64_t runs; + uint32_t erutl; + uint8_t rsvd28[36]; +} NvmeFdpDescrHdr; + +enum NvmeRuhType { + NVME_RUHT_INITIALLY_ISOLATED = 1, + NVME_RUHT_PERSISTENTLY_ISOLATED = 2, +}; + +typedef struct QEMU_PACKED NvmeRuhDescr { + uint8_t ruht; + uint8_t rsvd1[3]; +} NvmeRuhDescr; + +typedef struct QEMU_PACKED NvmeRuhuLog { + uint16_t nruh; + uint8_t rsvd2[6]; +} NvmeRuhuLog; + +enum NvmeRuhAttributes { + NVME_RUHA_UNUSED = 0, + NVME_RUHA_HOST = 1, + NVME_RUHA_CTRL = 2, +}; + +typedef struct QEMU_PACKED NvmeRuhuDescr { + uint8_t ruha; + uint8_t rsvd1[7]; +} NvmeRuhuDescr; + +typedef struct QEMU_PACKED NvmeFdpStatsLog { + uint64_t hbmw[2]; + uint64_t mbmw[2]; + uint64_t mbe[2]; + uint8_t rsvd48[16]; +} NvmeFdpStatsLog; + +typedef struct QEMU_PACKED NvmeFdpEventsLog { + uint32_t num_events; + uint8_t rsvd4[60]; +} NvmeFdpEventsLog; + +enum NvmeFdpEventType { + FDP_EVT_RU_NOT_FULLY_WRITTEN = 0x0, + FDP_EVT_RU_ATL_EXCEEDED = 0x1, + FDP_EVT_CTRL_RESET_RUH = 0x2, + FDP_EVT_INVALID_PID = 0x3, + FDP_EVT_MEDIA_REALLOC = 0x80, + FDP_EVT_RUH_IMPLICIT_RU_CHANGE = 0x81, +}; + +enum NvmeFdpEventFlags { + FDPEF_PIV = 1 << 0, + FDPEF_NSIDV = 1 << 1, + FDPEF_LV = 1 << 2, +}; + +typedef struct QEMU_PACKED NvmeFdpEvent { + uint8_t type; + uint8_t flags; + uint16_t pid; + uint64_t timestamp; + uint32_t nsid; + uint64_t type_specific[2]; + uint16_t rgid; + uint8_t ruhid; + uint8_t rsvd35[5]; + uint64_t vendor[3]; +} NvmeFdpEvent; + +typedef struct QEMU_PACKED NvmePhidList { + uint16_t nnruhd; + uint8_t rsvd2[6]; +} NvmePhidList; + +typedef struct QEMU_PACKED NvmePhidDescr { + uint8_t ruht; + uint8_t rsvd1; + uint16_t ruhid; +} NvmePhidDescr; + +REG32(FEAT_FDP, 0x0) + FIELD(FEAT_FDP, FDPE, 0, 1) + FIELD(FEAT_FDP, CONF_NDX, 8, 8); + +typedef struct QEMU_PACKED NvmeFdpEventDescr { + uint8_t evt; + uint8_t evta; +} NvmeFdpEventDescr; + +REG32(NVME_IOMR, 0x0) + FIELD(NVME_IOMR, MO, 0, 8) + FIELD(NVME_IOMR, MOS, 16, 16); + +enum NvmeIomr2Mo { + NVME_IOMR_MO_NOP = 0x0, + NVME_IOMR_MO_RUH_STATUS = 0x1, + NVME_IOMR_MO_VENDOR_SPECIFIC = 0x255, +}; + +typedef struct QEMU_PACKED NvmeRuhStatus { + uint8_t rsvd0[14]; + uint16_t nruhsd; +} NvmeRuhStatus; + +typedef struct QEMU_PACKED NvmeRuhStatusDescr { + uint16_t pid; + uint16_t ruhid; + uint32_t earutr; + uint64_t ruamw; + uint8_t rsvd16[16]; +} NvmeRuhStatusDescr; + +REG32(NVME_IOMS, 0x0) + FIELD(NVME_IOMS, MO, 0, 8) + FIELD(NVME_IOMS, MOS, 16, 16); + +enum NvmeIoms2Mo { + NVME_IOMS_MO_NOP = 0x0, + NVME_IOMS_MO_RUH_UPDATE = 0x1, +}; + static inline void _nvme_check_size(void) { QEMU_BUILD_BUG_ON(sizeof(NvmeBar) != 
4096); @@ -1655,5 +1863,7 @@ static inline void _nvme_check_size(void) QEMU_BUILD_BUG_ON(sizeof(NvmePriCtrlCap) != 4096); QEMU_BUILD_BUG_ON(sizeof(NvmeSecCtrlEntry) != 32); QEMU_BUILD_BUG_ON(sizeof(NvmeSecCtrlList) != 4096); + QEMU_BUILD_BUG_ON(sizeof(NvmeEndGrpLog) != 512); + QEMU_BUILD_BUG_ON(sizeof(NvmeDirectiveIdentify) != 4096); } #endif diff --git a/include/exec/gen-icount.h b/include/exec/gen-icount.h index aff35d6982..f6de79a6b4 100644 --- a/include/exec/gen-icount.h +++ b/include/exec/gen-icount.h @@ -61,8 +61,6 @@ static inline void gen_tb_start(const TranslationBlock *tb) offsetof(ArchCPU, parent_obj.can_do_io) - offsetof(ArchCPU, env)); } - - tcg_temp_free_i32(count); } static inline void gen_tb_end(const TranslationBlock *tb, int num_insns) diff --git a/include/exec/memory.h b/include/exec/memory.h index 2e602a2fad..6fa0b071f0 100644 --- a/include/exec/memory.h +++ b/include/exec/memory.h @@ -1732,6 +1732,16 @@ void memory_region_notify_iommu_one(IOMMUNotifier *notifier, IOMMUTLBEvent *event); /** + * memory_region_unmap_iommu_notifier_range: notify a unmap for an IOMMU + * translation that covers the + * range of a notifier + * + * @notifier: the notifier to be notified + */ +void memory_region_unmap_iommu_notifier_range(IOMMUNotifier *n); + + +/** * memory_region_register_iommu_notifier: register a notifier for changes to * IOMMU translation entries. * diff --git a/include/exec/translator.h b/include/exec/translator.h index 8b36690e80..797fef7515 100644 --- a/include/exec/translator.h +++ b/include/exec/translator.h @@ -150,8 +150,6 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns, target_ulong pc, void *host_pc, const TranslatorOps *ops, DisasContextBase *db); -void translator_loop_temp_check(DisasContextBase *db); - /** * translator_use_goto_tb * @db: Disassembly context diff --git a/include/hw/arm/allwinner-h3.h b/include/hw/arm/allwinner-h3.h index 1d7ce20589..59e0f822d2 100644 --- a/include/hw/arm/allwinner-h3.h +++ b/include/hw/arm/allwinner-h3.h @@ -84,6 +84,8 @@ enum { AW_H3_DEV_UART3, AW_H3_DEV_EMAC, AW_H3_DEV_TWI0, + AW_H3_DEV_TWI1, + AW_H3_DEV_TWI2, AW_H3_DEV_DRAMCOM, AW_H3_DEV_DRAMCTL, AW_H3_DEV_DRAMPHY, @@ -93,6 +95,7 @@ enum { AW_H3_DEV_GIC_VCPU, AW_H3_DEV_RTC, AW_H3_DEV_CPUCFG, + AW_H3_DEV_R_TWI, AW_H3_DEV_SDRAM }; @@ -133,6 +136,9 @@ struct AwH3State { AwSidState sid; AwSdHostState mmc0; AWI2CState i2c0; + AWI2CState i2c1; + AWI2CState i2c2; + AWI2CState r_twi; AwSun8iEmacState emac; AwRtcState rtc; GICState gic; diff --git a/include/hw/arm/aspeed_soc.h b/include/hw/arm/aspeed_soc.h index bd1e03e78a..8adff70072 100644 --- a/include/hw/arm/aspeed_soc.h +++ b/include/hw/arm/aspeed_soc.h @@ -58,6 +58,8 @@ struct AspeedSoCState { MemoryRegion *dram_mr; MemoryRegion dram_container; MemoryRegion sram; + MemoryRegion spi_boot_container; + MemoryRegion spi_boot; AspeedVICState vic; AspeedRtcState rtc; AspeedTimerCtrlState timerctrl; @@ -120,6 +122,7 @@ struct AspeedSoCClass { enum { + ASPEED_DEV_SPI_BOOT, ASPEED_DEV_IOMEM, ASPEED_DEV_UART1, ASPEED_DEV_UART2, @@ -190,6 +193,8 @@ enum { ASPEED_DEV_JTAG1, }; +#define ASPEED_SOC_SPI_BOOT_ADDR 0x0 + qemu_irq aspeed_soc_get_irq(AspeedSoCState *s, int dev); bool aspeed_soc_uart_realize(AspeedSoCState *s, Error **errp); void aspeed_soc_uart_set_chr(AspeedSoCState *s, int dev, Chardev *chr); diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h index fb5d9667ca..75689bff02 100644 --- a/include/hw/core/cpu.h +++ b/include/hw/core/cpu.h @@ -923,9 +923,10 @@ void cpu_single_step(CPUState *cpu, int 
enabled); #define BP_GDB 0x10 #define BP_CPU 0x20 #define BP_ANY (BP_GDB | BP_CPU) -#define BP_WATCHPOINT_HIT_READ 0x40 -#define BP_WATCHPOINT_HIT_WRITE 0x80 -#define BP_WATCHPOINT_HIT (BP_WATCHPOINT_HIT_READ | BP_WATCHPOINT_HIT_WRITE) +#define BP_HIT_SHIFT 6 +#define BP_WATCHPOINT_HIT_READ (BP_MEM_READ << BP_HIT_SHIFT) +#define BP_WATCHPOINT_HIT_WRITE (BP_MEM_WRITE << BP_HIT_SHIFT) +#define BP_WATCHPOINT_HIT (BP_MEM_ACCESS << BP_HIT_SHIFT) int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags, CPUBreakpoint **breakpoint); diff --git a/include/hw/cxl/cxl_device.h b/include/hw/cxl/cxl_device.h index 250adf18b2..7e5ad65c1d 100644 --- a/include/hw/cxl/cxl_device.h +++ b/include/hw/cxl/cxl_device.h @@ -170,7 +170,7 @@ CXL_DEVICE_CAPABILITY_HEADER_REGISTER(MEMORY_DEVICE, CXL_DEVICE_CAP_HDR1_OFFSET + CXL_DEVICE_CAP_REG_SIZE * 2) -int cxl_initialize_mailbox(CXLDeviceState *cxl_dstate); +void cxl_initialize_mailbox(CXLDeviceState *cxl_dstate); void cxl_process_mailbox(CXLDeviceState *cxl_dstate); #define cxl_device_cap_init(dstate, reg, cap_id) \ diff --git a/include/hw/i2c/allwinner-i2c.h b/include/hw/i2c/allwinner-i2c.h index 4f378b86ba..0e325d265e 100644 --- a/include/hw/i2c/allwinner-i2c.h +++ b/include/hw/i2c/allwinner-i2c.h @@ -28,6 +28,10 @@ #include "qom/object.h" #define TYPE_AW_I2C "allwinner.i2c" + +/** Allwinner I2C sun6i family and newer (A31, H2+, H3, etc) */ +#define TYPE_AW_I2C_SUN6I TYPE_AW_I2C "-sun6i" + OBJECT_DECLARE_SIMPLE_TYPE(AWI2CState, AW_I2C) #define AW_I2C_MEM_SIZE 0x24 @@ -50,6 +54,8 @@ struct AWI2CState { uint8_t srst; uint8_t efr; uint8_t lcr; + + bool irq_clear_inverted; }; #endif /* ALLWINNER_I2C_H */ diff --git a/include/hw/i2c/i2c.h b/include/hw/i2c/i2c.h index 9b9581d230..2a3abacd1b 100644 --- a/include/hw/i2c/i2c.h +++ b/include/hw/i2c/i2c.h @@ -141,6 +141,8 @@ int i2c_start_send(I2CBus *bus, uint8_t address); */ int i2c_start_send_async(I2CBus *bus, uint8_t address); +void i2c_schedule_pending_master(I2CBus *bus); + void i2c_end_transfer(I2CBus *bus); void i2c_nack(I2CBus *bus); void i2c_ack(I2CBus *bus); diff --git a/include/hw/i386/microvm.h b/include/hw/i386/microvm.h index e8af61f194..fad97a891d 100644 --- a/include/hw/i386/microvm.h +++ b/include/hw/i386/microvm.h @@ -50,9 +50,8 @@ */ /* Platform virtio definitions */ -#define VIRTIO_MMIO_BASE 0xfeb00000 -#define VIRTIO_CMDLINE_MAXLEN 64 -#define VIRTIO_CMDLINE_TOTAL_MAX_LEN ((VIRTIO_CMDLINE_MAXLEN + 1) * 16) +#define VIRTIO_MMIO_BASE 0xfeb00000 +#define VIRTIO_CMDLINE_MAXLEN 64 #define GED_MMIO_BASE 0xfea00000 #define GED_MMIO_BASE_MEMHP (GED_MMIO_BASE + 0x100) diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h index 467311007e..8206d5405a 100644 --- a/include/hw/i386/pc.h +++ b/include/hw/i386/pc.h @@ -127,9 +127,6 @@ struct PCMachineClass { /* create kvmclock device even when KVM PV features are not exposed */ bool kvmclock_create_always; - - /* skip passing an rng seed for legacy machines */ - bool legacy_no_rng_seed; }; #define TYPE_PC_MACHINE "generic-pc-machine" diff --git a/include/hw/i386/x86.h b/include/hw/i386/x86.h index 890dfad23e..0b337a036c 100644 --- a/include/hw/i386/x86.h +++ b/include/hw/i386/x86.h @@ -123,8 +123,7 @@ void x86_bios_rom_init(MachineState *ms, const char *default_firmware, void x86_load_linux(X86MachineState *x86ms, FWCfgState *fw_cfg, int acpi_data_size, - bool pvh_enabled, - bool legacy_no_rng_seed); + bool pvh_enabled); bool x86_machine_is_smm_enabled(const X86MachineState *x86ms); bool x86_machine_is_acpi_enabled(const X86MachineState *x86ms); diff --git 
a/include/hw/loader.h b/include/hw/loader.h index 1384796a4b..c4c14170ea 100644 --- a/include/hw/loader.h +++ b/include/hw/loader.h @@ -86,6 +86,25 @@ ssize_t load_image_gzipped_buffer(const char *filename, uint64_t max_sz, uint8_t **buffer); ssize_t load_image_gzipped(const char *filename, hwaddr addr, uint64_t max_sz); +/** + * unpack_efi_zboot_image: + * @buffer: pointer to a variable holding the address of a buffer containing the + * image + * @size: pointer to a variable holding the size of the buffer + * + * Check whether the buffer contains a EFI zboot image, and if it does, extract + * the compressed payload and decompress it into a new buffer. If successful, + * the old buffer is freed, and the *buffer and size variables pointed to by the + * function arguments are updated to refer to the newly populated buffer. + * + * Returns 0 if the image could not be identified as a EFI zboot image. + * Returns -1 if the buffer contents were identified as a EFI zboot image, but + * unpacking failed for any reason. + * Returns the size of the decompressed payload if decompression was performed + * successfully. + */ +ssize_t unpack_efi_zboot_image(uint8_t **buffer, int *size); + #define ELF_LOAD_FAILED -1 #define ELF_LOAD_NOT_ELF -2 #define ELF_LOAD_WRONG_ARCH -3 diff --git a/include/hw/loongarch/virt.h b/include/hw/loongarch/virt.h index f5f818894e..7ae8a91229 100644 --- a/include/hw/loongarch/virt.h +++ b/include/hw/loongarch/virt.h @@ -45,6 +45,7 @@ struct LoongArchMachineState { /* State for other subsystems/APIs: */ FWCfgState *fw_cfg; Notifier machine_done; + Notifier powerdown_notifier; OnOffAuto acpi; char *oem_id; char *oem_table_id; diff --git a/include/hw/nvram/fw_cfg.h b/include/hw/nvram/fw_cfg.h index 990dcdbb2e..c1f81a5f13 100644 --- a/include/hw/nvram/fw_cfg.h +++ b/include/hw/nvram/fw_cfg.h @@ -118,37 +118,6 @@ struct FWCfgMemState { void fw_cfg_add_bytes(FWCfgState *s, uint16_t key, void *data, size_t len); /** - * fw_cfg_add_bytes_callback: - * @s: fw_cfg device being modified - * @key: selector key value for new fw_cfg item - * @select_cb: callback function when selecting - * @write_cb: callback function after a write - * @callback_opaque: argument to be passed into callback function - * @data: pointer to start of item data - * @len: size of item data - * @read_only: is file read only - * - * Add a new fw_cfg item, available by selecting the given key, as a raw - * "blob" of the given size. The data referenced by the starting pointer - * is only linked, NOT copied, into the data structure of the fw_cfg device. - */ -void fw_cfg_add_bytes_callback(FWCfgState *s, uint16_t key, - FWCfgCallback select_cb, - FWCfgWriteCallback write_cb, - void *callback_opaque, - void *data, size_t len, - bool read_only); - -/** - * fw_cfg_read_bytes_ptr: - * @s: fw_cfg device being modified - * @key: selector key value for new fw_cfg item - * - * Reads an existing fw_cfg data pointer. - */ -void *fw_cfg_read_bytes_ptr(FWCfgState *s, uint16_t key); - -/** * fw_cfg_add_string: * @s: fw_cfg device being modified * @key: selector key value for new fw_cfg item diff --git a/include/hw/pci-host/ls7a.h b/include/hw/pci-host/ls7a.h index ff4b979912..e753449593 100644 --- a/include/hw/pci-host/ls7a.h +++ b/include/hw/pci-host/ls7a.h @@ -26,24 +26,25 @@ #define VIRT_PCH_MSI_ADDR_LOW 0x2FF00000UL /* - * According to the kernel pch irq start from 64 offset - * 0 ~ 16 irqs used for non-pci device while 16 ~ 64 irqs - * used for pci device. 
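
Aside on the unpack_efi_zboot_image() declaration documented just above in include/hw/loader.h: its three-way return contract (0 = not a zboot image and the buffer is untouched, -1 = zboot image but unpacking failed, >0 = size of the decompressed payload now referenced by *buffer/*size) suggests a caller shaped roughly like the sketch below. The helper name load_kernel_maybe_zboot() and the plain stderr reporting are illustrative assumptions, not code from the patch.

    #include <stdio.h>
    #include <stdint.h>
    #include <sys/types.h>

    /* Prototype as declared in the hunk above. */
    ssize_t unpack_efi_zboot_image(uint8_t **buffer, int *size);

    /* Hypothetical caller, based only on the documented return values. */
    static int load_kernel_maybe_zboot(uint8_t **buffer, int *size)
    {
        ssize_t ret = unpack_efi_zboot_image(buffer, size);

        if (ret == 0) {
            return 0;    /* not a zboot image: use the buffer as loaded */
        }
        if (ret < 0) {
            fprintf(stderr, "EFI zboot image found but could not be unpacked\n");
            return -1;
        }
        /* ret > 0: the old buffer was freed, *buffer now holds the
         * decompressed payload of ret bytes and *size was updated. */
        return 0;
    }
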
+ * GSI_BASE is hard-coded with 64 in linux kernel, else kernel fails to boot + * 0 - 15 GSI for ISA devices even if there is no ISA devices + * 16 - 63 GSI for CPU devices such as timers/perf monitor etc + * 64 - GSI for external devices */ #define VIRT_PCH_PIC_IRQ_NUM 32 -#define PCH_PIC_IRQ_OFFSET 64 +#define VIRT_GSI_BASE 64 #define VIRT_DEVICE_IRQS 16 -#define VIRT_UART_IRQ (PCH_PIC_IRQ_OFFSET + 2) +#define VIRT_UART_IRQ (VIRT_GSI_BASE + 2) #define VIRT_UART_BASE 0x1fe001e0 #define VIRT_UART_SIZE 0X100 -#define VIRT_RTC_IRQ (PCH_PIC_IRQ_OFFSET + 3) +#define VIRT_RTC_IRQ (VIRT_GSI_BASE + 3) #define VIRT_MISC_REG_BASE (VIRT_PCH_REG_BASE + 0x00080000) #define VIRT_RTC_REG_BASE (VIRT_MISC_REG_BASE + 0x00050100) #define VIRT_RTC_LEN 0x100 -#define VIRT_SCI_IRQ (PCH_PIC_IRQ_OFFSET + 4) +#define VIRT_SCI_IRQ (VIRT_GSI_BASE + 4) #define VIRT_PLATFORM_BUS_BASEADDRESS 0x16000000 #define VIRT_PLATFORM_BUS_SIZE 0x2000000 #define VIRT_PLATFORM_BUS_NUM_IRQS 2 -#define VIRT_PLATFORM_BUS_IRQ 69 +#define VIRT_PLATFORM_BUS_IRQ (VIRT_GSI_BASE + 5) #endif diff --git a/include/hw/pci-host/pnv_phb4.h b/include/hw/pci-host/pnv_phb4.h index 28d61b96c7..2d026db9a3 100644 --- a/include/hw/pci-host/pnv_phb4.h +++ b/include/hw/pci-host/pnv_phb4.h @@ -157,6 +157,7 @@ struct PnvPHB4 { void pnv_phb4_pic_print_info(PnvPHB4 *phb, Monitor *mon); int pnv_phb4_pec_get_phb_id(PnvPhb4PecState *pec, int stack_index); +PnvPhb4PecState *pnv_pec_add_phb(PnvChip *chip, PnvPHB *phb, Error **errp); void pnv_phb4_bus_init(DeviceState *dev, PnvPHB4 *phb); extern const MemoryRegionOps pnv_phb4_xscom_ops; @@ -185,6 +186,8 @@ struct PnvPhb4PecState { /* PHBs */ uint32_t num_phbs; +#define MAX_PHBS_PER_PEC 3 + PnvPHB *phbs[MAX_PHBS_PER_PEC]; PnvChip *chip; }; diff --git a/include/hw/pci/pcie.h b/include/hw/pci/pcie.h index 798a262a0a..3cc2b15957 100644 --- a/include/hw/pci/pcie.h +++ b/include/hw/pci/pcie.h @@ -28,14 +28,6 @@ #include "hw/hotplug.h" typedef enum { - /* for attention and power indicator */ - PCI_EXP_HP_IND_RESERVED = PCI_EXP_SLTCTL_IND_RESERVED, - PCI_EXP_HP_IND_ON = PCI_EXP_SLTCTL_IND_ON, - PCI_EXP_HP_IND_BLINK = PCI_EXP_SLTCTL_IND_BLINK, - PCI_EXP_HP_IND_OFF = PCI_EXP_SLTCTL_IND_OFF, -} PCIExpressIndicator; - -typedef enum { /* these bits must match the bits in Slot Control/Status registers. 
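
A quick check on the ls7a.h renumbering above: replacing PCH_PIC_IRQ_OFFSET and the hard-coded 69 with VIRT_GSI_BASE-relative expressions changes no numeric value, only makes the GSI arithmetic explicit. The standalone sketch below copies the new constants and asserts the equivalence; it is an illustration, not part of the patch.

    /* Constants copied from the hunk above; C11 _Static_assert confirms the
     * refactor is value-preserving. */
    #define VIRT_GSI_BASE         64
    #define VIRT_UART_IRQ         (VIRT_GSI_BASE + 2)
    #define VIRT_RTC_IRQ          (VIRT_GSI_BASE + 3)
    #define VIRT_SCI_IRQ          (VIRT_GSI_BASE + 4)
    #define VIRT_PLATFORM_BUS_IRQ (VIRT_GSI_BASE + 5)

    _Static_assert(VIRT_UART_IRQ == 66, "was PCH_PIC_IRQ_OFFSET + 2");
    _Static_assert(VIRT_RTC_IRQ == 67, "was PCH_PIC_IRQ_OFFSET + 3");
    _Static_assert(VIRT_SCI_IRQ == 68, "was PCH_PIC_IRQ_OFFSET + 4");
    _Static_assert(VIRT_PLATFORM_BUS_IRQ == 69, "was the hard-coded 69");
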
* PCI_EXP_HP_EV_xxx = PCI_EXP_SLTCTL_xxxE = PCI_EXP_SLTSTA_xxx * diff --git a/include/hw/pci/pcie_regs.h b/include/hw/pci/pcie_regs.h index 963dc2e170..1fe0bdd25b 100644 --- a/include/hw/pci/pcie_regs.h +++ b/include/hw/pci/pcie_regs.h @@ -66,20 +66,6 @@ typedef enum PCIExpLinkWidth { #define PCI_EXP_SLTCAP_PSN_SHIFT ctz32(PCI_EXP_SLTCAP_PSN) -#define PCI_EXP_SLTCTL_IND_RESERVED 0x0 -#define PCI_EXP_SLTCTL_IND_ON 0x1 -#define PCI_EXP_SLTCTL_IND_BLINK 0x2 -#define PCI_EXP_SLTCTL_IND_OFF 0x3 -#define PCI_EXP_SLTCTL_AIC_SHIFT ctz32(PCI_EXP_SLTCTL_AIC) -#define PCI_EXP_SLTCTL_AIC_OFF \ - (PCI_EXP_SLTCTL_IND_OFF << PCI_EXP_SLTCTL_AIC_SHIFT) - -#define PCI_EXP_SLTCTL_PIC_SHIFT ctz32(PCI_EXP_SLTCTL_PIC) -#define PCI_EXP_SLTCTL_PIC_OFF \ - (PCI_EXP_SLTCTL_IND_OFF << PCI_EXP_SLTCTL_PIC_SHIFT) -#define PCI_EXP_SLTCTL_PIC_ON \ - (PCI_EXP_SLTCTL_IND_ON << PCI_EXP_SLTCTL_PIC_SHIFT) - #define PCI_EXP_SLTCTL_SUPPORTED \ (PCI_EXP_SLTCTL_ABPE | \ PCI_EXP_SLTCTL_PDCE | \ diff --git a/include/hw/ppc/pnv.h b/include/hw/ppc/pnv.h index 96fb850419..409f3bf763 100644 --- a/include/hw/ppc/pnv.h +++ b/include/hw/ppc/pnv.h @@ -100,7 +100,7 @@ struct PnvMachineState { }; PnvChip *pnv_get_chip(PnvMachineState *pnv, uint32_t chip_id); -Object *pnv_chip_add_phb(PnvChip *chip, PnvPHB *phb, Error **errp); +PnvChip *pnv_chip_add_phb(PnvChip *chip, PnvPHB *phb); #define PNV_FDT_ADDR 0x01000000 #define PNV_TIMEBASE_FREQ 512000000ULL diff --git a/include/hw/riscv/sifive_u.h b/include/hw/riscv/sifive_u.h index 65af306963..0696f85942 100644 --- a/include/hw/riscv/sifive_u.h +++ b/include/hw/riscv/sifive_u.h @@ -68,6 +68,7 @@ typedef struct SiFiveUState { /*< public >*/ SiFiveUSoCState soc; + int fdt_size; bool start_in_flash; uint32_t msel; diff --git a/include/hw/riscv/virt.h b/include/hw/riscv/virt.h index b3d26135c0..e5c474b26e 100644 --- a/include/hw/riscv/virt.h +++ b/include/hw/riscv/virt.h @@ -56,6 +56,10 @@ struct RISCVVirtState { bool have_aclint; RISCVVirtAIAType aia_type; int aia_guests; + char *oem_id; + char *oem_table_id; + OnOffAuto acpi; + const MemMapEntry *memmap; }; enum { @@ -121,4 +125,6 @@ enum { #define FDT_APLIC_INT_MAP_WIDTH (FDT_PCI_ADDR_CELLS + FDT_PCI_INT_CELLS + \ 1 + FDT_APLIC_INT_CELLS) +bool virt_is_acpi_enabled(RISCVVirtState *s); +void virt_acpi_setup(RISCVVirtState *vms); #endif diff --git a/include/hw/virtio/vhost-user-gpio.h b/include/hw/virtio/vhost-user-gpio.h index a9305c5e6c..a9d3f9b049 100644 --- a/include/hw/virtio/vhost-user-gpio.h +++ b/include/hw/virtio/vhost-user-gpio.h @@ -23,7 +23,7 @@ struct VHostUserGPIO { VirtIODevice parent_obj; CharBackend chardev; struct virtio_gpio_config config; - struct vhost_virtqueue *vhost_vq; + struct vhost_virtqueue *vhost_vqs; struct vhost_dev vhost_dev; VhostUserState vhost_user; VirtQueue *command_vq; diff --git a/include/qemu/bswap.h b/include/qemu/bswap.h index b1650daedf..15a78c0db5 100644 --- a/include/qemu/bswap.h +++ b/include/qemu/bswap.h @@ -125,11 +125,20 @@ CPU_CONVERT(le, 32, uint32_t) CPU_CONVERT(le, 64, uint64_t) /* - * Same as cpu_to_le{16,32}, except that gcc will figure the result is + * Same as cpu_to_le{16,32,64}, except that gcc will figure the result is * a compile-time constant if you pass in a constant. So this can be * used to initialize static variables. 
*/ #if HOST_BIG_ENDIAN +# define const_le64(_x) \ + ((((_x) & 0x00000000000000ffU) << 56) | \ + (((_x) & 0x000000000000ff00U) << 40) | \ + (((_x) & 0x0000000000ff0000U) << 24) | \ + (((_x) & 0x00000000ff000000U) << 8) | \ + (((_x) & 0x000000ff00000000U) >> 8) | \ + (((_x) & 0x0000ff0000000000U) >> 24) | \ + (((_x) & 0x00ff000000000000U) >> 40) | \ + (((_x) & 0xff00000000000000U) >> 56)) # define const_le32(_x) \ ((((_x) & 0x000000ffU) << 24) | \ (((_x) & 0x0000ff00U) << 8) | \ @@ -139,6 +148,7 @@ CPU_CONVERT(le, 64, uint64_t) ((((_x) & 0x00ff) << 8) | \ (((_x) & 0xff00) >> 8)) #else +# define const_le64(_x) (_x) # define const_le32(_x) (_x) # define const_le16(_x) (_x) #endif diff --git a/include/qemu/cpuid.h b/include/qemu/cpuid.h index 7adb12d320..1451e8ef2f 100644 --- a/include/qemu/cpuid.h +++ b/include/qemu/cpuid.h @@ -71,4 +71,11 @@ #define bit_LZCNT (1 << 5) #endif +static inline unsigned xgetbv_low(unsigned c) +{ + unsigned a, d; + asm("xgetbv" : "=a"(a), "=d"(d) : "c"(c)); + return a; +} + #endif /* QEMU_CPUID_H */ diff --git a/include/qemu/uuid.h b/include/qemu/uuid.h index 9925febfa5..dc40ee1fc9 100644 --- a/include/qemu/uuid.h +++ b/include/qemu/uuid.h @@ -61,6 +61,18 @@ typedef struct { (clock_seq_hi_and_reserved), (clock_seq_low), (node0), (node1), (node2),\ (node3), (node4), (node5) } +/* Normal (network byte order) UUID */ +#define UUID(time_low, time_mid, time_hi_and_version, \ + clock_seq_hi_and_reserved, clock_seq_low, node0, node1, node2, \ + node3, node4, node5) \ + { ((time_low) >> 24) & 0xff, ((time_low) >> 16) & 0xff, \ + ((time_low) >> 8) & 0xff, (time_low) & 0xff, \ + ((time_mid) >> 8) & 0xff, (time_mid) & 0xff, \ + ((time_hi_and_version) >> 8) & 0xff, (time_hi_and_version) & 0xff, \ + (clock_seq_hi_and_reserved), (clock_seq_low), \ + (node0), (node1), (node2), (node3), (node4), (node5) \ + } + #define UUID_FMT "%02hhx%02hhx%02hhx%02hhx-" \ "%02hhx%02hhx-%02hhx%02hhx-" \ "%02hhx%02hhx-" \ diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h index 353d430a63..70856147c5 100644 --- a/include/tcg/tcg-op.h +++ b/include/tcg/tcg-op.h @@ -259,12 +259,7 @@ static inline void gen_set_label(TCGLabel *l) tcg_gen_op1(INDEX_op_set_label, label_arg(l)); } -static inline void tcg_gen_br(TCGLabel *l) -{ - l->refs++; - tcg_gen_op1(INDEX_op_br, label_arg(l)); -} - +void tcg_gen_br(TCGLabel *l); void tcg_gen_mb(TCGBar); /* Helper calls. */ diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h index 7e2b954dbc..a5cf21be83 100644 --- a/include/tcg/tcg.h +++ b/include/tcg/tcg.h @@ -238,16 +238,23 @@ struct TCGRelocation { int type; }; +typedef struct TCGOp TCGOp; +typedef struct TCGLabelUse TCGLabelUse; +struct TCGLabelUse { + QSIMPLEQ_ENTRY(TCGLabelUse) next; + TCGOp *op; +}; + typedef struct TCGLabel TCGLabel; struct TCGLabel { - unsigned present : 1; - unsigned has_value : 1; - unsigned id : 14; - unsigned refs : 16; + bool present; + bool has_value; + uint16_t id; union { uintptr_t value; const tcg_insn_unit *value_ptr; } u; + QSIMPLEQ_HEAD(, TCGLabelUse) branches; QSIMPLEQ_HEAD(, TCGRelocation) relocs; QSIMPLEQ_ENTRY(TCGLabel) next; }; @@ -487,7 +494,7 @@ typedef struct TCGTempSet { #define SYNC_ARG (1 << 0) typedef uint32_t TCGLifeData; -typedef struct TCGOp { +struct TCGOp { TCGOpcode opc : 8; unsigned nargs : 8; @@ -506,7 +513,7 @@ typedef struct TCGOp { /* Arguments for the opcode. 
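
Returning to the const_le64() addition in include/qemu/bswap.h above: because the expression folds to a compile-time constant, it is legal in static initializers, and the big-endian branch must implement a full 8-byte swap. The sketch below copies that branch under a hypothetical name so it stands alone and checks it against a known byte pattern; it assumes nothing beyond what the hunk shows.

    #include <assert.h>
    #include <stdint.h>

    /* Copy of the big-endian branch of const_le64(), renamed for this
     * standalone sketch (the real macro is a no-op on little-endian hosts). */
    #define CONST_LE64_SWAP(_x)                       \
        ((((_x) & 0x00000000000000ffULL) << 56) |     \
         (((_x) & 0x000000000000ff00ULL) << 40) |     \
         (((_x) & 0x0000000000ff0000ULL) << 24) |     \
         (((_x) & 0x00000000ff000000ULL) <<  8) |     \
         (((_x) & 0x000000ff00000000ULL) >>  8) |     \
         (((_x) & 0x0000ff0000000000ULL) >> 24) |     \
         (((_x) & 0x00ff000000000000ULL) >> 40) |     \
         (((_x) & 0xff00000000000000ULL) >> 56))

    /* Usable as a static initializer precisely because it is constant-folded. */
    static const uint64_t swapped = CONST_LE64_SWAP(0x0102030405060708ULL);

    int main(void)
    {
        assert(swapped == 0x0807060504030201ULL);
        return 0;
    }
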
*/ TCGArg args[]; -} TCGOp; +}; #define TCGOP_CALLI(X) (X)->param1 #define TCGOP_CALLO(X) (X)->param2 @@ -567,7 +574,6 @@ struct TCGContext { #endif #ifdef CONFIG_DEBUG_TCG - int temps_in_use; int goto_tb_issue_mask; const TCGOpcode *vecop_list; #endif @@ -958,19 +964,6 @@ static inline TCGv_ptr tcg_temp_new_ptr(void) return temp_tcgv_ptr(t); } -#if defined(CONFIG_DEBUG_TCG) -/* If you call tcg_clear_temp_count() at the start of a section of - * code which is not supposed to leak any TCG temporaries, then - * calling tcg_check_temp_count() at the end of the section will - * return 1 if the section did in fact leak a temporary. - */ -void tcg_clear_temp_count(void); -int tcg_check_temp_count(void); -#else -#define tcg_clear_temp_count() do { } while (0) -#define tcg_check_temp_count() 0 -#endif - int64_t tcg_cpu_exec_time(void); void tcg_dump_info(GString *buf); void tcg_dump_op_count(GString *buf); diff --git a/meson.build b/meson.build index e533d6c95b..6bcab8bf0d 100644 --- a/meson.build +++ b/meson.build @@ -2861,7 +2861,6 @@ tracetool_depends = files( 'scripts/tracetool/format/log_stap.py', 'scripts/tracetool/format/stap.py', 'scripts/tracetool/__init__.py', - 'scripts/tracetool/transform.py', 'scripts/tracetool/vcpu.py' ) diff --git a/migration/exec.c b/migration/exec.c index 375d2e1b54..38604d73a6 100644 --- a/migration/exec.c +++ b/migration/exec.c @@ -23,12 +23,31 @@ #include "migration.h" #include "io/channel-command.h" #include "trace.h" +#include "qemu/cutils.h" +#ifdef WIN32 +const char *exec_get_cmd_path(void); +const char *exec_get_cmd_path(void) +{ + g_autofree char *detected_path = g_new(char, MAX_PATH); + if (GetSystemDirectoryA(detected_path, MAX_PATH) == 0) { + warn_report("Could not detect cmd.exe path, using default."); + return "C:\\Windows\\System32\\cmd.exe"; + } + pstrcat(detected_path, MAX_PATH, "\\cmd.exe"); + return g_steal_pointer(&detected_path); +} +#endif void exec_start_outgoing_migration(MigrationState *s, const char *command, Error **errp) { QIOChannel *ioc; + +#ifdef WIN32 + const char *argv[] = { exec_get_cmd_path(), "/c", command, NULL }; +#else const char *argv[] = { "/bin/sh", "-c", command, NULL }; +#endif trace_migration_exec_outgoing(command); ioc = QIO_CHANNEL(qio_channel_command_new_spawn(argv, @@ -55,7 +74,12 @@ static gboolean exec_accept_incoming_migration(QIOChannel *ioc, void exec_start_incoming_migration(const char *command, Error **errp) { QIOChannel *ioc; + +#ifdef WIN32 + const char *argv[] = { exec_get_cmd_path(), "/c", command, NULL }; +#else const char *argv[] = { "/bin/sh", "-c", command, NULL }; +#endif trace_migration_exec_incoming(command); ioc = QIO_CHANNEL(qio_channel_command_new_spawn(argv, diff --git a/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin b/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin index 81bab1adc9..6a8425885c 100644 --- a/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin +++ b/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin Binary files differdiff --git a/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin b/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin index 5eb0a74326..80bdbf2170 100644 --- a/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin +++ b/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin Binary files differdiff --git a/qemu-options.hx b/qemu-options.hx index beeb4475ba..d42f60fb91 100644 --- a/qemu-options.hx +++ b/qemu-options.hx @@ -2585,7 +2585,7 @@ DEF("smbios", HAS_ARG, QEMU_OPTION_smbios, " specify SMBIOS type 17 fields\n" "-smbios type=41[,designation=str][,kind=str][,instance=%d][,pcidev=str]\n" " specify 
SMBIOS type 41 fields\n", - QEMU_ARCH_I386 | QEMU_ARCH_ARM) + QEMU_ARCH_I386 | QEMU_ARCH_ARM | QEMU_ARCH_LOONGARCH) SRST ``-smbios file=binary`` Load SMBIOS entry from binary file. diff --git a/roms/opensbi b/roms/opensbi -Subproject 4489876e933d8ba0d8bc6c64bae71e295d45faa +Subproject 6b5188ca14e59ce7bf71afe4e7d3d557c3d31bf diff --git a/scripts/tracetool/__init__.py b/scripts/tracetool/__init__.py index 5393c7fc5c..33cf85e2b0 100644 --- a/scripts/tracetool/__init__.py +++ b/scripts/tracetool/__init__.py @@ -18,7 +18,6 @@ import weakref import tracetool.format import tracetool.backend -import tracetool.transform def error_write(*lines): @@ -190,18 +189,6 @@ class Arguments: """List of argument names casted to their type.""" return ["(%s)%s" % (type_, name) for type_, name in self._args] - def transform(self, *trans): - """Return a new Arguments instance with transformed types. - - The types in the resulting Arguments instance are transformed according - to tracetool.transform.transform_type. - """ - res = [] - for type_, name in self._args: - res.append((tracetool.transform.transform_type(type_, *trans), - name)) - return Arguments(res) - class Event(object): """Event description. @@ -358,16 +345,6 @@ class Event(object): fmt = Event.QEMU_TRACE return fmt % {"name": self.name, "NAME": self.name.upper()} - def transform(self, *trans): - """Return a new Event with transformed Arguments.""" - return Event(self.name, - list(self.properties), - self.fmt, - self.args.transform(*trans), - self.lineno, - self.filename, - self) - def read_events(fobj, fname): """Generate the output for the given (format, backends) pair. diff --git a/scripts/tracetool/transform.py b/scripts/tracetool/transform.py deleted file mode 100644 index ea8b27799d..0000000000 --- a/scripts/tracetool/transform.py +++ /dev/null @@ -1,168 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -Type-transformation rules. -""" - -__author__ = "LluÃs Vilanova <vilanova@ac.upc.edu>" -__copyright__ = "Copyright 2012-2016, LluÃs Vilanova <vilanova@ac.upc.edu>" -__license__ = "GPL version 2 or (at your option) any later version" - -__maintainer__ = "Stefan Hajnoczi" -__email__ = "stefanha@redhat.com" - - -def _transform_type(type_, trans): - if isinstance(trans, str): - return trans - elif isinstance(trans, dict): - if type_ in trans: - return _transform_type(type_, trans[type_]) - elif None in trans: - return _transform_type(type_, trans[None]) - else: - return type_ - elif callable(trans): - return trans(type_) - else: - raise ValueError("Invalid type transformation rule: %s" % trans) - - -def transform_type(type_, *trans): - """Return a new type transformed according to the given rules. - - Applies each of the transformation rules in trans in order. - - If an element of trans is a string, return it. - - If an element of trans is a function, call it with type_ as its only - argument. - - If an element of trans is a dict, search type_ in its keys. If type_ is - a key, use the value as a transformation rule for type_. Otherwise, if - None is a key use the value as a transformation rule for type_. - - Otherwise, return type_. - - Parameters - ---------- - type_ : str - Type to transform. - trans : list of function or dict - Type transformation rules. 
- """ - if len(trans) == 0: - raise ValueError - res = type_ - for t in trans: - res = _transform_type(res, t) - return res - - -################################################## -# tcg -> host - -def _tcg_2_host(type_): - if type_ == "TCGv": - # force a fixed-size type (target-independent) - return "uint64_t" - else: - return type_ - -TCG_2_HOST = { - "TCGv_i32": "uint32_t", - "TCGv_i64": "uint64_t", - "TCGv_ptr": "void *", - None: _tcg_2_host, - } - - -################################################## -# host -> host compatible with tcg sizes - -HOST_2_TCG_COMPAT = { - "uint8_t": "uint32_t", - "uint16_t": "uint32_t", - } - - -################################################## -# host/tcg -> tcg - -def _host_2_tcg(type_): - if type_.startswith("TCGv"): - return type_ - raise ValueError("Don't know how to translate '%s' into a TCG type\n" % type_) - -HOST_2_TCG = { - "uint32_t": "TCGv_i32", - "uint64_t": "TCGv_i64", - "void *" : "TCGv_ptr", - "CPUArchState *": "TCGv_env", - None: _host_2_tcg, - } - - -################################################## -# tcg -> tcg helper definition - -def _tcg_2_helper_def(type_): - if type_ == "TCGv": - return "target_ulong" - else: - return type_ - -TCG_2_TCG_HELPER_DEF = { - "TCGv_i32": "uint32_t", - "TCGv_i64": "uint64_t", - "TCGv_ptr": "void *", - None: _tcg_2_helper_def, - } - - -################################################## -# tcg -> tcg helper declaration - -def _tcg_2_tcg_helper_decl_error(type_): - raise ValueError("Don't know how to translate type '%s' into a TCG helper declaration type\n" % type_) - -TCG_2_TCG_HELPER_DECL = { - "TCGv" : "tl", - "TCGv_ptr": "ptr", - "TCGv_i32": "i32", - "TCGv_i64": "i64", - "TCGv_env": "env", - None: _tcg_2_tcg_helper_decl_error, - } - - -################################################## -# host/tcg -> tcg temporal constant allocation - -def _host_2_tcg_tmp_new(type_): - if type_.startswith("TCGv"): - return "tcg_temp_new_nop" - raise ValueError("Don't know how to translate type '%s' into a TCG temporal allocation" % type_) - -HOST_2_TCG_TMP_NEW = { - "uint32_t": "tcg_const_i32", - "uint64_t": "tcg_const_i64", - "void *" : "tcg_const_ptr", - None: _host_2_tcg_tmp_new, - } - - -################################################## -# host/tcg -> tcg temporal constant deallocation - -def _host_2_tcg_tmp_free(type_): - if type_.startswith("TCGv"): - return "tcg_temp_free_nop" - raise ValueError("Don't know how to translate type '%s' into a TCG temporal deallocation" % type_) - -HOST_2_TCG_TMP_FREE = { - "uint32_t": "tcg_temp_free_i32", - "uint64_t": "tcg_temp_free_i64", - "void *" : "tcg_temp_free_ptr", - None: _host_2_tcg_tmp_free, - } diff --git a/softmmu/memory.c b/softmmu/memory.c index 9d64efca26..4699ba55ec 100644 --- a/softmmu/memory.c +++ b/softmmu/memory.c @@ -1900,6 +1900,7 @@ int memory_region_register_iommu_notifier(MemoryRegion *mr, iommu_mr = IOMMU_MEMORY_REGION(mr); assert(n->notifier_flags != IOMMU_NOTIFIER_NONE); assert(n->start <= n->end); + assert(n->end <= memory_region_size(mr)); assert(n->iommu_idx >= 0 && n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr)); @@ -1923,7 +1924,6 @@ uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr) void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n) { - MemoryRegion *mr = MEMORY_REGION(iommu_mr); IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr); hwaddr addr, granularity; IOMMUTLBEntry iotlb; @@ -1936,7 +1936,7 @@ void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, 
IOMMUNotifier *n) granularity = memory_region_iommu_get_min_page_size(iommu_mr); - for (addr = 0; addr < memory_region_size(mr); addr += granularity) { + for (addr = n->start; addr < n->end; addr += granularity) { iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx); if (iotlb.perm != IOMMU_NONE) { n->notify(n, &iotlb); @@ -1996,6 +1996,19 @@ void memory_region_notify_iommu_one(IOMMUNotifier *notifier, } } +void memory_region_unmap_iommu_notifier_range(IOMMUNotifier *n) +{ + IOMMUTLBEvent event; + + event.type = IOMMU_NOTIFIER_UNMAP; + event.entry.target_as = &address_space_memory; + event.entry.iova = n->start; + event.entry.perm = IOMMU_NONE; + event.entry.addr_mask = n->end - n->start; + + memory_region_notify_iommu_one(n, &event); +} + void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr, int iommu_idx, IOMMUTLBEvent event) diff --git a/softmmu/runstate.c b/softmmu/runstate.c index f9ad88e6a7..9b3611d56d 100644 --- a/softmmu/runstate.c +++ b/softmmu/runstate.c @@ -46,6 +46,7 @@ #include "qemu/module.h" #include "qemu/plugin.h" #include "qemu/sockets.h" +#include "qemu/timer.h" #include "qemu/thread.h" #include "qom/object.h" #include "qom/object_interfaces.h" diff --git a/softmmu/watchpoint.c b/softmmu/watchpoint.c index 279129dd1c..ad58736787 100644 --- a/softmmu/watchpoint.c +++ b/softmmu/watchpoint.c @@ -162,9 +162,12 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, /* this is currently used only by ARM BE32 */ addr = cc->tcg_ops->adjust_watchpoint_address(cpu, addr, len); } + + assert((flags & ~BP_MEM_ACCESS) == 0); QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { - if (watchpoint_address_matches(wp, addr, len) - && (wp->flags & flags)) { + int hit_flags = wp->flags & flags; + + if (hit_flags && watchpoint_address_matches(wp, addr, len)) { if (replay_running_debug()) { /* * replay_breakpoint reads icount. 
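
Between the two watchpoint.c hunks, a short sketch of the hit-flag encoding introduced in include/hw/core/cpu.h earlier in this diff: the BP_WATCHPOINT_HIT_* bits are now the pre-existing BP_MEM_* access bits (0x01 and 0x02) shifted left by BP_HIT_SHIFT, which keeps the old 0x40/0x80 values while letting a combined read/write access set both hit flags with a single OR, exactly what the `wp->flags |= hit_flags << BP_HIT_SHIFT` line in the next hunk relies on. The snippet is a standalone illustration, not part of the patch.

    #include <assert.h>

    /* Values as in include/hw/core/cpu.h; BP_MEM_* are pre-existing. */
    #define BP_MEM_READ             0x01
    #define BP_MEM_WRITE            0x02
    #define BP_MEM_ACCESS           (BP_MEM_READ | BP_MEM_WRITE)
    #define BP_HIT_SHIFT            6
    #define BP_WATCHPOINT_HIT_READ  (BP_MEM_READ << BP_HIT_SHIFT)   /* 0x40 */
    #define BP_WATCHPOINT_HIT_WRITE (BP_MEM_WRITE << BP_HIT_SHIFT)  /* 0x80 */
    #define BP_WATCHPOINT_HIT       (BP_MEM_ACCESS << BP_HIT_SHIFT)

    int main(void)
    {
        int wp_flags = BP_MEM_READ | BP_MEM_WRITE;  /* watchpoint armed for both */
        int access   = BP_MEM_WRITE;                /* guest performs a write */
        int hit      = wp_flags & access;

        wp_flags |= hit << BP_HIT_SHIFT;            /* as in cpu_check_watchpoint() */
        assert(wp_flags & BP_WATCHPOINT_HIT_WRITE);
        assert(!(wp_flags & BP_WATCHPOINT_HIT_READ));
        return 0;
    }
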
@@ -184,16 +187,14 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, replay_breakpoint(); return; } - if (flags == BP_MEM_READ) { - wp->flags |= BP_WATCHPOINT_HIT_READ; - } else { - wp->flags |= BP_WATCHPOINT_HIT_WRITE; - } + + wp->flags |= hit_flags << BP_HIT_SHIFT; wp->hitaddr = MAX(addr, wp->vaddr); wp->hitattrs = attrs; - if (wp->flags & BP_CPU && cc->tcg_ops->debug_check_watchpoint && - !cc->tcg_ops->debug_check_watchpoint(cpu, wp)) { + if (wp->flags & BP_CPU + && cc->tcg_ops->debug_check_watchpoint + && !cc->tcg_ops->debug_check_watchpoint(cpu, wp)) { wp->flags &= ~BP_WATCHPOINT_HIT; continue; } diff --git a/subprojects/libvhost-user/libvhost-user.c b/subprojects/libvhost-user/libvhost-user.c index fc69783d2b..0200b78e8e 100644 --- a/subprojects/libvhost-user/libvhost-user.c +++ b/subprojects/libvhost-user/libvhost-user.c @@ -140,7 +140,7 @@ vu_request_to_string(unsigned int req) REQ(VHOST_USER_SET_VRING_ENABLE), REQ(VHOST_USER_SEND_RARP), REQ(VHOST_USER_NET_SET_MTU), - REQ(VHOST_USER_SET_SLAVE_REQ_FD), + REQ(VHOST_USER_SET_BACKEND_REQ_FD), REQ(VHOST_USER_IOTLB_MSG), REQ(VHOST_USER_SET_VRING_ENDIAN), REQ(VHOST_USER_GET_CONFIG), @@ -1365,7 +1365,7 @@ bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd, int qidx = vq - dev->vq; int fd_num = 0; VhostUserMsg vmsg = { - .request = VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG, + .request = VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG, .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK, .size = sizeof(vmsg.payload.area), .payload.area = { @@ -1383,7 +1383,7 @@ bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd, vmsg.fd_num = fd_num; - if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD)) { + if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD)) { return false; } @@ -1461,9 +1461,9 @@ vu_get_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg) */ uint64_t features = 1ULL << VHOST_USER_PROTOCOL_F_MQ | 1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD | - 1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ | + 1ULL << VHOST_USER_PROTOCOL_F_BACKEND_REQ | 1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER | - 1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD | + 1ULL << VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD | 1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK | 1ULL << VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS; @@ -1494,7 +1494,7 @@ vu_set_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg) if (vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) && - (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SLAVE_REQ) || + (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_BACKEND_REQ) || !vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_REPLY_ACK))) { /* * The use case for using messages for kick/call is simulation, to make @@ -1507,7 +1507,7 @@ vu_set_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg) * that actually enables the simulation case. 
*/ vu_panic(dev, - "F_IN_BAND_NOTIFICATIONS requires F_SLAVE_REQ && F_REPLY_ACK"); + "F_IN_BAND_NOTIFICATIONS requires F_BACKEND_REQ && F_REPLY_ACK"); return false; } @@ -1910,7 +1910,7 @@ vu_process_message(VuDev *dev, VhostUserMsg *vmsg) return vu_get_queue_num_exec(dev, vmsg); case VHOST_USER_SET_VRING_ENABLE: return vu_set_vring_enable_exec(dev, vmsg); - case VHOST_USER_SET_SLAVE_REQ_FD: + case VHOST_USER_SET_BACKEND_REQ_FD: return vu_set_slave_req_fd(dev, vmsg); case VHOST_USER_GET_CONFIG: return vu_get_config(dev, vmsg); @@ -2416,9 +2416,9 @@ static void _vu_queue_notify(VuDev *dev, VuVirtq *vq, bool sync) if (vq->call_fd < 0 && vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) && - vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SLAVE_REQ)) { + vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_BACKEND_REQ)) { VhostUserMsg vmsg = { - .request = VHOST_USER_SLAVE_VRING_CALL, + .request = VHOST_USER_BACKEND_VRING_CALL, .flags = VHOST_USER_VERSION, .size = sizeof(vmsg.payload.state), .payload.state = { @@ -2553,6 +2553,10 @@ virtqueue_alloc_element(size_t sz, assert(sz >= sizeof(VuVirtqElement)); elem = malloc(out_sg_end); + if (!elem) { + DPRINT("%s: failed to malloc virtqueue element\n", __func__); + return NULL; + } elem->out_num = out_num; elem->in_num = in_num; elem->in_sg = (void *)elem + in_sg_ofs; @@ -2639,6 +2643,9 @@ vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz) /* Now copy what we have collected and mapped */ elem = virtqueue_alloc_element(sz, out_num, in_num); + if (!elem) { + return NULL; + } elem->index = idx; for (i = 0; i < out_num; i++) { elem->out_sg[i] = iov[i]; diff --git a/subprojects/libvhost-user/libvhost-user.h b/subprojects/libvhost-user/libvhost-user.h index 8cda9b8f57..8c5a2719e3 100644 --- a/subprojects/libvhost-user/libvhost-user.h +++ b/subprojects/libvhost-user/libvhost-user.h @@ -54,12 +54,12 @@ enum VhostUserProtocolFeature { VHOST_USER_PROTOCOL_F_RARP = 2, VHOST_USER_PROTOCOL_F_REPLY_ACK = 3, VHOST_USER_PROTOCOL_F_NET_MTU = 4, - VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5, + VHOST_USER_PROTOCOL_F_BACKEND_REQ = 5, VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6, VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7, VHOST_USER_PROTOCOL_F_PAGEFAULT = 8, VHOST_USER_PROTOCOL_F_CONFIG = 9, - VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10, + VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD = 10, VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12, VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS = 14, @@ -92,7 +92,7 @@ typedef enum VhostUserRequest { VHOST_USER_SET_VRING_ENABLE = 18, VHOST_USER_SEND_RARP = 19, VHOST_USER_NET_SET_MTU = 20, - VHOST_USER_SET_SLAVE_REQ_FD = 21, + VHOST_USER_SET_BACKEND_REQ_FD = 21, VHOST_USER_IOTLB_MSG = 22, VHOST_USER_SET_VRING_ENDIAN = 23, VHOST_USER_GET_CONFIG = 24, @@ -113,13 +113,13 @@ typedef enum VhostUserRequest { } VhostUserRequest; typedef enum VhostUserSlaveRequest { - VHOST_USER_SLAVE_NONE = 0, - VHOST_USER_SLAVE_IOTLB_MSG = 1, - VHOST_USER_SLAVE_CONFIG_CHANGE_MSG = 2, - VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG = 3, - VHOST_USER_SLAVE_VRING_CALL = 4, - VHOST_USER_SLAVE_VRING_ERR = 5, - VHOST_USER_SLAVE_MAX + VHOST_USER_BACKEND_NONE = 0, + VHOST_USER_BACKEND_IOTLB_MSG = 1, + VHOST_USER_BACKEND_CONFIG_CHANGE_MSG = 2, + VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG = 3, + VHOST_USER_BACKEND_VRING_CALL = 4, + VHOST_USER_BACKEND_VRING_ERR = 5, + VHOST_USER_BACKEND_MAX } VhostUserSlaveRequest; typedef struct VhostUserMemoryRegion { diff --git a/target/alpha/translate.c b/target/alpha/translate.c 
index 716b083f39..9d25e21164 100644 --- a/target/alpha/translate.c +++ b/target/alpha/translate.c @@ -179,7 +179,6 @@ static void free_context_temps(DisasContext *ctx) { if (ctx->sink) { tcg_gen_discard_i64(ctx->sink); - tcg_temp_free(ctx->sink); ctx->sink = NULL; } } @@ -279,7 +278,6 @@ static void gen_ldf(DisasContext *ctx, TCGv dest, TCGv addr) TCGv_i32 tmp32 = tcg_temp_new_i32(); tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx)); gen_helper_memory_to_f(dest, tmp32); - tcg_temp_free_i32(tmp32); } static void gen_ldg(DisasContext *ctx, TCGv dest, TCGv addr) @@ -287,7 +285,6 @@ static void gen_ldg(DisasContext *ctx, TCGv dest, TCGv addr) TCGv tmp = tcg_temp_new(); tcg_gen_qemu_ld_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx)); gen_helper_memory_to_g(dest, tmp); - tcg_temp_free(tmp); } static void gen_lds(DisasContext *ctx, TCGv dest, TCGv addr) @@ -295,7 +292,6 @@ static void gen_lds(DisasContext *ctx, TCGv dest, TCGv addr) TCGv_i32 tmp32 = tcg_temp_new_i32(); tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx)); gen_helper_memory_to_s(dest, tmp32); - tcg_temp_free_i32(tmp32); } static void gen_ldt(DisasContext *ctx, TCGv dest, TCGv addr) @@ -311,7 +307,6 @@ static void gen_load_fp(DisasContext *ctx, int ra, int rb, int32_t disp16, TCGv addr = tcg_temp_new(); tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); func(ctx, cpu_fir[ra], addr); - tcg_temp_free(addr); } } @@ -342,7 +337,6 @@ static void gen_load_int(DisasContext *ctx, int ra, int rb, int32_t disp16, tcg_gen_mov_i64(cpu_lock_addr, addr); tcg_gen_mov_i64(cpu_lock_value, dest); } - tcg_temp_free(addr); } static void gen_stf(DisasContext *ctx, TCGv src, TCGv addr) @@ -350,7 +344,6 @@ static void gen_stf(DisasContext *ctx, TCGv src, TCGv addr) TCGv_i32 tmp32 = tcg_temp_new_i32(); gen_helper_f_to_memory(tmp32, addr); tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx)); - tcg_temp_free_i32(tmp32); } static void gen_stg(DisasContext *ctx, TCGv src, TCGv addr) @@ -358,7 +351,6 @@ static void gen_stg(DisasContext *ctx, TCGv src, TCGv addr) TCGv tmp = tcg_temp_new(); gen_helper_g_to_memory(tmp, src); tcg_gen_qemu_st_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx)); - tcg_temp_free(tmp); } static void gen_sts(DisasContext *ctx, TCGv src, TCGv addr) @@ -366,7 +358,6 @@ static void gen_sts(DisasContext *ctx, TCGv src, TCGv addr) TCGv_i32 tmp32 = tcg_temp_new_i32(); gen_helper_s_to_memory(tmp32, src); tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx)); - tcg_temp_free_i32(tmp32); } static void gen_stt(DisasContext *ctx, TCGv src, TCGv addr) @@ -380,7 +371,6 @@ static void gen_store_fp(DisasContext *ctx, int ra, int rb, int32_t disp16, TCGv addr = tcg_temp_new(); tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); func(ctx, load_fpr(ctx, ra), addr); - tcg_temp_free(addr); } static void gen_store_int(DisasContext *ctx, int ra, int rb, int32_t disp16, @@ -398,8 +388,6 @@ static void gen_store_int(DisasContext *ctx, int ra, int rb, int32_t disp16, src = load_gpr(ctx, ra); tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, op); - - tcg_temp_free(addr); } static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb, @@ -416,7 +404,6 @@ static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb, lab_fail = gen_new_label(); lab_done = gen_new_label(); tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail); - tcg_temp_free_i64(addr); val = tcg_temp_new_i64(); tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value, @@ -426,7 
+413,6 @@ static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb, if (ra != 31) { tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value); } - tcg_temp_free_i64(val); tcg_gen_br(lab_done); gen_set_label(lab_fail); @@ -504,7 +490,6 @@ static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra, tcg_gen_andi_i64(tmp, load_gpr(ctx, ra), 1); ret = gen_bcond_internal(ctx, cond, tmp, disp); - tcg_temp_free(tmp); return ret; } return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), disp); @@ -550,7 +535,6 @@ static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra, gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra)); ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp); - tcg_temp_free(cmp_tmp); return ret; } @@ -564,8 +548,6 @@ static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc) gen_fold_mzero(cond, va, load_fpr(ctx, ra)); tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc)); - - tcg_temp_free(va); } #define QUAL_RM_N 0x080 /* Round mode nearest even */ @@ -615,8 +597,6 @@ static void gen_qual_roundmode(DisasContext *ctx, int fn11) #else gen_helper_setroundmode(tmp); #endif - - tcg_temp_free_i32(tmp); } static void gen_qual_flushzero(DisasContext *ctx, int fn11) @@ -645,8 +625,6 @@ static void gen_qual_flushzero(DisasContext *ctx, int fn11) #else gen_helper_setflushzero(tmp); #endif - - tcg_temp_free_i32(tmp); } static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp) @@ -716,8 +694,6 @@ static void gen_cvtlq(TCGv vc, TCGv vb) tcg_gen_shri_i64(tmp, vb, 29); tcg_gen_sari_i64(vc, vb, 32); tcg_gen_deposit_i64(vc, vc, tmp, 0, 30); - - tcg_temp_free(tmp); } static void gen_ieee_arith2(DisasContext *ctx, @@ -808,8 +784,6 @@ static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask) tcg_gen_andc_i64(vc, vb, vmask); tcg_gen_or_i64(vc, vc, tmp); - - tcg_temp_free(tmp); } static void gen_ieee_arith3(DisasContext *ctx, @@ -927,7 +901,6 @@ static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, tcg_gen_neg_i64(tmp, tmp); tcg_gen_andi_i64(tmp, tmp, 0x3f); tcg_gen_shl_i64(vc, va, tmp); - tcg_temp_free(tmp); } gen_zapnoti(vc, vc, byte_mask); } @@ -948,7 +921,6 @@ static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7); tcg_gen_shli_i64(tmp, tmp, 3); tcg_gen_shr_i64(vc, va, tmp); - tcg_temp_free(tmp); gen_zapnoti(vc, vc, byte_mask); } } @@ -986,8 +958,6 @@ static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, tcg_gen_shr_i64(vc, tmp, shift); tcg_gen_shri_i64(vc, vc, 1); - tcg_temp_free(shift); - tcg_temp_free(tmp); } } @@ -1015,8 +985,6 @@ static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7); tcg_gen_shli_i64(shift, shift, 3); tcg_gen_shl_i64(vc, tmp, shift); - tcg_temp_free(shift); - tcg_temp_free(tmp); } } @@ -1047,9 +1015,6 @@ static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, tcg_gen_shri_i64(mask, mask, 1); tcg_gen_andc_i64(vc, va, mask); - - tcg_temp_free(mask); - tcg_temp_free(shift); } } @@ -1069,9 +1034,6 @@ static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, tcg_gen_shl_i64(mask, mask, shift); tcg_gen_andc_i64(vc, va, mask); - - tcg_temp_free(mask); - tcg_temp_free(shift); } } @@ -1152,7 +1114,6 @@ static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode) TCGv tmp = tcg_temp_new(); tcg_gen_andi_i64(tmp, 
ctx->ir[IR_A0], PS_INT_MASK); st_flag_byte(tmp, ENV_FLAG_PS_SHIFT); - tcg_temp_free(tmp); } /* Allow interrupts to be recognized right away. */ @@ -1215,7 +1176,6 @@ static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode) tcg_gen_movi_i64(tmp, exc_addr); tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr)); - tcg_temp_free(tmp); entry += (palcode & 0x80 ? 0x2000 + (palcode - 0x80) * 64 @@ -1550,7 +1510,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) tcg_gen_shli_i64(tmp, va, 2); tcg_gen_add_i64(tmp, tmp, vb); tcg_gen_ext32s_i64(vc, tmp); - tcg_temp_free(tmp); break; case 0x09: /* SUBL */ @@ -1563,7 +1522,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) tcg_gen_shli_i64(tmp, va, 2); tcg_gen_sub_i64(tmp, tmp, vb); tcg_gen_ext32s_i64(vc, tmp); - tcg_temp_free(tmp); break; case 0x0F: /* CMPBGE */ @@ -1580,7 +1538,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) tcg_gen_shli_i64(tmp, va, 3); tcg_gen_add_i64(tmp, tmp, vb); tcg_gen_ext32s_i64(vc, tmp); - tcg_temp_free(tmp); break; case 0x1B: /* S8SUBL */ @@ -1588,7 +1545,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) tcg_gen_shli_i64(tmp, va, 3); tcg_gen_sub_i64(tmp, tmp, vb); tcg_gen_ext32s_i64(vc, tmp); - tcg_temp_free(tmp); break; case 0x1D: /* CMPULT */ @@ -1603,7 +1559,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) tmp = tcg_temp_new(); tcg_gen_shli_i64(tmp, va, 2); tcg_gen_add_i64(vc, tmp, vb); - tcg_temp_free(tmp); break; case 0x29: /* SUBQ */ @@ -1614,7 +1569,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) tmp = tcg_temp_new(); tcg_gen_shli_i64(tmp, va, 2); tcg_gen_sub_i64(vc, tmp, vb); - tcg_temp_free(tmp); break; case 0x2D: /* CMPEQ */ @@ -1625,14 +1579,12 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) tmp = tcg_temp_new(); tcg_gen_shli_i64(tmp, va, 3); tcg_gen_add_i64(vc, tmp, vb); - tcg_temp_free(tmp); break; case 0x3B: /* S8SUBQ */ tmp = tcg_temp_new(); tcg_gen_shli_i64(tmp, va, 3); tcg_gen_sub_i64(vc, tmp, vb); - tcg_temp_free(tmp); break; case 0x3D: /* CMPULE */ @@ -1646,7 +1598,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) tcg_gen_add_i64(tmp, tmp, vc); tcg_gen_ext32s_i64(vc, tmp); gen_helper_check_overflow(cpu_env, vc, tmp); - tcg_temp_free(tmp); break; case 0x49: /* SUBL/V */ @@ -1656,7 +1607,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) tcg_gen_sub_i64(tmp, tmp, vc); tcg_gen_ext32s_i64(vc, tmp); gen_helper_check_overflow(cpu_env, vc, tmp); - tcg_temp_free(tmp); break; case 0x4D: /* CMPLT */ @@ -1674,8 +1624,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) tcg_gen_shri_i64(tmp, tmp, 63); tcg_gen_movi_i64(tmp2, 0); gen_helper_check_overflow(cpu_env, tmp, tmp2); - tcg_temp_free(tmp); - tcg_temp_free(tmp2); break; case 0x69: /* SUBQ/V */ @@ -1689,8 +1637,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) tcg_gen_shri_i64(tmp, tmp, 63); tcg_gen_movi_i64(tmp2, 0); gen_helper_check_overflow(cpu_env, tmp, tmp2); - tcg_temp_free(tmp); - tcg_temp_free(tmp2); break; case 0x6D: /* CMPLE */ @@ -1744,7 +1690,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) tcg_gen_andi_i64(tmp, va, 1); tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx), vb, load_gpr(ctx, rc)); - tcg_temp_free(tmp); break; case 0x16: /* CMOVLBC */ @@ -1752,7 +1697,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) 
tcg_gen_andi_i64(tmp, va, 1); tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx), vb, load_gpr(ctx, rc)); - tcg_temp_free(tmp); break; case 0x20: /* BIS */ @@ -1884,7 +1828,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) vb = load_gpr(ctx, rb); tcg_gen_andi_i64(tmp, vb, 0x3f); tcg_gen_shr_i64(vc, va, tmp); - tcg_temp_free(tmp); } break; case 0x36: @@ -1900,7 +1843,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) vb = load_gpr(ctx, rb); tcg_gen_andi_i64(tmp, vb, 0x3f); tcg_gen_shl_i64(vc, va, tmp); - tcg_temp_free(tmp); } break; case 0x3B: @@ -1916,7 +1858,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) vb = load_gpr(ctx, rb); tcg_gen_andi_i64(tmp, vb, 0x3f); tcg_gen_sar_i64(vc, va, tmp); - tcg_temp_free(tmp); } break; case 0x52: @@ -1978,7 +1919,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) /* UMULH */ tmp = tcg_temp_new(); tcg_gen_mulu2_i64(tmp, vc, va, vb); - tcg_temp_free(tmp); break; case 0x40: /* MULL/V */ @@ -1988,7 +1928,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) tcg_gen_mul_i64(tmp, tmp, vc); tcg_gen_ext32s_i64(vc, tmp); gen_helper_check_overflow(cpu_env, vc, tmp); - tcg_temp_free(tmp); break; case 0x60: /* MULQ/V */ @@ -1997,8 +1936,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) tcg_gen_muls2_i64(vc, tmp, va, vb); tcg_gen_sari_i64(tmp2, vc, 63); gen_helper_check_overflow(cpu_env, tmp, tmp2); - tcg_temp_free(tmp); - tcg_temp_free(tmp2); break; default: goto invalid_opc; @@ -2017,7 +1954,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) va = load_gpr(ctx, ra); tcg_gen_extrl_i64_i32(t32, va); gen_helper_memory_to_s(vc, t32); - tcg_temp_free_i32(t32); break; case 0x0A: /* SQRTF */ @@ -2040,7 +1976,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) va = load_gpr(ctx, ra); tcg_gen_extrl_i64_i32(t32, va); gen_helper_memory_to_f(vc, t32); - tcg_temp_free_i32(t32); break; case 0x24: /* ITOFT */ @@ -2526,7 +2461,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEUQ); break; } - tcg_temp_free(addr); break; } #else @@ -2550,7 +2484,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) va = load_fpr(ctx, ra); gen_helper_s_to_memory(t32, va); tcg_gen_ext_i32_i64(vc, t32); - tcg_temp_free_i32(t32); break; } @@ -2706,7 +2639,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) tmp = tcg_temp_new(); tcg_gen_andi_i64(tmp, vb, 1); st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT); - tcg_temp_free(tmp); tcg_gen_andi_i64(cpu_pc, vb, ~3); /* Allow interrupts to be recognized right away. 
*/ ret = DISAS_PC_UPDATED_NOCHAIN; @@ -2728,7 +2660,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) tmp = tcg_temp_new(); tcg_gen_addi_i64(tmp, vb, disp12); tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL); - tcg_temp_free(tmp); break; case 0x1: /* Quadword physical access */ @@ -2737,7 +2668,6 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) tmp = tcg_temp_new(); tcg_gen_addi_i64(tmp, vb, disp12); tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ); - tcg_temp_free(tmp); break; case 0x2: /* Longword physical access with lock */ @@ -2996,7 +2926,6 @@ static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) ctx->base.is_jmp = translate_one(ctx, insn); free_context_temps(ctx); - translator_loop_temp_check(&ctx->base); } static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 787121694c..c097cae988 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -869,6 +869,8 @@ struct ArchCPU { DynamicGDBXMLInfo dyn_sysreg_xml; DynamicGDBXMLInfo dyn_svereg_xml; + DynamicGDBXMLInfo dyn_m_systemreg_xml; + DynamicGDBXMLInfo dyn_m_secextreg_xml; /* Timers used by the generic (architected) timer */ QEMUTimer *gt_timer[NUM_GTIMERS]; @@ -1112,13 +1114,6 @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr, int arm_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int arm_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); -/* - * Helpers to dynamically generates XML descriptions of the sysregs - * and SVE registers. Returns the number of registers in each set. - */ -int arm_gen_dynamic_sysreg_xml(CPUState *cpu, int base_reg); -int arm_gen_dynamic_svereg_xml(CPUState *cpu, int base_reg); - /* Returns the dynamically generated XML for the gdb stub. * Returns a pointer to the XML contents for the specified XML file or NULL * if the XML name doesn't match the predefined one. @@ -2389,7 +2384,8 @@ static inline int arm_feature(CPUARMState *env, int feature) void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp); #if !defined(CONFIG_USER_ONLY) -/* Return true if exception levels below EL3 are in secure state, +/* + * Return true if exception levels below EL3 are in secure state, * or would be following an exception return to that level. 
* Unlike arm_is_secure() (which is always a question about the * _current_ state of the CPU) this doesn't care about the current @@ -2397,6 +2393,7 @@ void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp); */ static inline bool arm_is_secure_below_el3(CPUARMState *env) { + assert(!arm_feature(env, ARM_FEATURE_M)); if (arm_feature(env, ARM_FEATURE_EL3)) { return !(env->cp15.scr_el3 & SCR_NS); } else { @@ -2410,6 +2407,7 @@ static inline bool arm_is_secure_below_el3(CPUARMState *env) /* Return true if the CPU is AArch64 EL3 or AArch32 Mon */ static inline bool arm_is_el3_or_mon(CPUARMState *env) { + assert(!arm_feature(env, ARM_FEATURE_M)); if (arm_feature(env, ARM_FEATURE_EL3)) { if (is_a64(env) && extract32(env->pstate, 2, 2) == 3) { /* CPU currently in AArch64 state and EL3 */ @@ -2426,6 +2424,9 @@ static inline bool arm_is_el3_or_mon(CPUARMState *env) /* Return true if the processor is in secure state */ static inline bool arm_is_secure(CPUARMState *env) { + if (arm_feature(env, ARM_FEATURE_M)) { + return env->v7m.secure; + } if (arm_is_el3_or_mon(env)) { return true; } diff --git a/target/arm/gdbstub.c b/target/arm/gdbstub.c index 2f806512d0..3f799f5d05 100644 --- a/target/arm/gdbstub.c +++ b/target/arm/gdbstub.c @@ -305,7 +305,7 @@ static void arm_register_sysreg_for_xml(gpointer key, gpointer value, } } -int arm_gen_dynamic_sysreg_xml(CPUState *cs, int base_reg) +static int arm_gen_dynamic_sysreg_xml(CPUState *cs, int base_reg) { ARMCPU *cpu = ARM_CPU(cs); GString *s = g_string_new(NULL); @@ -322,125 +322,163 @@ int arm_gen_dynamic_sysreg_xml(CPUState *cs, int base_reg) return cpu->dyn_sysreg_xml.num; } -struct TypeSize { - const char *gdb_type; - int size; - const char sz, suffix; +typedef enum { + M_SYSREG_MSP, + M_SYSREG_PSP, + M_SYSREG_PRIMASK, + M_SYSREG_CONTROL, + M_SYSREG_BASEPRI, + M_SYSREG_FAULTMASK, + M_SYSREG_MSPLIM, + M_SYSREG_PSPLIM, +} MProfileSysreg; + +static const struct { + const char *name; + int feature; +} m_sysreg_def[] = { + [M_SYSREG_MSP] = { "msp", ARM_FEATURE_M }, + [M_SYSREG_PSP] = { "psp", ARM_FEATURE_M }, + [M_SYSREG_PRIMASK] = { "primask", ARM_FEATURE_M }, + [M_SYSREG_CONTROL] = { "control", ARM_FEATURE_M }, + [M_SYSREG_BASEPRI] = { "basepri", ARM_FEATURE_M_MAIN }, + [M_SYSREG_FAULTMASK] = { "faultmask", ARM_FEATURE_M_MAIN }, + [M_SYSREG_MSPLIM] = { "msplim", ARM_FEATURE_V8 }, + [M_SYSREG_PSPLIM] = { "psplim", ARM_FEATURE_V8 }, }; -static const struct TypeSize vec_lanes[] = { - /* quads */ - { "uint128", 128, 'q', 'u' }, - { "int128", 128, 'q', 's' }, - /* 64 bit */ - { "ieee_double", 64, 'd', 'f' }, - { "uint64", 64, 'd', 'u' }, - { "int64", 64, 'd', 's' }, - /* 32 bit */ - { "ieee_single", 32, 's', 'f' }, - { "uint32", 32, 's', 'u' }, - { "int32", 32, 's', 's' }, - /* 16 bit */ - { "ieee_half", 16, 'h', 'f' }, - { "uint16", 16, 'h', 'u' }, - { "int16", 16, 'h', 's' }, - /* bytes */ - { "uint8", 8, 'b', 'u' }, - { "int8", 8, 'b', 's' }, -}; +static uint32_t *m_sysreg_ptr(CPUARMState *env, MProfileSysreg reg, bool sec) +{ + uint32_t *ptr; + + switch (reg) { + case M_SYSREG_MSP: + ptr = arm_v7m_get_sp_ptr(env, sec, false, true); + break; + case M_SYSREG_PSP: + ptr = arm_v7m_get_sp_ptr(env, sec, true, true); + break; + case M_SYSREG_MSPLIM: + ptr = &env->v7m.msplim[sec]; + break; + case M_SYSREG_PSPLIM: + ptr = &env->v7m.psplim[sec]; + break; + case M_SYSREG_PRIMASK: + ptr = &env->v7m.primask[sec]; + break; + case M_SYSREG_BASEPRI: + ptr = &env->v7m.basepri[sec]; + break; + case M_SYSREG_FAULTMASK: + ptr = &env->v7m.faultmask[sec]; + break; + case 
M_SYSREG_CONTROL: + ptr = &env->v7m.control[sec]; + break; + default: + return NULL; + } + return arm_feature(env, m_sysreg_def[reg].feature) ? ptr : NULL; +} + +static int m_sysreg_get(CPUARMState *env, GByteArray *buf, + MProfileSysreg reg, bool secure) +{ + uint32_t *ptr = m_sysreg_ptr(env, reg, secure); + + if (ptr == NULL) { + return 0; + } + return gdb_get_reg32(buf, *ptr); +} + +static int arm_gdb_get_m_systemreg(CPUARMState *env, GByteArray *buf, int reg) +{ + /* + * Here, we emulate MRS instruction, where CONTROL has a mix of + * banked and non-banked bits. + */ + if (reg == M_SYSREG_CONTROL) { + return gdb_get_reg32(buf, arm_v7m_mrs_control(env, env->v7m.secure)); + } + return m_sysreg_get(env, buf, reg, env->v7m.secure); +} +static int arm_gdb_set_m_systemreg(CPUARMState *env, uint8_t *buf, int reg) +{ + return 0; /* TODO */ +} -int arm_gen_dynamic_svereg_xml(CPUState *cs, int base_reg) +static int arm_gen_dynamic_m_systemreg_xml(CPUState *cs, int orig_base_reg) { ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; GString *s = g_string_new(NULL); - DynamicGDBXMLInfo *info = &cpu->dyn_svereg_xml; - g_autoptr(GString) ts = g_string_new(""); - int i, j, bits, reg_width = (cpu->sve_max_vq * 128); - info->num = 0; + int base_reg = orig_base_reg; + int i; + g_string_printf(s, "<?xml version=\"1.0\"?>"); g_string_append_printf(s, "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"); - g_string_append_printf(s, "<feature name=\"org.gnu.gdb.aarch64.sve\">"); + g_string_append_printf(s, "<feature name=\"org.gnu.gdb.arm.m-system\">\n"); - /* First define types and totals in a whole VL */ - for (i = 0; i < ARRAY_SIZE(vec_lanes); i++) { - int count = reg_width / vec_lanes[i].size; - g_string_printf(ts, "svev%c%c", vec_lanes[i].sz, vec_lanes[i].suffix); - g_string_append_printf(s, - "<vector id=\"%s\" type=\"%s\" count=\"%d\"/>", - ts->str, vec_lanes[i].gdb_type, count); - } - /* - * Now define a union for each size group containing unsigned and - * signed and potentially float versions of each size from 128 to - * 8 bits. - */ - for (bits = 128, i = 0; bits >= 8; bits /= 2, i++) { - const char suf[] = { 'q', 'd', 's', 'h', 'b' }; - g_string_append_printf(s, "<union id=\"svevn%c\">", suf[i]); - for (j = 0; j < ARRAY_SIZE(vec_lanes); j++) { - if (vec_lanes[j].size == bits) { - g_string_append_printf(s, "<field name=\"%c\" type=\"svev%c%c\"/>", - vec_lanes[j].suffix, - vec_lanes[j].sz, vec_lanes[j].suffix); - } + for (i = 0; i < ARRAY_SIZE(m_sysreg_def); i++) { + if (arm_feature(env, m_sysreg_def[i].feature)) { + g_string_append_printf(s, + "<reg name=\"%s\" bitsize=\"32\" regnum=\"%d\"/>\n", + m_sysreg_def[i].name, base_reg++); } - g_string_append(s, "</union>"); } - /* And now the final union of unions */ - g_string_append(s, "<union id=\"svev\">"); - for (bits = 128, i = 0; bits >= 8; bits /= 2, i++) { - const char suf[] = { 'q', 'd', 's', 'h', 'b' }; - g_string_append_printf(s, "<field name=\"%c\" type=\"svevn%c\"/>", - suf[i], suf[i]); - } - g_string_append(s, "</union>"); - /* Finally the sve prefix type */ - g_string_append_printf(s, - "<vector id=\"svep\" type=\"uint8\" count=\"%d\"/>", - reg_width / 8); + g_string_append_printf(s, "</feature>"); + cpu->dyn_m_systemreg_xml.desc = g_string_free(s, false); + cpu->dyn_m_systemreg_xml.num = base_reg - orig_base_reg; + + return cpu->dyn_m_systemreg_xml.num; +} + +#ifndef CONFIG_USER_ONLY +/* + * For user-only, we see the non-secure registers via m_systemreg above. + * For secext, encode the non-secure view as even and secure view as odd. 
+ */ +static int arm_gdb_get_m_secextreg(CPUARMState *env, GByteArray *buf, int reg) +{ + return m_sysreg_get(env, buf, reg >> 1, reg & 1); +} + +static int arm_gdb_set_m_secextreg(CPUARMState *env, uint8_t *buf, int reg) +{ + return 0; /* TODO */ +} + +static int arm_gen_dynamic_m_secextreg_xml(CPUState *cs, int orig_base_reg) +{ + ARMCPU *cpu = ARM_CPU(cs); + GString *s = g_string_new(NULL); + int base_reg = orig_base_reg; + int i; + + g_string_printf(s, "<?xml version=\"1.0\"?>"); + g_string_append_printf(s, "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"); + g_string_append_printf(s, "<feature name=\"org.gnu.gdb.arm.secext\">\n"); - /* Then define each register in parts for each vq */ - for (i = 0; i < 32; i++) { + for (i = 0; i < ARRAY_SIZE(m_sysreg_def); i++) { g_string_append_printf(s, - "<reg name=\"z%d\" bitsize=\"%d\"" - " regnum=\"%d\" type=\"svev\"/>", - i, reg_width, base_reg++); - info->num++; - } - /* fpscr & status registers */ - g_string_append_printf(s, "<reg name=\"fpsr\" bitsize=\"32\"" - " regnum=\"%d\" group=\"float\"" - " type=\"int\"/>", base_reg++); - g_string_append_printf(s, "<reg name=\"fpcr\" bitsize=\"32\"" - " regnum=\"%d\" group=\"float\"" - " type=\"int\"/>", base_reg++); - info->num += 2; - - for (i = 0; i < 16; i++) { + "<reg name=\"%s_ns\" bitsize=\"32\" regnum=\"%d\"/>\n", + m_sysreg_def[i].name, base_reg++); g_string_append_printf(s, - "<reg name=\"p%d\" bitsize=\"%d\"" - " regnum=\"%d\" type=\"svep\"/>", - i, cpu->sve_max_vq * 16, base_reg++); - info->num++; + "<reg name=\"%s_s\" bitsize=\"32\" regnum=\"%d\"/>\n", + m_sysreg_def[i].name, base_reg++); } - g_string_append_printf(s, - "<reg name=\"ffr\" bitsize=\"%d\"" - " regnum=\"%d\" group=\"vector\"" - " type=\"svep\"/>", - cpu->sve_max_vq * 16, base_reg++); - g_string_append_printf(s, - "<reg name=\"vg\" bitsize=\"64\"" - " regnum=\"%d\" type=\"int\"/>", - base_reg++); - info->num += 2; + g_string_append_printf(s, "</feature>"); - cpu->dyn_svereg_xml.desc = g_string_free(s, false); + cpu->dyn_m_secextreg_xml.desc = g_string_free(s, false); + cpu->dyn_m_secextreg_xml.num = base_reg - orig_base_reg; - return cpu->dyn_svereg_xml.num; + return cpu->dyn_m_secextreg_xml.num; } - +#endif const char *arm_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname) { @@ -450,6 +488,12 @@ const char *arm_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname) return cpu->dyn_sysreg_xml.desc; } else if (strcmp(xmlname, "sve-registers.xml") == 0) { return cpu->dyn_svereg_xml.desc; + } else if (strcmp(xmlname, "arm-m-system.xml") == 0) { + return cpu->dyn_m_systemreg_xml.desc; +#ifndef CONFIG_USER_ONLY + } else if (strcmp(xmlname, "arm-m-secext.xml") == 0) { + return cpu->dyn_m_secextreg_xml.desc; +#endif } return NULL; } @@ -466,14 +510,20 @@ void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu) */ #ifdef TARGET_AARCH64 if (isar_feature_aa64_sve(&cpu->isar)) { - gdb_register_coprocessor(cs, arm_gdb_get_svereg, arm_gdb_set_svereg, - arm_gen_dynamic_svereg_xml(cs, cs->gdb_num_regs), + int nreg = arm_gen_dynamic_svereg_xml(cs, cs->gdb_num_regs); + gdb_register_coprocessor(cs, aarch64_gdb_get_sve_reg, + aarch64_gdb_set_sve_reg, nreg, "sve-registers.xml", 0); } else { - gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg, - aarch64_fpu_gdb_set_reg, + gdb_register_coprocessor(cs, aarch64_gdb_get_fpu_reg, + aarch64_gdb_set_fpu_reg, 34, "aarch64-fpu.xml", 0); } + if (isar_feature_aa64_pauth(&cpu->isar)) { + gdb_register_coprocessor(cs, aarch64_gdb_get_pauth_reg, + aarch64_gdb_set_pauth_reg, + 4, "aarch64-pauth.xml", 0); + } 
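/*
 * [Editorial aside, not part of the patch: a hypothetical sketch of the
 *  numbering scheme behind the "arm-m-secext.xml" feature generated above.
 *  The generator emits every m_sysreg_def[] entry twice, "_ns" first, so
 *  even register offsets select the non-secure view and odd offsets the
 *  secure view, exactly as arm_gdb_get_m_secextreg() decodes them.
 *  The helper name below is illustrative only.]
 */
#include <stdbool.h>

static inline void decode_m_secext_regnum(int reg, int *index, bool *secure)
{
    *index = reg >> 1;   /* which m_sysreg_def[] entry, e.g. 0 -> msp */
    *secure = reg & 1;   /* 0 -> "_ns" view, 1 -> "_s" view */
}
/*
 * [So offset 0 -> msp_ns, 1 -> msp_s, 2 -> psp_ns, 3 -> psp_s, and so on;
 *  entries whose CPU feature is absent make m_sysreg_get() return 0 bytes.]
 */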
#endif } else { if (arm_feature(env, ARM_FEATURE_NEON)) { @@ -503,4 +553,18 @@ void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu) arm_gen_dynamic_sysreg_xml(cs, cs->gdb_num_regs), "system-registers.xml", 0); + if (arm_feature(env, ARM_FEATURE_M)) { + gdb_register_coprocessor(cs, + arm_gdb_get_m_systemreg, arm_gdb_set_m_systemreg, + arm_gen_dynamic_m_systemreg_xml(cs, cs->gdb_num_regs), + "arm-m-system.xml", 0); +#ifndef CONFIG_USER_ONLY + if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { + gdb_register_coprocessor(cs, + arm_gdb_get_m_secextreg, arm_gdb_set_m_secextreg, + arm_gen_dynamic_m_secextreg_xml(cs, cs->gdb_num_regs), + "arm-m-secext.xml", 0); + } +#endif + } } diff --git a/target/arm/gdbstub64.c b/target/arm/gdbstub64.c index 07a6746944..3bee892fb7 100644 --- a/target/arm/gdbstub64.c +++ b/target/arm/gdbstub64.c @@ -72,7 +72,7 @@ int aarch64_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) return 0; } -int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg) +int aarch64_gdb_get_fpu_reg(CPUARMState *env, GByteArray *buf, int reg) { switch (reg) { case 0 ... 31: @@ -92,7 +92,7 @@ int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg) } } -int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg) +int aarch64_gdb_set_fpu_reg(CPUARMState *env, uint8_t *buf, int reg) { switch (reg) { case 0 ... 31: @@ -116,7 +116,7 @@ int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg) } } -int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg) +int aarch64_gdb_get_sve_reg(CPUARMState *env, GByteArray *buf, int reg) { ARMCPU *cpu = env_archcpu(env); @@ -164,7 +164,7 @@ int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg) return 0; } -int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg) +int aarch64_gdb_set_sve_reg(CPUARMState *env, uint8_t *buf, int reg) { ARMCPU *cpu = env_archcpu(env); @@ -209,3 +209,170 @@ int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg) return 0; } + +int aarch64_gdb_get_pauth_reg(CPUARMState *env, GByteArray *buf, int reg) +{ + switch (reg) { + case 0: /* pauth_dmask */ + case 1: /* pauth_cmask */ + case 2: /* pauth_dmask_high */ + case 3: /* pauth_cmask_high */ + /* + * Note that older versions of this feature only contained + * pauth_{d,c}mask, for use with Linux user processes, and + * thus exclusively in the low half of the address space. + * + * To support system mode, and to debug kernels, two new regs + * were added to cover the high half of the address space. + * For the purpose of pauth_ptr_mask, we can use any well-formed + * address within the address space half -- here, 0 and -1. + */ + { + bool is_data = !(reg & 1); + bool is_high = reg & 2; + uint64_t mask = pauth_ptr_mask(env, -is_high, is_data); + return gdb_get_reg64(buf, mask); + } + default: + return 0; + } +} + +int aarch64_gdb_set_pauth_reg(CPUARMState *env, uint8_t *buf, int reg) +{ + /* All pseudo registers are read-only. 
*/ + return 0; +} + +static void output_vector_union_type(GString *s, int reg_width, + const char *name) +{ + struct TypeSize { + const char *gdb_type; + short size; + char sz, suffix; + }; + + static const struct TypeSize vec_lanes[] = { + /* quads */ + { "uint128", 128, 'q', 'u' }, + { "int128", 128, 'q', 's' }, + /* 64 bit */ + { "ieee_double", 64, 'd', 'f' }, + { "uint64", 64, 'd', 'u' }, + { "int64", 64, 'd', 's' }, + /* 32 bit */ + { "ieee_single", 32, 's', 'f' }, + { "uint32", 32, 's', 'u' }, + { "int32", 32, 's', 's' }, + /* 16 bit */ + { "ieee_half", 16, 'h', 'f' }, + { "uint16", 16, 'h', 'u' }, + { "int16", 16, 'h', 's' }, + /* bytes */ + { "uint8", 8, 'b', 'u' }, + { "int8", 8, 'b', 's' }, + }; + + static const char suf[] = { 'b', 'h', 's', 'd', 'q' }; + int i, j; + + /* First define types and totals in a whole VL */ + for (i = 0; i < ARRAY_SIZE(vec_lanes); i++) { + g_string_append_printf(s, + "<vector id=\"%s%c%c\" type=\"%s\" count=\"%d\"/>", + name, vec_lanes[i].sz, vec_lanes[i].suffix, + vec_lanes[i].gdb_type, reg_width / vec_lanes[i].size); + } + + /* + * Now define a union for each size group containing unsigned and + * signed and potentially float versions of each size from 128 to + * 8 bits. + */ + for (i = 0; i < ARRAY_SIZE(suf); i++) { + int bits = 8 << i; + + g_string_append_printf(s, "<union id=\"%sn%c\">", name, suf[i]); + for (j = 0; j < ARRAY_SIZE(vec_lanes); j++) { + if (vec_lanes[j].size == bits) { + g_string_append_printf(s, "<field name=\"%c\" type=\"%s%c%c\"/>", + vec_lanes[j].suffix, name, + vec_lanes[j].sz, vec_lanes[j].suffix); + } + } + g_string_append(s, "</union>"); + } + + /* And now the final union of unions */ + g_string_append_printf(s, "<union id=\"%s\">", name); + for (i = ARRAY_SIZE(suf) - 1; i >= 0; i--) { + g_string_append_printf(s, "<field name=\"%c\" type=\"%sn%c\"/>", + suf[i], name, suf[i]); + } + g_string_append(s, "</union>"); +} + +int arm_gen_dynamic_svereg_xml(CPUState *cs, int orig_base_reg) +{ + ARMCPU *cpu = ARM_CPU(cs); + GString *s = g_string_new(NULL); + DynamicGDBXMLInfo *info = &cpu->dyn_svereg_xml; + int reg_width = cpu->sve_max_vq * 128; + int pred_width = cpu->sve_max_vq * 16; + int base_reg = orig_base_reg; + int i; + + g_string_printf(s, "<?xml version=\"1.0\"?>"); + g_string_append_printf(s, "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"); + g_string_append_printf(s, "<feature name=\"org.gnu.gdb.aarch64.sve\">"); + + /* Create the vector union type. */ + output_vector_union_type(s, reg_width, "svev"); + + /* Create the predicate vector type. */ + g_string_append_printf(s, + "<vector id=\"svep\" type=\"uint8\" count=\"%d\"/>", + pred_width / 8); + + /* Define the vector registers. */ + for (i = 0; i < 32; i++) { + g_string_append_printf(s, + "<reg name=\"z%d\" bitsize=\"%d\"" + " regnum=\"%d\" type=\"svev\"/>", + i, reg_width, base_reg++); + } + + /* fpscr & status registers */ + g_string_append_printf(s, "<reg name=\"fpsr\" bitsize=\"32\"" + " regnum=\"%d\" group=\"float\"" + " type=\"int\"/>", base_reg++); + g_string_append_printf(s, "<reg name=\"fpcr\" bitsize=\"32\"" + " regnum=\"%d\" group=\"float\"" + " type=\"int\"/>", base_reg++); + + /* Define the predicate registers. 
*/ + for (i = 0; i < 16; i++) { + g_string_append_printf(s, + "<reg name=\"p%d\" bitsize=\"%d\"" + " regnum=\"%d\" type=\"svep\"/>", + i, pred_width, base_reg++); + } + g_string_append_printf(s, + "<reg name=\"ffr\" bitsize=\"%d\"" + " regnum=\"%d\" group=\"vector\"" + " type=\"svep\"/>", + pred_width, base_reg++); + + /* Define the vector length pseudo-register. */ + g_string_append_printf(s, + "<reg name=\"vg\" bitsize=\"64\"" + " regnum=\"%d\" type=\"int\"/>", + base_reg++); + + g_string_append_printf(s, "</feature>"); + + info->desc = g_string_free(s, false); + info->num = base_reg - orig_base_reg; + return info->num; +} diff --git a/target/arm/helper.c b/target/arm/helper.c index 82c546f11a..2297626bfb 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -5787,6 +5787,9 @@ uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, bool secure) uint64_t arm_hcr_el2_eff(CPUARMState *env) { + if (arm_feature(env, ARM_FEATURE_M)) { + return 0; + } return arm_hcr_el2_eff_secstate(env, arm_is_secure_below_el3(env)); } diff --git a/target/arm/internals.h b/target/arm/internals.h index 3c7341e774..b1ef05963f 100644 --- a/target/arm/internals.h +++ b/target/arm/internals.h @@ -1344,16 +1344,32 @@ static inline uint64_t pmu_counter_mask(CPUARMState *env) } #ifdef TARGET_AARCH64 -int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg); -int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg); -int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg); -int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg); +int arm_gen_dynamic_svereg_xml(CPUState *cpu, int base_reg); +int aarch64_gdb_get_sve_reg(CPUARMState *env, GByteArray *buf, int reg); +int aarch64_gdb_set_sve_reg(CPUARMState *env, uint8_t *buf, int reg); +int aarch64_gdb_get_fpu_reg(CPUARMState *env, GByteArray *buf, int reg); +int aarch64_gdb_set_fpu_reg(CPUARMState *env, uint8_t *buf, int reg); +int aarch64_gdb_get_pauth_reg(CPUARMState *env, GByteArray *buf, int reg); +int aarch64_gdb_set_pauth_reg(CPUARMState *env, uint8_t *buf, int reg); void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp); void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp); void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp); void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp); #endif +/* Read the CONTROL register as the MRS instruction would. */ +uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure); + +/* + * Return a pointer to the location where we currently store the + * stack pointer for the requested security state and thread mode. + * This pointer will become invalid if the CPU state is updated + * such that the stack pointers are switched around (eg changing + * the SPSEL control bit). + */ +uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure, + bool threadmode, bool spsel); + #ifdef CONFIG_USER_ONLY static inline void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu) { } #else @@ -1367,6 +1383,16 @@ int exception_target_el(CPUARMState *env); bool arm_singlestep_active(CPUARMState *env); bool arm_generate_debug_exceptions(CPUARMState *env); +/** + * pauth_ptr_mask: + * @env: cpu context + * @ptr: selects between TTBR0 and TTBR1 + * @data: selects between TBI and TBID + * + * Return a mask of the bits of @ptr that contain the authentication code. 
+ */ +uint64_t pauth_ptr_mask(CPUARMState *env, uint64_t ptr, bool data); + /* Add the cpreg definitions for debug related system registers */ void define_debug_regs(ARMCPU *cpu); diff --git a/target/arm/ptw.c b/target/arm/ptw.c index 8541ef56d6..ec3f51782a 100644 --- a/target/arm/ptw.c +++ b/target/arm/ptw.c @@ -1081,70 +1081,119 @@ static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va, * check_s2_mmu_setup * @cpu: ARMCPU * @is_aa64: True if the translation regime is in AArch64 state - * @startlevel: Suggested starting level - * @inputsize: Bitsize of IPAs + * @tcr: VTCR_EL2 or VSTCR_EL2 + * @ds: Effective value of TCR.DS. + * @iasize: Bitsize of IPAs * @stride: Page-table stride (See the ARM ARM) * - * Returns true if the suggested S2 translation parameters are OK and - * false otherwise. + * Decode the starting level of the S2 lookup, returning INT_MIN if + * the configuration is invalid. */ -static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level, - int inputsize, int stride, int outputsize) +static int check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, uint64_t tcr, + bool ds, int iasize, int stride) { - const int grainsize = stride + 3; - int startsizecheck; - - /* - * Negative levels are usually not allowed... - * Except for FEAT_LPA2, 4k page table, 52-bit address space, which - * begins with level -1. Note that previous feature tests will have - * eliminated this combination if it is not enabled. - */ - if (level < (inputsize == 52 && stride == 9 ? -1 : 0)) { - return false; - } - - startsizecheck = inputsize - ((3 - level) * stride + grainsize); - if (startsizecheck < 1 || startsizecheck > stride + 4) { - return false; - } + int sl0, sl2, startlevel, granulebits, levels; + int s1_min_iasize, s1_max_iasize; + sl0 = extract32(tcr, 6, 2); if (is_aa64) { + /* + * AArch64.S2InvalidTxSZ: While we checked tsz_oob near the top of + * get_phys_addr_lpae, that used aa64_va_parameters which apply + * to aarch64. If Stage1 is aarch32, the min_txsz is larger. + * See AArch64.S2MinTxSZ, where min_tsz is 24, translated to + * inputsize is 64 - 24 = 40. + */ + if (iasize < 40 && !arm_el_is_aa64(&cpu->env, 1)) { + goto fail; + } + + /* + * AArch64.S2InvalidSL: Interpretation of SL depends on the page size, + * so interleave AArch64.S2StartLevel. + */ switch (stride) { - case 13: /* 64KB Pages. */ - if (level == 0 || (level == 1 && outputsize <= 42)) { - return false; + case 9: /* 4KB */ + /* SL2 is RES0 unless DS=1 & 4KB granule. */ + sl2 = extract64(tcr, 33, 1); + if (ds && sl2) { + if (sl0 != 0) { + goto fail; + } + startlevel = -1; + } else { + startlevel = 2 - sl0; + switch (sl0) { + case 2: + if (arm_pamax(cpu) < 44) { + goto fail; + } + break; + case 3: + if (!cpu_isar_feature(aa64_st, cpu)) { + goto fail; + } + startlevel = 3; + break; + } } break; - case 11: /* 16KB Pages. */ - if (level == 0 || (level == 1 && outputsize <= 40)) { - return false; + case 11: /* 16KB */ + switch (sl0) { + case 2: + if (arm_pamax(cpu) < 42) { + goto fail; + } + break; + case 3: + if (!ds) { + goto fail; + } + break; } + startlevel = 3 - sl0; break; - case 9: /* 4KB Pages. */ - if (level == 0 && outputsize <= 42) { - return false; + case 13: /* 64KB */ + switch (sl0) { + case 2: + if (arm_pamax(cpu) < 44) { + goto fail; + } + break; + case 3: + goto fail; } + startlevel = 3 - sl0; break; default: g_assert_not_reached(); } - - /* Inputsize checks. 
*/ - if (inputsize > outputsize && - (arm_el_is_aa64(&cpu->env, 1) || inputsize > 40)) { - /* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */ - return false; - } } else { - /* AArch32 only supports 4KB pages. Assert on that. */ + /* + * Things are simpler for AArch32 EL2, with only 4k pages. + * There is no separate S2InvalidSL function, but AArch32.S2Walk + * begins with walkparms.sl0 in {'1x'}. + */ assert(stride == 9); - - if (level == 0) { - return false; + if (sl0 >= 2) { + goto fail; } + startlevel = 2 - sl0; } - return true; + + /* AArch{64,32}.S2InconsistentSL are functionally equivalent. */ + levels = 3 - startlevel; + granulebits = stride + 3; + + s1_min_iasize = levels * stride + granulebits + 1; + s1_max_iasize = s1_min_iasize + (stride - 1) + 4; + + if (iasize >= s1_min_iasize && iasize <= s1_max_iasize) { + return startlevel; + } + + fail: + return INT_MIN; } /** @@ -1300,38 +1349,10 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, */ level = 4 - (inputsize - 4) / stride; } else { - /* - * For stage 2 translations the starting level is specified by the - * VTCR_EL2.SL0 field (whose interpretation depends on the page size) - */ - uint32_t sl0 = extract32(tcr, 6, 2); - uint32_t sl2 = extract64(tcr, 33, 1); - int32_t startlevel; - bool ok; - - /* SL2 is RES0 unless DS=1 & 4kb granule. */ - if (param.ds && stride == 9 && sl2) { - if (sl0 != 0) { - level = 0; - goto do_translation_fault; - } - startlevel = -1; - } else if (!aarch64 || stride == 9) { - /* AArch32 or 4KB pages */ - startlevel = 2 - sl0; - - if (cpu_isar_feature(aa64_st, cpu)) { - startlevel &= 3; - } - } else { - /* 16KB or 64KB pages */ - startlevel = 3 - sl0; - } - - /* Check that the starting level is valid. */ - ok = check_s2_mmu_setup(cpu, aarch64, startlevel, - inputsize, stride, outputsize); - if (!ok) { + int startlevel = check_s2_mmu_setup(cpu, aarch64, tcr, param.ds, + inputsize, stride); + if (startlevel == INT_MIN) { + level = 0; goto do_translation_fault; } level = startlevel; diff --git a/target/arm/tcg/m_helper.c b/target/arm/tcg/m_helper.c index f94e87e728..081fc3f5f7 100644 --- a/target/arm/tcg/m_helper.c +++ b/target/arm/tcg/m_helper.c @@ -56,7 +56,7 @@ static uint32_t v7m_mrs_xpsr(CPUARMState *env, uint32_t reg, unsigned el) return xpsr_read(env) & mask; } -static uint32_t v7m_mrs_control(CPUARMState *env, uint32_t secure) +uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure) { uint32_t value = env->v7m.control[secure]; @@ -93,7 +93,7 @@ uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg) case 0 ... 7: /* xPSR sub-fields */ return v7m_mrs_xpsr(env, reg, 0); case 20: /* CONTROL */ - return v7m_mrs_control(env, 0); + return arm_v7m_mrs_control(env, 0); default: /* Unprivileged reads others as zero. */ return 0; @@ -650,42 +650,6 @@ void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest) arm_rebuild_hflags(env); } -static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode, - bool spsel) -{ - /* - * Return a pointer to the location where we currently store the - * stack pointer for the requested security state and thread mode. - * This pointer will become invalid if the CPU state is updated - * such that the stack pointers are switched around (eg changing - * the SPSEL control bit). - * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode(). 
- * Unlike that pseudocode, we require the caller to pass us in the - * SPSEL control bit value; this is because we also use this - * function in handling of pushing of the callee-saves registers - * part of the v8M stack frame (pseudocode PushCalleeStack()), - * and in the tailchain codepath the SPSEL bit comes from the exception - * return magic LR value from the previous exception. The pseudocode - * opencodes the stack-selection in PushCalleeStack(), but we prefer - * to make this utility function generic enough to do the job. - */ - bool want_psp = threadmode && spsel; - - if (secure == env->v7m.secure) { - if (want_psp == v7m_using_psp(env)) { - return &env->regs[13]; - } else { - return &env->v7m.other_sp; - } - } else { - if (want_psp) { - return &env->v7m.other_ss_psp; - } else { - return &env->v7m.other_ss_msp; - } - } -} - static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure, uint32_t *pvec) { @@ -810,8 +774,8 @@ static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain, !mode; mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv); - frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode, - lr & R_V7M_EXCRET_SPSEL_MASK); + frame_sp_p = arm_v7m_get_sp_ptr(env, M_REG_S, mode, + lr & R_V7M_EXCRET_SPSEL_MASK); want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK); if (want_psp) { limit = env->v7m.psplim[M_REG_S]; @@ -1656,10 +1620,8 @@ static void do_v7m_exception_exit(ARMCPU *cpu) * use 'frame_sp_p' after we do something that makes it invalid. */ bool spsel = env->v7m.control[return_to_secure] & R_V7M_CONTROL_SPSEL_MASK; - uint32_t *frame_sp_p = get_v7m_sp_ptr(env, - return_to_secure, - !return_to_handler, - spsel); + uint32_t *frame_sp_p = arm_v7m_get_sp_ptr(env, return_to_secure, + !return_to_handler, spsel); uint32_t frameptr = *frame_sp_p; bool pop_ok = true; ARMMMUIdx mmu_idx; @@ -1965,7 +1927,7 @@ static bool do_v7m_function_return(ARMCPU *cpu) threadmode = !arm_v7m_is_handler_mode(env); spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK; - frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel); + frame_sp_p = arm_v7m_get_sp_ptr(env, true, threadmode, spsel); frameptr = *frame_sp_p; /* @@ -2465,7 +2427,7 @@ uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg) case 0 ... 7: /* xPSR sub-fields */ return v7m_mrs_xpsr(env, reg, el); case 20: /* CONTROL */ - return v7m_mrs_control(env, env->v7m.secure); + return arm_v7m_mrs_control(env, env->v7m.secure); case 0x94: /* CONTROL_NS */ /* * We have to handle this here because unprivileged Secure code @@ -2900,3 +2862,39 @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op) } #endif /* !CONFIG_USER_ONLY */ + +uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure, bool threadmode, + bool spsel) +{ + /* + * Return a pointer to the location where we currently store the + * stack pointer for the requested security state and thread mode. + * This pointer will become invalid if the CPU state is updated + * such that the stack pointers are switched around (eg changing + * the SPSEL control bit). + * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode(). + * Unlike that pseudocode, we require the caller to pass us in the + * SPSEL control bit value; this is because we also use this + * function in handling of pushing of the callee-saves registers + * part of the v8M stack frame (pseudocode PushCalleeStack()), + * and in the tailchain codepath the SPSEL bit comes from the exception + * return magic LR value from the previous exception. 
The pseudocode + * opencodes the stack-selection in PushCalleeStack(), but we prefer + * to make this utility function generic enough to do the job. + */ + bool want_psp = threadmode && spsel; + + if (secure == env->v7m.secure) { + if (want_psp == v7m_using_psp(env)) { + return &env->regs[13]; + } else { + return &env->v7m.other_sp; + } + } else { + if (want_psp) { + return &env->v7m.other_ss_psp; + } else { + return &env->v7m.other_ss_msp; + } + } +} diff --git a/target/arm/tcg/pauth_helper.c b/target/arm/tcg/pauth_helper.c index d0483bf051..20f347332d 100644 --- a/target/arm/tcg/pauth_helper.c +++ b/target/arm/tcg/pauth_helper.c @@ -339,14 +339,32 @@ static uint64_t pauth_addpac(CPUARMState *env, uint64_t ptr, uint64_t modifier, return pac | ext | ptr; } -static uint64_t pauth_original_ptr(uint64_t ptr, ARMVAParameters param) +static uint64_t pauth_ptr_mask_internal(ARMVAParameters param) { - /* Note that bit 55 is used whether or not the regime has 2 ranges. */ - uint64_t extfield = sextract64(ptr, 55, 1); int bot_pac_bit = 64 - param.tsz; int top_pac_bit = 64 - 8 * param.tbi; - return deposit64(ptr, bot_pac_bit, top_pac_bit - bot_pac_bit, extfield); + return MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit); +} + +static uint64_t pauth_original_ptr(uint64_t ptr, ARMVAParameters param) +{ + uint64_t mask = pauth_ptr_mask_internal(param); + + /* Note that bit 55 is used whether or not the regime has 2 ranges. */ + if (extract64(ptr, 55, 1)) { + return ptr | mask; + } else { + return ptr & ~mask; + } +} + +uint64_t pauth_ptr_mask(CPUARMState *env, uint64_t ptr, bool data) +{ + ARMMMUIdx mmu_idx = arm_stage1_mmu_idx(env); + ARMVAParameters param = aa64_va_parameters(env, ptr, mmu_idx, data); + + return pauth_ptr_mask_internal(param); } static uint64_t pauth_auth(CPUARMState *env, uint64_t ptr, uint64_t modifier, diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index f092aec801..2c2ea45b47 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -224,7 +224,7 @@ static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src) TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr) { - TCGv_i64 clean = new_tmp_a64(s); + TCGv_i64 clean = tcg_temp_new_i64(); #ifdef CONFIG_USER_ONLY gen_top_byte_ignore(s, clean, addr, s->tbid); #else @@ -269,7 +269,7 @@ static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr, desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write); desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << log2_size) - 1); - ret = new_tmp_a64(s); + ret = tcg_temp_new_i64(); gen_helper_mte_check(ret, cpu_env, tcg_constant_i32(desc), addr); return ret; @@ -300,7 +300,7 @@ TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write, desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write); desc = FIELD_DP32(desc, MTEDESC, SIZEM1, size - 1); - ret = new_tmp_a64(s); + ret = tcg_temp_new_i64(); gen_helper_mte_check(ret, cpu_env, tcg_constant_i32(desc), addr); return ret; @@ -319,18 +319,13 @@ static void a64_test_cc(DisasCompare64 *c64, int cc) arm_test_cc(&c32, cc); - /* Sign-extend the 32-bit value so that the GE/LT comparisons work - * properly. The NE/EQ comparisons are also fine with this choice. */ + /* + * Sign-extend the 32-bit value so that the GE/LT comparisons work + * properly. The NE/EQ comparisons are also fine with this choice. 
+ */ c64->cond = c32.cond; c64->value = tcg_temp_new_i64(); tcg_gen_ext_i32_i64(c64->value, c32.value); - - arm_free_cc(&c32); -} - -static void a64_free_cc(DisasCompare64 *c64) -{ - tcg_temp_free_i64(c64->value); } static void gen_rebuild_hflags(DisasContext *s) @@ -413,36 +408,6 @@ static void gen_goto_tb(DisasContext *s, int n, int64_t diff) } } -static void init_tmp_a64_array(DisasContext *s) -{ -#ifdef CONFIG_DEBUG_TCG - memset(s->tmp_a64, 0, sizeof(s->tmp_a64)); -#endif - s->tmp_a64_count = 0; -} - -static void free_tmp_a64(DisasContext *s) -{ - int i; - for (i = 0; i < s->tmp_a64_count; i++) { - tcg_temp_free_i64(s->tmp_a64[i]); - } - init_tmp_a64_array(s); -} - -TCGv_i64 new_tmp_a64(DisasContext *s) -{ - assert(s->tmp_a64_count < TMP_A64_MAX); - return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64(); -} - -TCGv_i64 new_tmp_a64_zero(DisasContext *s) -{ - TCGv_i64 t = new_tmp_a64(s); - tcg_gen_movi_i64(t, 0); - return t; -} - /* * Register access functions * @@ -461,7 +426,9 @@ TCGv_i64 new_tmp_a64_zero(DisasContext *s) TCGv_i64 cpu_reg(DisasContext *s, int reg) { if (reg == 31) { - return new_tmp_a64_zero(s); + TCGv_i64 t = tcg_temp_new_i64(); + tcg_gen_movi_i64(t, 0); + return t; } else { return cpu_X[reg]; } @@ -479,7 +446,7 @@ TCGv_i64 cpu_reg_sp(DisasContext *s, int reg) */ TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf) { - TCGv_i64 v = new_tmp_a64(s); + TCGv_i64 v = tcg_temp_new_i64(); if (reg != 31) { if (sf) { tcg_gen_mov_i64(v, cpu_X[reg]); @@ -494,7 +461,7 @@ TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf) TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf) { - TCGv_i64 v = new_tmp_a64(s); + TCGv_i64 v = tcg_temp_new_i64(); if (sf) { tcg_gen_mov_i64(v, cpu_X[reg]); } else { @@ -575,7 +542,6 @@ static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v) tcg_gen_extu_i32_i64(tmp, v); write_fp_dreg(s, reg, tmp); - tcg_temp_free_i64(tmp); } /* Expand a 2-operand AdvSIMD vector operation using an expander function. */ @@ -644,7 +610,6 @@ static void gen_gvec_op3_fpst(DisasContext *s, bool is_q, int rd, int rn, vec_full_reg_offset(s, rn), vec_full_reg_offset(s, rm), fpst, is_q ? 16 : 8, vec_full_reg_size(s), data, fn); - tcg_temp_free_ptr(fpst); } /* Expand a 3-operand + qc + operation using an out-of-line helper. */ @@ -658,7 +623,6 @@ static void gen_gvec_op3_qc(DisasContext *s, bool is_q, int rd, int rn, vec_full_reg_offset(s, rn), vec_full_reg_offset(s, rm), qc_ptr, is_q ? 16 : 8, vec_full_reg_size(s), 0, fn); - tcg_temp_free_ptr(qc_ptr); } /* Expand a 4-operand operation using an out-of-line helper. */ @@ -686,7 +650,6 @@ static void gen_gvec_op4_fpst(DisasContext *s, bool is_q, int rd, int rn, vec_full_reg_offset(s, rm), vec_full_reg_offset(s, ra), fpst, is_q ? 16 : 8, vec_full_reg_size(s), data, fn); - tcg_temp_free_ptr(fpst); } /* Set ZF and NF based on a 64 bit result. 
This is alas fiddlier @@ -730,12 +693,9 @@ static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) tcg_gen_xor_i64(flag, result, t0); tcg_gen_xor_i64(tmp, t0, t1); tcg_gen_andc_i64(flag, flag, tmp); - tcg_temp_free_i64(tmp); tcg_gen_extrh_i64_i32(cpu_VF, flag); tcg_gen_mov_i64(dest, result); - tcg_temp_free_i64(result); - tcg_temp_free_i64(flag); } else { /* 32 bit arithmetic */ TCGv_i32 t0_32 = tcg_temp_new_i32(); @@ -751,10 +711,6 @@ static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) tcg_gen_xor_i32(tmp, t0_32, t1_32); tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp); tcg_gen_extu_i32_i64(dest, cpu_NF); - - tcg_temp_free_i32(tmp); - tcg_temp_free_i32(t0_32); - tcg_temp_free_i32(t1_32); } } @@ -778,11 +734,8 @@ static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) tmp = tcg_temp_new_i64(); tcg_gen_xor_i64(tmp, t0, t1); tcg_gen_and_i64(flag, flag, tmp); - tcg_temp_free_i64(tmp); tcg_gen_extrh_i64_i32(cpu_VF, flag); tcg_gen_mov_i64(dest, result); - tcg_temp_free_i64(flag); - tcg_temp_free_i64(result); } else { /* 32 bit arithmetic */ TCGv_i32 t0_32 = tcg_temp_new_i32(); @@ -797,10 +750,7 @@ static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32); tmp = tcg_temp_new_i32(); tcg_gen_xor_i32(tmp, t0_32, t1_32); - tcg_temp_free_i32(t0_32); - tcg_temp_free_i32(t1_32); tcg_gen_and_i32(cpu_VF, cpu_VF, tmp); - tcg_temp_free_i32(tmp); tcg_gen_extu_i32_i64(dest, cpu_NF); } } @@ -812,7 +762,6 @@ static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) tcg_gen_extu_i32_i64(flag, cpu_CF); tcg_gen_add_i64(dest, t0, t1); tcg_gen_add_i64(dest, dest, flag); - tcg_temp_free_i64(flag); if (!sf) { tcg_gen_ext32u_i64(dest, dest); @@ -841,11 +790,6 @@ static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) tcg_gen_extrh_i64_i32(cpu_VF, vf_64); tcg_gen_mov_i64(dest, result); - - tcg_temp_free_i64(tmp); - tcg_temp_free_i64(vf_64); - tcg_temp_free_i64(cf_64); - tcg_temp_free_i64(result); } else { TCGv_i32 t0_32 = tcg_temp_new_i32(); TCGv_i32 t1_32 = tcg_temp_new_i32(); @@ -862,10 +806,6 @@ static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) tcg_gen_xor_i32(tmp, t0_32, t1_32); tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp); tcg_gen_extu_i32_i64(dest, cpu_NF); - - tcg_temp_free_i32(tmp); - tcg_temp_free_i32(t1_32); - tcg_temp_free_i32(t0_32); } } @@ -975,12 +915,7 @@ static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size) tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8); tcg_gen_qemu_st_i64(be ? tmplo : tmphi, tcg_hiaddr, get_mem_index(s), mop); - - tcg_temp_free_i64(tcg_hiaddr); - tcg_temp_free_i64(tmphi); } - - tcg_temp_free_i64(tmplo); } /* @@ -1009,15 +944,12 @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size) tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8); tcg_gen_qemu_ld_i64(be ? 
tmplo : tmphi, tcg_hiaddr, get_mem_index(s), mop); - tcg_temp_free_i64(tcg_hiaddr); } tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(s, destidx, MO_64)); - tcg_temp_free_i64(tmplo); if (tmphi) { tcg_gen_st_i64(tmphi, cpu_env, fp_reg_hi_offset(s, destidx)); - tcg_temp_free_i64(tmphi); } clear_vec_high(s, tmphi != NULL, destidx); } @@ -1143,8 +1075,6 @@ static void do_vec_st(DisasContext *s, int srcidx, int element, read_vec_element(s, tcg_tmp, srcidx, element, mop & MO_SIZE); tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop); - - tcg_temp_free_i64(tcg_tmp); } /* Load from memory to vector register */ @@ -1155,8 +1085,6 @@ static void do_vec_ld(DisasContext *s, int destidx, int element, tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop); write_vec_element(s, tcg_tmp, destidx, element, mop & MO_SIZE); - - tcg_temp_free_i64(tcg_tmp); } /* Check that FP/Neon access is enabled. If it is, return @@ -1440,7 +1368,6 @@ static void disas_test_b_imm(DisasContext *s, uint32_t insn) match = gen_disas_label(s); tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ, tcg_cmp, 0, match.label); - tcg_temp_free_i64(tcg_cmp); gen_goto_tb(s, 0, 4); set_disas_label(s, match); gen_goto_tb(s, 1, diff); @@ -1560,7 +1487,7 @@ static void handle_hint(DisasContext *s, uint32_t insn, case 0b11000: /* PACIAZ */ if (s->pauth_active) { gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30], - new_tmp_a64_zero(s)); + tcg_constant_i64(0)); } break; case 0b11001: /* PACIASP */ @@ -1571,7 +1498,7 @@ static void handle_hint(DisasContext *s, uint32_t insn, case 0b11010: /* PACIBZ */ if (s->pauth_active) { gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30], - new_tmp_a64_zero(s)); + tcg_constant_i64(0)); } break; case 0b11011: /* PACIBSP */ @@ -1582,7 +1509,7 @@ static void handle_hint(DisasContext *s, uint32_t insn, case 0b11100: /* AUTIAZ */ if (s->pauth_active) { gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30], - new_tmp_a64_zero(s)); + tcg_constant_i64(0)); } break; case 0b11101: /* AUTIASP */ @@ -1593,7 +1520,7 @@ static void handle_hint(DisasContext *s, uint32_t insn, case 0b11110: /* AUTIBZ */ if (s->pauth_active) { gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30], - new_tmp_a64_zero(s)); + tcg_constant_i64(0)); } break; case 0b11111: /* AUTIBSP */ @@ -1696,8 +1623,6 @@ static void gen_xaflag(void) /* C | Z */ tcg_gen_or_i32(cpu_CF, cpu_CF, z); - - tcg_temp_free_i32(z); } static void gen_axflag(void) @@ -1873,9 +1798,6 @@ static void gen_get_nzcv(TCGv_i64 tcg_rt) tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1); /* generate result */ tcg_gen_extu_i32_i64(tcg_rt, nzcv); - - tcg_temp_free_i32(nzcv); - tcg_temp_free_i32(tmp); } static void gen_set_nzcv(TCGv_i64 tcg_rt) @@ -1896,7 +1818,6 @@ static void gen_set_nzcv(TCGv_i64 tcg_rt) /* bit 28, V */ tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28)); tcg_gen_shli_i32(cpu_VF, cpu_VF, 3); - tcg_temp_free_i32(nzcv); } static void gen_sysreg_undef(DisasContext *s, bool isread, @@ -1982,7 +1903,7 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread, case 0: break; case ARM_CP_NOP: - goto exit; + return; case ARM_CP_NZCV: tcg_rt = cpu_reg(s, rt); if (isread) { @@ -1990,14 +1911,14 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread, } else { gen_set_nzcv(tcg_rt); } - goto exit; + return; case ARM_CP_CURRENTEL: /* Reads as current EL value from pstate, which is * guaranteed to be constant by the tb flags. 
*/ tcg_rt = cpu_reg(s, rt); tcg_gen_movi_i64(tcg_rt, s->current_el << 2); - goto exit; + return; case ARM_CP_DC_ZVA: /* Writes clear the aligned block of memory which rt points into. */ if (s->mte_active[0]) { @@ -2007,14 +1928,14 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread, desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid); desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma); - tcg_rt = new_tmp_a64(s); + tcg_rt = tcg_temp_new_i64(); gen_helper_mte_check_zva(tcg_rt, cpu_env, tcg_constant_i32(desc), cpu_reg(s, rt)); } else { tcg_rt = clean_data_tbi(s, cpu_reg(s, rt)); } gen_helper_dc_zva(cpu_env, tcg_rt); - goto exit; + return; case ARM_CP_DC_GVA: { TCGv_i64 clean_addr, tag; @@ -2032,10 +1953,9 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread, tag = tcg_temp_new_i64(); tcg_gen_shri_i64(tag, tcg_rt, 56); gen_helper_stzgm_tags(cpu_env, clean_addr, tag); - tcg_temp_free_i64(tag); } } - goto exit; + return; case ARM_CP_DC_GZVA: { TCGv_i64 clean_addr, tag; @@ -2050,19 +1970,18 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread, tag = tcg_temp_new_i64(); tcg_gen_shri_i64(tag, tcg_rt, 56); gen_helper_stzgm_tags(cpu_env, clean_addr, tag); - tcg_temp_free_i64(tag); } } - goto exit; + return; default: g_assert_not_reached(); } if ((ri->type & ARM_CP_FPU) && !fp_access_check_only(s)) { - goto exit; + return; } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) { - goto exit; + return; } else if ((ri->type & ARM_CP_SME) && !sme_access_check(s)) { - goto exit; + return; } if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) { @@ -2085,7 +2004,7 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread, } else { if (ri->type & ARM_CP_CONST) { /* If not forbidden by access permissions, treat as WI */ - goto exit; + return; } else if (ri->writefn) { if (!tcg_ri) { tcg_ri = gen_lookup_cp_reg(key); @@ -2113,11 +2032,6 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread, */ s->base.is_jmp = DISAS_UPDATE_EXIT; } - - exit: - if (tcg_ri) { - tcg_temp_free_ptr(tcg_ri); - } } /* System @@ -2313,10 +2227,10 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn) if (op4 != 0x1f) { goto do_unallocated; } - modifier = new_tmp_a64_zero(s); + modifier = tcg_constant_i64(0); } if (s->pauth_active) { - dst = new_tmp_a64(s); + dst = tcg_temp_new_i64(); if (op3 == 2) { gen_helper_autia(dst, cpu_env, cpu_reg(s, rn), modifier); } else { @@ -2334,7 +2248,7 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn) if (opc == 1) { TCGv_i64 lr = cpu_reg(s, 30); if (dst == lr) { - TCGv_i64 tmp = new_tmp_a64(s); + TCGv_i64 tmp = tcg_temp_new_i64(); tcg_gen_mov_i64(tmp, dst); dst = tmp; } @@ -2353,7 +2267,7 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn) } btype_mod = opc & 1; if (s->pauth_active) { - dst = new_tmp_a64(s); + dst = tcg_temp_new_i64(); modifier = cpu_reg_sp(s, op4); if (op3 == 2) { gen_helper_autia(dst, cpu_env, cpu_reg(s, rn), modifier); @@ -2367,7 +2281,7 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn) if (opc == 9) { TCGv_i64 lr = cpu_reg(s, 30); if (dst == lr) { - TCGv_i64 tmp = new_tmp_a64(s); + TCGv_i64 tmp = tcg_temp_new_i64(); tcg_gen_mov_i64(tmp, dst); dst = tmp; } @@ -2428,7 +2342,6 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn) } gen_helper_exception_return(cpu_env, dst); - tcg_temp_free_i64(dst); /* Must exit loop to check un-masked IRQs */ s->base.is_jmp = DISAS_EXIT; return; @@ -2548,7 +2461,6 @@ static void 
gen_load_exclusive(DisasContext *s, int rt, int rt2, TCGv_i64 addr2 = tcg_temp_new_i64(); tcg_gen_addi_i64(addr2, addr, 8); tcg_gen_qemu_ld_i64(cpu_exclusive_high, addr2, idx, memop); - tcg_temp_free_i64(addr2); tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val); tcg_gen_mov_i64(cpu_reg(s, rt2), cpu_exclusive_high); @@ -2613,7 +2525,6 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, tcg_gen_atomic_cmpxchg_i128(t16, cpu_exclusive_addr, c16, t16, get_mem_index(s), MO_128 | MO_ALIGN | s->be_data); - tcg_temp_free_i128(c16); a = tcg_temp_new_i64(); b = tcg_temp_new_i64(); @@ -2626,9 +2537,6 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, tcg_gen_xor_i64(a, a, cpu_exclusive_val); tcg_gen_xor_i64(b, b, cpu_exclusive_high); tcg_gen_or_i64(tmp, a, b); - tcg_temp_free_i64(a); - tcg_temp_free_i64(b); - tcg_temp_free_i128(t16); tcg_gen_setcondi_i64(TCG_COND_NE, tmp, tmp, 0); } @@ -2639,7 +2547,6 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val); } tcg_gen_mov_i64(cpu_reg(s, rd), tmp); - tcg_temp_free_i64(tmp); tcg_gen_br(done_label); gen_set_label(fail_label); @@ -2695,14 +2602,12 @@ static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt, tcg_gen_atomic_cmpxchg_i64(cmp, clean_addr, cmp, val, memidx, MO_64 | MO_ALIGN | s->be_data); - tcg_temp_free_i64(val); if (s->be_data == MO_LE) { tcg_gen_extr32_i64(s1, s2, cmp); } else { tcg_gen_extr32_i64(s2, s1, cmp); } - tcg_temp_free_i64(cmp); } else { TCGv_i128 cmp = tcg_temp_new_i128(); TCGv_i128 val = tcg_temp_new_i128(); @@ -2717,14 +2622,12 @@ static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt, tcg_gen_atomic_cmpxchg_i128(cmp, clean_addr, cmp, val, memidx, MO_128 | MO_ALIGN | s->be_data); - tcg_temp_free_i128(val); if (s->be_data == MO_LE) { tcg_gen_extr_i128_i64(s1, s2, cmp); } else { tcg_gen_extr_i128_i64(s2, s1, cmp); } - tcg_temp_free_i128(cmp); } } @@ -2935,7 +2838,7 @@ static void disas_ld_lit(DisasContext *s, uint32_t insn) tcg_rt = cpu_reg(s, rt); - clean_addr = new_tmp_a64(s); + clean_addr = tcg_temp_new_i64(); gen_pc_plus_diff(s, clean_addr, imm); if (is_vector) { do_fp_ld(s, rt, clean_addr, size); @@ -3108,7 +3011,6 @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn) false, false, 0, false, false); tcg_gen_mov_i64(tcg_rt, tmp); - tcg_temp_free_i64(tmp); } else { do_gpr_st(s, tcg_rt, clean_addr, size, false, 0, false, false); @@ -3578,10 +3480,10 @@ static void disas_ldst_pac(DisasContext *s, uint32_t insn, if (s->pauth_active) { if (use_key_a) { gen_helper_autda(dirty_addr, cpu_env, dirty_addr, - new_tmp_a64_zero(s)); + tcg_constant_i64(0)); } else { gen_helper_autdb(dirty_addr, cpu_env, dirty_addr, - new_tmp_a64_zero(s)); + tcg_constant_i64(0)); } } @@ -4008,7 +3910,6 @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn) tcg_gen_gvec_dup_i64(scale, vec_full_reg_offset(s, rt), (is_q + 1) * 8, vec_full_reg_size(s), tcg_tmp); - tcg_temp_free_i64(tcg_tmp); } else { /* Load/store one element per register */ if (is_load) { @@ -4350,8 +4251,6 @@ static void disas_add_sub_imm(DisasContext *s, uint32_t insn) } else { tcg_gen_ext32u_i64(tcg_rd, tcg_result); } - - tcg_temp_free_i64(tcg_result); } /* @@ -4739,10 +4638,8 @@ static void disas_extract(DisasContext *s, uint32_t insn) TCGv_i32 t1 = tcg_temp_new_i32(); tcg_gen_extrl_i64_i32(t1, tcg_rn); tcg_gen_extract2_i32(t0, t0, t1, imm); - tcg_temp_free_i32(t1); } tcg_gen_extu_i32_i64(tcg_rd, t0); - 
tcg_temp_free_i32(t0); } } } @@ -4811,8 +4708,6 @@ static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf, tcg_gen_extrl_i64_i32(t1, shift_amount); tcg_gen_rotr_i32(t0, t0, t1); tcg_gen_extu_i32_i64(dst, t0); - tcg_temp_free_i32(t0); - tcg_temp_free_i32(t1); } break; default: @@ -5001,8 +4896,6 @@ static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn) } else { tcg_gen_ext32u_i64(tcg_rd, tcg_result); } - - tcg_temp_free_i64(tcg_result); } /* @@ -5065,8 +4958,6 @@ static void disas_add_sub_reg(DisasContext *s, uint32_t insn) } else { tcg_gen_ext32u_i64(tcg_rd, tcg_result); } - - tcg_temp_free_i64(tcg_result); } /* Data-processing (3 source) @@ -5124,8 +5015,6 @@ static void disas_data_proc_3src(DisasContext *s, uint32_t insn) } else { tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm); } - - tcg_temp_free_i64(low_bits); return; } @@ -5161,10 +5050,6 @@ static void disas_data_proc_3src(DisasContext *s, uint32_t insn) if (!sf) { tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd)); } - - tcg_temp_free_i64(tcg_op1); - tcg_temp_free_i64(tcg_op2); - tcg_temp_free_i64(tcg_tmp); } /* Add/subtract (with carry) @@ -5190,7 +5075,7 @@ static void disas_adc_sbc(DisasContext *s, uint32_t insn) tcg_rn = cpu_reg(s, rn); if (op) { - tcg_y = new_tmp_a64(s); + tcg_y = tcg_temp_new_i64(); tcg_gen_not_i64(tcg_y, cpu_reg(s, rm)); } else { tcg_y = cpu_reg(s, rm); @@ -5244,8 +5129,6 @@ static void disas_rotate_right_into_flags(DisasContext *s, uint32_t insn) if (mask & 1) { /* V */ tcg_gen_shli_i32(cpu_VF, nzcv, 31 - 0); } - - tcg_temp_free_i32(nzcv); } /* @@ -5278,7 +5161,6 @@ static void disas_evaluate_into_flags(DisasContext *s, uint32_t insn) tcg_gen_shli_i32(cpu_VF, tmp, shift - 1); tcg_gen_mov_i32(cpu_ZF, cpu_NF); tcg_gen_xor_i32(cpu_VF, cpu_VF, cpu_NF); - tcg_temp_free_i32(tmp); } /* Conditional compare (immediate / register) @@ -5315,11 +5197,10 @@ static void disas_cc(DisasContext *s, uint32_t insn) tcg_t0 = tcg_temp_new_i32(); arm_test_cc(&c, cond); tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0); - arm_free_cc(&c); /* Load the arguments for the new comparison. */ if (is_imm) { - tcg_y = new_tmp_a64(s); + tcg_y = tcg_temp_new_i64(); tcg_gen_movi_i64(tcg_y, y); } else { tcg_y = cpu_reg(s, y); @@ -5333,7 +5214,6 @@ static void disas_cc(DisasContext *s, uint32_t insn) } else { gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y); } - tcg_temp_free_i64(tcg_tmp); /* If COND was false, force the flags to #nzcv. Compute two masks * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0). 
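[Editorial aside, not part of the patch: the comment above describes the T1/T2 trick disas_cc uses to apply the #nzcv immediate without branching. Below is a minimal stand-alone C sketch of that idiom, assuming cond_failed mirrors tcg_t0, i.e. it is 1 exactly when COND did not hold; the helper name and main() driver are illustrative only.]

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t apply_nzcv_bit(uint32_t computed_flag, int want_set,
                               uint32_t cond_failed)
{
    uint32_t t1 = -cond_failed;     /* T1 = COND ? 0 : -1 */
    uint32_t t2 = cond_failed - 1;  /* T2 = COND ? -1 : 0 */

    /*
     * Immediate bit requested set: OR with T1 forces all-ones only when
     * COND failed.  Immediate bit requested clear: AND with T2 forces
     * zero only when COND failed.  Either way the computed flag survives
     * untouched when COND held.
     */
    return want_set ? (computed_flag | t1) : (computed_flag & t2);
}

int main(void)
{
    printf("%08" PRIx32 "\n", apply_nzcv_bit(0x00000000, 1, 1)); /* ffffffff */
    printf("%08" PRIx32 "\n", apply_nzcv_bit(0x80000000, 0, 0)); /* 80000000 */
    return 0;
}

[When COND fails, T1/T2 force the flag to the requested value; when COND holds, both masks are identities and the computed flag passes through, which is why the generated TCG needs no conditional branch.]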
@@ -5381,9 +5261,6 @@ static void disas_cc(DisasContext *s, uint32_t insn) tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2); } } - tcg_temp_free_i32(tcg_t0); - tcg_temp_free_i32(tcg_t1); - tcg_temp_free_i32(tcg_t2); } /* Conditional select @@ -5435,8 +5312,6 @@ static void disas_cond_select(DisasContext *s, uint32_t insn) tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false); } - a64_free_cc(&c); - if (!sf) { tcg_gen_ext32u_i64(tcg_rd, tcg_rd); } @@ -5456,7 +5331,6 @@ static void handle_clz(DisasContext *s, unsigned int sf, tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn); tcg_gen_clzi_i32(tcg_tmp32, tcg_tmp32, 32); tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32); - tcg_temp_free_i32(tcg_tmp32); } } @@ -5474,7 +5348,6 @@ static void handle_cls(DisasContext *s, unsigned int sf, tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn); tcg_gen_clrsb_i32(tcg_tmp32, tcg_tmp32); tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32); - tcg_temp_free_i32(tcg_tmp32); } } @@ -5492,7 +5365,6 @@ static void handle_rbit(DisasContext *s, unsigned int sf, tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn); gen_helper_rbit(tcg_tmp32, tcg_tmp32); tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32); - tcg_temp_free_i32(tcg_tmp32); } } @@ -5538,8 +5410,6 @@ static void handle_rev16(DisasContext *s, unsigned int sf, tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask); tcg_gen_shli_i64(tcg_rd, tcg_rd, 8); tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_tmp); - - tcg_temp_free_i64(tcg_tmp); } /* Data-processing (1 source) @@ -5659,7 +5529,7 @@ static void disas_data_proc_1src(DisasContext *s, uint32_t insn) goto do_unallocated; } else if (s->pauth_active) { tcg_rd = cpu_reg(s, rd); - gen_helper_pacia(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s)); + gen_helper_pacia(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0)); } break; case MAP(1, 0x01, 0x09): /* PACIZB */ @@ -5667,7 +5537,7 @@ static void disas_data_proc_1src(DisasContext *s, uint32_t insn) goto do_unallocated; } else if (s->pauth_active) { tcg_rd = cpu_reg(s, rd); - gen_helper_pacib(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s)); + gen_helper_pacib(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0)); } break; case MAP(1, 0x01, 0x0a): /* PACDZA */ @@ -5675,7 +5545,7 @@ static void disas_data_proc_1src(DisasContext *s, uint32_t insn) goto do_unallocated; } else if (s->pauth_active) { tcg_rd = cpu_reg(s, rd); - gen_helper_pacda(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s)); + gen_helper_pacda(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0)); } break; case MAP(1, 0x01, 0x0b): /* PACDZB */ @@ -5683,7 +5553,7 @@ static void disas_data_proc_1src(DisasContext *s, uint32_t insn) goto do_unallocated; } else if (s->pauth_active) { tcg_rd = cpu_reg(s, rd); - gen_helper_pacdb(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s)); + gen_helper_pacdb(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0)); } break; case MAP(1, 0x01, 0x0c): /* AUTIZA */ @@ -5691,7 +5561,7 @@ static void disas_data_proc_1src(DisasContext *s, uint32_t insn) goto do_unallocated; } else if (s->pauth_active) { tcg_rd = cpu_reg(s, rd); - gen_helper_autia(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s)); + gen_helper_autia(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0)); } break; case MAP(1, 0x01, 0x0d): /* AUTIZB */ @@ -5699,7 +5569,7 @@ static void disas_data_proc_1src(DisasContext *s, uint32_t insn) goto do_unallocated; } else if (s->pauth_active) { tcg_rd = cpu_reg(s, rd); - gen_helper_autib(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s)); + gen_helper_autib(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0)); } break; case MAP(1, 0x01, 0x0e): /* AUTDZA */ @@ -5707,7 +5577,7 @@ static void 
disas_data_proc_1src(DisasContext *s, uint32_t insn) goto do_unallocated; } else if (s->pauth_active) { tcg_rd = cpu_reg(s, rd); - gen_helper_autda(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s)); + gen_helper_autda(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0)); } break; case MAP(1, 0x01, 0x0f): /* AUTDZB */ @@ -5715,7 +5585,7 @@ static void disas_data_proc_1src(DisasContext *s, uint32_t insn) goto do_unallocated; } else if (s->pauth_active) { tcg_rd = cpu_reg(s, rd); - gen_helper_autdb(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s)); + gen_helper_autdb(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0)); } break; case MAP(1, 0x01, 0x10): /* XPACI */ @@ -5750,8 +5620,8 @@ static void handle_div(DisasContext *s, bool is_signed, unsigned int sf, tcg_rd = cpu_reg(s, rd); if (!sf && is_signed) { - tcg_n = new_tmp_a64(s); - tcg_m = new_tmp_a64(s); + tcg_n = tcg_temp_new_i64(); + tcg_m = tcg_temp_new_i64(); tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn)); tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm)); } else { @@ -5781,7 +5651,6 @@ static void handle_shift_reg(DisasContext *s, tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31); shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift); - tcg_temp_free_i64(tcg_shift); } /* CRC32[BHWX], CRC32C[BHWX] */ @@ -5816,7 +5685,7 @@ static void handle_crc32(DisasContext *s, default: g_assert_not_reached(); } - tcg_val = new_tmp_a64(s); + tcg_val = tcg_temp_new_i64(); tcg_gen_andi_i64(tcg_val, cpu_reg(s, rm), mask); } @@ -5898,8 +5767,6 @@ static void disas_data_proc_2src(DisasContext *s, uint32_t insn) tcg_gen_extract_i64(t, cpu_reg_sp(s, rn), 56, 4); tcg_gen_shl_i64(t, tcg_constant_i64(1), t); tcg_gen_or_i64(cpu_reg(s, rd), cpu_reg(s, rm), t); - - tcg_temp_free_i64(t); } break; case 8: /* LSLV */ @@ -6043,8 +5910,6 @@ static void handle_fp_compare(DisasContext *s, int size, } else { gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst); } - tcg_temp_free_i64(tcg_vn); - tcg_temp_free_i64(tcg_vm); } else { TCGv_i32 tcg_vn = tcg_temp_new_i32(); TCGv_i32 tcg_vm = tcg_temp_new_i32(); @@ -6074,16 +5939,9 @@ static void handle_fp_compare(DisasContext *s, int size, default: g_assert_not_reached(); } - - tcg_temp_free_i32(tcg_vn); - tcg_temp_free_i32(tcg_vm); } - tcg_temp_free_ptr(fpst); - gen_set_nzcv(tcg_flags); - - tcg_temp_free_i64(tcg_flags); } /* Floating point compare @@ -6255,13 +6113,10 @@ static void disas_fp_csel(DisasContext *s, uint32_t insn) a64_test_cc(&c, cond); tcg_gen_movcond_i64(c.cond, t_true, c.value, tcg_constant_i64(0), t_true, t_false); - tcg_temp_free_i64(t_false); - a64_free_cc(&c); /* Note that sregs & hregs write back zeros to the high bits, and we've already done the zero-extension. 
*/ write_fp_dreg(s, rd, t_true); - tcg_temp_free_i64(t_true); } /* Floating-point data-processing (1 source) - half precision */ @@ -6298,7 +6153,6 @@ static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn) gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst); gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); - tcg_temp_free_i32(tcg_rmode); break; } case 0xe: /* FRINTX */ @@ -6314,12 +6168,6 @@ static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn) } write_fp_sreg(s, rd, tcg_res); - - if (fpst) { - tcg_temp_free_ptr(fpst); - } - tcg_temp_free_i32(tcg_op); - tcg_temp_free_i32(tcg_res); } /* Floating-point data-processing (1 source) - single precision */ @@ -6387,16 +6235,12 @@ static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn) gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); gen_fpst(tcg_res, tcg_op, fpst); gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); - tcg_temp_free_i32(tcg_rmode); } else { gen_fpst(tcg_res, tcg_op, fpst); } - tcg_temp_free_ptr(fpst); done: write_fp_sreg(s, rd, tcg_res); - tcg_temp_free_i32(tcg_op); - tcg_temp_free_i32(tcg_res); } /* Floating-point data-processing (1 source) - double precision */ @@ -6464,16 +6308,12 @@ static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn) gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); gen_fpst(tcg_res, tcg_op, fpst); gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); - tcg_temp_free_i32(tcg_rmode); } else { gen_fpst(tcg_res, tcg_op, fpst); } - tcg_temp_free_ptr(fpst); done: write_fp_dreg(s, rd, tcg_res); - tcg_temp_free_i64(tcg_op); - tcg_temp_free_i64(tcg_res); } static void handle_fp_fcvt(DisasContext *s, int opcode, @@ -6488,7 +6328,6 @@ static void handle_fp_fcvt(DisasContext *s, int opcode, TCGv_i64 tcg_rd = tcg_temp_new_i64(); gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, cpu_env); write_fp_dreg(s, rd, tcg_rd); - tcg_temp_free_i64(tcg_rd); } else { /* Single to half */ TCGv_i32 tcg_rd = tcg_temp_new_i32(); @@ -6498,11 +6337,7 @@ static void handle_fp_fcvt(DisasContext *s, int opcode, gen_helper_vfp_fcvt_f32_to_f16(tcg_rd, tcg_rn, fpst, ahp); /* write_fp_sreg is OK here because top half of tcg_rd is zero */ write_fp_sreg(s, rd, tcg_rd); - tcg_temp_free_i32(tcg_rd); - tcg_temp_free_i32(ahp); - tcg_temp_free_ptr(fpst); } - tcg_temp_free_i32(tcg_rn); break; } case 0x1: @@ -6518,12 +6353,8 @@ static void handle_fp_fcvt(DisasContext *s, int opcode, /* Double to half */ gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, fpst, ahp); /* write_fp_sreg is OK here because top half of tcg_rd is zero */ - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(ahp); } write_fp_sreg(s, rd, tcg_rd); - tcg_temp_free_i32(tcg_rd); - tcg_temp_free_i64(tcg_rn); break; } case 0x3: @@ -6537,17 +6368,12 @@ static void handle_fp_fcvt(DisasContext *s, int opcode, TCGv_i32 tcg_rd = tcg_temp_new_i32(); gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp); write_fp_sreg(s, rd, tcg_rd); - tcg_temp_free_i32(tcg_rd); } else { /* Half to double */ TCGv_i64 tcg_rd = tcg_temp_new_i64(); gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp); write_fp_dreg(s, rd, tcg_rd); - tcg_temp_free_i64(tcg_rd); } - tcg_temp_free_i32(tcg_rn); - tcg_temp_free_ptr(tcg_fpst); - tcg_temp_free_i32(tcg_ahp); break; } default: @@ -6695,11 +6521,6 @@ static void handle_fp_2src_single(DisasContext *s, int opcode, } write_fp_sreg(s, rd, tcg_res); - - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(tcg_op1); - tcg_temp_free_i32(tcg_op2); - tcg_temp_free_i32(tcg_res); } /* Floating-point 
data-processing (2 source) - double precision */ @@ -6748,11 +6569,6 @@ static void handle_fp_2src_double(DisasContext *s, int opcode, } write_fp_dreg(s, rd, tcg_res); - - tcg_temp_free_ptr(fpst); - tcg_temp_free_i64(tcg_op1); - tcg_temp_free_i64(tcg_op2); - tcg_temp_free_i64(tcg_res); } /* Floating-point data-processing (2 source) - half precision */ @@ -6803,11 +6619,6 @@ static void handle_fp_2src_half(DisasContext *s, int opcode, } write_fp_sreg(s, rd, tcg_res); - - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(tcg_op1); - tcg_temp_free_i32(tcg_op2); - tcg_temp_free_i32(tcg_res); } /* Floating point data-processing (2 source) @@ -6888,12 +6699,6 @@ static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1, gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst); write_fp_sreg(s, rd, tcg_res); - - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(tcg_op1); - tcg_temp_free_i32(tcg_op2); - tcg_temp_free_i32(tcg_op3); - tcg_temp_free_i32(tcg_res); } /* Floating-point data-processing (3 source) - double precision */ @@ -6926,12 +6731,6 @@ static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1, gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst); write_fp_dreg(s, rd, tcg_res); - - tcg_temp_free_ptr(fpst); - tcg_temp_free_i64(tcg_op1); - tcg_temp_free_i64(tcg_op2); - tcg_temp_free_i64(tcg_op3); - tcg_temp_free_i64(tcg_res); } /* Floating-point data-processing (3 source) - half precision */ @@ -6964,12 +6763,6 @@ static void handle_fp_3src_half(DisasContext *s, bool o0, bool o1, gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst); write_fp_sreg(s, rd, tcg_res); - - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(tcg_op1); - tcg_temp_free_i32(tcg_op2); - tcg_temp_free_i32(tcg_op3); - tcg_temp_free_i32(tcg_res); } /* Floating point data-processing (3 source) @@ -7089,7 +6882,7 @@ static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode, if (itof) { TCGv_i64 tcg_int = cpu_reg(s, rn); if (!sf) { - TCGv_i64 tcg_extend = new_tmp_a64(s); + TCGv_i64 tcg_extend = tcg_temp_new_i64(); if (is_signed) { tcg_gen_ext32s_i64(tcg_extend, tcg_int); @@ -7111,7 +6904,6 @@ static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode, tcg_shift, tcg_fpstatus); } write_fp_dreg(s, rd, tcg_double); - tcg_temp_free_i64(tcg_double); break; case 0: /* float32 */ @@ -7124,7 +6916,6 @@ static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode, tcg_shift, tcg_fpstatus); } write_fp_sreg(s, rd, tcg_single); - tcg_temp_free_i32(tcg_single); break; case 3: /* float16 */ @@ -7137,7 +6928,6 @@ static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode, tcg_shift, tcg_fpstatus); } write_fp_sreg(s, rd, tcg_single); - tcg_temp_free_i32(tcg_single); break; default: @@ -7181,7 +6971,6 @@ static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode, if (!sf) { tcg_gen_ext32u_i64(tcg_int, tcg_int); } - tcg_temp_free_i64(tcg_double); break; case 0: /* float32 */ @@ -7204,9 +6993,7 @@ static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode, tcg_shift, tcg_fpstatus); } tcg_gen_extu_i32_i64(tcg_int, tcg_dest); - tcg_temp_free_i32(tcg_dest); } - tcg_temp_free_i32(tcg_single); break; case 3: /* float16 */ @@ -7229,9 +7016,7 @@ static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode, tcg_shift, tcg_fpstatus); } tcg_gen_extu_i32_i64(tcg_int, tcg_dest); - tcg_temp_free_i32(tcg_dest); } - tcg_temp_free_i32(tcg_single); break; default: @@ -7239,10 +7024,7 @@ static void handle_fpfpcvt(DisasContext *s, int 
rd, int rn, int opcode, } gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); - tcg_temp_free_i32(tcg_rmode); } - - tcg_temp_free_ptr(tcg_fpstatus); } /* Floating point <-> fixed point conversions @@ -7319,7 +7101,6 @@ static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof) tmp = tcg_temp_new_i64(); tcg_gen_ext32u_i64(tmp, tcg_rn); write_fp_dreg(s, rd, tmp); - tcg_temp_free_i64(tmp); break; case 1: /* 64 bit */ @@ -7335,7 +7116,6 @@ static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof) tmp = tcg_temp_new_i64(); tcg_gen_ext16u_i64(tmp, tcg_rn); write_fp_dreg(s, rd, tmp); - tcg_temp_free_i64(tmp); break; default: g_assert_not_reached(); @@ -7373,15 +7153,11 @@ static void handle_fjcvtzs(DisasContext *s, int rd, int rn) gen_helper_fjcvtzs(t, t, fpstatus); - tcg_temp_free_ptr(fpstatus); - tcg_gen_ext32u_i64(cpu_reg(s, rd), t); tcg_gen_extrh_i64_i32(cpu_ZF, t); tcg_gen_movi_i32(cpu_CF, 0); tcg_gen_movi_i32(cpu_NF, 0); tcg_gen_movi_i32(cpu_VF, 0); - - tcg_temp_free_i64(t); } /* Floating point <-> integer conversions @@ -7546,8 +7322,6 @@ static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right, tcg_gen_shri_i64(tcg_right, tcg_right, pos); tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos); tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp); - - tcg_temp_free_i64(tcg_tmp); } /* EXT @@ -7612,16 +7386,13 @@ static void disas_simd_ext(DisasContext *s, uint32_t insn) tcg_hh = tcg_temp_new_i64(); read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64); do_ext64(s, tcg_hh, tcg_resh, pos); - tcg_temp_free_i64(tcg_hh); } } write_vec_element(s, tcg_resl, rd, 0, MO_64); - tcg_temp_free_i64(tcg_resl); if (is_q) { write_vec_element(s, tcg_resh, rd, 1, MO_64); } - tcg_temp_free_i64(tcg_resh); clear_vec_high(s, is_q, rd); } @@ -7738,14 +7509,9 @@ static void disas_simd_zip_trn(DisasContext *s, uint32_t insn) } } - tcg_temp_free_i64(tcg_res); - write_vec_element(s, tcg_resl, rd, 0, MO_64); - tcg_temp_free_i64(tcg_resl); - if (is_q) { write_vec_element(s, tcg_resh, rd, 1, MO_64); - tcg_temp_free_i64(tcg_resh); } clear_vec_high(s, is_q, rd); } @@ -7815,9 +7581,6 @@ static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn, default: g_assert_not_reached(); } - - tcg_temp_free_i32(tcg_hi); - tcg_temp_free_i32(tcg_lo); return tcg_res; } } @@ -7945,12 +7708,8 @@ static void disas_simd_across_lanes(DisasContext *s, uint32_t insn) TCGv_i32 tcg_res32 = do_reduction_op(s, fpopcode, rn, esize, (is_q ? 128 : 64), vmap, fpst); tcg_gen_extu_i32_i64(tcg_res, tcg_res32); - tcg_temp_free_i32(tcg_res32); - tcg_temp_free_ptr(fpst); } - tcg_temp_free_i64(tcg_elt); - /* Now truncate the result to the width required for the final output */ if (opcode == 0x03) { /* SADDLV, UADDLV: result is 2*esize */ @@ -7974,7 +7733,6 @@ static void disas_simd_across_lanes(DisasContext *s, uint32_t insn) } write_fp_dreg(s, rd, tcg_res); - tcg_temp_free_i64(tcg_res); } /* DUP (Element, Vector) @@ -8037,7 +7795,6 @@ static void handle_simd_dupes(DisasContext *s, int rd, int rn, tmp = tcg_temp_new_i64(); read_vec_element(s, tmp, rn, index, size); write_fp_dreg(s, rd, tmp); - tcg_temp_free_i64(tmp); } /* DUP (General) @@ -8105,8 +7862,6 @@ static void handle_simd_inse(DisasContext *s, int rd, int rn, read_vec_element(s, tmp, rn, src_index, size); write_vec_element(s, tmp, rd, dst_index, size); - tcg_temp_free_i64(tmp); - /* INS is considered a 128-bit write for SVE. 
*/ clear_vec_high(s, true, rd); } @@ -8417,10 +8172,6 @@ static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn) } write_fp_dreg(s, rd, tcg_res); - - tcg_temp_free_i64(tcg_op1); - tcg_temp_free_i64(tcg_op2); - tcg_temp_free_i64(tcg_res); } else { TCGv_i32 tcg_op1 = tcg_temp_new_i32(); TCGv_i32 tcg_op2 = tcg_temp_new_i32(); @@ -8472,14 +8223,6 @@ static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn) } write_fp_sreg(s, rd, tcg_res); - - tcg_temp_free_i32(tcg_op1); - tcg_temp_free_i32(tcg_op2); - tcg_temp_free_i32(tcg_res); - } - - if (fpst) { - tcg_temp_free_ptr(fpst); } } @@ -8564,10 +8307,6 @@ static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src, } else { tcg_gen_mov_i64(tcg_res, tcg_src); } - - if (extended_result) { - tcg_temp_free_i64(tcg_src_hi); - } } /* SSHR[RA]/USHR[RA] - Scalar shift right (optional rounding/accumulate) */ @@ -8633,9 +8372,6 @@ static void handle_scalar_simd_shri(DisasContext *s, } write_fp_dreg(s, rd, tcg_rd); - - tcg_temp_free_i64(tcg_rn); - tcg_temp_free_i64(tcg_rd); } /* SHL/SLI - Scalar shift left */ @@ -8668,9 +8404,6 @@ static void handle_scalar_simd_shli(DisasContext *s, bool insert, } write_fp_dreg(s, rd, tcg_rd); - - tcg_temp_free_i64(tcg_rn); - tcg_temp_free_i64(tcg_rd); } /* SQSHRN/SQSHRUN - Saturating (signed/unsigned) shift right with @@ -8752,12 +8485,6 @@ static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q, } else { write_vec_element(s, tcg_final, rd, 1, MO_64); } - - tcg_temp_free_i64(tcg_rn); - tcg_temp_free_i64(tcg_rd); - tcg_temp_free_i32(tcg_rd_narrowed); - tcg_temp_free_i64(tcg_final); - clear_vec_high(s, is_q, rd); } @@ -8818,8 +8545,6 @@ static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q, read_vec_element(s, tcg_op, rn, pass, MO_64); genfn(tcg_op, cpu_env, tcg_op, tcg_shift); write_vec_element(s, tcg_op, rd, pass, MO_64); - - tcg_temp_free_i64(tcg_op); } clear_vec_high(s, is_q, rd); } else { @@ -8865,8 +8590,6 @@ static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q, } else { write_vec_element_i32(s, tcg_op, rd, pass, MO_32); } - - tcg_temp_free_i32(tcg_op); } if (!scalar) { @@ -8910,10 +8633,6 @@ static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn, write_vec_element(s, tcg_double, rd, pass, MO_64); } } - - tcg_temp_free_i64(tcg_int64); - tcg_temp_free_i64(tcg_double); - } else { TCGv_i32 tcg_int32 = tcg_temp_new_i32(); TCGv_i32 tcg_float = tcg_temp_new_i32(); @@ -8966,13 +8685,8 @@ static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn, write_vec_element_i32(s, tcg_float, rd, pass, size); } } - - tcg_temp_free_i32(tcg_int32); - tcg_temp_free_i32(tcg_float); } - tcg_temp_free_ptr(tcg_fpst); - clear_vec_high(s, elements << size == 16, rd); } @@ -9076,7 +8790,6 @@ static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar, gen_helper_vfp_tosqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus); } write_vec_element(s, tcg_op, rd, pass, MO_64); - tcg_temp_free_i64(tcg_op); } clear_vec_high(s, is_q, rd); } else { @@ -9112,7 +8825,6 @@ static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar, } else { write_vec_element_i32(s, tcg_op, rd, pass, size); } - tcg_temp_free_i32(tcg_op); } if (!is_scalar) { clear_vec_high(s, is_q, rd); @@ -9120,8 +8832,6 @@ static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar, } gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); - tcg_temp_free_ptr(tcg_fpstatus); - tcg_temp_free_i32(tcg_rmode); } /* AdvSIMD scalar shift by immediate @@ 
-9264,10 +8974,6 @@ static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn) } write_fp_dreg(s, rd, tcg_res); - - tcg_temp_free_i64(tcg_op1); - tcg_temp_free_i64(tcg_op2); - tcg_temp_free_i64(tcg_res); } else { TCGv_i32 tcg_op1 = read_fp_hreg(s, rn); TCGv_i32 tcg_op2 = read_fp_hreg(s, rm); @@ -9288,7 +8994,6 @@ static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn) read_vec_element(s, tcg_op3, rd, 0, MO_32); gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env, tcg_res, tcg_op3); - tcg_temp_free_i64(tcg_op3); break; } default: @@ -9297,10 +9002,6 @@ static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn) tcg_gen_ext32u_i64(tcg_res, tcg_res); write_fp_dreg(s, rd, tcg_res); - - tcg_temp_free_i32(tcg_op1); - tcg_temp_free_i32(tcg_op2); - tcg_temp_free_i64(tcg_res); } } @@ -9475,10 +9176,6 @@ static void handle_3same_float(DisasContext *s, int size, int elements, } write_vec_element(s, tcg_res, rd, pass, MO_64); - - tcg_temp_free_i64(tcg_res); - tcg_temp_free_i64(tcg_op1); - tcg_temp_free_i64(tcg_op2); } else { /* Single */ TCGv_i32 tcg_op1 = tcg_temp_new_i32(); @@ -9560,19 +9257,12 @@ static void handle_3same_float(DisasContext *s, int size, int elements, tcg_gen_extu_i32_i64(tcg_tmp, tcg_res); write_vec_element(s, tcg_tmp, rd, pass, MO_64); - tcg_temp_free_i64(tcg_tmp); } else { write_vec_element_i32(s, tcg_res, rd, pass, MO_32); } - - tcg_temp_free_i32(tcg_res); - tcg_temp_free_i32(tcg_op1); - tcg_temp_free_i32(tcg_op2); } } - tcg_temp_free_ptr(fpst); - clear_vec_high(s, elements * (size ? 8 : 4) > 8, rd); } @@ -9658,8 +9348,6 @@ static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn) TCGv_i64 tcg_rm = read_fp_dreg(s, rm); handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm); - tcg_temp_free_i64(tcg_rn); - tcg_temp_free_i64(tcg_rm); } else { /* Do a single operation on the lowest element in the vector. 
* We use the standard Neon helpers and rely on 0 OP 0 == 0 with @@ -9732,14 +9420,9 @@ static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn) genenvfn(tcg_rd32, cpu_env, tcg_rn, tcg_rm); tcg_gen_extu_i32_i64(tcg_rd, tcg_rd32); - tcg_temp_free_i32(tcg_rd32); - tcg_temp_free_i32(tcg_rn); - tcg_temp_free_i32(tcg_rm); } write_fp_dreg(s, rd, tcg_rd); - - tcg_temp_free_i64(tcg_rd); } /* AdvSIMD scalar three same FP16 @@ -9829,12 +9512,6 @@ static void disas_simd_scalar_three_reg_same_fp16(DisasContext *s, } write_fp_sreg(s, rd, tcg_res); - - - tcg_temp_free_i32(tcg_res); - tcg_temp_free_i32(tcg_op1); - tcg_temp_free_i32(tcg_op2); - tcg_temp_free_ptr(fpst); } /* AdvSIMD scalar three same extra @@ -9909,15 +9586,10 @@ static void disas_simd_scalar_three_reg_same_extra(DisasContext *s, default: g_assert_not_reached(); } - tcg_temp_free_i32(ele1); - tcg_temp_free_i32(ele2); res = tcg_temp_new_i64(); tcg_gen_extu_i32_i64(res, ele3); - tcg_temp_free_i32(ele3); - write_fp_dreg(s, rd, res); - tcg_temp_free_i64(res); } static void handle_2misc_64(DisasContext *s, int opcode, bool u, @@ -10073,8 +9745,6 @@ static void handle_2misc_fcmp_zero(DisasContext *s, int opcode, } write_vec_element(s, tcg_res, rd, pass, MO_64); } - tcg_temp_free_i64(tcg_res); - tcg_temp_free_i64(tcg_op); clear_vec_high(s, !is_scalar, rd); } else { @@ -10147,14 +9817,11 @@ static void handle_2misc_fcmp_zero(DisasContext *s, int opcode, write_vec_element_i32(s, tcg_res, rd, pass, size); } } - tcg_temp_free_i32(tcg_res); - tcg_temp_free_i32(tcg_op); + if (!is_scalar) { clear_vec_high(s, is_q, rd); } } - - tcg_temp_free_ptr(fpst); } static void handle_2misc_reciprocal(DisasContext *s, int opcode, @@ -10186,8 +9853,6 @@ static void handle_2misc_reciprocal(DisasContext *s, int opcode, } write_vec_element(s, tcg_res, rd, pass, MO_64); } - tcg_temp_free_i64(tcg_res); - tcg_temp_free_i64(tcg_op); clear_vec_high(s, !is_scalar, rd); } else { TCGv_i32 tcg_op = tcg_temp_new_i32(); @@ -10226,13 +9891,10 @@ static void handle_2misc_reciprocal(DisasContext *s, int opcode, write_vec_element_i32(s, tcg_res, rd, pass, MO_32); } } - tcg_temp_free_i32(tcg_res); - tcg_temp_free_i32(tcg_op); if (!is_scalar) { clear_vec_high(s, is_q, rd); } } - tcg_temp_free_ptr(fpst); } static void handle_2misc_narrow(DisasContext *s, bool scalar, @@ -10310,17 +9972,12 @@ static void handle_2misc_narrow(DisasContext *s, bool scalar, gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, fpst, ahp); gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, fpst, ahp); tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16); - tcg_temp_free_i32(tcg_lo); - tcg_temp_free_i32(tcg_hi); - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(ahp); } break; case 0x36: /* BFCVTN, BFCVTN2 */ { TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR); gen_helper_bfcvt_pair(tcg_res[pass], tcg_op, fpst); - tcg_temp_free_ptr(fpst); } break; case 0x56: /* FCVTXN, FCVTXN2 */ @@ -10339,13 +9996,10 @@ static void handle_2misc_narrow(DisasContext *s, bool scalar, } else if (genenvfn) { genenvfn(tcg_res[pass], cpu_env, tcg_op); } - - tcg_temp_free_i64(tcg_op); } for (pass = 0; pass < 2; pass++) { write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32); - tcg_temp_free_i32(tcg_res[pass]); } clear_vec_high(s, is_q, rd); } @@ -10372,8 +10026,6 @@ static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u, } write_vec_element(s, tcg_rd, rd, pass, MO_64); } - tcg_temp_free_i64(tcg_rd); - tcg_temp_free_i64(tcg_rn); clear_vec_high(s, !is_scalar, rd); } else { TCGv_i32 tcg_rn = 
tcg_temp_new_i32(); @@ -10430,8 +10082,6 @@ static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u, } write_vec_element_i32(s, tcg_rd, rd, pass, MO_32); } - tcg_temp_free_i32(tcg_rd); - tcg_temp_free_i32(tcg_rn); clear_vec_high(s, is_q, rd); } } @@ -10583,8 +10233,6 @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn) handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus); write_fp_dreg(s, rd, tcg_rd); - tcg_temp_free_i64(tcg_rd); - tcg_temp_free_i64(tcg_rn); } else { TCGv_i32 tcg_rn = tcg_temp_new_i32(); TCGv_i32 tcg_rd = tcg_temp_new_i32(); @@ -10625,14 +10273,10 @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn) } write_fp_sreg(s, rd, tcg_rd); - tcg_temp_free_i32(tcg_rd); - tcg_temp_free_i32(tcg_rn); } if (is_fcvt) { gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); - tcg_temp_free_i32(tcg_rmode); - tcg_temp_free_ptr(tcg_fpstatus); } } @@ -10734,8 +10378,8 @@ static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u, int dsize = 64; int esize = 8 << size; int elements = dsize/esize; - TCGv_i64 tcg_rn = new_tmp_a64(s); - TCGv_i64 tcg_rd = new_tmp_a64(s); + TCGv_i64 tcg_rn = tcg_temp_new_i64(); + TCGv_i64 tcg_rd = tcg_temp_new_i64(); int i; if (size >= 3) { @@ -10809,9 +10453,6 @@ static void handle_vec_simd_shrn(DisasContext *s, bool is_q, } else { write_vec_element(s, tcg_final, rd, 1, MO_64); } - tcg_temp_free_i64(tcg_rn); - tcg_temp_free_i64(tcg_rd); - tcg_temp_free_i64(tcg_final); clear_vec_high(s, is_q, rd); } @@ -10982,8 +10623,6 @@ static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size, tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE, tcg_passres, tcg_op1, tcg_op2, tcg_tmp1, tcg_tmp2); - tcg_temp_free_i64(tcg_tmp1); - tcg_temp_free_i64(tcg_tmp2); break; } case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */ @@ -11014,13 +10653,6 @@ static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size, } else if (accop < 0) { tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres); } - - if (accop != 0) { - tcg_temp_free_i64(tcg_passres); - } - - tcg_temp_free_i64(tcg_op1); - tcg_temp_free_i64(tcg_op2); } } else { /* size 0 or 1, generally helper functions */ @@ -11054,7 +10686,6 @@ static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size, widenfn(tcg_passres, tcg_op1); gen_neon_addl(size, (opcode == 2), tcg_passres, tcg_passres, tcg_op2_64); - tcg_temp_free_i64(tcg_op2_64); break; } case 5: /* SABAL, SABAL2, UABAL, UABAL2 */ @@ -11101,8 +10732,6 @@ static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size, default: g_assert_not_reached(); } - tcg_temp_free_i32(tcg_op1); - tcg_temp_free_i32(tcg_op2); if (accop != 0) { if (opcode == 9 || opcode == 11) { @@ -11117,15 +10746,12 @@ static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size, gen_neon_addl(size, (accop < 0), tcg_res[pass], tcg_res[pass], tcg_passres); } - tcg_temp_free_i64(tcg_passres); } } } write_vec_element(s, tcg_res[0], rd, 0, MO_64); write_vec_element(s, tcg_res[1], rd, 1, MO_64); - tcg_temp_free_i64(tcg_res[0]); - tcg_temp_free_i64(tcg_res[1]); } static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size, @@ -11149,17 +10775,13 @@ static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size, read_vec_element(s, tcg_op1, rn, pass, MO_64); read_vec_element_i32(s, tcg_op2, rm, part + pass, MO_32); widenfn(tcg_op2_wide, tcg_op2); - tcg_temp_free_i32(tcg_op2); tcg_res[pass] = tcg_temp_new_i64(); 
gen_neon_addl(size, (opcode == 3), tcg_res[pass], tcg_op1, tcg_op2_wide); - tcg_temp_free_i64(tcg_op1); - tcg_temp_free_i64(tcg_op2_wide); } for (pass = 0; pass < 2; pass++) { write_vec_element(s, tcg_res[pass], rd, pass, MO_64); - tcg_temp_free_i64(tcg_res[pass]); } } @@ -11194,17 +10816,12 @@ static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size, gen_neon_addl(size, (opcode == 6), tcg_wideres, tcg_op1, tcg_op2); - tcg_temp_free_i64(tcg_op1); - tcg_temp_free_i64(tcg_op2); - tcg_res[pass] = tcg_temp_new_i32(); gennarrow(tcg_res[pass], tcg_wideres); - tcg_temp_free_i64(tcg_wideres); } for (pass = 0; pass < 2; pass++) { write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32); - tcg_temp_free_i32(tcg_res[pass]); } clear_vec_high(s, is_q, rd); } @@ -11431,14 +11048,10 @@ static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode, default: g_assert_not_reached(); } - - tcg_temp_free_i64(tcg_op1); - tcg_temp_free_i64(tcg_op2); } for (pass = 0; pass < 2; pass++) { write_vec_element(s, tcg_res[pass], rd, pass, MO_64); - tcg_temp_free_i64(tcg_res[pass]); } } else { int maxpass = is_q ? 4 : 2; @@ -11510,21 +11123,13 @@ static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode, if (genfn) { genfn(tcg_res[pass], tcg_op1, tcg_op2); } - - tcg_temp_free_i32(tcg_op1); - tcg_temp_free_i32(tcg_op2); } for (pass = 0; pass < maxpass; pass++) { write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32); - tcg_temp_free_i32(tcg_res[pass]); } clear_vec_high(s, is_q, rd); } - - if (fpst) { - tcg_temp_free_ptr(fpst); - } } /* Floating point op subgroup of C3.6.16. */ @@ -11781,10 +11386,6 @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn) handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2); write_vec_element(s, tcg_res, rd, pass, MO_64); - - tcg_temp_free_i64(tcg_res); - tcg_temp_free_i64(tcg_op1); - tcg_temp_free_i64(tcg_op2); } } else { for (pass = 0; pass < (is_q ? 
4 : 2); pass++) { @@ -11869,10 +11470,6 @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn) } write_vec_element_i32(s, tcg_res, rd, pass, MO_32); - - tcg_temp_free_i32(tcg_res); - tcg_temp_free_i32(tcg_op1); - tcg_temp_free_i32(tcg_op2); } } clear_vec_high(s, is_q, rd); @@ -12043,12 +11640,7 @@ static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn) for (pass = 0; pass < maxpass; pass++) { write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_16); - tcg_temp_free_i32(tcg_res[pass]); } - - tcg_temp_free_i32(tcg_op1); - tcg_temp_free_i32(tcg_op2); - } else { for (pass = 0; pass < elements; pass++) { TCGv_i32 tcg_op1 = tcg_temp_new_i32(); @@ -12128,14 +11720,9 @@ static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn) } write_vec_element_i32(s, tcg_res, rd, pass, MO_16); - tcg_temp_free_i32(tcg_res); - tcg_temp_free_i32(tcg_op1); - tcg_temp_free_i32(tcg_op2); } } - tcg_temp_free_ptr(fpst); - clear_vec_high(s, is_q, rd); } @@ -12347,11 +11934,9 @@ static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q, read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32); gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, cpu_env); - tcg_temp_free_i32(tcg_op); } for (pass = 0; pass < 2; pass++) { write_vec_element(s, tcg_res[pass], rd, pass, MO_64); - tcg_temp_free_i64(tcg_res[pass]); } } else { /* 16 -> 32 bit fp conversion */ @@ -12369,11 +11954,7 @@ static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q, } for (pass = 0; pass < 4; pass++) { write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32); - tcg_temp_free_i32(tcg_res[pass]); } - - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(ahp); } } @@ -12417,7 +11998,6 @@ static void handle_rev(DisasContext *s, int opcode, bool u, g_assert_not_reached(); } write_vec_element(s, tcg_tmp, rd, i, grp_size); - tcg_temp_free_i64(tcg_tmp); } clear_vec_high(s, is_q, rd); } else { @@ -12441,10 +12021,6 @@ static void handle_rev(DisasContext *s, int opcode, bool u, } write_vec_element(s, tcg_rd, rd, 0, MO_64); write_vec_element(s, tcg_rd_hi, rd, 1, MO_64); - - tcg_temp_free_i64(tcg_rd_hi); - tcg_temp_free_i64(tcg_rd); - tcg_temp_free_i64(tcg_rn); } } @@ -12478,9 +12054,6 @@ static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u, read_vec_element(s, tcg_op1, rd, pass, MO_64); tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_op1); } - - tcg_temp_free_i64(tcg_op1); - tcg_temp_free_i64(tcg_op2); } } else { for (pass = 0; pass < maxpass; pass++) { @@ -12508,7 +12081,6 @@ static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u, tcg_res[pass], tcg_op); } } - tcg_temp_free_i64(tcg_op); } } if (!is_q) { @@ -12516,7 +12088,6 @@ static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u, } for (pass = 0; pass < 2; pass++) { write_vec_element(s, tcg_res[pass], rd, pass, MO_64); - tcg_temp_free_i64(tcg_res[pass]); } } @@ -12540,13 +12111,10 @@ static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd) tcg_res[pass] = tcg_temp_new_i64(); widenfn(tcg_res[pass], tcg_op); tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << size); - - tcg_temp_free_i32(tcg_op); } for (pass = 0; pass < 2; pass++) { write_vec_element(s, tcg_res[pass], rd, pass, MO_64); - tcg_temp_free_i64(tcg_res[pass]); } } @@ -12901,9 +12469,6 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) tcg_rmode, tcg_fpstatus); write_vec_element(s, tcg_res, rd, pass, MO_64); - - tcg_temp_free_i64(tcg_res); - tcg_temp_free_i64(tcg_op); } } else { int pass; @@ -13026,19 
+12591,12 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) } write_vec_element_i32(s, tcg_res, rd, pass, MO_32); - - tcg_temp_free_i32(tcg_res); - tcg_temp_free_i32(tcg_op); } } clear_vec_high(s, is_q, rd); if (need_rmode) { gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); - tcg_temp_free_i32(tcg_rmode); - } - if (need_fpstatus) { - tcg_temp_free_ptr(tcg_fpstatus); } } @@ -13264,9 +12822,6 @@ static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn) /* limit any sign extension going on */ tcg_gen_andi_i32(tcg_res, tcg_res, 0xffff); write_fp_sreg(s, rd, tcg_res); - - tcg_temp_free_i32(tcg_res); - tcg_temp_free_i32(tcg_op); } else { for (pass = 0; pass < (is_q ? 8 : 4); pass++) { TCGv_i32 tcg_op = tcg_temp_new_i32(); @@ -13320,9 +12875,6 @@ static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn) } write_vec_element_i32(s, tcg_res, rd, pass, MO_16); - - tcg_temp_free_i32(tcg_res); - tcg_temp_free_i32(tcg_op); } clear_vec_high(s, is_q, rd); @@ -13330,11 +12882,6 @@ static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn) if (tcg_rmode) { gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); - tcg_temp_free_i32(tcg_rmode); - } - - if (tcg_fpstatus) { - tcg_temp_free_ptr(tcg_fpstatus); } } @@ -13604,7 +13151,6 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) size == MO_64 ? gen_helper_gvec_fcmlas_idx : gen_helper_gvec_fcmlah_idx); - tcg_temp_free_ptr(fpst); } return; @@ -13709,11 +13255,8 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) } write_vec_element(s, tcg_res, rd, pass, MO_64); - tcg_temp_free_i64(tcg_op); - tcg_temp_free_i64(tcg_res); } - tcg_temp_free_i64(tcg_idx); clear_vec_high(s, !is_scalar, rd); } else if (!is_long) { /* 32 bit floating point, or 16 or 32 bit integer. 
@@ -13887,12 +13430,8 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) } else { write_vec_element_i32(s, tcg_res, rd, pass, MO_32); } - - tcg_temp_free_i32(tcg_op); - tcg_temp_free_i32(tcg_res); } - tcg_temp_free_i32(tcg_idx); clear_vec_high(s, is_q, rd); } else { /* long ops: 16x16->32 or 32x32->64 */ @@ -13933,7 +13472,6 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) } tcg_gen_mul_i64(tcg_passres, tcg_op, tcg_idx); - tcg_temp_free_i64(tcg_op); if (satop) { /* saturating, doubling */ @@ -13966,9 +13504,7 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) default: g_assert_not_reached(); } - tcg_temp_free_i64(tcg_passres); } - tcg_temp_free_i64(tcg_idx); clear_vec_high(s, !is_scalar, rd); } else { @@ -14014,7 +13550,6 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env, tcg_passres, tcg_passres); } - tcg_temp_free_i32(tcg_op); if (opcode == 0xa || opcode == 0xb) { continue; @@ -14043,9 +13578,7 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) default: g_assert_not_reached(); } - tcg_temp_free_i64(tcg_passres); } - tcg_temp_free_i32(tcg_idx); if (is_scalar) { tcg_gen_ext32u_i64(tcg_res[0], tcg_res[0]); @@ -14058,13 +13591,8 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) for (pass = 0; pass < 2; pass++) { write_vec_element(s, tcg_res[pass], rd, pass, MO_64); - tcg_temp_free_i64(tcg_res[pass]); } } - - if (fpst) { - tcg_temp_free_ptr(fpst); - } } /* Crypto AES @@ -14444,12 +13972,6 @@ static void disas_crypto_four_reg(DisasContext *s, uint32_t insn) } write_vec_element(s, tcg_res[0], rd, 0, MO_64); write_vec_element(s, tcg_res[1], rd, 1, MO_64); - - tcg_temp_free_i64(tcg_op1); - tcg_temp_free_i64(tcg_op2); - tcg_temp_free_i64(tcg_op3); - tcg_temp_free_i64(tcg_res[0]); - tcg_temp_free_i64(tcg_res[1]); } else { TCGv_i32 tcg_op1, tcg_op2, tcg_op3, tcg_res, tcg_zero; @@ -14472,11 +13994,6 @@ static void disas_crypto_four_reg(DisasContext *s, uint32_t insn) write_vec_element_i32(s, tcg_zero, rd, 1, MO_32); write_vec_element_i32(s, tcg_zero, rd, 2, MO_32); write_vec_element_i32(s, tcg_res, rd, 3, MO_32); - - tcg_temp_free_i32(tcg_op1); - tcg_temp_free_i32(tcg_op2); - tcg_temp_free_i32(tcg_op3); - tcg_temp_free_i32(tcg_res); } } @@ -14790,8 +14307,6 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, bound = 1; } dc->base.max_insns = MIN(dc->base.max_insns, bound); - - init_tmp_a64_array(dc); } static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu) @@ -14947,9 +14462,6 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) break; } - /* if we allocated any temporaries, free them here */ - free_tmp_a64(s); - /* * After execution of most insns, btype is reset to 0. * Note that we set btype == -1 when the insn sets btype. 
@@ -14957,8 +14469,6 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) if (s->btype > 0 && s->base.is_jmp != DISAS_NORETURN) { reset_btype(s); } - - translator_loop_temp_check(&s->base); } static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) diff --git a/target/arm/tcg/translate-a64.h b/target/arm/tcg/translate-a64.h index ca24c39dbe..0576c4ea12 100644 --- a/target/arm/tcg/translate-a64.h +++ b/target/arm/tcg/translate-a64.h @@ -18,8 +18,6 @@ #ifndef TARGET_ARM_TRANSLATE_A64_H #define TARGET_ARM_TRANSLATE_A64_H -TCGv_i64 new_tmp_a64(DisasContext *s); -TCGv_i64 new_tmp_a64_zero(DisasContext *s); TCGv_i64 cpu_reg(DisasContext *s, int reg); TCGv_i64 cpu_reg_sp(DisasContext *s, int reg); TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf); diff --git a/target/arm/tcg/translate-m-nocp.c b/target/arm/tcg/translate-m-nocp.c index 5df7d46120..9a89aab785 100644 --- a/target/arm/tcg/translate-m-nocp.c +++ b/target/arm/tcg/translate-m-nocp.c @@ -91,7 +91,6 @@ static bool trans_VLLDM_VLSTM(DisasContext *s, arg_VLLDM_VLSTM *a) } else { gen_helper_v7m_vlstm(cpu_env, fptr); } - tcg_temp_free_i32(fptr); clear_eci_state(s); @@ -303,8 +302,6 @@ static void gen_branch_fpInactive(DisasContext *s, TCGCond cond, tcg_gen_andi_i32(fpca, fpca, R_V7M_CONTROL_FPCA_MASK); tcg_gen_or_i32(fpca, fpca, aspen); tcg_gen_brcondi_i32(tcg_invert_cond(cond), fpca, 0, label); - tcg_temp_free_i32(aspen); - tcg_temp_free_i32(fpca); } static bool gen_M_fp_sysreg_write(DisasContext *s, int regno, @@ -328,7 +325,6 @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno, case ARM_VFP_FPSCR: tmp = loadfn(s, opaque, true); gen_helper_vfp_set_fpscr(cpu_env, tmp); - tcg_temp_free_i32(tmp); gen_lookup_tb(s); break; case ARM_VFP_FPSCR_NZCVQC: @@ -351,7 +347,6 @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno, tcg_gen_andi_i32(fpscr, fpscr, ~FPCR_NZCV_MASK); tcg_gen_or_i32(fpscr, fpscr, tmp); store_cpu_field(fpscr, vfp.xregs[ARM_VFP_FPSCR]); - tcg_temp_free_i32(tmp); break; } case ARM_VFP_FPCXT_NS: @@ -400,8 +395,6 @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno, tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK); gen_helper_vfp_set_fpscr(cpu_env, tmp); s->base.is_jmp = DISAS_UPDATE_NOCHAIN; - tcg_temp_free_i32(tmp); - tcg_temp_free_i32(sfpa); break; } case ARM_VFP_VPR: @@ -423,7 +416,6 @@ static bool gen_M_fp_sysreg_write(DisasContext *s, int regno, R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH); store_cpu_field(vpr, v7m.vpr); s->base.is_jmp = DISAS_UPDATE_NOCHAIN; - tcg_temp_free_i32(tmp); break; } default: @@ -491,7 +483,6 @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno, tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK); tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT); tcg_gen_or_i32(tmp, tmp, sfpa); - tcg_temp_free_i32(sfpa); /* * Store result before updating FPSCR etc, in case * it is a memory write which causes an exception. 
@@ -505,7 +496,6 @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno, store_cpu_field(control, v7m.control[M_REG_S]); fpscr = load_cpu_field(v7m.fpdscr[M_REG_NS]); gen_helper_vfp_set_fpscr(cpu_env, fpscr); - tcg_temp_free_i32(fpscr); lookup_tb = true; break; } @@ -546,7 +536,6 @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno, tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK); tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT); tcg_gen_or_i32(tmp, tmp, sfpa); - tcg_temp_free_i32(control); /* Store result before updating FPSCR, in case it faults */ storefn(s, opaque, tmp, true); /* If SFPA is zero then set FPSCR from FPDSCR_NS */ @@ -554,9 +543,6 @@ static bool gen_M_fp_sysreg_read(DisasContext *s, int regno, tcg_gen_movcond_i32(TCG_COND_EQ, fpscr, sfpa, tcg_constant_i32(0), fpdscr, fpscr); gen_helper_vfp_set_fpscr(cpu_env, fpscr); - tcg_temp_free_i32(sfpa); - tcg_temp_free_i32(fpdscr); - tcg_temp_free_i32(fpscr); break; } case ARM_VFP_VPR: @@ -598,7 +584,6 @@ static void fp_sysreg_to_gpr(DisasContext *s, void *opaque, TCGv_i32 value, if (a->rt == 15) { /* Set the 4 flag bits in the CPSR */ gen_set_nzcv(value); - tcg_temp_free_i32(value); } else { store_reg(s, a->rt, value); } @@ -666,7 +651,6 @@ static void fp_sysreg_to_memory(DisasContext *s, void *opaque, TCGv_i32 value, if (do_access) { gen_aa32_st_i32(s, value, addr, get_mem_index(s), MO_UL | MO_ALIGN | s->be_data); - tcg_temp_free_i32(value); } if (a->w) { @@ -675,8 +659,6 @@ static void fp_sysreg_to_memory(DisasContext *s, void *opaque, TCGv_i32 value, tcg_gen_addi_i32(addr, addr, offset); } store_reg(s, a->rn, addr); - } else { - tcg_temp_free_i32(addr); } } @@ -717,8 +699,6 @@ static TCGv_i32 memory_to_fp_sysreg(DisasContext *s, void *opaque, tcg_gen_addi_i32(addr, addr, offset); } store_reg(s, a->rn, addr); - } else { - tcg_temp_free_i32(addr); } return value; } diff --git a/target/arm/tcg/translate-mve.c b/target/arm/tcg/translate-mve.c index db7ea3f603..798b4fddfe 100644 --- a/target/arm/tcg/translate-mve.c +++ b/target/arm/tcg/translate-mve.c @@ -178,7 +178,6 @@ static bool do_ldst(DisasContext *s, arg_VLDR_VSTR *a, MVEGenLdStFn *fn, qreg = mve_qreg_ptr(a->qd); fn(cpu_env, qreg, addr); - tcg_temp_free_ptr(qreg); /* * Writeback always happens after the last beat of the insn, @@ -189,8 +188,6 @@ static bool do_ldst(DisasContext *s, arg_VLDR_VSTR *a, MVEGenLdStFn *fn, tcg_gen_addi_i32(addr, addr, offset); } store_reg(s, a->rn, addr); - } else { - tcg_temp_free_i32(addr); } mve_update_eci(s); return true; @@ -242,9 +239,6 @@ static bool do_ldst_sg(DisasContext *s, arg_vldst_sg *a, MVEGenLdStSGFn fn) qd = mve_qreg_ptr(a->qd); qm = mve_qreg_ptr(a->qm); fn(cpu_env, qd, qm, addr); - tcg_temp_free_ptr(qd); - tcg_temp_free_ptr(qm); - tcg_temp_free_i32(addr); mve_update_eci(s); return true; } @@ -341,8 +335,6 @@ static bool do_ldst_sg_imm(DisasContext *s, arg_vldst_sg_imm *a, qd = mve_qreg_ptr(a->qd); qm = mve_qreg_ptr(a->qm); fn(cpu_env, qd, qm, tcg_constant_i32(offset)); - tcg_temp_free_ptr(qd); - tcg_temp_free_ptr(qm); mve_update_eci(s); return true; } @@ -414,8 +406,6 @@ static bool do_vldst_il(DisasContext *s, arg_vldst_il *a, MVEGenLdStIlFn *fn, if (a->w) { tcg_gen_addi_i32(rn, rn, addrinc); store_reg(s, a->rn, rn); - } else { - tcg_temp_free_i32(rn); } mve_update_and_store_eci(s); return true; @@ -506,9 +496,7 @@ static bool trans_VDUP(DisasContext *s, arg_VDUP *a) qd = mve_qreg_ptr(a->qd); tcg_gen_dup_i32(a->size, rt, rt); gen_helper_mve_vdup(cpu_env, qd, rt); - tcg_temp_free_ptr(qd); } - 
tcg_temp_free_i32(rt); mve_update_eci(s); return true; } @@ -534,8 +522,6 @@ static bool do_1op_vec(DisasContext *s, arg_1op *a, MVEGenOneOpFn fn, qd = mve_qreg_ptr(a->qd); qm = mve_qreg_ptr(a->qm); fn(cpu_env, qd, qm); - tcg_temp_free_ptr(qd); - tcg_temp_free_ptr(qm); } mve_update_eci(s); return true; @@ -631,8 +617,6 @@ static bool do_vcvt_rmode(DisasContext *s, arg_1op *a, qd = mve_qreg_ptr(a->qd); qm = mve_qreg_ptr(a->qm); fn(cpu_env, qd, qm, tcg_constant_i32(arm_rmode_to_sf(rmode))); - tcg_temp_free_ptr(qd); - tcg_temp_free_ptr(qm); mve_update_eci(s); return true; } @@ -821,9 +805,6 @@ static bool do_2op_vec(DisasContext *s, arg_2op *a, MVEGenTwoOpFn fn, qn = mve_qreg_ptr(a->qn); qm = mve_qreg_ptr(a->qm); fn(cpu_env, qd, qn, qm); - tcg_temp_free_ptr(qd); - tcg_temp_free_ptr(qn); - tcg_temp_free_ptr(qm); } mve_update_eci(s); return true; @@ -1076,9 +1057,6 @@ static bool do_2op_scalar(DisasContext *s, arg_2scalar *a, qn = mve_qreg_ptr(a->qn); rm = load_reg(s, a->rm); fn(cpu_env, qd, qn, rm); - tcg_temp_free_i32(rm); - tcg_temp_free_ptr(qd); - tcg_temp_free_ptr(qn); mve_update_eci(s); return true; } @@ -1204,15 +1182,11 @@ static bool do_long_dual_acc(DisasContext *s, arg_vmlaldav *a, rdalo = load_reg(s, a->rdalo); rdahi = load_reg(s, a->rdahi); tcg_gen_concat_i32_i64(rda, rdalo, rdahi); - tcg_temp_free_i32(rdalo); - tcg_temp_free_i32(rdahi); } else { rda = tcg_const_i64(0); } fn(rda, cpu_env, qn, qm, rda); - tcg_temp_free_ptr(qn); - tcg_temp_free_ptr(qm); rdalo = tcg_temp_new_i32(); rdahi = tcg_temp_new_i32(); @@ -1220,7 +1194,6 @@ static bool do_long_dual_acc(DisasContext *s, arg_vmlaldav *a, tcg_gen_extrh_i64_i32(rdahi, rda); store_reg(s, a->rdalo, rdalo); store_reg(s, a->rdahi, rdahi); - tcg_temp_free_i64(rda); mve_update_eci(s); return true; } @@ -1312,8 +1285,6 @@ static bool do_dual_acc(DisasContext *s, arg_vmladav *a, MVEGenDualAccOpFn *fn) fn(rda, cpu_env, qn, qm, rda); store_reg(s, a->rda, rda); - tcg_temp_free_ptr(qn); - tcg_temp_free_ptr(qm); mve_update_eci(s); return true; @@ -1451,7 +1422,6 @@ static bool trans_VADDV(DisasContext *s, arg_VADDV *a) qm = mve_qreg_ptr(a->qm); fns[a->size][a->u](rda, cpu_env, qm, rda); store_reg(s, a->rda, rda); - tcg_temp_free_ptr(qm); mve_update_eci(s); return true; @@ -1494,8 +1464,6 @@ static bool trans_VADDLV(DisasContext *s, arg_VADDLV *a) rdalo = load_reg(s, a->rdalo); rdahi = load_reg(s, a->rdahi); tcg_gen_concat_i32_i64(rda, rdalo, rdahi); - tcg_temp_free_i32(rdalo); - tcg_temp_free_i32(rdahi); } else { /* Accumulate starting at zero */ rda = tcg_const_i64(0); @@ -1507,7 +1475,6 @@ static bool trans_VADDLV(DisasContext *s, arg_VADDLV *a) } else { gen_helper_mve_vaddlv_s(rda, cpu_env, qm, rda); } - tcg_temp_free_ptr(qm); rdalo = tcg_temp_new_i32(); rdahi = tcg_temp_new_i32(); @@ -1515,7 +1482,6 @@ static bool trans_VADDLV(DisasContext *s, arg_VADDLV *a) tcg_gen_extrh_i64_i32(rdahi, rda); store_reg(s, a->rdalo, rdalo); store_reg(s, a->rdahi, rdahi); - tcg_temp_free_i64(rda); mve_update_eci(s); return true; } @@ -1543,7 +1509,6 @@ static bool do_1imm(DisasContext *s, arg_1imm *a, MVEGenOneOpImmFn *fn, } else { qd = mve_qreg_ptr(a->qd); fn(cpu_env, qd, tcg_constant_i64(imm)); - tcg_temp_free_ptr(qd); } mve_update_eci(s); return true; @@ -1616,8 +1581,6 @@ static bool do_2shift_vec(DisasContext *s, arg_2shift *a, MVEGenTwoOpShiftFn fn, qd = mve_qreg_ptr(a->qd); qm = mve_qreg_ptr(a->qm); fn(cpu_env, qd, qm, tcg_constant_i32(shift)); - tcg_temp_free_ptr(qd); - tcg_temp_free_ptr(qm); } mve_update_eci(s); return true; @@ -1723,8 +1686,6 @@ 
static bool do_2shift_scalar(DisasContext *s, arg_shl_scalar *a, qda = mve_qreg_ptr(a->qda); rm = load_reg(s, a->rm); fn(cpu_env, qda, qda, rm); - tcg_temp_free_ptr(qda); - tcg_temp_free_i32(rm); mve_update_eci(s); return true; } @@ -1868,7 +1829,6 @@ static bool trans_VSHLC(DisasContext *s, arg_VSHLC *a) rdm = load_reg(s, a->rdm); gen_helper_mve_vshlc(rdm, cpu_env, qd, rdm, tcg_constant_i32(a->imm)); store_reg(s, a->rdm, rdm); - tcg_temp_free_ptr(qd); mve_update_eci(s); return true; } @@ -1898,7 +1858,6 @@ static bool do_vidup(DisasContext *s, arg_vidup *a, MVEGenVIDUPFn *fn) rn = load_reg(s, a->rn); fn(rn, cpu_env, qd, rn, tcg_constant_i32(a->imm)); store_reg(s, a->rn, rn); - tcg_temp_free_ptr(qd); mve_update_eci(s); return true; } @@ -1934,8 +1893,6 @@ static bool do_viwdup(DisasContext *s, arg_viwdup *a, MVEGenVIWDUPFn *fn) rm = load_reg(s, a->rm); fn(rn, cpu_env, qd, rn, rm, tcg_constant_i32(a->imm)); store_reg(s, a->rn, rn); - tcg_temp_free_ptr(qd); - tcg_temp_free_i32(rm); mve_update_eci(s); return true; } @@ -2001,8 +1958,6 @@ static bool do_vcmp(DisasContext *s, arg_vcmp *a, MVEGenCmpFn *fn) qn = mve_qreg_ptr(a->qn); qm = mve_qreg_ptr(a->qm); fn(cpu_env, qn, qm); - tcg_temp_free_ptr(qn); - tcg_temp_free_ptr(qm); if (a->mask) { /* VPT */ gen_vpst(s, a->mask); @@ -2034,8 +1989,6 @@ static bool do_vcmp_scalar(DisasContext *s, arg_vcmp_scalar *a, rm = load_reg(s, a->rm); } fn(cpu_env, qn, rm); - tcg_temp_free_ptr(qn); - tcg_temp_free_i32(rm); if (a->mask) { /* VPT */ gen_vpst(s, a->mask); @@ -2138,7 +2091,6 @@ static bool do_vmaxv(DisasContext *s, arg_vmaxv *a, MVEGenVADDVFn fn) rda = load_reg(s, a->rda); fn(rda, cpu_env, qm, rda); store_reg(s, a->rda, rda); - tcg_temp_free_ptr(qm); mve_update_eci(s); return true; } @@ -2203,8 +2155,6 @@ static bool do_vabav(DisasContext *s, arg_vabav *a, MVEGenVABAVFn *fn) rda = load_reg(s, a->rda); fn(rda, cpu_env, qn, qm, rda); store_reg(s, a->rda, rda); - tcg_temp_free_ptr(qm); - tcg_temp_free_ptr(qn); mve_update_eci(s); return true; } @@ -2297,12 +2247,10 @@ static bool trans_VMOV_from_2gp(DisasContext *s, arg_VMOV_to_2gp *a) if (!mve_skip_vmov(s, vd, a->idx, MO_32)) { tmp = load_reg(s, a->rt); write_neon_element32(tmp, vd, a->idx, MO_32); - tcg_temp_free_i32(tmp); } if (!mve_skip_vmov(s, vd + 1, a->idx, MO_32)) { tmp = load_reg(s, a->rt2); write_neon_element32(tmp, vd + 1, a->idx, MO_32); - tcg_temp_free_i32(tmp); } mve_update_and_store_eci(s); diff --git a/target/arm/tcg/translate-neon.c b/target/arm/tcg/translate-neon.c index 4016339d46..af8685a4ac 100644 --- a/target/arm/tcg/translate-neon.c +++ b/target/arm/tcg/translate-neon.c @@ -182,7 +182,6 @@ static bool do_neon_ddda_fpst(DisasContext *s, int q, int vd, int vn, int vm, vfp_reg_offset(1, vm), vfp_reg_offset(1, vd), fpst, opr_sz, opr_sz, data, fn_gvec_ptr); - tcg_temp_free_ptr(fpst); return true; } @@ -236,7 +235,6 @@ static bool trans_VCADD(DisasContext *s, arg_VCADD *a) vfp_reg_offset(1, a->vm), fpst, opr_sz, opr_sz, a->rot, fn_gvec_ptr); - tcg_temp_free_ptr(fpst); return true; } @@ -433,7 +431,6 @@ static void gen_neon_ldst_base_update(DisasContext *s, int rm, int rn, TCGv_i32 index; index = load_reg(s, rm); tcg_gen_add_i32(base, base, index); - tcg_temp_free_i32(index); } store_reg(s, rn, base); } @@ -536,8 +533,6 @@ static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a) } } } - tcg_temp_free_i32(addr); - tcg_temp_free_i64(tmp64); gen_neon_ldst_base_update(s, a->rm, a->rn, nregs * interleave * 8); return true; @@ -630,8 +625,6 @@ static bool 
trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a) /* Subsequent memory operations inherit alignment */ mop &= ~MO_AMASK; } - tcg_temp_free_i32(tmp); - tcg_temp_free_i32(addr); gen_neon_ldst_base_update(s, a->rm, a->rn, (1 << size) * nregs); @@ -751,8 +744,6 @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a) /* Subsequent memory operations inherit alignment */ mop &= ~MO_AMASK; } - tcg_temp_free_i32(addr); - tcg_temp_free_i32(tmp); gen_neon_ldst_base_update(s, a->rm, a->rn, (1 << a->size) * nregs); @@ -1061,9 +1052,6 @@ static bool do_3same_pair(DisasContext *s, arg_3same *a, NeonGenTwoOpFn *fn) write_neon_element32(tmp, a->vd, 0, MO_32); write_neon_element32(tmp3, a->vd, 1, MO_32); - tcg_temp_free_i32(tmp); - tcg_temp_free_i32(tmp2); - tcg_temp_free_i32(tmp3); return true; } @@ -1126,7 +1114,6 @@ DO_3SAME_VQDMULH(VQRDMULH, qrdmulh) TCGv_ptr fpst = fpstatus_ptr(FPST); \ tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, fpst, \ oprsz, maxsz, 0, FUNC); \ - tcg_temp_free_ptr(fpst); \ } #define DO_3S_FP_GVEC(INSN,SFUNC,HFUNC) \ @@ -1225,7 +1212,6 @@ static bool do_3same_fp_pair(DisasContext *s, arg_3same *a, vfp_reg_offset(1, a->vn), vfp_reg_offset(1, a->vm), fpstatus, 8, 8, 0, fn); - tcg_temp_free_ptr(fpstatus); return true; } @@ -1358,7 +1344,6 @@ static bool do_2shift_env_64(DisasContext *s, arg_2reg_shift *a, read_neon_element64(tmp, a->vm, pass, MO_64); fn(tmp, cpu_env, tmp, constimm); write_neon_element64(tmp, a->vd, pass, MO_64); - tcg_temp_free_i64(tmp); } return true; } @@ -1403,7 +1388,6 @@ static bool do_2shift_env_32(DisasContext *s, arg_2reg_shift *a, fn(tmp, cpu_env, tmp, constimm); write_neon_element32(tmp, a->vd, pass, MO_32); } - tcg_temp_free_i32(tmp); return true; } @@ -1474,10 +1458,6 @@ static bool do_2shift_narrow_64(DisasContext *s, arg_2reg_shift *a, narrowfn(rd, cpu_env, rm2); write_neon_element32(rd, a->vd, 1, MO_32); - tcg_temp_free_i32(rd); - tcg_temp_free_i64(rm1); - tcg_temp_free_i64(rm2); - return true; } @@ -1537,22 +1517,17 @@ static bool do_2shift_narrow_32(DisasContext *s, arg_2reg_shift *a, shiftfn(rm2, rm2, constimm); tcg_gen_concat_i32_i64(rtmp, rm1, rm2); - tcg_temp_free_i32(rm2); narrowfn(rm1, cpu_env, rtmp); write_neon_element32(rm1, a->vd, 0, MO_32); - tcg_temp_free_i32(rm1); shiftfn(rm3, rm3, constimm); shiftfn(rm4, rm4, constimm); tcg_gen_concat_i32_i64(rtmp, rm3, rm4); - tcg_temp_free_i32(rm4); narrowfn(rm3, cpu_env, rtmp); - tcg_temp_free_i64(rtmp); write_neon_element32(rm3, a->vd, 1, MO_32); - tcg_temp_free_i32(rm3); return true; } @@ -1660,7 +1635,6 @@ static bool do_vshll_2sh(DisasContext *s, arg_2reg_shift *a, tmp = tcg_temp_new_i64(); widenfn(tmp, rm0); - tcg_temp_free_i32(rm0); if (a->shift != 0) { tcg_gen_shli_i64(tmp, tmp, a->shift); tcg_gen_andi_i64(tmp, tmp, ~widen_mask); @@ -1668,13 +1642,11 @@ static bool do_vshll_2sh(DisasContext *s, arg_2reg_shift *a, write_neon_element64(tmp, a->vd, 0, MO_64); widenfn(tmp, rm1); - tcg_temp_free_i32(rm1); if (a->shift != 0) { tcg_gen_shli_i64(tmp, tmp, a->shift); tcg_gen_andi_i64(tmp, tmp, ~widen_mask); } write_neon_element64(tmp, a->vd, 1, MO_64); - tcg_temp_free_i64(tmp); return true; } @@ -1733,7 +1705,6 @@ static bool do_fp_2sh(DisasContext *s, arg_2reg_shift *a, fpst = fpstatus_ptr(a->size == MO_16 ? 
FPST_STD_F16 : FPST_STD); tcg_gen_gvec_2_ptr(rd_ofs, rm_ofs, fpst, vec_size, vec_size, a->shift, fn); - tcg_temp_free_ptr(fpst); return true; } @@ -1849,7 +1820,6 @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a, TCGv_i32 tmp = tcg_temp_new_i32(); read_neon_element32(tmp, a->vn, 0, MO_32); widenfn(rn0_64, tmp); - tcg_temp_free_i32(tmp); } if (src2_mop >= 0) { read_neon_element64(rm_64, a->vm, 0, src2_mop); @@ -1857,7 +1827,6 @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a, TCGv_i32 tmp = tcg_temp_new_i32(); read_neon_element32(tmp, a->vm, 0, MO_32); widenfn(rm_64, tmp); - tcg_temp_free_i32(tmp); } opfn(rn0_64, rn0_64, rm_64); @@ -1872,7 +1841,6 @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a, TCGv_i32 tmp = tcg_temp_new_i32(); read_neon_element32(tmp, a->vn, 1, MO_32); widenfn(rn1_64, tmp); - tcg_temp_free_i32(tmp); } if (src2_mop >= 0) { read_neon_element64(rm_64, a->vm, 1, src2_mop); @@ -1880,7 +1848,6 @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a, TCGv_i32 tmp = tcg_temp_new_i32(); read_neon_element32(tmp, a->vm, 1, MO_32); widenfn(rm_64, tmp); - tcg_temp_free_i32(tmp); } write_neon_element64(rn0_64, a->vd, 0, MO_64); @@ -1888,10 +1855,6 @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a, opfn(rn1_64, rn1_64, rm_64); write_neon_element64(rn1_64, a->vd, 1, MO_64); - tcg_temp_free_i64(rn0_64); - tcg_temp_free_i64(rn1_64); - tcg_temp_free_i64(rm_64); - return true; } @@ -1976,11 +1939,6 @@ static bool do_narrow_3d(DisasContext *s, arg_3diff *a, write_neon_element32(rd0, a->vd, 0, MO_32); write_neon_element32(rd1, a->vd, 1, MO_32); - tcg_temp_free_i32(rd0); - tcg_temp_free_i32(rd1); - tcg_temp_free_i64(rn_64); - tcg_temp_free_i64(rm_64); - return true; } @@ -2061,8 +2019,6 @@ static bool do_long_3d(DisasContext *s, arg_3diff *a, read_neon_element32(rn, a->vn, 1, MO_32); read_neon_element32(rm, a->vm, 1, MO_32); opfn(rd1, rn, rm); - tcg_temp_free_i32(rn); - tcg_temp_free_i32(rm); /* Don't store results until after all loads: they might overlap */ if (accfn) { @@ -2071,13 +2027,10 @@ static bool do_long_3d(DisasContext *s, arg_3diff *a, accfn(rd0, tmp, rd0); read_neon_element64(tmp, a->vd, 1, MO_64); accfn(rd1, tmp, rd1); - tcg_temp_free_i64(tmp); } write_neon_element64(rd0, a->vd, 0, MO_64); write_neon_element64(rd1, a->vd, 1, MO_64); - tcg_temp_free_i64(rd0); - tcg_temp_free_i64(rd1); return true; } @@ -2149,9 +2102,6 @@ static void gen_mull_s32(TCGv_i64 rd, TCGv_i32 rn, TCGv_i32 rm) tcg_gen_muls2_i32(lo, hi, rn, rm); tcg_gen_concat_i32_i64(rd, lo, hi); - - tcg_temp_free_i32(lo); - tcg_temp_free_i32(hi); } static void gen_mull_u32(TCGv_i64 rd, TCGv_i32 rn, TCGv_i32 rm) @@ -2161,9 +2111,6 @@ static void gen_mull_u32(TCGv_i64 rd, TCGv_i32 rn, TCGv_i32 rm) tcg_gen_mulu2_i32(lo, hi, rn, rm); tcg_gen_concat_i32_i64(rd, lo, hi); - - tcg_temp_free_i32(lo); - tcg_temp_free_i32(hi); } static bool trans_VMULL_S_3d(DisasContext *s, arg_3diff *a) @@ -2344,7 +2291,6 @@ static void gen_neon_dup_low16(TCGv_i32 var) tcg_gen_ext16u_i32(var, var); tcg_gen_shli_i32(tmp, var, 16); tcg_gen_or_i32(var, var, tmp); - tcg_temp_free_i32(tmp); } static void gen_neon_dup_high16(TCGv_i32 var) @@ -2353,7 +2299,6 @@ static void gen_neon_dup_high16(TCGv_i32 var) tcg_gen_andi_i32(var, var, 0xffff0000); tcg_gen_shri_i32(tmp, var, 16); tcg_gen_or_i32(var, var, tmp); - tcg_temp_free_i32(tmp); } static inline TCGv_i32 neon_get_scalar(int size, int reg) @@ -2417,12 +2362,9 @@ static bool do_2scalar(DisasContext *s, arg_2scalar *a, TCGv_i32 rd = tcg_temp_new_i32(); 
read_neon_element32(rd, a->vd, pass, MO_32); accfn(tmp, rd, tmp); - tcg_temp_free_i32(rd); } write_neon_element32(tmp, a->vd, pass, MO_32); } - tcg_temp_free_i32(tmp); - tcg_temp_free_i32(scalar); return true; } @@ -2516,7 +2458,6 @@ static bool do_2scalar_fp_vec(DisasContext *s, arg_2scalar *a, fpstatus = fpstatus_ptr(a->size == 1 ? FPST_STD_F16 : FPST_STD); tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, fpstatus, vec_size, vec_size, idx, fn); - tcg_temp_free_ptr(fpstatus); return true; } @@ -2616,10 +2557,6 @@ static bool do_vqrdmlah_2sc(DisasContext *s, arg_2scalar *a, opfn(rd, cpu_env, rn, scalar, rd); write_neon_element32(rd, a->vd, pass, MO_32); } - tcg_temp_free_i32(rn); - tcg_temp_free_i32(rd); - tcg_temp_free_i32(scalar); - return true; } @@ -2692,8 +2629,6 @@ static bool do_2scalar_long(DisasContext *s, arg_2scalar *a, read_neon_element32(rn, a->vn, 1, MO_32); rn1_64 = tcg_temp_new_i64(); opfn(rn1_64, rn, scalar); - tcg_temp_free_i32(rn); - tcg_temp_free_i32(scalar); if (accfn) { TCGv_i64 t64 = tcg_temp_new_i64(); @@ -2701,13 +2636,10 @@ static bool do_2scalar_long(DisasContext *s, arg_2scalar *a, accfn(rn0_64, t64, rn0_64); read_neon_element64(t64, a->vd, 1, MO_64); accfn(rn1_64, t64, rn1_64); - tcg_temp_free_i64(t64); } write_neon_element64(rn0_64, a->vd, 0, MO_64); write_neon_element64(rn1_64, a->vd, 1, MO_64); - tcg_temp_free_i64(rn0_64); - tcg_temp_free_i64(rn1_64); return true; } @@ -2842,10 +2774,6 @@ static bool trans_VEXT(DisasContext *s, arg_VEXT *a) read_neon_element64(left, a->vm, 0, MO_64); tcg_gen_extract2_i64(dest, right, left, a->imm * 8); write_neon_element64(dest, a->vd, 0, MO_64); - - tcg_temp_free_i64(left); - tcg_temp_free_i64(right); - tcg_temp_free_i64(dest); } else { /* Extract 128 bits from <Vm+1:Vm:Vn+1:Vn> */ TCGv_i64 left, middle, right, destleft, destright; @@ -2872,12 +2800,6 @@ static bool trans_VEXT(DisasContext *s, arg_VEXT *a) write_neon_element64(destright, a->vd, 0, MO_64); write_neon_element64(destleft, a->vd, 1, MO_64); - - tcg_temp_free_i64(destright); - tcg_temp_free_i64(destleft); - tcg_temp_free_i64(right); - tcg_temp_free_i64(middle); - tcg_temp_free_i64(left); } return true; } @@ -2921,9 +2843,6 @@ static bool trans_VTBL(DisasContext *s, arg_VTBL *a) gen_helper_neon_tbl(val, cpu_env, desc, val, def); write_neon_element64(val, a->vd, 0, MO_64); - - tcg_temp_free_i64(def); - tcg_temp_free_i64(val); return true; } @@ -3002,9 +2921,6 @@ static bool trans_VREV64(DisasContext *s, arg_VREV64 *a) write_neon_element32(tmp[1], a->vd, pass * 2, MO_32); write_neon_element32(tmp[0], a->vd, pass * 2 + 1, MO_32); } - - tcg_temp_free_i32(tmp[0]); - tcg_temp_free_i32(tmp[1]); return true; } @@ -3055,20 +2971,15 @@ static bool do_2misc_pairwise(DisasContext *s, arg_2misc *a, widenfn(rm0_64, tmp); read_neon_element32(tmp, a->vm, pass * 2 + 1, MO_32); widenfn(rm1_64, tmp); - tcg_temp_free_i32(tmp); opfn(rd_64, rm0_64, rm1_64); - tcg_temp_free_i64(rm0_64); - tcg_temp_free_i64(rm1_64); if (accfn) { TCGv_i64 tmp64 = tcg_temp_new_i64(); read_neon_element64(tmp64, a->vd, pass, MO_64); accfn(rd_64, tmp64, rd_64); - tcg_temp_free_i64(tmp64); } write_neon_element64(rd_64, a->vd, pass, MO_64); - tcg_temp_free_i64(rd_64); } return true; } @@ -3192,8 +3103,6 @@ static bool do_zip_uzp(DisasContext *s, arg_2misc *a, pd = vfp_reg_ptr(true, a->vd); pm = vfp_reg_ptr(true, a->vm); fn(pd, pm); - tcg_temp_free_ptr(pd); - tcg_temp_free_ptr(pm); return true; } @@ -3271,9 +3180,6 @@ static bool do_vmovn(DisasContext *s, arg_2misc *a, narrowfn(rd1, cpu_env, rm); 
write_neon_element32(rd0, a->vd, 0, MO_32); write_neon_element32(rd1, a->vd, 1, MO_32); - tcg_temp_free_i32(rd0); - tcg_temp_free_i32(rd1); - tcg_temp_free_i64(rm); return true; } @@ -3341,10 +3247,6 @@ static bool trans_VSHLL(DisasContext *s, arg_2misc *a) widenfn(rd, rm1); tcg_gen_shli_i64(rd, rd, 8 << a->size); write_neon_element64(rd, a->vd, 1, MO_64); - - tcg_temp_free_i64(rd); - tcg_temp_free_i32(rm0); - tcg_temp_free_i32(rm1); return true; } @@ -3385,11 +3287,6 @@ static bool trans_VCVT_B16_F32(DisasContext *s, arg_2misc *a) write_neon_element32(dst0, a->vd, 0, MO_32); write_neon_element32(dst1, a->vd, 1, MO_32); - - tcg_temp_free_i64(tmp); - tcg_temp_free_i32(dst0); - tcg_temp_free_i32(dst1); - tcg_temp_free_ptr(fpst); return true; } @@ -3432,16 +3329,10 @@ static bool trans_VCVT_F16_F32(DisasContext *s, arg_2misc *a) tmp3 = tcg_temp_new_i32(); read_neon_element32(tmp3, a->vm, 3, MO_32); write_neon_element32(tmp2, a->vd, 0, MO_32); - tcg_temp_free_i32(tmp2); gen_helper_vfp_fcvt_f32_to_f16(tmp3, tmp3, fpst, ahp); tcg_gen_shli_i32(tmp3, tmp3, 16); tcg_gen_or_i32(tmp3, tmp3, tmp); write_neon_element32(tmp3, a->vd, 1, MO_32); - tcg_temp_free_i32(tmp3); - tcg_temp_free_i32(tmp); - tcg_temp_free_i32(ahp); - tcg_temp_free_ptr(fpst); - return true; } @@ -3482,18 +3373,12 @@ static bool trans_VCVT_F32_F16(DisasContext *s, arg_2misc *a) tcg_gen_shri_i32(tmp, tmp, 16); gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp); write_neon_element32(tmp, a->vd, 1, MO_32); - tcg_temp_free_i32(tmp); tcg_gen_ext16u_i32(tmp3, tmp2); gen_helper_vfp_fcvt_f16_to_f32(tmp3, tmp3, fpst, ahp); write_neon_element32(tmp3, a->vd, 2, MO_32); - tcg_temp_free_i32(tmp3); tcg_gen_shri_i32(tmp2, tmp2, 16); gen_helper_vfp_fcvt_f16_to_f32(tmp2, tmp2, fpst, ahp); write_neon_element32(tmp2, a->vd, 3, MO_32); - tcg_temp_free_i32(tmp2); - tcg_temp_free_i32(ahp); - tcg_temp_free_ptr(fpst); - return true; } @@ -3628,8 +3513,6 @@ static bool do_2misc(DisasContext *s, arg_2misc *a, NeonGenOneOpFn *fn) fn(tmp, tmp); write_neon_element32(tmp, a->vd, pass, MO_32); } - tcg_temp_free_i32(tmp); - return true; } @@ -3790,7 +3673,6 @@ static bool trans_VQNEG(DisasContext *s, arg_2misc *a) fpst = fpstatus_ptr(vece == MO_16 ? FPST_STD_F16 : FPST_STD); \ tcg_gen_gvec_2_ptr(rd_ofs, rm_ofs, fpst, oprsz, maxsz, 0, \ fns[vece]); \ - tcg_temp_free_ptr(fpst); \ } \ static bool trans_##INSN(DisasContext *s, arg_2misc *a) \ { \ @@ -3841,7 +3723,6 @@ static bool trans_VRINTX(DisasContext *s, arg_2misc *a) fpst = fpstatus_ptr(vece == 1 ? 
FPST_STD_F16 : FPST_STD); \ tcg_gen_gvec_2_ptr(rd_ofs, rm_ofs, fpst, oprsz, maxsz, \ arm_rmode_to_sf(RMODE), fns[vece]); \ - tcg_temp_free_ptr(fpst); \ } \ static bool trans_##INSN(DisasContext *s, arg_2misc *a) \ { \ @@ -3908,11 +3789,9 @@ static bool trans_VSWP(DisasContext *s, arg_2misc *a) write_neon_element64(rm, a->vd, pass, MO_64); write_neon_element64(rd, a->vm, pass, MO_64); } - tcg_temp_free_i64(rm); - tcg_temp_free_i64(rd); - return true; } + static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1) { TCGv_i32 rd, tmp; @@ -3930,9 +3809,6 @@ static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1) tcg_gen_andi_i32(tmp, t0, 0xff00ff00); tcg_gen_or_i32(t1, t1, tmp); tcg_gen_mov_i32(t0, rd); - - tcg_temp_free_i32(tmp); - tcg_temp_free_i32(rd); } static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1) @@ -3949,9 +3825,6 @@ static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1) tcg_gen_andi_i32(tmp, t0, 0xffff0000); tcg_gen_or_i32(t1, t1, tmp); tcg_gen_mov_i32(t0, rd); - - tcg_temp_free_i32(tmp); - tcg_temp_free_i32(rd); } static bool trans_VTRN(DisasContext *s, arg_2misc *a) @@ -4003,8 +3876,6 @@ static bool trans_VTRN(DisasContext *s, arg_2misc *a) write_neon_element32(tmp, a->vd, pass, MO_32); } } - tcg_temp_free_i32(tmp); - tcg_temp_free_i32(tmp2); return true; } diff --git a/target/arm/tcg/translate-sme.c b/target/arm/tcg/translate-sme.c index 7b87a9df63..e3adba314e 100644 --- a/target/arm/tcg/translate-sme.c +++ b/target/arm/tcg/translate-sme.c @@ -97,7 +97,6 @@ static TCGv_ptr get_tile_rowcol(DisasContext *s, int esz, int rs, /* Add the byte offset to env to produce the final pointer. */ addr = tcg_temp_new_ptr(); tcg_gen_ext_i32_ptr(addr, tmp); - tcg_temp_free_i32(tmp); tcg_gen_add_ptr(addr, addr, cpu_env); return addr; @@ -166,11 +165,6 @@ static bool trans_MOVA(DisasContext *s, arg_MOVA *a) h_fns[a->esz](t_za, t_zr, t_za, t_pg, t_desc); } } - - tcg_temp_free_ptr(t_za); - tcg_temp_free_ptr(t_zr); - tcg_temp_free_ptr(t_pg); - return true; } @@ -237,10 +231,6 @@ static bool trans_LDST1(DisasContext *s, arg_LDST1 *a) fns[a->esz][be][a->v][mte][a->st](cpu_env, t_za, t_pg, addr, tcg_constant_i32(desc)); - - tcg_temp_free_ptr(t_za); - tcg_temp_free_ptr(t_pg); - tcg_temp_free_i64(addr); return true; } @@ -260,8 +250,6 @@ static bool do_ldst_r(DisasContext *s, arg_ldstr *a, GenLdStR *fn) base = get_tile_rowcol(s, MO_8, a->rv, imm, false); fn(s, base, 0, svl, a->rn, imm * svl); - - tcg_temp_free_ptr(base); return true; } @@ -286,11 +274,6 @@ static bool do_adda(DisasContext *s, arg_adda *a, MemOp esz, pm = pred_full_reg_ptr(s, a->pm); fn(za, zn, pn, pm, tcg_constant_i32(desc)); - - tcg_temp_free_ptr(za); - tcg_temp_free_ptr(zn); - tcg_temp_free_ptr(pn); - tcg_temp_free_ptr(pm); return true; } @@ -318,11 +301,6 @@ static bool do_outprod(DisasContext *s, arg_op *a, MemOp esz, pm = pred_full_reg_ptr(s, a->pm); fn(za, zn, zm, pn, pm, tcg_constant_i32(desc)); - - tcg_temp_free_ptr(za); - tcg_temp_free_ptr(zn); - tcg_temp_free_ptr(pn); - tcg_temp_free_ptr(pm); return true; } @@ -346,12 +324,6 @@ static bool do_outprod_fpst(DisasContext *s, arg_op *a, MemOp esz, fpst = fpstatus_ptr(FPST_FPCR); fn(za, zn, zm, pn, pm, fpst, tcg_constant_i32(desc)); - - tcg_temp_free_ptr(za); - tcg_temp_free_ptr(zn); - tcg_temp_free_ptr(pn); - tcg_temp_free_ptr(pm); - tcg_temp_free_ptr(fpst); return true; } diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c index 718a5bce1b..5bf80b22d7 100644 --- a/target/arm/tcg/translate-sve.c +++ b/target/arm/tcg/translate-sve.c @@ -130,7 +130,6 @@ static 
bool gen_gvec_fpst_zz(DisasContext *s, gen_helper_gvec_2_ptr *fn, tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn), status, vsz, vsz, data, fn); - tcg_temp_free_ptr(status); } return true; } @@ -181,8 +180,6 @@ static bool gen_gvec_fpst_zzz(DisasContext *s, gen_helper_gvec_3_ptr *fn, vec_full_reg_offset(s, rn), vec_full_reg_offset(s, rm), status, vsz, vsz, data, fn); - - tcg_temp_free_ptr(status); } return true; } @@ -249,7 +246,6 @@ static bool gen_gvec_fpst_zzzz(DisasContext *s, gen_helper_gvec_4_ptr *fn, { TCGv_ptr status = fpstatus_ptr(flavour); bool ret = gen_gvec_ptr_zzzz(s, fn, rd, rn, rm, ra, data, status); - tcg_temp_free_ptr(status); return ret; } @@ -271,8 +267,6 @@ static bool gen_gvec_fpst_zzzzp(DisasContext *s, gen_helper_gvec_5_ptr *fn, vec_full_reg_offset(s, ra), pred_full_reg_offset(s, pg), status, vsz, vsz, data, fn); - - tcg_temp_free_ptr(status); } return true; } @@ -321,7 +315,6 @@ static bool gen_gvec_fpst_zzp(DisasContext *s, gen_helper_gvec_3_ptr *fn, vec_full_reg_offset(s, rn), pred_full_reg_offset(s, pg), status, vsz, vsz, data, fn); - tcg_temp_free_ptr(status); } return true; } @@ -374,7 +367,6 @@ static bool gen_gvec_fpst_zzzp(DisasContext *s, gen_helper_gvec_4_ptr *fn, vec_full_reg_offset(s, rm), pred_full_reg_offset(s, pg), status, vsz, vsz, data, fn); - tcg_temp_free_ptr(status); } return true; } @@ -508,7 +500,6 @@ static void do_predtest1(TCGv_i64 d, TCGv_i64 g) gen_helper_sve_predtest1(t, d, g); do_pred_flags(t); - tcg_temp_free_i32(t); } static void do_predtest(DisasContext *s, int dofs, int gofs, int words) @@ -521,11 +512,8 @@ static void do_predtest(DisasContext *s, int dofs, int gofs, int words) tcg_gen_addi_ptr(gptr, cpu_env, gofs); gen_helper_sve_predtest(t, dptr, gptr, tcg_constant_i32(words)); - tcg_temp_free_ptr(dptr); - tcg_temp_free_ptr(gptr); do_pred_flags(t); - tcg_temp_free_i32(t); } /* For each element size, the bits within a predicate word that are active. */ @@ -561,7 +549,6 @@ static void gen_xar8_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh) tcg_gen_andi_i64(d, d, mask); tcg_gen_andi_i64(t, t, ~mask); tcg_gen_or_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_xar16_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh) @@ -575,7 +562,6 @@ static void gen_xar16_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh) tcg_gen_andi_i64(d, d, mask); tcg_gen_andi_i64(t, t, ~mask); tcg_gen_or_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_xar_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, int32_t sh) @@ -984,11 +970,8 @@ static bool do_vpz_ool(DisasContext *s, arg_rpr_esz *a, tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, a->rn)); tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->pg)); fn(temp, t_zn, t_pg, desc); - tcg_temp_free_ptr(t_zn); - tcg_temp_free_ptr(t_pg); write_fp_dreg(s, a->rd, temp); - tcg_temp_free_i64(temp); return true; } @@ -1253,11 +1236,7 @@ static bool do_index(DisasContext *s, int esz, int rd, tcg_gen_extrl_i64_i32(s32, start); tcg_gen_extrl_i64_i32(i32, incr); fns[esz](t_zd, s32, i32, desc); - - tcg_temp_free_i32(s32); - tcg_temp_free_i32(i32); } - tcg_temp_free_ptr(t_zd); return true; } @@ -1419,11 +1398,6 @@ static bool do_pppp_flags(DisasContext *s, arg_rprr_s *a, tcg_gen_st_i64(pd, cpu_env, dofs); do_predtest1(pd, pg); - - tcg_temp_free_i64(pd); - tcg_temp_free_i64(pn); - tcg_temp_free_i64(pm); - tcg_temp_free_i64(pg); } else { /* The operation and flags generation is large. 
The computation * of the flags depends on the original contents of the guarding @@ -1694,9 +1668,6 @@ static bool trans_PTEST(DisasContext *s, arg_PTEST *a) tcg_gen_ld_i64(pn, cpu_env, nofs); tcg_gen_ld_i64(pg, cpu_env, gofs); do_predtest1(pn, pg); - - tcg_temp_free_i64(pn); - tcg_temp_free_i64(pg); } else { do_predtest(s, nofs, gofs, words); } @@ -1810,8 +1781,6 @@ static bool do_predset(DisasContext *s, int esz, int rd, int pat, bool setflag) } done: - tcg_temp_free_i64(t); - /* PTRUES */ if (setflag) { tcg_gen_movi_i32(cpu_NF, -(word != 0)); @@ -1869,11 +1838,8 @@ static bool do_pfirst_pnext(DisasContext *s, arg_rr_esz *a, t = tcg_temp_new_i32(); gen_fn(t, t_pd, t_pg, tcg_constant_i32(desc)); - tcg_temp_free_ptr(t_pd); - tcg_temp_free_ptr(t_pg); do_pred_flags(t); - tcg_temp_free_i32(t); return true; } @@ -1950,9 +1916,7 @@ static void do_sat_addsub_64(TCGv_i64 reg, TCGv_i64 val, bool u, bool d) t2 = tcg_constant_i64(0); tcg_gen_movcond_i64(TCG_COND_LT, reg, t0, t2, t1, reg); } - tcg_temp_free_i64(t1); } - tcg_temp_free_i64(t0); } /* Similarly with a vector and a scalar operand. */ @@ -1982,7 +1946,6 @@ static void do_sat_addsub_vec(DisasContext *s, int esz, int rd, int rn, } else { gen_helper_sve_sqaddi_b(dptr, nptr, t32, desc); } - tcg_temp_free_i32(t32); break; case MO_16: @@ -1996,7 +1959,6 @@ static void do_sat_addsub_vec(DisasContext *s, int esz, int rd, int rn, } else { gen_helper_sve_sqaddi_h(dptr, nptr, t32, desc); } - tcg_temp_free_i32(t32); break; case MO_32: @@ -2011,7 +1973,6 @@ static void do_sat_addsub_vec(DisasContext *s, int esz, int rd, int rn, } else { gen_helper_sve_sqaddi_s(dptr, nptr, t64, desc); } - tcg_temp_free_i64(t64); break; case MO_64: @@ -2025,7 +1986,6 @@ static void do_sat_addsub_vec(DisasContext *s, int esz, int rd, int rn, t64 = tcg_temp_new_i64(); tcg_gen_neg_i64(t64, val); gen_helper_sve_sqaddi_d(dptr, nptr, t64, desc); - tcg_temp_free_i64(t64); } else { gen_helper_sve_sqaddi_d(dptr, nptr, val, desc); } @@ -2034,9 +1994,6 @@ static void do_sat_addsub_vec(DisasContext *s, int esz, int rd, int rn, default: g_assert_not_reached(); } - - tcg_temp_free_ptr(dptr); - tcg_temp_free_ptr(nptr); } static bool trans_CNT_r(DisasContext *s, arg_CNT_r *a) @@ -2222,10 +2179,6 @@ static void do_cpy_m(DisasContext *s, int esz, int rd, int rn, int pg, tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg)); fns[esz](t_zd, t_zn, t_pg, val, desc); - - tcg_temp_free_ptr(t_zd); - tcg_temp_free_ptr(t_zn); - tcg_temp_free_ptr(t_pg); } static bool trans_FCPY(DisasContext *s, arg_FCPY *a) @@ -2372,9 +2325,6 @@ static void do_insr_i64(DisasContext *s, arg_rrr_esz *a, TCGv_i64 val) tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, a->rn)); fns[a->esz](t_zd, t_zn, val, desc); - - tcg_temp_free_ptr(t_zd); - tcg_temp_free_ptr(t_zn); } static bool trans_INSR_f(DisasContext *s, arg_rrr_esz *a) @@ -2386,7 +2336,6 @@ static bool trans_INSR_f(DisasContext *s, arg_rrr_esz *a) TCGv_i64 t = tcg_temp_new_i64(); tcg_gen_ld_i64(t, cpu_env, vec_reg_offset(s, a->rm, 0, MO_64)); do_insr_i64(s, a, t); - tcg_temp_free_i64(t); } return true; } @@ -2476,10 +2425,6 @@ static bool do_perm_pred3(DisasContext *s, arg_rrr_esz *a, bool high_odd, tcg_gen_addi_ptr(t_m, cpu_env, pred_full_reg_offset(s, a->rm)); fn(t_d, t_n, t_m, tcg_constant_i32(desc)); - - tcg_temp_free_ptr(t_d); - tcg_temp_free_ptr(t_n); - tcg_temp_free_ptr(t_m); return true; } @@ -2503,9 +2448,6 @@ static bool do_perm_pred2(DisasContext *s, arg_rr_esz *a, bool high_odd, desc = FIELD_DP32(desc, PREDDESC, DATA, high_odd); fn(t_d, 
t_n, tcg_constant_i32(desc)); - - tcg_temp_free_ptr(t_d); - tcg_temp_free_ptr(t_n); return true; } @@ -2597,8 +2539,6 @@ static void find_last_active(DisasContext *s, TCGv_i32 ret, int esz, int pg) tcg_gen_addi_ptr(t_p, cpu_env, pred_full_reg_offset(s, pg)); gen_helper_sve_last_active_element(ret, t_p, tcg_constant_i32(desc)); - - tcg_temp_free_ptr(t_p); } /* Increment LAST to the offset of the next element in the vector, @@ -2661,7 +2601,6 @@ static TCGv_i64 load_last_active(DisasContext *s, TCGv_i32 last, int rm, int esz) { TCGv_ptr p = tcg_temp_new_ptr(); - TCGv_i64 r; /* Convert offset into vector into offset into ENV. * The final adjustment for the vector register base @@ -2676,10 +2615,7 @@ static TCGv_i64 load_last_active(DisasContext *s, TCGv_i32 last, tcg_gen_ext_i32_ptr(p, last); tcg_gen_add_ptr(p, p, cpu_env); - r = load_esz(p, vec_full_reg_offset(s, rm), esz); - tcg_temp_free_ptr(p); - - return r; + return load_esz(p, vec_full_reg_offset(s, rm), esz); } /* Compute CLAST for a Zreg. */ @@ -2709,11 +2645,9 @@ static bool do_clast_vector(DisasContext *s, arg_rprr_esz *a, bool before) } ele = load_last_active(s, last, a->rm, esz); - tcg_temp_free_i32(last); vsz = vec_full_reg_size(s); tcg_gen_gvec_dup_i64(esz, vec_full_reg_offset(s, a->rd), vsz, vsz, ele); - tcg_temp_free_i64(ele); /* If this insn used MOVPRFX, we may need a second move. */ if (a->rd != a->rn) { @@ -2756,13 +2690,9 @@ static void do_clast_scalar(DisasContext *s, int esz, int pg, int rm, * a conditional move. */ ele = load_last_active(s, last, rm, esz); - tcg_temp_free_i32(last); tcg_gen_movcond_i64(TCG_COND_GE, reg_val, cmp, tcg_constant_i64(0), ele, reg_val); - - tcg_temp_free_i64(cmp); - tcg_temp_free_i64(ele); } /* Compute CLAST for a Vreg. */ @@ -2775,7 +2705,6 @@ static bool do_clast_fp(DisasContext *s, arg_rpr_esz *a, bool before) do_clast_scalar(s, esz, a->pg, a->rn, before, reg); write_fp_dreg(s, a->rd, reg); - tcg_temp_free_i64(reg); } return true; } @@ -2821,7 +2750,6 @@ static TCGv_i64 do_last_scalar(DisasContext *s, int esz, int pg, int rm, bool before) { TCGv_i32 last = tcg_temp_new_i32(); - TCGv_i64 ret; find_last_active(s, last, esz, pg); if (before) { @@ -2830,9 +2758,7 @@ static TCGv_i64 do_last_scalar(DisasContext *s, int esz, incr_last_active(s, last, esz); } - ret = load_last_active(s, last, rm, esz); - tcg_temp_free_i32(last); - return ret; + return load_last_active(s, last, rm, esz); } /* Compute LAST for a Vreg. 
*/ @@ -2841,7 +2767,6 @@ static bool do_last_fp(DisasContext *s, arg_rpr_esz *a, bool before) if (sve_access_check(s)) { TCGv_i64 val = do_last_scalar(s, a->esz, a->pg, a->rn, before); write_fp_dreg(s, a->rd, val); - tcg_temp_free_i64(val); } return true; } @@ -2855,7 +2780,6 @@ static bool do_last_general(DisasContext *s, arg_rpr_esz *a, bool before) if (sve_access_check(s)) { TCGv_i64 val = do_last_scalar(s, a->esz, a->pg, a->rn, before); tcg_gen_mov_i64(cpu_reg(s, a->rd), val); - tcg_temp_free_i64(val); } return true; } @@ -2883,7 +2807,6 @@ static bool trans_CPY_m_v(DisasContext *s, arg_rpr_esz *a) int ofs = vec_reg_offset(s, a->rn, 0, a->esz); TCGv_i64 t = load_esz(cpu_env, ofs, a->esz); do_cpy_m(s, a->esz, a->rd, a->rd, a->pg, t); - tcg_temp_free_i64(t); } return true; } @@ -2942,14 +2865,7 @@ static bool do_ppzz_flags(DisasContext *s, arg_rprr_esz *a, gen_fn(t, pd, zn, zm, pg, tcg_constant_i32(simd_desc(vsz, vsz, 0))); - tcg_temp_free_ptr(pd); - tcg_temp_free_ptr(zn); - tcg_temp_free_ptr(zm); - tcg_temp_free_ptr(pg); - do_pred_flags(t); - - tcg_temp_free_i32(t); return true; } @@ -3021,13 +2937,7 @@ static bool do_ppzi_flags(DisasContext *s, arg_rpri_esz *a, gen_fn(t, pd, zn, pg, tcg_constant_i32(simd_desc(vsz, vsz, a->imm))); - tcg_temp_free_ptr(pd); - tcg_temp_free_ptr(zn); - tcg_temp_free_ptr(pg); - do_pred_flags(t); - - tcg_temp_free_i32(t); return true; } @@ -3081,14 +2991,9 @@ static bool do_brk3(DisasContext *s, arg_rprr_s *a, TCGv_i32 t = tcg_temp_new_i32(); fn_s(t, d, n, m, g, desc); do_pred_flags(t); - tcg_temp_free_i32(t); } else { fn(d, n, m, g, desc); } - tcg_temp_free_ptr(d); - tcg_temp_free_ptr(n); - tcg_temp_free_ptr(m); - tcg_temp_free_ptr(g); return true; } @@ -3115,13 +3020,9 @@ static bool do_brk2(DisasContext *s, arg_rpr_s *a, TCGv_i32 t = tcg_temp_new_i32(); fn_s(t, d, n, g, desc); do_pred_flags(t); - tcg_temp_free_i32(t); } else { fn(d, n, g, desc); } - tcg_temp_free_ptr(d); - tcg_temp_free_ptr(n); - tcg_temp_free_ptr(g); return true; } @@ -3159,7 +3060,6 @@ static void do_cntp(DisasContext *s, TCGv_i64 val, int esz, int pn, int pg) TCGv_i64 g = tcg_temp_new_i64(); tcg_gen_ld_i64(g, cpu_env, pred_full_reg_offset(s, pg)); tcg_gen_and_i64(val, val, g); - tcg_temp_free_i64(g); } /* Reduce the pred_esz_masks value simply to reduce the @@ -3181,8 +3081,6 @@ static void do_cntp(DisasContext *s, TCGv_i64 val, int esz, int pn, int pg) tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg)); gen_helper_sve_cntp(val, t_pn, t_pg, tcg_constant_i32(desc)); - tcg_temp_free_ptr(t_pn); - tcg_temp_free_ptr(t_pg); } } @@ -3212,7 +3110,6 @@ static bool trans_INCDECP_r(DisasContext *s, arg_incdec_pred *a) } else { tcg_gen_add_i64(reg, reg, val); } - tcg_temp_free_i64(val); } return true; } @@ -3297,7 +3194,6 @@ static bool trans_CTERM(DisasContext *s, arg_CTERM *a) tcg_gen_setcond_i64(cond, cmp, rn, rm); tcg_gen_extrl_i64_i32(cpu_NF, cmp); - tcg_temp_free_i64(cmp); /* VF = !NF & !CF. */ tcg_gen_xori_i32(cpu_VF, cpu_NF, 1); @@ -3394,12 +3290,10 @@ static bool trans_WHILE(DisasContext *s, arg_WHILE *a) /* Set the count to zero if the condition is false. */ tcg_gen_movi_i64(t1, 0); tcg_gen_movcond_i64(cond, t0, op0, op1, t0, t1); - tcg_temp_free_i64(t1); /* Since we're bounded, pass as a 32-bit type. */ t2 = tcg_temp_new_i32(); tcg_gen_extrl_i64_i32(t2, t0); - tcg_temp_free_i64(t0); /* Scale elements to bits. 
*/ tcg_gen_shli_i32(t2, t2, a->esz); @@ -3416,9 +3310,6 @@ static bool trans_WHILE(DisasContext *s, arg_WHILE *a) gen_helper_sve_whileg(t2, ptr, t2, tcg_constant_i32(desc)); } do_pred_flags(t2); - - tcg_temp_free_ptr(ptr); - tcg_temp_free_i32(t2); return true; } @@ -3450,7 +3341,6 @@ static bool trans_WHILE_ptr(DisasContext *s, arg_WHILE_ptr *a) tcg_gen_sub_i64(diff, op0, op1); tcg_gen_sub_i64(t1, op1, op0); tcg_gen_movcond_i64(TCG_COND_GEU, diff, op0, op1, diff, t1); - tcg_temp_free_i64(t1); /* Round down to a multiple of ESIZE. */ tcg_gen_andi_i64(diff, diff, -1 << a->esz); /* If op1 == op0, diff == 0, and the condition is always true. */ @@ -3470,7 +3360,6 @@ static bool trans_WHILE_ptr(DisasContext *s, arg_WHILE_ptr *a) /* Since we're bounded, pass as a 32-bit type. */ t2 = tcg_temp_new_i32(); tcg_gen_extrl_i64_i32(t2, diff); - tcg_temp_free_i64(diff); desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz / 8); desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz); @@ -3480,9 +3369,6 @@ static bool trans_WHILE_ptr(DisasContext *s, arg_WHILE_ptr *a) gen_helper_sve_whilel(t2, ptr, t2, tcg_constant_i32(desc)); do_pred_flags(t2); - - tcg_temp_free_ptr(ptr); - tcg_temp_free_i32(t2); return true; } @@ -3814,12 +3700,8 @@ static bool do_reduce(DisasContext *s, arg_rpr_esz *a, status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); fn(temp, t_zn, t_pg, status, t_desc); - tcg_temp_free_ptr(t_zn); - tcg_temp_free_ptr(t_pg); - tcg_temp_free_ptr(status); write_fp_dreg(s, a->rd, temp); - tcg_temp_free_i64(temp); return true; } @@ -3873,7 +3755,6 @@ static bool do_ppz_fp(DisasContext *s, arg_rpr_esz *a, vec_full_reg_offset(s, a->rn), pred_full_reg_offset(s, a->pg), status, vsz, vsz, 0, fn); - tcg_temp_free_ptr(status); } return true; } @@ -3942,12 +3823,7 @@ static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a) fns[a->esz - 1](t_val, t_val, t_rm, t_pg, t_fpst, t_desc); - tcg_temp_free_ptr(t_fpst); - tcg_temp_free_ptr(t_pg); - tcg_temp_free_ptr(t_rm); - write_fp_dreg(s, a->rd, t_val); - tcg_temp_free_i64(t_val); return true; } @@ -4020,11 +3896,6 @@ static void do_fp_scalar(DisasContext *s, int zd, int zn, int pg, bool is_fp16, status = fpstatus_ptr(is_fp16 ? 
FPST_FPCR_F16 : FPST_FPCR); desc = tcg_constant_i32(simd_desc(vsz, vsz, 0)); fn(t_zd, t_zn, t_pg, scalar, status, desc); - - tcg_temp_free_ptr(status); - tcg_temp_free_ptr(t_pg); - tcg_temp_free_ptr(t_zn); - tcg_temp_free_ptr(t_zd); } static bool do_fp_imm(DisasContext *s, arg_rpri_esz *a, uint64_t imm, @@ -4080,7 +3951,6 @@ static bool do_fp_cmp(DisasContext *s, arg_rprr_esz *a, vec_full_reg_offset(s, a->rm), pred_full_reg_offset(s, a->pg), status, vsz, vsz, 0, fn); - tcg_temp_free_ptr(status); } return true; } @@ -4237,8 +4107,6 @@ static bool do_frint_mode(DisasContext *s, arg_rpr_esz *a, status, vsz, vsz, 0, fn); gen_helper_set_rmode(tmode, tmode, status); - tcg_temp_free_i32(tmode); - tcg_temp_free_ptr(status); return true; } @@ -4321,7 +4189,6 @@ void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs, dirty_addr = tcg_temp_new_i64(); tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm); clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len); - tcg_temp_free_i64(dirty_addr); /* * Note that unpredicated load/store of vector/predicate registers @@ -4339,7 +4206,6 @@ void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs, tcg_gen_st_i64(t0, base, vofs + i); tcg_gen_addi_i64(clean_addr, clean_addr, 8); } - tcg_temp_free_i64(t0); } else { TCGLabel *loop = gen_new_label(); TCGv_ptr tp, i = tcg_const_ptr(0); @@ -4354,11 +4220,8 @@ void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs, tcg_gen_add_ptr(tp, base, i); tcg_gen_addi_ptr(i, i, 8); tcg_gen_st_i64(t0, tp, vofs); - tcg_temp_free_ptr(tp); - tcg_temp_free_i64(t0); tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop); - tcg_temp_free_ptr(i); } /* @@ -4381,14 +4244,12 @@ void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs, tcg_gen_addi_i64(clean_addr, clean_addr, 4); tcg_gen_qemu_ld_i64(t1, clean_addr, midx, MO_LEUW); tcg_gen_deposit_i64(t0, t0, t1, 32, 32); - tcg_temp_free_i64(t1); break; default: g_assert_not_reached(); } tcg_gen_st_i64(t0, base, vofs + len_align); - tcg_temp_free_i64(t0); } } @@ -4405,7 +4266,6 @@ void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs, dirty_addr = tcg_temp_new_i64(); tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm); clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len); - tcg_temp_free_i64(dirty_addr); /* Note that unpredicated load/store of vector/predicate registers * are defined as a stream of bytes, which equates to little-endian @@ -4424,7 +4284,6 @@ void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs, tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ); tcg_gen_addi_i64(clean_addr, clean_addr, 8); } - tcg_temp_free_i64(t0); } else { TCGLabel *loop = gen_new_label(); TCGv_ptr tp, i = tcg_const_ptr(0); @@ -4436,14 +4295,11 @@ void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs, tcg_gen_add_ptr(tp, base, i); tcg_gen_ld_i64(t0, tp, vofs); tcg_gen_addi_ptr(i, i, 8); - tcg_temp_free_ptr(tp); tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ); tcg_gen_addi_i64(clean_addr, clean_addr, 8); - tcg_temp_free_i64(t0); tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop); - tcg_temp_free_ptr(i); } /* Predicate register stores can be any multiple of 2. 
*/ @@ -4469,7 +4325,6 @@ void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs, default: g_assert_not_reached(); } - tcg_temp_free_i64(t0); } } @@ -4578,8 +4433,6 @@ static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr, tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg)); fn(cpu_env, t_pg, addr, tcg_constant_i32(desc)); - - tcg_temp_free_ptr(t_pg); } /* Indexed by [mte][be][dtype][nreg] */ @@ -4721,7 +4574,7 @@ static bool trans_LD_zprr(DisasContext *s, arg_rprr_load *a) return false; } if (sve_access_check(s)) { - TCGv_i64 addr = new_tmp_a64(s); + TCGv_i64 addr = tcg_temp_new_i64(); tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype)); tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn)); do_ld_zpa(s, a->rd, a->pg, addr, a->dtype, a->nreg); @@ -4737,7 +4590,7 @@ static bool trans_LD_zpri(DisasContext *s, arg_rpri_load *a) if (sve_access_check(s)) { int vsz = vec_full_reg_size(s); int elements = vsz >> dtype_esz[a->dtype]; - TCGv_i64 addr = new_tmp_a64(s); + TCGv_i64 addr = tcg_temp_new_i64(); tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), (a->imm * elements * (a->nreg + 1)) @@ -4840,7 +4693,7 @@ static bool trans_LDFF1_zprr(DisasContext *s, arg_rprr_load *a) } s->is_nonstreaming = true; if (sve_access_check(s)) { - TCGv_i64 addr = new_tmp_a64(s); + TCGv_i64 addr = tcg_temp_new_i64(); tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype)); tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn)); do_mem_zpa(s, a->rd, a->pg, addr, a->dtype, 1, false, @@ -4945,7 +4798,7 @@ static bool trans_LDNF1_zpri(DisasContext *s, arg_rpri_load *a) int vsz = vec_full_reg_size(s); int elements = vsz >> dtype_esz[a->dtype]; int off = (a->imm * elements) << dtype_msz(a->dtype); - TCGv_i64 addr = new_tmp_a64(s); + TCGv_i64 addr = tcg_temp_new_i64(); tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), off); do_mem_zpa(s, a->rd, a->pg, addr, a->dtype, 1, false, @@ -4977,7 +4830,6 @@ static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype) poff = offsetof(CPUARMState, vfp.preg_tmp); tcg_gen_st_i64(tmp, cpu_env, poff); - tcg_temp_free_i64(tmp); } t_pg = tcg_temp_new_ptr(); @@ -4987,8 +4839,6 @@ static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype) = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][0]; fn(cpu_env, t_pg, addr, tcg_constant_i32(simd_desc(16, 16, zt))); - tcg_temp_free_ptr(t_pg); - /* Replicate that first quadword. 
*/ if (vsz > 16) { int doff = vec_full_reg_offset(s, zt); @@ -5003,7 +4853,7 @@ static bool trans_LD1RQ_zprr(DisasContext *s, arg_rprr_load *a) } if (sve_access_check(s)) { int msz = dtype_msz(a->dtype); - TCGv_i64 addr = new_tmp_a64(s); + TCGv_i64 addr = tcg_temp_new_i64(); tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), msz); tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn)); do_ldrq(s, a->rd, a->pg, addr, a->dtype); @@ -5017,7 +4867,7 @@ static bool trans_LD1RQ_zpri(DisasContext *s, arg_rpri_load *a) return false; } if (sve_access_check(s)) { - TCGv_i64 addr = new_tmp_a64(s); + TCGv_i64 addr = tcg_temp_new_i64(); tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 16); do_ldrq(s, a->rd, a->pg, addr, a->dtype); } @@ -5059,7 +4909,6 @@ static void do_ldro(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype) poff = offsetof(CPUARMState, vfp.preg_tmp); tcg_gen_st_i64(tmp, cpu_env, poff); - tcg_temp_free_i64(tmp); } t_pg = tcg_temp_new_ptr(); @@ -5069,8 +4918,6 @@ static void do_ldro(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype) = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][0]; fn(cpu_env, t_pg, addr, tcg_constant_i32(simd_desc(32, 32, zt))); - tcg_temp_free_ptr(t_pg); - /* * Replicate that first octaword. * The replication happens in units of 32; if the full vector size @@ -5097,7 +4944,7 @@ static bool trans_LD1RO_zprr(DisasContext *s, arg_rprr_load *a) } s->is_nonstreaming = true; if (sve_access_check(s)) { - TCGv_i64 addr = new_tmp_a64(s); + TCGv_i64 addr = tcg_temp_new_i64(); tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype)); tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn)); do_ldro(s, a->rd, a->pg, addr, a->dtype); @@ -5112,7 +4959,7 @@ static bool trans_LD1RO_zpri(DisasContext *s, arg_rpri_load *a) } s->is_nonstreaming = true; if (sve_access_check(s)) { - TCGv_i64 addr = new_tmp_a64(s); + TCGv_i64 addr = tcg_temp_new_i64(); tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 32); do_ldro(s, a->rd, a->pg, addr, a->dtype); } @@ -5148,12 +4995,10 @@ static bool trans_LD1R_zpri(DisasContext *s, arg_rpri_load *a) tcg_gen_ld_i64(temp, cpu_env, pred_full_reg_offset(s, a->pg)); tcg_gen_andi_i64(temp, temp, pred_esz_masks[esz] & psz_mask); tcg_gen_brcondi_i64(TCG_COND_EQ, temp, 0, over); - tcg_temp_free_i64(temp); } else { TCGv_i32 t32 = tcg_temp_new_i32(); find_last_active(s, t32, esz, a->pg); tcg_gen_brcondi_i32(TCG_COND_LT, t32, 0, over); - tcg_temp_free_i32(t32); } /* Load the data. */ @@ -5167,7 +5012,6 @@ static bool trans_LD1R_zpri(DisasContext *s, arg_rpri_load *a) /* Broadcast to *all* elements. */ tcg_gen_gvec_dup_i64(esz, vec_full_reg_offset(s, a->rd), vsz, vsz, temp); - tcg_temp_free_i64(temp); /* Zero the inactive elements. 
*/ gen_set_label(over); @@ -5307,7 +5151,7 @@ static bool trans_ST_zprr(DisasContext *s, arg_rprr_store *a) return false; } if (sve_access_check(s)) { - TCGv_i64 addr = new_tmp_a64(s); + TCGv_i64 addr = tcg_temp_new_i64(); tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), a->msz); tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn)); do_st_zpa(s, a->rd, a->pg, addr, a->msz, a->esz, a->nreg); @@ -5326,7 +5170,7 @@ static bool trans_ST_zpri(DisasContext *s, arg_rpri_store *a) if (sve_access_check(s)) { int vsz = vec_full_reg_size(s); int elements = vsz >> a->esz; - TCGv_i64 addr = new_tmp_a64(s); + TCGv_i64 addr = tcg_temp_new_i64(); tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), (a->imm * elements * (a->nreg + 1)) << a->msz); @@ -5363,10 +5207,6 @@ static void do_mem_zpz(DisasContext *s, int zt, int pg, int zm, tcg_gen_addi_ptr(t_zm, cpu_env, vec_full_reg_offset(s, zm)); tcg_gen_addi_ptr(t_zt, cpu_env, vec_full_reg_offset(s, zt)); fn(cpu_env, t_zt, t_pg, t_zm, scalar, tcg_constant_i32(desc)); - - tcg_temp_free_ptr(t_zt); - tcg_temp_free_ptr(t_zm); - tcg_temp_free_ptr(t_pg); } /* Indexed by [mte][be][ff][xs][u][msz]. */ @@ -6301,7 +6141,6 @@ static void gen_sshll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm) TCGv_vec t = tcg_temp_new_vec_matching(d); tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(halfbits, halfbits)); tcg_gen_and_vec(vece, d, n, t); - tcg_temp_free_vec(t); } else { tcg_gen_sari_vec(vece, d, n, halfbits); tcg_gen_shli_vec(vece, d, d, shl); @@ -6359,7 +6198,6 @@ static void gen_ushll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm) TCGv_vec t = tcg_temp_new_vec_matching(d); tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(halfbits, halfbits)); tcg_gen_and_vec(vece, d, n, t); - tcg_temp_free_vec(t); } else { tcg_gen_shri_vec(vece, d, n, halfbits); tcg_gen_shli_vec(vece, d, d, shl); @@ -6369,7 +6207,6 @@ static void gen_ushll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm) TCGv_vec t = tcg_temp_new_vec_matching(d); tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits)); tcg_gen_and_vec(vece, d, n, t); - tcg_temp_free_vec(t); } else { tcg_gen_shli_vec(vece, d, n, halfbits); tcg_gen_shri_vec(vece, d, d, halfbits - shl); @@ -6549,7 +6386,6 @@ static void gen_sqxtnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n) tcg_gen_smin_vec(vece, d, d, t); tcg_gen_dupi_vec(vece, t, mask); tcg_gen_and_vec(vece, d, d, t); - tcg_temp_free_vec(t); } static const GVecGen2 sqxtnb_ops[3] = { @@ -6583,7 +6419,6 @@ static void gen_sqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n) tcg_gen_shli_vec(vece, n, n, halfbits); tcg_gen_dupi_vec(vece, t, mask); tcg_gen_bitsel_vec(vece, d, t, d, n); - tcg_temp_free_vec(t); } static const GVecGen2 sqxtnt_ops[3] = { @@ -6617,7 +6452,6 @@ static void gen_uqxtnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n) tcg_gen_dupi_vec(vece, t, max); tcg_gen_umin_vec(vece, d, n, t); - tcg_temp_free_vec(t); } static const GVecGen2 uqxtnb_ops[3] = { @@ -6646,7 +6480,6 @@ static void gen_uqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n) tcg_gen_umin_vec(vece, n, n, t); tcg_gen_shli_vec(vece, n, n, halfbits); tcg_gen_bitsel_vec(vece, d, t, d, n); - tcg_temp_free_vec(t); } static const GVecGen2 uqxtnt_ops[3] = { @@ -6682,7 +6515,6 @@ static void gen_sqxtunb_vec(unsigned vece, TCGv_vec d, TCGv_vec n) tcg_gen_smax_vec(vece, d, n, t); tcg_gen_dupi_vec(vece, t, max); tcg_gen_umin_vec(vece, d, d, t); - tcg_temp_free_vec(t); } static const GVecGen2 sqxtunb_ops[3] = { @@ -6713,7 +6545,6 @@ static void gen_sqxtunt_vec(unsigned vece, TCGv_vec d, TCGv_vec n) tcg_gen_umin_vec(vece, n, n, t); 
tcg_gen_shli_vec(vece, n, n, halfbits); tcg_gen_bitsel_vec(vece, d, t, d, n); - tcg_temp_free_vec(t); } static const GVecGen2 sqxtunt_ops[3] = { @@ -6784,7 +6615,6 @@ static void gen_shrnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr) tcg_gen_shri_vec(vece, n, n, shr); tcg_gen_dupi_vec(vece, t, mask); tcg_gen_and_vec(vece, d, n, t); - tcg_temp_free_vec(t); } static const TCGOpcode shrnb_vec_list[] = { INDEX_op_shri_vec, 0 }; @@ -6843,7 +6673,6 @@ static void gen_shrnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr) tcg_gen_shli_vec(vece, n, n, halfbits - shr); tcg_gen_dupi_vec(vece, t, mask); tcg_gen_bitsel_vec(vece, d, t, d, n); - tcg_temp_free_vec(t); } static const TCGOpcode shrnt_vec_list[] = { INDEX_op_shli_vec, 0 }; @@ -6894,7 +6723,6 @@ static void gen_sqshrunb_vec(unsigned vece, TCGv_vec d, tcg_gen_smax_vec(vece, n, n, t); tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits)); tcg_gen_umin_vec(vece, d, n, t); - tcg_temp_free_vec(t); } static const TCGOpcode sqshrunb_vec_list[] = { @@ -6929,7 +6757,6 @@ static void gen_sqshrunt_vec(unsigned vece, TCGv_vec d, tcg_gen_umin_vec(vece, n, n, t); tcg_gen_shli_vec(vece, n, n, halfbits); tcg_gen_bitsel_vec(vece, d, t, d, n); - tcg_temp_free_vec(t); } static const TCGOpcode sqshrunt_vec_list[] = { @@ -6984,7 +6811,6 @@ static void gen_sqshrnb_vec(unsigned vece, TCGv_vec d, tcg_gen_smin_vec(vece, n, n, t); tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits)); tcg_gen_and_vec(vece, d, n, t); - tcg_temp_free_vec(t); } static const TCGOpcode sqshrnb_vec_list[] = { @@ -7022,7 +6848,6 @@ static void gen_sqshrnt_vec(unsigned vece, TCGv_vec d, tcg_gen_shli_vec(vece, n, n, halfbits); tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits)); tcg_gen_bitsel_vec(vece, d, t, d, n); - tcg_temp_free_vec(t); } static const TCGOpcode sqshrnt_vec_list[] = { @@ -7071,7 +6896,6 @@ static void gen_uqshrnb_vec(unsigned vece, TCGv_vec d, tcg_gen_shri_vec(vece, n, n, shr); tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits)); tcg_gen_umin_vec(vece, d, n, t); - tcg_temp_free_vec(t); } static const TCGOpcode uqshrnb_vec_list[] = { @@ -7104,7 +6928,6 @@ static void gen_uqshrnt_vec(unsigned vece, TCGv_vec d, tcg_gen_umin_vec(vece, n, n, t); tcg_gen_shli_vec(vece, n, n, halfbits); tcg_gen_bitsel_vec(vece, d, t, d, n); - tcg_temp_free_vec(t); } static const TCGOpcode uqshrnt_vec_list[] = { @@ -7440,11 +7263,6 @@ static bool trans_PSEL(DisasContext *s, arg_psel *a) /* Apply to either copy the source, or write zeros. 
*/ tcg_gen_gvec_ands(MO_64, pred_full_reg_offset(s, a->pd), pred_full_reg_offset(s, a->pn), tmp, pl, pl); - - tcg_temp_free_i64(tmp); - tcg_temp_free_i64(dbit); - tcg_temp_free_i64(didx); - tcg_temp_free_ptr(ptr); return true; } diff --git a/target/arm/tcg/translate-vfp.c b/target/arm/tcg/translate-vfp.c index 5c5d58d2c6..757a2bf7d9 100644 --- a/target/arm/tcg/translate-vfp.c +++ b/target/arm/tcg/translate-vfp.c @@ -178,7 +178,6 @@ static void gen_update_fp_context(DisasContext *s) fpscr = load_cpu_field(v7m.fpdscr[s->v8m_secure]); gen_helper_vfp_set_fpscr(cpu_env, fpscr); - tcg_temp_free_i32(fpscr); if (dc_isar_feature(aa32_mve, s)) { store_cpu_field(tcg_constant_i32(0), v7m.vpr); } @@ -365,24 +364,15 @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a) tmp = tcg_temp_new_i64(); tcg_gen_xor_i64(tmp, vf, nf); tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero, frn, frm); - tcg_temp_free_i64(tmp); break; case 3: /* gt: !Z && N == V */ tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero, frn, frm); tmp = tcg_temp_new_i64(); tcg_gen_xor_i64(tmp, vf, nf); tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero, dest, frm); - tcg_temp_free_i64(tmp); break; } vfp_store_reg64(dest, rd); - tcg_temp_free_i64(frn); - tcg_temp_free_i64(frm); - tcg_temp_free_i64(dest); - - tcg_temp_free_i64(zf); - tcg_temp_free_i64(nf); - tcg_temp_free_i64(vf); } else { TCGv_i32 frn, frm, dest; TCGv_i32 tmp, zero; @@ -405,14 +395,12 @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a) tmp = tcg_temp_new_i32(); tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF); tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero, frn, frm); - tcg_temp_free_i32(tmp); break; case 3: /* gt: !Z && N == V */ tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero, frn, frm); tmp = tcg_temp_new_i32(); tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF); tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero, dest, frm); - tcg_temp_free_i32(tmp); break; } /* For fp16 the top half is always zeroes */ @@ -420,9 +408,6 @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a) tcg_gen_andi_i32(dest, dest, 0xffff); } vfp_store_reg32(dest, rd); - tcg_temp_free_i32(frn); - tcg_temp_free_i32(frm); - tcg_temp_free_i32(dest); } return true; @@ -490,8 +475,6 @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a) vfp_load_reg64(tcg_op, rm); gen_helper_rintd(tcg_res, tcg_op, fpst); vfp_store_reg64(tcg_res, rd); - tcg_temp_free_i64(tcg_op); - tcg_temp_free_i64(tcg_res); } else { TCGv_i32 tcg_op; TCGv_i32 tcg_res; @@ -504,14 +487,9 @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a) gen_helper_rints(tcg_res, tcg_op, fpst); } vfp_store_reg32(tcg_res, rd); - tcg_temp_free_i32(tcg_op); - tcg_temp_free_i32(tcg_res); } gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); - tcg_temp_free_i32(tcg_rmode); - - tcg_temp_free_ptr(fpst); return true; } @@ -573,9 +551,6 @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a) } tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res); vfp_store_reg32(tcg_tmp, rd); - tcg_temp_free_i32(tcg_tmp); - tcg_temp_free_i64(tcg_res); - tcg_temp_free_i64(tcg_double); } else { TCGv_i32 tcg_single, tcg_res; tcg_single = tcg_temp_new_i32(); @@ -595,15 +570,9 @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a) } } vfp_store_reg32(tcg_res, rd); - tcg_temp_free_i32(tcg_res); - tcg_temp_free_i32(tcg_single); } gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); - tcg_temp_free_i32(tcg_rmode); - - tcg_temp_free_ptr(fpst); - return true; } @@ -729,7 +698,6 @@ static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a) if (!mve_skip_vmov(s, a->vn, a->index, a->size)) { tmp = load_reg(s, 
a->rt); write_neon_element32(tmp, a->vn, a->index, a->size); - tcg_temp_free_i32(tmp); } if (dc_isar_feature(aa32_mve, s)) { @@ -777,8 +745,6 @@ static bool trans_VDUP(DisasContext *s, arg_VDUP *a) tmp = load_reg(s, a->rt); tcg_gen_gvec_dup_i32(size, neon_full_reg_offset(a->vn), vec_size, vec_size, tmp); - tcg_temp_free_i32(tmp); - return true; } @@ -883,7 +849,6 @@ static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a) if (a->rt == 15) { /* Set the 4 flag bits in the CPSR. */ gen_set_nzcv(tmp); - tcg_temp_free_i32(tmp); } else { store_reg(s, a->rt, tmp); } @@ -899,7 +864,6 @@ static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a) case ARM_VFP_FPSCR: tmp = load_reg(s, a->rt); gen_helper_vfp_set_fpscr(cpu_env, tmp); - tcg_temp_free_i32(tmp); gen_lookup_tb(s); break; case ARM_VFP_FPEXC: @@ -954,7 +918,6 @@ static bool trans_VMOV_half(DisasContext *s, arg_VMOV_single *a) tmp = load_reg(s, a->rt); tcg_gen_andi_i32(tmp, tmp, 0xffff); vfp_store_reg32(tmp, a->vn); - tcg_temp_free_i32(tmp); } return true; @@ -979,7 +942,6 @@ static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a) if (a->rt == 15) { /* Set the 4 flag bits in the CPSR. */ gen_set_nzcv(tmp); - tcg_temp_free_i32(tmp); } else { store_reg(s, a->rt, tmp); } @@ -987,7 +949,6 @@ static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a) /* general purpose register to VFP */ tmp = load_reg(s, a->rt); vfp_store_reg32(tmp, a->vn); - tcg_temp_free_i32(tmp); } return true; @@ -1021,10 +982,8 @@ static bool trans_VMOV_64_sp(DisasContext *s, arg_VMOV_64_sp *a) /* gpreg to fpreg */ tmp = load_reg(s, a->rt); vfp_store_reg32(tmp, a->vm); - tcg_temp_free_i32(tmp); tmp = load_reg(s, a->rt2); vfp_store_reg32(tmp, a->vm + 1); - tcg_temp_free_i32(tmp); } return true; @@ -1064,10 +1023,8 @@ static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_dp *a) /* gpreg to fpreg */ tmp = load_reg(s, a->rt); vfp_store_reg32(tmp, a->vm * 2); - tcg_temp_free_i32(tmp); tmp = load_reg(s, a->rt2); vfp_store_reg32(tmp, a->vm * 2 + 1); - tcg_temp_free_i32(tmp); } return true; @@ -1102,9 +1059,6 @@ static bool trans_VLDR_VSTR_hp(DisasContext *s, arg_VLDR_VSTR_sp *a) vfp_load_reg32(tmp, a->vd); gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UW | MO_ALIGN); } - tcg_temp_free_i32(tmp); - tcg_temp_free_i32(addr); - return true; } @@ -1136,9 +1090,6 @@ static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a) vfp_load_reg32(tmp, a->vd); gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN); } - tcg_temp_free_i32(tmp); - tcg_temp_free_i32(addr); - return true; } @@ -1177,9 +1128,6 @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a) vfp_load_reg64(tmp, a->vd); gen_aa32_st_i64(s, tmp, addr, get_mem_index(s), MO_UQ | MO_ALIGN_4); } - tcg_temp_free_i64(tmp); - tcg_temp_free_i32(addr); - return true; } @@ -1246,7 +1194,6 @@ static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a) } tcg_gen_addi_i32(addr, addr, offset); } - tcg_temp_free_i32(tmp); if (a->w) { /* writeback */ if (a->p) { @@ -1254,8 +1201,6 @@ static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a) tcg_gen_addi_i32(addr, addr, offset); } store_reg(s, a->rn, addr); - } else { - tcg_temp_free_i32(addr); } clear_eci_state(s); @@ -1332,7 +1277,6 @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a) } tcg_gen_addi_i32(addr, addr, offset); } - tcg_temp_free_i64(tmp); if (a->w) { /* writeback */ if (a->p) { @@ -1347,8 +1291,6 @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a) 
tcg_gen_addi_i32(addr, addr, offset); } store_reg(s, a->rn, addr); - } else { - tcg_temp_free_i32(addr); } clear_eci_state(s); @@ -1485,12 +1427,6 @@ static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn, vfp_load_reg32(f1, vm); } } - - tcg_temp_free_i32(f0); - tcg_temp_free_i32(f1); - tcg_temp_free_i32(fd); - tcg_temp_free_ptr(fpst); - return true; } @@ -1533,12 +1469,6 @@ static bool do_vfp_3op_hp(DisasContext *s, VFPGen3OpSPFn *fn, } fn(fd, f0, f1, fpst); vfp_store_reg32(fd, vd); - - tcg_temp_free_i32(f0); - tcg_temp_free_i32(f1); - tcg_temp_free_i32(fd); - tcg_temp_free_ptr(fpst); - return true; } @@ -1615,12 +1545,6 @@ static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn, vfp_load_reg64(f1, vm); } } - - tcg_temp_free_i64(f0); - tcg_temp_free_i64(f1); - tcg_temp_free_i64(fd); - tcg_temp_free_ptr(fpst); - return true; } @@ -1688,10 +1612,6 @@ static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm) vm = vfp_advance_sreg(vm, delta_m); vfp_load_reg32(f0, vm); } - - tcg_temp_free_i32(f0); - tcg_temp_free_i32(fd); - return true; } @@ -1724,7 +1644,6 @@ static bool do_vfp_2op_hp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm) vfp_load_reg32(f0, vm); fn(f0, f0); vfp_store_reg32(f0, vd); - tcg_temp_free_i32(f0); return true; } @@ -1798,10 +1717,6 @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm) vd = vfp_advance_dreg(vm, delta_m); vfp_load_reg64(f0, vm); } - - tcg_temp_free_i64(f0); - tcg_temp_free_i64(fd); - return true; } @@ -1812,7 +1727,6 @@ static void gen_VMLA_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst) gen_helper_vfp_mulh(tmp, vn, vm, fpst); gen_helper_vfp_addh(vd, vd, tmp, fpst); - tcg_temp_free_i32(tmp); } static bool trans_VMLA_hp(DisasContext *s, arg_VMLA_sp *a) @@ -1827,7 +1741,6 @@ static void gen_VMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst) gen_helper_vfp_muls(tmp, vn, vm, fpst); gen_helper_vfp_adds(vd, vd, tmp, fpst); - tcg_temp_free_i32(tmp); } static bool trans_VMLA_sp(DisasContext *s, arg_VMLA_sp *a) @@ -1842,7 +1755,6 @@ static void gen_VMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst) gen_helper_vfp_muld(tmp, vn, vm, fpst); gen_helper_vfp_addd(vd, vd, tmp, fpst); - tcg_temp_free_i64(tmp); } static bool trans_VMLA_dp(DisasContext *s, arg_VMLA_dp *a) @@ -1861,7 +1773,6 @@ static void gen_VMLS_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst) gen_helper_vfp_mulh(tmp, vn, vm, fpst); gen_helper_vfp_negh(tmp, tmp); gen_helper_vfp_addh(vd, vd, tmp, fpst); - tcg_temp_free_i32(tmp); } static bool trans_VMLS_hp(DisasContext *s, arg_VMLS_sp *a) @@ -1880,7 +1791,6 @@ static void gen_VMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst) gen_helper_vfp_muls(tmp, vn, vm, fpst); gen_helper_vfp_negs(tmp, tmp); gen_helper_vfp_adds(vd, vd, tmp, fpst); - tcg_temp_free_i32(tmp); } static bool trans_VMLS_sp(DisasContext *s, arg_VMLS_sp *a) @@ -1899,7 +1809,6 @@ static void gen_VMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst) gen_helper_vfp_muld(tmp, vn, vm, fpst); gen_helper_vfp_negd(tmp, tmp); gen_helper_vfp_addd(vd, vd, tmp, fpst); - tcg_temp_free_i64(tmp); } static bool trans_VMLS_dp(DisasContext *s, arg_VMLS_dp *a) @@ -1920,7 +1829,6 @@ static void gen_VNMLS_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst) gen_helper_vfp_mulh(tmp, vn, vm, fpst); gen_helper_vfp_negh(vd, vd); gen_helper_vfp_addh(vd, vd, tmp, fpst); - tcg_temp_free_i32(tmp); } static bool trans_VNMLS_hp(DisasContext *s, arg_VNMLS_sp *a) @@ -1941,7 +1849,6 @@ static void 
gen_VNMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst) gen_helper_vfp_muls(tmp, vn, vm, fpst); gen_helper_vfp_negs(vd, vd); gen_helper_vfp_adds(vd, vd, tmp, fpst); - tcg_temp_free_i32(tmp); } static bool trans_VNMLS_sp(DisasContext *s, arg_VNMLS_sp *a) @@ -1962,7 +1869,6 @@ static void gen_VNMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst) gen_helper_vfp_muld(tmp, vn, vm, fpst); gen_helper_vfp_negd(vd, vd); gen_helper_vfp_addd(vd, vd, tmp, fpst); - tcg_temp_free_i64(tmp); } static bool trans_VNMLS_dp(DisasContext *s, arg_VNMLS_dp *a) @@ -1979,7 +1885,6 @@ static void gen_VNMLA_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst) gen_helper_vfp_negh(tmp, tmp); gen_helper_vfp_negh(vd, vd); gen_helper_vfp_addh(vd, vd, tmp, fpst); - tcg_temp_free_i32(tmp); } static bool trans_VNMLA_hp(DisasContext *s, arg_VNMLA_sp *a) @@ -1996,7 +1901,6 @@ static void gen_VNMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst) gen_helper_vfp_negs(tmp, tmp); gen_helper_vfp_negs(vd, vd); gen_helper_vfp_adds(vd, vd, tmp, fpst); - tcg_temp_free_i32(tmp); } static bool trans_VNMLA_sp(DisasContext *s, arg_VNMLA_sp *a) @@ -2013,7 +1917,6 @@ static void gen_VNMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst) gen_helper_vfp_negd(tmp, tmp); gen_helper_vfp_negd(vd, vd); gen_helper_vfp_addd(vd, vd, tmp, fpst); - tcg_temp_free_i64(tmp); } static bool trans_VNMLA_dp(DisasContext *s, arg_VNMLA_dp *a) @@ -2225,12 +2128,6 @@ static bool do_vfm_hp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d) fpst = fpstatus_ptr(FPST_FPCR_F16); gen_helper_vfp_muladdh(vd, vn, vm, vd, fpst); vfp_store_reg32(vd, a->vd); - - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(vn); - tcg_temp_free_i32(vm); - tcg_temp_free_i32(vd); - return true; } @@ -2290,12 +2187,6 @@ static bool do_vfm_sp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d) fpst = fpstatus_ptr(FPST_FPCR); gen_helper_vfp_muladds(vd, vn, vm, vd, fpst); vfp_store_reg32(vd, a->vd); - - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(vn); - tcg_temp_free_i32(vm); - tcg_temp_free_i32(vd); - return true; } @@ -2361,12 +2252,6 @@ static bool do_vfm_dp(DisasContext *s, arg_VFMA_dp *a, bool neg_n, bool neg_d) fpst = fpstatus_ptr(FPST_FPCR); gen_helper_vfp_muladdd(vd, vn, vm, vd, fpst); vfp_store_reg64(vd, a->vd); - - tcg_temp_free_ptr(fpst); - tcg_temp_free_i64(vn); - tcg_temp_free_i64(vm); - tcg_temp_free_i64(vd); - return true; } @@ -2591,10 +2476,6 @@ static bool trans_VCMP_hp(DisasContext *s, arg_VCMP_sp *a) } else { gen_helper_vfp_cmph(vd, vm, cpu_env); } - - tcg_temp_free_i32(vd); - tcg_temp_free_i32(vm); - return true; } @@ -2630,10 +2511,6 @@ static bool trans_VCMP_sp(DisasContext *s, arg_VCMP_sp *a) } else { gen_helper_vfp_cmps(vd, vm, cpu_env); } - - tcg_temp_free_i32(vd); - tcg_temp_free_i32(vm); - return true; } @@ -2674,10 +2551,6 @@ static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a) } else { gen_helper_vfp_cmpd(vd, vm, cpu_env); } - - tcg_temp_free_i64(vd); - tcg_temp_free_i64(vm); - return true; } @@ -2702,9 +2575,6 @@ static bool trans_VCVT_f32_f16(DisasContext *s, arg_VCVT_f32_f16 *a) tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t)); gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp_mode); vfp_store_reg32(tmp, a->vd); - tcg_temp_free_i32(ahp_mode); - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(tmp); return true; } @@ -2740,10 +2610,6 @@ static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a) vd = tcg_temp_new_i64(); gen_helper_vfp_fcvt_f16_to_f64(vd, tmp, fpst, ahp_mode); 
vfp_store_reg64(vd, a->vd); - tcg_temp_free_i32(ahp_mode); - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(tmp); - tcg_temp_free_i64(vd); return true; } @@ -2766,8 +2632,6 @@ static bool trans_VCVT_b16_f32(DisasContext *s, arg_VCVT_b16_f32 *a) vfp_load_reg32(tmp, a->vm); gen_helper_bfcvt(tmp, tmp, fpst); tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t)); - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(tmp); return true; } @@ -2792,9 +2656,6 @@ static bool trans_VCVT_f16_f32(DisasContext *s, arg_VCVT_f16_f32 *a) vfp_load_reg32(tmp, a->vm); gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp_mode); tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t)); - tcg_temp_free_i32(ahp_mode); - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(tmp); return true; } @@ -2829,11 +2690,7 @@ static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a) vfp_load_reg64(vm, a->vm); gen_helper_vfp_fcvt_f64_to_f16(tmp, vm, fpst, ahp_mode); - tcg_temp_free_i64(vm); tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t)); - tcg_temp_free_i32(ahp_mode); - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(tmp); return true; } @@ -2855,8 +2712,6 @@ static bool trans_VRINTR_hp(DisasContext *s, arg_VRINTR_sp *a) fpst = fpstatus_ptr(FPST_FPCR_F16); gen_helper_rinth(tmp, tmp, fpst); vfp_store_reg32(tmp, a->vd); - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(tmp); return true; } @@ -2878,8 +2733,6 @@ static bool trans_VRINTR_sp(DisasContext *s, arg_VRINTR_sp *a) fpst = fpstatus_ptr(FPST_FPCR); gen_helper_rints(tmp, tmp, fpst); vfp_store_reg32(tmp, a->vd); - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(tmp); return true; } @@ -2910,8 +2763,6 @@ static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a) fpst = fpstatus_ptr(FPST_FPCR); gen_helper_rintd(tmp, tmp, fpst); vfp_store_reg64(tmp, a->vd); - tcg_temp_free_ptr(fpst); - tcg_temp_free_i64(tmp); return true; } @@ -2937,9 +2788,6 @@ static bool trans_VRINTZ_hp(DisasContext *s, arg_VRINTZ_sp *a) gen_helper_rinth(tmp, tmp, fpst); gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); vfp_store_reg32(tmp, a->vd); - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(tcg_rmode); - tcg_temp_free_i32(tmp); return true; } @@ -2965,9 +2813,6 @@ static bool trans_VRINTZ_sp(DisasContext *s, arg_VRINTZ_sp *a) gen_helper_rints(tmp, tmp, fpst); gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); vfp_store_reg32(tmp, a->vd); - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(tcg_rmode); - tcg_temp_free_i32(tmp); return true; } @@ -3002,9 +2847,6 @@ static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a) gen_helper_rintd(tmp, tmp, fpst); gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst); vfp_store_reg64(tmp, a->vd); - tcg_temp_free_ptr(fpst); - tcg_temp_free_i64(tmp); - tcg_temp_free_i32(tcg_rmode); return true; } @@ -3026,8 +2868,6 @@ static bool trans_VRINTX_hp(DisasContext *s, arg_VRINTX_sp *a) fpst = fpstatus_ptr(FPST_FPCR_F16); gen_helper_rinth_exact(tmp, tmp, fpst); vfp_store_reg32(tmp, a->vd); - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(tmp); return true; } @@ -3049,8 +2889,6 @@ static bool trans_VRINTX_sp(DisasContext *s, arg_VRINTX_sp *a) fpst = fpstatus_ptr(FPST_FPCR); gen_helper_rints_exact(tmp, tmp, fpst); vfp_store_reg32(tmp, a->vd); - tcg_temp_free_ptr(fpst); - tcg_temp_free_i32(tmp); return true; } @@ -3081,8 +2919,6 @@ static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a) fpst = fpstatus_ptr(FPST_FPCR); gen_helper_rintd_exact(tmp, tmp, fpst); vfp_store_reg64(tmp, a->vd); - tcg_temp_free_ptr(fpst); - tcg_temp_free_i64(tmp); return true; } @@ -3109,8 
+2945,6 @@ static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a) vfp_load_reg32(vm, a->vm); gen_helper_vfp_fcvtds(vd, vm, cpu_env); vfp_store_reg64(vd, a->vd); - tcg_temp_free_i32(vm); - tcg_temp_free_i64(vd); return true; } @@ -3137,8 +2971,6 @@ static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a) vfp_load_reg64(vm, a->vm); gen_helper_vfp_fcvtsd(vd, vm, cpu_env); vfp_store_reg32(vd, a->vd); - tcg_temp_free_i32(vd); - tcg_temp_free_i64(vm); return true; } @@ -3166,8 +2998,6 @@ static bool trans_VCVT_int_hp(DisasContext *s, arg_VCVT_int_sp *a) gen_helper_vfp_uitoh(vm, vm, fpst); } vfp_store_reg32(vm, a->vd); - tcg_temp_free_i32(vm); - tcg_temp_free_ptr(fpst); return true; } @@ -3195,8 +3025,6 @@ static bool trans_VCVT_int_sp(DisasContext *s, arg_VCVT_int_sp *a) gen_helper_vfp_uitos(vm, vm, fpst); } vfp_store_reg32(vm, a->vd); - tcg_temp_free_i32(vm); - tcg_temp_free_ptr(fpst); return true; } @@ -3231,9 +3059,6 @@ static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a) gen_helper_vfp_uitod(vd, vm, fpst); } vfp_store_reg64(vd, a->vd); - tcg_temp_free_i32(vm); - tcg_temp_free_i64(vd); - tcg_temp_free_ptr(fpst); return true; } @@ -3264,8 +3089,6 @@ static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a) vfp_load_reg64(vm, a->vm); gen_helper_vjcvt(vd, vm, cpu_env); vfp_store_reg32(vd, a->vd); - tcg_temp_free_i64(vm); - tcg_temp_free_i32(vd); return true; } @@ -3322,8 +3145,6 @@ static bool trans_VCVT_fix_hp(DisasContext *s, arg_VCVT_fix_sp *a) } vfp_store_reg32(vd, a->vd); - tcg_temp_free_i32(vd); - tcg_temp_free_ptr(fpst); return true; } @@ -3380,8 +3201,6 @@ static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a) } vfp_store_reg32(vd, a->vd); - tcg_temp_free_i32(vd); - tcg_temp_free_ptr(fpst); return true; } @@ -3444,8 +3263,6 @@ static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a) } vfp_store_reg64(vd, a->vd); - tcg_temp_free_i64(vd); - tcg_temp_free_ptr(fpst); return true; } @@ -3480,8 +3297,6 @@ static bool trans_VCVT_hp_int(DisasContext *s, arg_VCVT_sp_int *a) } } vfp_store_reg32(vm, a->vd); - tcg_temp_free_i32(vm); - tcg_temp_free_ptr(fpst); return true; } @@ -3516,8 +3331,6 @@ static bool trans_VCVT_sp_int(DisasContext *s, arg_VCVT_sp_int *a) } } vfp_store_reg32(vm, a->vd); - tcg_temp_free_i32(vm); - tcg_temp_free_ptr(fpst); return true; } @@ -3559,9 +3372,6 @@ static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a) } } vfp_store_reg32(vd, a->vd); - tcg_temp_free_i32(vd); - tcg_temp_free_i64(vm); - tcg_temp_free_ptr(fpst); return true; } @@ -3588,8 +3398,6 @@ static bool trans_VINS(DisasContext *s, arg_VINS *a) vfp_load_reg32(rd, a->vd); tcg_gen_deposit_i32(rd, rd, rm, 16, 16); vfp_store_reg32(rd, a->vd); - tcg_temp_free_i32(rm); - tcg_temp_free_i32(rd); return true; } @@ -3614,6 +3422,5 @@ static bool trans_VMOVX(DisasContext *s, arg_VINS *a) vfp_load_reg32(rm, a->vm); tcg_gen_shri_i32(rm, rm, 16); vfp_store_reg32(rm, a->vd); - tcg_temp_free_i32(rm); return true; } diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c index f042069dc2..b70b628000 100644 --- a/target/arm/tcg/translate.c +++ b/target/arm/tcg/translate.c @@ -195,7 +195,6 @@ void store_cpu_offset(TCGv_i32 var, int offset, int size) default: g_assert_not_reached(); } - tcg_temp_free_i32(var); } /* Save the syndrome information for a Data Abort */ @@ -325,7 +324,6 @@ void store_reg(DisasContext *s, int reg, TCGv_i32 var) tcg_gen_andi_i32(var, var, ~3); } tcg_gen_mov_i32(cpu_R[reg], var); - tcg_temp_free_i32(var); } /* @@ -420,12 +418,10 @@ static void 
gen_smul_dual(TCGv_i32 a, TCGv_i32 b) tcg_gen_ext16s_i32(tmp1, a); tcg_gen_ext16s_i32(tmp2, b); tcg_gen_mul_i32(tmp1, tmp1, tmp2); - tcg_temp_free_i32(tmp2); tcg_gen_sari_i32(a, a, 16); tcg_gen_sari_i32(b, b, 16); tcg_gen_mul_i32(b, b, a); tcg_gen_mov_i32(a, tmp1); - tcg_temp_free_i32(tmp1); } /* Byteswap each halfword. */ @@ -438,7 +434,6 @@ void gen_rev16(TCGv_i32 dest, TCGv_i32 var) tcg_gen_and_i32(var, var, mask); tcg_gen_shli_i32(var, var, 8); tcg_gen_or_i32(dest, var, tmp); - tcg_temp_free_i32(tmp); } /* Byteswap low halfword and sign extend. */ @@ -463,7 +458,6 @@ static void gen_add16(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) tcg_gen_andi_i32(t1, t1, ~0x8000); tcg_gen_add_i32(t0, t0, t1); tcg_gen_xor_i32(dest, t0, tmp); - tcg_temp_free_i32(tmp); } /* Set N and Z flags from var. */ @@ -498,7 +492,6 @@ static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) tcg_gen_xor_i32(cpu_VF, cpu_NF, t0); tcg_gen_xor_i32(tmp, t0, t1); tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp); - tcg_temp_free_i32(tmp); tcg_gen_mov_i32(dest, cpu_NF); } @@ -519,14 +512,11 @@ static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) tcg_gen_extu_i32_i64(q1, cpu_CF); tcg_gen_add_i64(q0, q0, q1); tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0); - tcg_temp_free_i64(q0); - tcg_temp_free_i64(q1); } tcg_gen_mov_i32(cpu_ZF, cpu_NF); tcg_gen_xor_i32(cpu_VF, cpu_NF, t0); tcg_gen_xor_i32(tmp, t0, t1); tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp); - tcg_temp_free_i32(tmp); tcg_gen_mov_i32(dest, cpu_NF); } @@ -541,7 +531,6 @@ static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) tmp = tcg_temp_new_i32(); tcg_gen_xor_i32(tmp, t0, t1); tcg_gen_and_i32(cpu_VF, cpu_VF, tmp); - tcg_temp_free_i32(tmp); tcg_gen_mov_i32(dest, cpu_NF); } @@ -551,7 +540,6 @@ static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_not_i32(tmp, t1); gen_adc_CC(dest, t0, tmp); - tcg_temp_free_i32(tmp); } #define GEN_SHIFT(name) \ @@ -564,8 +552,6 @@ static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \ tcg_gen_##name##_i32(tmpd, t0, tmp1); \ tcg_gen_andi_i32(tmp1, t1, 0xe0); \ tcg_gen_movcond_i32(TCG_COND_NE, dest, tmp1, zero, zero, tmpd); \ - tcg_temp_free_i32(tmpd); \ - tcg_temp_free_i32(tmp1); \ } GEN_SHIFT(shl) GEN_SHIFT(shr) @@ -578,7 +564,6 @@ static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) tcg_gen_andi_i32(tmp1, t1, 0xff); tcg_gen_umin_i32(tmp1, tmp1, tcg_constant_i32(31)); tcg_gen_sar_i32(dest, t0, tmp1); - tcg_temp_free_i32(tmp1); } static void shifter_out_im(TCGv_i32 var, int shift) @@ -631,7 +616,6 @@ static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop, shifter_out_im(var, 0); tcg_gen_shri_i32(var, var, 1); tcg_gen_or_i32(var, var, tmp); - tcg_temp_free_i32(tmp); } } }; @@ -661,7 +645,6 @@ static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop, tcg_gen_rotr_i32(var, var, shift); break; } } - tcg_temp_free_i32(shift); } /* @@ -672,7 +655,6 @@ void arm_test_cc(DisasCompare *cmp, int cc) { TCGv_i32 value; TCGCond cond; - bool global = true; switch (cc) { case 0: /* eq: Z */ @@ -703,7 +685,6 @@ void arm_test_cc(DisasCompare *cmp, int cc) case 9: /* ls: !C || Z -> !(C && !Z) */ cond = TCG_COND_NE; value = tcg_temp_new_i32(); - global = false; /* CF is 1 for C, so -CF is an all-bits-set mask for C; ZF is non-zero for !Z; so AND the two subexpressions. */ tcg_gen_neg_i32(value, cpu_CF); @@ -715,7 +696,6 @@ void arm_test_cc(DisasCompare *cmp, int cc) /* Since we're only interested in the sign bit, == 0 is >= 0. 
*/ cond = TCG_COND_GE; value = tcg_temp_new_i32(); - global = false; tcg_gen_xor_i32(value, cpu_VF, cpu_NF); break; @@ -723,7 +703,6 @@ void arm_test_cc(DisasCompare *cmp, int cc) case 13: /* le: Z || N != V */ cond = TCG_COND_NE; value = tcg_temp_new_i32(); - global = false; /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate * the sign bit then AND with ZF to yield the result. */ tcg_gen_xor_i32(value, cpu_VF, cpu_NF); @@ -751,14 +730,6 @@ void arm_test_cc(DisasCompare *cmp, int cc) no_invert: cmp->cond = cond; cmp->value = value; - cmp->value_global = global; -} - -void arm_free_cc(DisasCompare *cmp) -{ - if (!cmp->value_global) { - tcg_temp_free_i32(cmp->value); - } } void arm_jump_cc(DisasCompare *cmp, TCGLabel *label) @@ -771,7 +742,6 @@ void arm_gen_test_cc(int cc, TCGLabel *label) DisasCompare cmp; arm_test_cc(&cmp, cc); arm_jump_cc(&cmp, label); - arm_free_cc(&cmp); } void gen_set_condexec(DisasContext *s) @@ -888,7 +858,6 @@ static inline void gen_bxns(DisasContext *s, int rm) * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise. */ gen_helper_v7m_bxns(cpu_env, var); - tcg_temp_free_i32(var); s->base.is_jmp = DISAS_EXIT; } @@ -902,7 +871,6 @@ static inline void gen_blxns(DisasContext *s, int rm) */ gen_update_pc(s, curr_insn_len(s)); gen_helper_v7m_blxns(cpu_env, var); - tcg_temp_free_i32(var); s->base.is_jmp = DISAS_EXIT; } @@ -982,7 +950,6 @@ void gen_aa32_ld_internal_i32(DisasContext *s, TCGv_i32 val, { TCGv addr = gen_aa32_addr(s, a32, opc); tcg_gen_qemu_ld_i32(val, addr, index, opc); - tcg_temp_free(addr); } void gen_aa32_st_internal_i32(DisasContext *s, TCGv_i32 val, @@ -990,7 +957,6 @@ void gen_aa32_st_internal_i32(DisasContext *s, TCGv_i32 val, { TCGv addr = gen_aa32_addr(s, a32, opc); tcg_gen_qemu_st_i32(val, addr, index, opc); - tcg_temp_free(addr); } void gen_aa32_ld_internal_i64(DisasContext *s, TCGv_i64 val, @@ -1004,7 +970,6 @@ void gen_aa32_ld_internal_i64(DisasContext *s, TCGv_i64 val, if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) { tcg_gen_rotri_i64(val, val, 32); } - tcg_temp_free(addr); } void gen_aa32_st_internal_i64(DisasContext *s, TCGv_i64 val, @@ -1017,11 +982,9 @@ void gen_aa32_st_internal_i64(DisasContext *s, TCGv_i64 val, TCGv_i64 tmp = tcg_temp_new_i64(); tcg_gen_rotri_i64(tmp, val, 32); tcg_gen_qemu_st_i64(tmp, addr, index, opc); - tcg_temp_free_i64(tmp); } else { tcg_gen_qemu_st_i64(val, addr, index, opc); } - tcg_temp_free(addr); } void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32, @@ -1333,7 +1296,6 @@ static inline TCGv_i32 iwmmxt_load_creg(int reg) static inline void iwmmxt_store_creg(int reg, TCGv_i32 var) { tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg])); - tcg_temp_free_i32(var); } static inline void gen_op_iwmmxt_movq_wRn_M0(int rn) @@ -1492,10 +1454,9 @@ static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, else tcg_gen_addi_i32(tmp, tmp, -offset); tcg_gen_mov_i32(dest, tmp); - if (insn & (1 << 21)) + if (insn & (1 << 21)) { store_reg(s, rd, tmp); - else - tcg_temp_free_i32(tmp); + } } else if (insn & (1 << 21)) { /* Post indexed */ tcg_gen_mov_i32(dest, tmp); @@ -1527,7 +1488,6 @@ static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest) } tcg_gen_andi_i32(tmp, tmp, mask); tcg_gen_mov_i32(dest, tmp); - tcg_temp_free_i32(tmp); return 0; } @@ -1560,7 +1520,6 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) wrd = (insn >> 12) & 0xf; addr = tcg_temp_new_i32(); if (gen_iwmmxt_address(s, insn, addr)) { - 
tcg_temp_free_i32(addr); return 1; } if (insn & ARM_CP_RW_BIT) { @@ -1588,7 +1547,6 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) } if (i) { tcg_gen_extu_i32_i64(cpu_M0, tmp); - tcg_temp_free_i32(tmp); } gen_op_iwmmxt_movq_wRn_M0(wrd); } @@ -1616,9 +1574,7 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) } } } - tcg_temp_free_i32(tmp); } - tcg_temp_free_i32(addr); return 0; } @@ -1653,7 +1609,6 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) tmp = iwmmxt_load_creg(wrd); tmp2 = load_reg(s, rd); tcg_gen_andc_i32(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp2); iwmmxt_store_creg(wrd, tmp); break; case ARM_IWMMXT_wCGR0: @@ -1866,7 +1821,6 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) tcg_gen_andi_i32(tmp, tmp, 7); iwmmxt_load_reg(cpu_V1, rd1); gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp); - tcg_temp_free_i32(tmp); gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); break; @@ -1894,7 +1848,6 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) g_assert_not_reached(); } gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3); - tcg_temp_free_i32(tmp); gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); break; @@ -1948,7 +1901,6 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) } tcg_gen_shli_i32(tmp, tmp, 28); gen_set_nzcv(tmp); - tcg_temp_free_i32(tmp); break; case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */ if (((insn >> 6) & 3) == 3) @@ -1967,7 +1919,6 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) gen_helper_iwmmxt_bcstl(cpu_M0, tmp); break; } - tcg_temp_free_i32(tmp); gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); break; @@ -1996,8 +1947,6 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) break; } gen_set_nzcv(tmp); - tcg_temp_free_i32(tmp2); - tcg_temp_free_i32(tmp); break; case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */ wrd = (insn >> 12) & 0xf; @@ -2044,8 +1993,6 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) break; } gen_set_nzcv(tmp); - tcg_temp_free_i32(tmp2); - tcg_temp_free_i32(tmp); break; case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */ rd = (insn >> 12) & 0xf; @@ -2170,7 +2117,6 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) gen_op_iwmmxt_movq_M0_wRn(rd0); tmp = tcg_temp_new_i32(); if (gen_iwmmxt_shift(insn, 0xff, tmp)) { - tcg_temp_free_i32(tmp); return 1; } switch ((insn >> 22) & 3) { @@ -2184,7 +2130,6 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp); break; } - tcg_temp_free_i32(tmp); gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); gen_op_iwmmxt_set_cup(); @@ -2198,7 +2143,6 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) gen_op_iwmmxt_movq_M0_wRn(rd0); tmp = tcg_temp_new_i32(); if (gen_iwmmxt_shift(insn, 0xff, tmp)) { - tcg_temp_free_i32(tmp); return 1; } switch ((insn >> 22) & 3) { @@ -2212,7 +2156,6 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp); break; } - tcg_temp_free_i32(tmp); gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); gen_op_iwmmxt_set_cup(); @@ -2226,7 +2169,6 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) gen_op_iwmmxt_movq_M0_wRn(rd0); tmp = tcg_temp_new_i32(); if (gen_iwmmxt_shift(insn, 0xff, tmp)) { - tcg_temp_free_i32(tmp); return 1; } switch ((insn >> 22) & 3) { @@ -2240,7 +2182,6 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) 
gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp); break; } - tcg_temp_free_i32(tmp); gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); gen_op_iwmmxt_set_cup(); @@ -2256,27 +2197,23 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) switch ((insn >> 22) & 3) { case 1: if (gen_iwmmxt_shift(insn, 0xf, tmp)) { - tcg_temp_free_i32(tmp); return 1; } gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp); break; case 2: if (gen_iwmmxt_shift(insn, 0x1f, tmp)) { - tcg_temp_free_i32(tmp); return 1; } gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp); break; case 3: if (gen_iwmmxt_shift(insn, 0x3f, tmp)) { - tcg_temp_free_i32(tmp); return 1; } gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp); break; } - tcg_temp_free_i32(tmp); gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); gen_op_iwmmxt_set_cup(); @@ -2515,12 +2452,8 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2); break; default: - tcg_temp_free_i32(tmp2); - tcg_temp_free_i32(tmp); return 1; } - tcg_temp_free_i32(tmp2); - tcg_temp_free_i32(tmp); gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); break; @@ -2569,8 +2502,6 @@ static int disas_dsp_insn(DisasContext *s, uint32_t insn) default: return 1; } - tcg_temp_free_i32(tmp2); - tcg_temp_free_i32(tmp); gen_op_iwmmxt_movq_wRn_M0(acc); return 0; @@ -2747,7 +2678,6 @@ static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0) } else { gen_set_cpsr(t0, mask); } - tcg_temp_free_i32(t0); gen_lookup_tb(s); return 0; } @@ -2895,7 +2825,6 @@ static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn, gen_exception_insn_el_v(s, 0, EXCP_UDEF, syn_uncategorized(), tcg_el); - tcg_temp_free_i32(tcg_el); return false; } break; @@ -2939,7 +2868,6 @@ static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn) gen_helper_msr_banked(cpu_env, tcg_reg, tcg_constant_i32(tgtmode), tcg_constant_i32(regno)); - tcg_temp_free_i32(tcg_reg); s->base.is_jmp = DISAS_UPDATE_EXIT; } @@ -2970,7 +2898,6 @@ static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn) static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc) { tcg_gen_mov_i32(cpu_R[15], pc); - tcg_temp_free_i32(pc); } /* Generate a v6 exception return. Marks both values as dead. 
*/ @@ -2985,7 +2912,6 @@ static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr) gen_io_start(); } gen_helper_cpsr_write_eret(cpu_env, cpsr); - tcg_temp_free_i32(cpsr); /* Must exit loop to check un-masked IRQs */ s->base.is_jmp = DISAS_EXIT; } @@ -3005,7 +2931,6 @@ static void gen_gvec_fn3_qc(uint32_t rd_ofs, uint32_t rn_ofs, uint32_t rm_ofs, tcg_gen_addi_ptr(qc_ptr, cpu_env, offsetof(CPUARMState, vfp.qc)); tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, qc_ptr, opr_sz, max_sz, 0, fn); - tcg_temp_free_ptr(qc_ptr); } void gen_gvec_sqrdmlah_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, @@ -3251,7 +3176,6 @@ static void gen_srshr8_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) tcg_gen_andi_i64(t, t, dup_const(MO_8, 1)); tcg_gen_vec_sar8i_i64(d, a, sh); tcg_gen_vec_add8_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_srshr16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) @@ -3262,7 +3186,6 @@ static void gen_srshr16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) tcg_gen_andi_i64(t, t, dup_const(MO_16, 1)); tcg_gen_vec_sar16i_i64(d, a, sh); tcg_gen_vec_add16_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_srshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh) @@ -3278,7 +3201,6 @@ static void gen_srshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh) tcg_gen_extract_i32(t, a, sh - 1, 1); tcg_gen_sari_i32(d, a, sh); tcg_gen_add_i32(d, d, t); - tcg_temp_free_i32(t); } static void gen_srshr64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) @@ -3288,7 +3210,6 @@ static void gen_srshr64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) tcg_gen_extract_i64(t, a, sh - 1, 1); tcg_gen_sari_i64(d, a, sh); tcg_gen_add_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_srshr_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) @@ -3301,9 +3222,6 @@ static void gen_srshr_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) tcg_gen_and_vec(vece, t, t, ones); tcg_gen_sari_vec(vece, d, a, sh); tcg_gen_add_vec(vece, d, d, t); - - tcg_temp_free_vec(t); - tcg_temp_free_vec(ones); } void gen_gvec_srshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs, @@ -3359,7 +3277,6 @@ static void gen_srsra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) gen_srshr8_i64(t, a, sh); tcg_gen_vec_add8_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_srsra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) @@ -3368,7 +3285,6 @@ static void gen_srsra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) gen_srshr16_i64(t, a, sh); tcg_gen_vec_add16_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_srsra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh) @@ -3377,7 +3293,6 @@ static void gen_srsra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh) gen_srshr32_i32(t, a, sh); tcg_gen_add_i32(d, d, t); - tcg_temp_free_i32(t); } static void gen_srsra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) @@ -3386,7 +3301,6 @@ static void gen_srsra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) gen_srshr64_i64(t, a, sh); tcg_gen_add_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_srsra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) @@ -3395,7 +3309,6 @@ static void gen_srsra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) gen_srshr_vec(vece, t, a, sh); tcg_gen_add_vec(vece, d, d, t); - tcg_temp_free_vec(t); } void gen_gvec_srsra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs, @@ -3458,7 +3371,6 @@ static void gen_urshr8_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) tcg_gen_andi_i64(t, t, dup_const(MO_8, 1)); tcg_gen_vec_shr8i_i64(d, a, sh); tcg_gen_vec_add8_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_urshr16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) @@ -3469,7 +3381,6 @@ static 
void gen_urshr16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) tcg_gen_andi_i64(t, t, dup_const(MO_16, 1)); tcg_gen_vec_shr16i_i64(d, a, sh); tcg_gen_vec_add16_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_urshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh) @@ -3485,7 +3396,6 @@ static void gen_urshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh) tcg_gen_extract_i32(t, a, sh - 1, 1); tcg_gen_shri_i32(d, a, sh); tcg_gen_add_i32(d, d, t); - tcg_temp_free_i32(t); } static void gen_urshr64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) @@ -3495,7 +3405,6 @@ static void gen_urshr64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) tcg_gen_extract_i64(t, a, sh - 1, 1); tcg_gen_shri_i64(d, a, sh); tcg_gen_add_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_urshr_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t shift) @@ -3508,9 +3417,6 @@ static void gen_urshr_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t shift) tcg_gen_and_vec(vece, t, t, ones); tcg_gen_shri_vec(vece, d, a, shift); tcg_gen_add_vec(vece, d, d, t); - - tcg_temp_free_vec(t); - tcg_temp_free_vec(ones); } void gen_gvec_urshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs, @@ -3569,7 +3475,6 @@ static void gen_ursra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) gen_urshr8_i64(t, a, sh); } tcg_gen_vec_add8_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_ursra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) @@ -3582,7 +3487,6 @@ static void gen_ursra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) gen_urshr16_i64(t, a, sh); } tcg_gen_vec_add16_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_ursra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh) @@ -3595,7 +3499,6 @@ static void gen_ursra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh) gen_urshr32_i32(t, a, sh); } tcg_gen_add_i32(d, d, t); - tcg_temp_free_i32(t); } static void gen_ursra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) @@ -3608,7 +3511,6 @@ static void gen_ursra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh) gen_urshr64_i64(t, a, sh); } tcg_gen_add_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_ursra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) @@ -3621,7 +3523,6 @@ static void gen_ursra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) gen_urshr_vec(vece, t, a, sh); } tcg_gen_add_vec(vece, d, d, t); - tcg_temp_free_vec(t); } void gen_gvec_ursra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs, @@ -3674,7 +3575,6 @@ static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) tcg_gen_andi_i64(t, t, mask); tcg_gen_andi_i64(d, d, ~mask); tcg_gen_or_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) @@ -3686,7 +3586,6 @@ static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) tcg_gen_andi_i64(t, t, mask); tcg_gen_andi_i64(d, d, ~mask); tcg_gen_or_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift) @@ -3710,9 +3609,6 @@ static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) tcg_gen_shri_vec(vece, t, a, sh); tcg_gen_and_vec(vece, d, d, m); tcg_gen_or_vec(vece, d, d, t); - - tcg_temp_free_vec(t); - tcg_temp_free_vec(m); } void gen_gvec_sri(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs, @@ -3769,7 +3665,6 @@ static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) tcg_gen_andi_i64(t, t, mask); tcg_gen_andi_i64(d, d, ~mask); tcg_gen_or_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) @@ -3781,7 +3676,6 @@ static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, 
int64_t shift) tcg_gen_andi_i64(t, t, mask); tcg_gen_andi_i64(d, d, ~mask); tcg_gen_or_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift) @@ -3803,9 +3697,6 @@ static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh)); tcg_gen_and_vec(vece, d, d, m); tcg_gen_or_vec(vece, d, d, t); - - tcg_temp_free_vec(t); - tcg_temp_free_vec(m); } void gen_gvec_sli(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs, @@ -4046,11 +3937,6 @@ void gen_ushl_i32(TCGv_i32 dst, TCGv_i32 src, TCGv_i32 shift) tcg_gen_shr_i32(rval, src, rsh); tcg_gen_movcond_i32(TCG_COND_LTU, dst, lsh, max, lval, zero); tcg_gen_movcond_i32(TCG_COND_LTU, dst, rsh, max, rval, dst); - - tcg_temp_free_i32(lval); - tcg_temp_free_i32(rval); - tcg_temp_free_i32(lsh); - tcg_temp_free_i32(rsh); } void gen_ushl_i64(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift) @@ -4073,11 +3959,6 @@ void gen_ushl_i64(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift) tcg_gen_shr_i64(rval, src, rsh); tcg_gen_movcond_i64(TCG_COND_LTU, dst, lsh, max, lval, zero); tcg_gen_movcond_i64(TCG_COND_LTU, dst, rsh, max, rval, dst); - - tcg_temp_free_i64(lval); - tcg_temp_free_i64(rval); - tcg_temp_free_i64(lsh); - tcg_temp_free_i64(rsh); } static void gen_ushl_vec(unsigned vece, TCGv_vec dst, @@ -4097,7 +3978,6 @@ static void gen_ushl_vec(unsigned vece, TCGv_vec dst, tcg_gen_dupi_vec(vece, msk, 0xff); tcg_gen_and_vec(vece, lsh, shift, msk); tcg_gen_and_vec(vece, rsh, rsh, msk); - tcg_temp_free_vec(msk); } /* @@ -4130,12 +4010,6 @@ static void gen_ushl_vec(unsigned vece, TCGv_vec dst, tcg_gen_and_vec(vece, rval, rval, rsh); } tcg_gen_or_vec(vece, dst, lval, rval); - - tcg_temp_free_vec(max); - tcg_temp_free_vec(lval); - tcg_temp_free_vec(rval); - tcg_temp_free_vec(lsh); - tcg_temp_free_vec(rsh); } void gen_gvec_ushl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, @@ -4187,11 +4061,6 @@ void gen_sshl_i32(TCGv_i32 dst, TCGv_i32 src, TCGv_i32 shift) tcg_gen_sar_i32(rval, src, rsh); tcg_gen_movcond_i32(TCG_COND_LEU, lval, lsh, max, lval, zero); tcg_gen_movcond_i32(TCG_COND_LT, dst, lsh, zero, rval, lval); - - tcg_temp_free_i32(lval); - tcg_temp_free_i32(rval); - tcg_temp_free_i32(lsh); - tcg_temp_free_i32(rsh); } void gen_sshl_i64(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift) @@ -4215,11 +4084,6 @@ void gen_sshl_i64(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift) tcg_gen_sar_i64(rval, src, rsh); tcg_gen_movcond_i64(TCG_COND_LEU, lval, lsh, max, lval, zero); tcg_gen_movcond_i64(TCG_COND_LT, dst, lsh, zero, rval, lval); - - tcg_temp_free_i64(lval); - tcg_temp_free_i64(rval); - tcg_temp_free_i64(lsh); - tcg_temp_free_i64(rsh); } static void gen_sshl_vec(unsigned vece, TCGv_vec dst, @@ -4264,12 +4128,6 @@ static void gen_sshl_vec(unsigned vece, TCGv_vec dst, tcg_gen_dupi_vec(vece, tmp, 0x80); tcg_gen_cmpsel_vec(TCG_COND_LT, vece, dst, lsh, tmp, lval, rval); } - - tcg_temp_free_vec(lval); - tcg_temp_free_vec(rval); - tcg_temp_free_vec(lsh); - tcg_temp_free_vec(rsh); - tcg_temp_free_vec(tmp); } void gen_gvec_sshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, @@ -4308,7 +4166,6 @@ static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat, tcg_gen_usadd_vec(vece, t, a, b); tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t); tcg_gen_or_vec(vece, sat, sat, x); - tcg_temp_free_vec(x); } void gen_gvec_uqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, @@ -4351,7 +4208,6 @@ static void gen_sqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat, tcg_gen_ssadd_vec(vece, t, 
a, b); tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t); tcg_gen_or_vec(vece, sat, sat, x); - tcg_temp_free_vec(x); } void gen_gvec_sqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, @@ -4394,7 +4250,6 @@ static void gen_uqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat, tcg_gen_ussub_vec(vece, t, a, b); tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t); tcg_gen_or_vec(vece, sat, sat, x); - tcg_temp_free_vec(x); } void gen_gvec_uqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, @@ -4437,7 +4292,6 @@ static void gen_sqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat, tcg_gen_sssub_vec(vece, t, a, b); tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t); tcg_gen_or_vec(vece, sat, sat, x); - tcg_temp_free_vec(x); } void gen_gvec_sqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, @@ -4479,7 +4333,6 @@ static void gen_sabd_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) tcg_gen_sub_i32(t, a, b); tcg_gen_sub_i32(d, b, a); tcg_gen_movcond_i32(TCG_COND_LT, d, a, b, d, t); - tcg_temp_free_i32(t); } static void gen_sabd_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) @@ -4489,7 +4342,6 @@ static void gen_sabd_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) tcg_gen_sub_i64(t, a, b); tcg_gen_sub_i64(d, b, a); tcg_gen_movcond_i64(TCG_COND_LT, d, a, b, d, t); - tcg_temp_free_i64(t); } static void gen_sabd_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) @@ -4499,7 +4351,6 @@ static void gen_sabd_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) tcg_gen_smin_vec(vece, t, a, b); tcg_gen_smax_vec(vece, d, a, b); tcg_gen_sub_vec(vece, d, d, t); - tcg_temp_free_vec(t); } void gen_gvec_sabd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, @@ -4539,7 +4390,6 @@ static void gen_uabd_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) tcg_gen_sub_i32(t, a, b); tcg_gen_sub_i32(d, b, a); tcg_gen_movcond_i32(TCG_COND_LTU, d, a, b, d, t); - tcg_temp_free_i32(t); } static void gen_uabd_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) @@ -4549,7 +4399,6 @@ static void gen_uabd_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) tcg_gen_sub_i64(t, a, b); tcg_gen_sub_i64(d, b, a); tcg_gen_movcond_i64(TCG_COND_LTU, d, a, b, d, t); - tcg_temp_free_i64(t); } static void gen_uabd_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) @@ -4559,7 +4408,6 @@ static void gen_uabd_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) tcg_gen_umin_vec(vece, t, a, b); tcg_gen_umax_vec(vece, d, a, b); tcg_gen_sub_vec(vece, d, d, t); - tcg_temp_free_vec(t); } void gen_gvec_uabd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, @@ -4597,7 +4445,6 @@ static void gen_saba_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) TCGv_i32 t = tcg_temp_new_i32(); gen_sabd_i32(t, a, b); tcg_gen_add_i32(d, d, t); - tcg_temp_free_i32(t); } static void gen_saba_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) @@ -4605,7 +4452,6 @@ static void gen_saba_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) TCGv_i64 t = tcg_temp_new_i64(); gen_sabd_i64(t, a, b); tcg_gen_add_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_saba_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) @@ -4613,7 +4459,6 @@ static void gen_saba_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) TCGv_vec t = tcg_temp_new_vec_matching(d); gen_sabd_vec(vece, t, a, b); tcg_gen_add_vec(vece, d, d, t); - tcg_temp_free_vec(t); } void gen_gvec_saba(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, @@ -4656,7 +4501,6 @@ static void gen_uaba_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) TCGv_i32 t = tcg_temp_new_i32(); gen_uabd_i32(t, a, b); tcg_gen_add_i32(d, d, t); - tcg_temp_free_i32(t); } static void gen_uaba_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) @@ 
-4664,7 +4508,6 @@ static void gen_uaba_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) TCGv_i64 t = tcg_temp_new_i64(); gen_uabd_i64(t, a, b); tcg_gen_add_i64(d, d, t); - tcg_temp_free_i64(t); } static void gen_uaba_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) @@ -4672,7 +4515,6 @@ static void gen_uaba_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) TCGv_vec t = tcg_temp_new_vec_matching(d); gen_uabd_vec(vece, t, a, b); tcg_gen_add_vec(vece, d, d, t); - tcg_temp_free_vec(t); } void gen_gvec_uaba(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, @@ -4779,7 +4621,6 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64, t = load_cpu_offset(offsetoflow32(CPUARMState, cp15.hstr_el2)); tcg_gen_andi_i32(t, t, 1u << maskbit); tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, over.label); - tcg_temp_free_i32(t); gen_exception_insn(s, 0, EXCP_UDEF, syndrome); set_disas_label(s, over); @@ -4844,7 +4685,7 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64, case 0: break; case ARM_CP_NOP: - goto exit; + return; case ARM_CP_WFI: if (isread) { unallocated_encoding(s); @@ -4852,7 +4693,7 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64, gen_update_pc(s, curr_insn_len(s)); s->base.is_jmp = DISAS_WFI; } - goto exit; + return; default: g_assert_not_reached(); } @@ -4883,7 +4724,6 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64, store_reg(s, rt, tmp); tmp = tcg_temp_new_i32(); tcg_gen_extrh_i64_i32(tmp, tmp64); - tcg_temp_free_i64(tmp64); store_reg(s, rt2, tmp); } else { TCGv_i32 tmp; @@ -4903,7 +4743,6 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64, * the condition codes from the high 4 bits of the value */ gen_set_nzcv(tmp); - tcg_temp_free_i32(tmp); } else { store_reg(s, rt, tmp); } @@ -4912,7 +4751,7 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64, /* Write */ if (ri->type & ARM_CP_CONST) { /* If not forbidden by access permissions, treat as WI */ - goto exit; + return; } if (is64) { @@ -4921,8 +4760,6 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64, tmplo = load_reg(s, rt); tmphi = load_reg(s, rt2); tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi); - tcg_temp_free_i32(tmplo); - tcg_temp_free_i32(tmphi); if (ri->writefn) { if (!tcg_ri) { tcg_ri = gen_lookup_cp_reg(key); @@ -4931,7 +4768,6 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64, } else { tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset); } - tcg_temp_free_i64(tmp64); } else { TCGv_i32 tmp = load_reg(s, rt); if (ri->writefn) { @@ -4939,7 +4775,6 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64, tcg_ri = gen_lookup_cp_reg(key); } gen_helper_set_cp_reg(cpu_env, tcg_ri, tmp); - tcg_temp_free_i32(tmp); } else { store_cpu_offset(tmp, ri->fieldoffset, 4); } @@ -4966,11 +4801,6 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64, if (need_exit_tb) { gen_lookup_tb(s); } - - exit: - if (tcg_ri) { - tcg_temp_free_ptr(tcg_ri); - } } /* Decode XScale DSP or iWMMXt insn (in the copro space, cp=0 or 1) */ @@ -5015,10 +4845,7 @@ static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh) tmph = load_reg(s, rhigh); tmp = tcg_temp_new_i64(); tcg_gen_concat_i32_i64(tmp, tmpl, tmph); - tcg_temp_free_i32(tmpl); - tcg_temp_free_i32(tmph); tcg_gen_add_i64(val, val, tmp); - tcg_temp_free_i64(tmp); } /* Set N and Z flags from hi|lo. 
*/ @@ -5057,15 +4884,12 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2, TCGv taddr = gen_aa32_addr(s, addr, opc); tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc); - tcg_temp_free(taddr); tcg_gen_mov_i64(cpu_exclusive_val, t64); if (s->be_data == MO_BE) { tcg_gen_extr_i64_i32(tmp2, tmp, t64); } else { tcg_gen_extr_i64_i32(tmp, tmp2, t64); } - tcg_temp_free_i64(t64); - store_reg(s, rt2, tmp2); } else { gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc); @@ -5102,7 +4926,6 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, extaddr = tcg_temp_new_i64(); tcg_gen_extu_i32_i64(extaddr, addr); tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label); - tcg_temp_free_i64(extaddr); taddr = gen_aa32_addr(s, addr, opc); t0 = tcg_temp_new_i32(); @@ -5127,27 +4950,19 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, } else { tcg_gen_concat_i32_i64(n64, t1, t2); } - tcg_temp_free_i32(t2); tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64, get_mem_index(s), opc); - tcg_temp_free_i64(n64); tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val); tcg_gen_extrl_i64_i32(t0, o64); - - tcg_temp_free_i64(o64); } else { t2 = tcg_temp_new_i32(); tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val); tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc); tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2); - tcg_temp_free_i32(t2); } - tcg_temp_free_i32(t1); - tcg_temp_free(taddr); tcg_gen_mov_i32(cpu_R[rd], t0); - tcg_temp_free_i32(t0); tcg_gen_br(done_label); gen_set_label(fail_label); @@ -5249,11 +5064,9 @@ static void gen_srs(DisasContext *s, tcg_gen_addi_i32(addr, addr, offset); tmp = load_reg(s, 14); gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN); - tcg_temp_free_i32(tmp); tmp = load_cpu_field(spsr); tcg_gen_addi_i32(addr, addr, 4); gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN); - tcg_temp_free_i32(tmp); if (writeback) { switch (amode) { case 0: @@ -5274,7 +5087,6 @@ static void gen_srs(DisasContext *s, tcg_gen_addi_i32(addr, addr, offset); gen_helper_set_r13_banked(cpu_env, tcg_constant_i32(mode), addr); } - tcg_temp_free_i32(addr); s->base.is_jmp = DISAS_UPDATE_EXIT; } @@ -5466,7 +5278,6 @@ static bool store_reg_kind(DisasContext *s, int rd, { switch (kind) { case STREG_NONE: - tcg_temp_free_i32(val); return true; case STREG_NORMAL: /* See ALUWritePC: Interworking only from a32 mode. 
*/ @@ -5503,7 +5314,6 @@ static bool op_s_rrr_shi(DisasContext *s, arg_s_rrr_shi *a, tmp1 = load_reg(s, a->rn); gen(tmp1, tmp1, tmp2); - tcg_temp_free_i32(tmp2); if (logic_cc) { gen_logic_CC(tmp1); @@ -5545,7 +5355,6 @@ static bool op_s_rrr_shr(DisasContext *s, arg_s_rrr_shr *a, tmp1 = load_reg(s, a->rn); gen(tmp1, tmp1, tmp2); - tcg_temp_free_i32(tmp2); if (logic_cc) { gen_logic_CC(tmp1); @@ -5804,7 +5613,6 @@ static bool do_mve_shl_ri(DisasContext *s, arg_mve_shl_ri *a, tcg_gen_extrh_i64_i32(rdahi, rda); store_reg(s, a->rdalo, rdalo); store_reg(s, a->rdahi, rdahi); - tcg_temp_free_i64(rda); return true; } @@ -5888,7 +5696,6 @@ static bool do_mve_shl_rr(DisasContext *s, arg_mve_shl_rr *a, WideShiftFn *fn) tcg_gen_extrh_i64_i32(rdahi, rda); store_reg(s, a->rdalo, rdalo); store_reg(s, a->rdahi, rdahi); - tcg_temp_free_i64(rda); return true; } @@ -6016,11 +5823,9 @@ static bool op_mla(DisasContext *s, arg_s_rrrr *a, bool add) t1 = load_reg(s, a->rn); t2 = load_reg(s, a->rm); tcg_gen_mul_i32(t1, t1, t2); - tcg_temp_free_i32(t2); if (add) { t2 = load_reg(s, a->ra); tcg_gen_add_i32(t1, t1, t2); - tcg_temp_free_i32(t2); } if (a->s) { gen_logic_CC(t1); @@ -6049,10 +5854,8 @@ static bool trans_MLS(DisasContext *s, arg_MLS *a) t1 = load_reg(s, a->rn); t2 = load_reg(s, a->rm); tcg_gen_mul_i32(t1, t1, t2); - tcg_temp_free_i32(t2); t2 = load_reg(s, a->ra); tcg_gen_sub_i32(t1, t2, t1); - tcg_temp_free_i32(t2); store_reg(s, a->rd, t1); return true; } @@ -6072,8 +5875,6 @@ static bool op_mlal(DisasContext *s, arg_s_rrrr *a, bool uns, bool add) t2 = load_reg(s, a->ra); t3 = load_reg(s, a->rd); tcg_gen_add2_i32(t0, t1, t0, t1, t2, t3); - tcg_temp_free_i32(t2); - tcg_temp_free_i32(t3); } if (a->s) { gen_logicq_cc(t0, t1); @@ -6119,10 +5920,8 @@ static bool trans_UMAAL(DisasContext *s, arg_UMAAL *a) zero = tcg_constant_i32(0); t2 = load_reg(s, a->ra); tcg_gen_add2_i32(t0, t1, t0, t1, t2, zero); - tcg_temp_free_i32(t2); t2 = load_reg(s, a->rd); tcg_gen_add2_i32(t0, t1, t0, t1, t2, zero); - tcg_temp_free_i32(t2); store_reg(s, a->ra, t0); store_reg(s, a->rd, t1); return true; @@ -6152,7 +5951,6 @@ static bool op_qaddsub(DisasContext *s, arg_rrr *a, bool add, bool doub) } else { gen_helper_sub_saturate(t0, cpu_env, t0, t1); } - tcg_temp_free_i32(t1); store_reg(s, a->rd, t0); return true; } @@ -6188,7 +5986,6 @@ static bool op_smlaxxx(DisasContext *s, arg_rrrr *a, t0 = load_reg(s, a->rn); t1 = load_reg(s, a->rm); gen_mulxy(t0, t1, nt, mt); - tcg_temp_free_i32(t1); switch (add_long) { case 0: @@ -6197,7 +5994,6 @@ static bool op_smlaxxx(DisasContext *s, arg_rrrr *a, case 1: t1 = load_reg(s, a->ra); gen_helper_add_setq(t0, cpu_env, t0, t1); - tcg_temp_free_i32(t1); store_reg(s, a->rd, t0); break; case 2: @@ -6207,8 +6003,6 @@ static bool op_smlaxxx(DisasContext *s, arg_rrrr *a, t1 = tcg_temp_new_i32(); tcg_gen_sari_i32(t1, t0, 31); tcg_gen_add2_i32(tl, th, tl, th, t0, t1); - tcg_temp_free_i32(t0); - tcg_temp_free_i32(t1); store_reg(s, a->ra, tl); store_reg(s, a->rd, th); break; @@ -6261,11 +6055,9 @@ static bool op_smlawx(DisasContext *s, arg_rrrr *a, bool add, bool mt) tcg_gen_shli_i32(t1, t1, 16); } tcg_gen_muls2_i32(t0, t1, t0, t1); - tcg_temp_free_i32(t0); if (add) { t0 = load_reg(s, a->ra); gen_helper_add_setq(t1, cpu_env, t1, t0); - tcg_temp_free_i32(t0); } store_reg(s, a->rd, t1); return true; @@ -6398,7 +6190,6 @@ static bool op_crc32(DisasContext *s, arg_rrr *a, bool c, MemOp sz) } else { gen_helper_crc32(t1, t1, t2, t3); } - tcg_temp_free_i32(t2); store_reg(s, a->rd, t1); return true; } @@ -6497,7 +6288,6 
@@ static bool trans_MSR_v7m(DisasContext *s, arg_MSR_v7m *a) addr = tcg_constant_i32((a->mask << 10) | a->sysm); reg = load_reg(s, a->rn); gen_helper_v7m_msr(cpu_env, addr, reg); - tcg_temp_free_i32(reg); /* If we wrote to CONTROL, the EL might have changed */ gen_rebuild_hflags(s, true); gen_lookup_tb(s); @@ -6707,7 +6497,6 @@ static bool trans_TT(DisasContext *s, arg_TT *a) addr = load_reg(s, a->rn); tmp = tcg_temp_new_i32(); gen_helper_v7m_tt(tmp, cpu_env, addr, tcg_constant_i32((a->A << 1) | a->T)); - tcg_temp_free_i32(addr); store_reg(s, a->rd, tmp); return true; } @@ -6748,7 +6537,6 @@ static TCGv_i32 op_addr_rr_pre(DisasContext *s, arg_ldst_rr *a) } else { tcg_gen_sub_i32(addr, addr, ofs); } - tcg_temp_free_i32(ofs); } return addr; } @@ -6764,9 +6552,7 @@ static void op_addr_rr_post(DisasContext *s, arg_ldst_rr *a, } else { tcg_gen_sub_i32(addr, addr, ofs); } - tcg_temp_free_i32(ofs); } else if (!a->w) { - tcg_temp_free_i32(addr); return; } tcg_gen_addi_i32(addr, addr, address_offset); @@ -6813,7 +6599,6 @@ static bool op_store_rr(DisasContext *s, arg_ldst_rr *a, tmp = load_reg(s, a->rt); gen_aa32_st_i32(s, tmp, addr, mem_idx, mop); disas_set_da_iss(s, mop, issinfo); - tcg_temp_free_i32(tmp); op_addr_rr_post(s, a, addr, 0); return true; @@ -6864,13 +6649,11 @@ static bool trans_STRD_rr(DisasContext *s, arg_ldst_rr *a) tmp = load_reg(s, a->rt); gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN); - tcg_temp_free_i32(tmp); tcg_gen_addi_i32(addr, addr, 4); tmp = load_reg(s, a->rt + 1); gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN); - tcg_temp_free_i32(tmp); op_addr_rr_post(s, a, addr, -4); return true; @@ -6899,7 +6682,6 @@ static TCGv_i32 op_addr_ri_pre(DisasContext *s, arg_ldst_ri *a) TCGv_i32 newsp = tcg_temp_new_i32(); tcg_gen_addi_i32(newsp, cpu_R[13], ofs); gen_helper_v8m_stackcheck(cpu_env, newsp); - tcg_temp_free_i32(newsp); } else { gen_helper_v8m_stackcheck(cpu_env, cpu_R[13]); } @@ -6918,7 +6700,6 @@ static void op_addr_ri_post(DisasContext *s, arg_ldst_ri *a, address_offset -= a->imm; } } else if (!a->w) { - tcg_temp_free_i32(addr); return; } tcg_gen_addi_i32(addr, addr, address_offset); @@ -6965,7 +6746,6 @@ static bool op_store_ri(DisasContext *s, arg_ldst_ri *a, tmp = load_reg(s, a->rt); gen_aa32_st_i32(s, tmp, addr, mem_idx, mop); disas_set_da_iss(s, mop, issinfo); - tcg_temp_free_i32(tmp); op_addr_ri_post(s, a, addr, 0); return true; @@ -7019,13 +6799,11 @@ static bool op_strd_ri(DisasContext *s, arg_ldst_ri *a, int rt2) tmp = load_reg(s, a->rt); gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN); - tcg_temp_free_i32(tmp); tcg_gen_addi_i32(addr, addr, 4); tmp = load_reg(s, rt2); gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN); - tcg_temp_free_i32(tmp); op_addr_ri_post(s, a, addr, -4); return true; @@ -7090,11 +6868,9 @@ static bool op_swp(DisasContext *s, arg_SWP *a, MemOp opc) opc |= s->be_data; addr = load_reg(s, a->rn); taddr = gen_aa32_addr(s, addr, opc); - tcg_temp_free_i32(addr); tmp = load_reg(s, a->rt2); tcg_gen_atomic_xchg_i32(tmp, taddr, tmp, get_mem_index(s), opc); - tcg_temp_free(taddr); store_reg(s, a->rt, tmp); return true; @@ -7141,7 +6917,6 @@ static bool op_strex(DisasContext *s, arg_STREX *a, MemOp mop, bool rel) tcg_gen_addi_i32(addr, addr, a->imm); gen_store_exclusive(s, a->rd, a->rt, a->rt2, addr, mop); - tcg_temp_free_i32(addr); return true; } @@ -7253,8 +7028,6 @@ static bool op_stl(DisasContext *s, arg_STL *a, MemOp mop) gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop | MO_ALIGN); disas_set_da_iss(s, mop, 
a->rt | ISSIsAcqRel | ISSIsWrite); - tcg_temp_free_i32(tmp); - tcg_temp_free_i32(addr); return true; } @@ -7294,7 +7067,6 @@ static bool op_ldrex(DisasContext *s, arg_LDREX *a, MemOp mop, bool acq) tcg_gen_addi_i32(addr, addr, a->imm); gen_load_exclusive(s, a->rt, a->rt2, addr, mop); - tcg_temp_free_i32(addr); if (acq) { tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ); @@ -7408,7 +7180,6 @@ static bool op_lda(DisasContext *s, arg_LDA *a, MemOp mop) tmp = tcg_temp_new_i32(); gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop | MO_ALIGN); disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel); - tcg_temp_free_i32(addr); store_reg(s, a->rt, tmp); tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL); @@ -7445,11 +7216,9 @@ static bool trans_USADA8(DisasContext *s, arg_USADA8 *a) t1 = load_reg(s, a->rn); t2 = load_reg(s, a->rm); gen_helper_usad8(t1, t1, t2); - tcg_temp_free_i32(t2); if (a->ra != 15) { t2 = load_reg(s, a->ra); tcg_gen_add_i32(t1, t1, t2); - tcg_temp_free_i32(t2); } store_reg(s, a->rd, t1); return true; @@ -7516,7 +7285,6 @@ static bool trans_BFCI(DisasContext *s, arg_BFCI *a) if (width != 32) { TCGv_i32 tmp2 = load_reg(s, a->rd); tcg_gen_deposit_i32(tmp, tmp2, tmp, lsb, width); - tcg_temp_free_i32(tmp2); } store_reg(s, a->rd, tmp); return true; @@ -7548,7 +7316,6 @@ static bool op_par_addsub(DisasContext *s, arg_rrr *a, gen(t0, t0, t1); - tcg_temp_free_i32(t1); store_reg(s, a->rd, t0); return true; } @@ -7573,8 +7340,6 @@ static bool op_par_addsub_ge(DisasContext *s, arg_rrr *a, tcg_gen_addi_ptr(ge, cpu_env, offsetof(CPUARMState, GE)); gen(t0, t0, t1, ge); - tcg_temp_free_ptr(ge); - tcg_temp_free_i32(t1); store_reg(s, a->rd, t0); return true; } @@ -7665,7 +7430,6 @@ static bool trans_PKH(DisasContext *s, arg_PKH *a) tcg_gen_shli_i32(tm, tm, shift); tcg_gen_deposit_i32(tn, tm, tn, 0, 16); } - tcg_temp_free_i32(tm); store_reg(s, a->rd, tn); return true; } @@ -7740,7 +7504,6 @@ static bool op_xta(DisasContext *s, arg_rrr_rot *a, if (a->rn != 15) { TCGv_i32 tmp2 = load_reg(s, a->rn); gen_add(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp2); } store_reg(s, a->rd, tmp); return true; @@ -7797,8 +7560,6 @@ static bool trans_SEL(DisasContext *s, arg_rrr *a) t3 = tcg_temp_new_i32(); tcg_gen_ld_i32(t3, cpu_env, offsetof(CPUARMState, GE)); gen_helper_sel_flags(t1, t3, t1, t2); - tcg_temp_free_i32(t3); - tcg_temp_free_i32(t2); store_reg(s, a->rd, t1); return true; } @@ -7872,17 +7633,14 @@ static bool op_smlad(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub) * addition of Ra. */ tcg_gen_sub_i32(t1, t1, t2); - tcg_temp_free_i32(t2); if (a->ra != 15) { t2 = load_reg(s, a->ra); gen_helper_add_setq(t1, cpu_env, t1, t2); - tcg_temp_free_i32(t2); } } else if (a->ra == 15) { /* Single saturation-checking addition */ gen_helper_add_setq(t1, cpu_env, t1, t2); - tcg_temp_free_i32(t2); } else { /* * We need to add the products and Ra together and then @@ -7902,10 +7660,8 @@ static bool op_smlad(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub) load_reg_var(s, t2, a->ra); tcg_gen_ext_i32_i64(q64, t2); tcg_gen_add_i64(p64, p64, q64); - tcg_temp_free_i64(q64); tcg_gen_extr_i64_i32(t1, t2, p64); - tcg_temp_free_i64(p64); /* * t1 is the low half of the result which goes into Rd. 
* We have overflow and must set Q if the high half (t2) @@ -7917,8 +7673,6 @@ static bool op_smlad(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub) one = tcg_constant_i32(1); tcg_gen_movcond_i32(TCG_COND_NE, qf, t2, t3, one, qf); store_cpu_field(qf, QF); - tcg_temp_free_i32(t3); - tcg_temp_free_i32(t2); } store_reg(s, a->rd, t1); return true; @@ -7964,19 +7718,15 @@ static bool op_smlald(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub) l2 = tcg_temp_new_i64(); tcg_gen_ext_i32_i64(l1, t1); tcg_gen_ext_i32_i64(l2, t2); - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t2); if (sub) { tcg_gen_sub_i64(l1, l1, l2); } else { tcg_gen_add_i64(l1, l1, l2); } - tcg_temp_free_i64(l2); gen_addq(s, l1, a->ra, a->rd); gen_storeq_reg(s, a->ra, a->rd, l1); - tcg_temp_free_i64(l1); return true; } @@ -8026,7 +7776,6 @@ static bool op_smmla(DisasContext *s, arg_rrrr *a, bool round, bool sub) } else { tcg_gen_add_i32(t1, t1, t3); } - tcg_temp_free_i32(t3); } if (round) { /* @@ -8036,7 +7785,6 @@ static bool op_smmla(DisasContext *s, arg_rrrr *a, bool round, bool sub) tcg_gen_shri_i32(t2, t2, 31); tcg_gen_add_i32(t1, t1, t2); } - tcg_temp_free_i32(t2); store_reg(s, a->rd, t1); return true; } @@ -8078,7 +7826,6 @@ static bool op_div(DisasContext *s, arg_rrr *a, bool u) } else { gen_helper_sdiv(t1, cpu_env, t1, t2); } - tcg_temp_free_i32(t2); store_reg(s, a->rd, t1); return true; } @@ -8150,8 +7897,6 @@ static void op_addr_block_post(DisasContext *s, arg_ldst_block *a, tcg_gen_addi_i32(addr, addr, -((n - 1) * 4)); } store_reg(s, a->rn, addr); - } else { - tcg_temp_free_i32(addr); } } @@ -8194,7 +7939,6 @@ static bool op_stm(DisasContext *s, arg_ldst_block *a, int min_n) tmp = load_reg(s, i); } gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN); - tcg_temp_free_i32(tmp); /* No need to add after the last transfer. 
*/ if (++j != n) { @@ -8274,7 +8018,6 @@ static bool do_ldm(DisasContext *s, arg_ldst_block *a, int min_n) gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | MO_ALIGN); if (user) { gen_helper_set_user_reg(cpu_env, tcg_constant_i32(i), tmp); - tcg_temp_free_i32(tmp); } else if (i == a->rn) { loaded_var = tmp; loaded_base = true; @@ -8304,7 +8047,6 @@ static bool do_ldm(DisasContext *s, arg_ldst_block *a, int min_n) gen_io_start(); } gen_helper_cpsr_write_eret(cpu_env, tmp); - tcg_temp_free_i32(tmp); /* Must exit loop to check un-masked IRQs */ s->base.is_jmp = DISAS_EXIT; } @@ -8670,7 +8412,6 @@ static bool trans_LE(DisasContext *s, arg_LE *a) DisasLabel skipexc = gen_disas_label(s); tmp = load_cpu_field(v7m.ltpsize); tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 4, skipexc.label); - tcg_temp_free_i32(tmp); gen_exception_insn(s, 0, EXCP_INVSTATE, syn_uncategorized()); set_disas_label(s, skipexc); } @@ -8700,12 +8441,10 @@ static bool trans_LE(DisasContext *s, arg_LE *a) TCGv_i32 ltpsize = load_cpu_field(v7m.ltpsize); tcg_gen_sub_i32(decr, tcg_constant_i32(4), ltpsize); tcg_gen_shl_i32(decr, tcg_constant_i32(1), decr); - tcg_temp_free_i32(ltpsize); tcg_gen_brcond_i32(TCG_COND_LEU, cpu_R[14], decr, loopend.label); tcg_gen_sub_i32(cpu_R[14], cpu_R[14], decr); - tcg_temp_free_i32(decr); } /* Jump back to the loop start */ gen_jmp(s, jmp_diff(s, -a->imm)); @@ -8769,8 +8508,6 @@ static bool trans_VCTP(DisasContext *s, arg_VCTP *a) masklen, tcg_constant_i32(1 << (4 - a->size)), rn_shifted, tcg_constant_i32(16)); gen_helper_mve_vctp(cpu_env, masklen); - tcg_temp_free_i32(masklen); - tcg_temp_free_i32(rn_shifted); /* This insn updates predication bits */ s->base.is_jmp = DISAS_UPDATE_NOCHAIN; mve_update_eci(s); @@ -8793,7 +8530,6 @@ static bool op_tbranch(DisasContext *s, arg_tbranch *a, bool half) tcg_gen_add_i32(tmp, tmp, tmp); gen_pc_plus_diff(s, addr, jmp_diff(s, 0)); tcg_gen_add_i32(tmp, tmp, addr); - tcg_temp_free_i32(addr); store_reg(s, 15, tmp); return true; } @@ -8815,7 +8551,6 @@ static bool trans_CBZ(DisasContext *s, arg_CBZ *a) arm_gen_condlabel(s); tcg_gen_brcondi_i32(a->nz ? TCG_COND_EQ : TCG_COND_NE, tmp, 0, s->condlabel.label); - tcg_temp_free_i32(tmp); gen_jmp(s, jmp_diff(s, a->imm)); return true; } @@ -8882,8 +8617,6 @@ static bool trans_RFE(DisasContext *s, arg_RFE *a) /* Base writeback. */ tcg_gen_addi_i32(addr, addr, post_offset[a->pu]); store_reg(s, a->rn, addr); - } else { - tcg_temp_free_i32(addr); } gen_rfe(s, t1, t2); return true; @@ -9125,11 +8858,8 @@ static bool trans_CSEL(DisasContext *s, arg_CSEL *a) arm_test_cc(&c, a->fcond); tcg_gen_movcond_i32(c.cond, rn, c.value, zero, rn, rm); - arm_free_cc(&c); store_reg(s, a->rd, rn); - tcg_temp_free_i32(rm); - return true; } @@ -9600,7 +9330,6 @@ static void arm_post_translate_insn(DisasContext *dc) gen_set_label(dc->condlabel.label); dc->condjmp = 0; } - translator_loop_temp_check(&dc->base); } static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h index 4001372acd..20f3ca7aca 100644 --- a/target/arm/tcg/translate.h +++ b/target/arm/tcg/translate.h @@ -149,15 +149,11 @@ typedef struct DisasContext { int c15_cpar; /* TCG op of the current insn_start. */ TCGOp *insn_start; -#define TMP_A64_MAX 16 - int tmp_a64_count; - TCGv_i64 tmp_a64[TMP_A64_MAX]; } DisasContext; typedef struct DisasCompare { TCGCond cond; TCGv_i32 value; - bool value_global; } DisasCompare; /* Share the TCG temporaries common between 32 and 64 bit modes. 
*/ @@ -304,7 +300,6 @@ static inline void gen_a64_update_pc(DisasContext *s, target_long diff) #endif void arm_test_cc(DisasCompare *cmp, int cc); -void arm_free_cc(DisasCompare *cmp); void arm_jump_cc(DisasCompare *cmp, TCGLabel *label); void arm_gen_test_cc(int cc, TCGLabel *label); MemOp pow2_align(unsigned i); @@ -336,7 +331,6 @@ static inline void set_pstate_bits(uint32_t bits) tcg_gen_ld_i32(p, cpu_env, offsetof(CPUARMState, pstate)); tcg_gen_ori_i32(p, p, bits); tcg_gen_st_i32(p, cpu_env, offsetof(CPUARMState, pstate)); - tcg_temp_free_i32(p); } /* Clear bits within PSTATE. */ @@ -349,7 +343,6 @@ static inline void clear_pstate_bits(uint32_t bits) tcg_gen_ld_i32(p, cpu_env, offsetof(CPUARMState, pstate)); tcg_gen_andi_i32(p, p, ~bits); tcg_gen_st_i32(p, cpu_env, offsetof(CPUARMState, pstate)); - tcg_temp_free_i32(p); } /* If the singlestep state is Active-not-pending, advance to Active-pending. */ diff --git a/target/avr/translate.c b/target/avr/translate.c index e40d8e9681..b9506a8d86 100644 --- a/target/avr/translate.c +++ b/target/avr/translate.c @@ -107,11 +107,6 @@ struct DisasContext { * tcg_gen_brcond_tl(skip_cond, skip_var0, skip_var1, skip_label); * } * - * if (free_skip_var0) { - * tcg_temp_free(skip_var0); - * free_skip_var0 = false; - * } - * * translate(ctx); * * if (skip_label) { @@ -121,7 +116,6 @@ struct DisasContext { TCGv skip_var0; TCGv skip_var1; TCGCond skip_cond; - bool free_skip_var0; }; void avr_cpu_tcg_init(void) @@ -227,10 +221,6 @@ static void gen_add_CHf(TCGv R, TCGv Rd, TCGv Rr) tcg_gen_shri_tl(cpu_Cf, t1, 7); /* Cf = t1(7) */ tcg_gen_shri_tl(cpu_Hf, t1, 3); /* Hf = t1(3) */ tcg_gen_andi_tl(cpu_Hf, cpu_Hf, 1); - - tcg_temp_free_i32(t3); - tcg_temp_free_i32(t2); - tcg_temp_free_i32(t1); } static void gen_add_Vf(TCGv R, TCGv Rd, TCGv Rr) @@ -245,9 +235,6 @@ static void gen_add_Vf(TCGv R, TCGv Rd, TCGv Rr) tcg_gen_andc_tl(t1, t1, t2); tcg_gen_shri_tl(cpu_Vf, t1, 7); /* Vf = t1(7) */ - - tcg_temp_free_i32(t2); - tcg_temp_free_i32(t1); } static void gen_sub_CHf(TCGv R, TCGv Rd, TCGv Rr) @@ -265,10 +252,6 @@ static void gen_sub_CHf(TCGv R, TCGv Rd, TCGv Rr) tcg_gen_shri_tl(cpu_Cf, t2, 7); /* Cf = t2(7) */ tcg_gen_shri_tl(cpu_Hf, t2, 3); /* Hf = t2(3) */ tcg_gen_andi_tl(cpu_Hf, cpu_Hf, 1); - - tcg_temp_free_i32(t3); - tcg_temp_free_i32(t2); - tcg_temp_free_i32(t1); } static void gen_sub_Vf(TCGv R, TCGv Rd, TCGv Rr) @@ -283,9 +266,6 @@ static void gen_sub_Vf(TCGv R, TCGv Rd, TCGv Rr) tcg_gen_and_tl(t1, t1, t2); tcg_gen_shri_tl(cpu_Vf, t1, 7); /* Vf = t1(7) */ - - tcg_temp_free_i32(t2); - tcg_temp_free_i32(t1); } static void gen_NSf(TCGv R) @@ -323,9 +303,6 @@ static bool trans_ADD(DisasContext *ctx, arg_ADD *a) /* update output registers */ tcg_gen_mov_tl(Rd, R); - - tcg_temp_free_i32(R); - return true; } @@ -350,9 +327,6 @@ static bool trans_ADC(DisasContext *ctx, arg_ADC *a) /* update output registers */ tcg_gen_mov_tl(Rd, R); - - tcg_temp_free_i32(R); - return true; } @@ -391,10 +365,6 @@ static bool trans_ADIW(DisasContext *ctx, arg_ADIW *a) /* update output registers */ tcg_gen_andi_tl(RdL, R, 0xff); tcg_gen_shri_tl(RdH, R, 8); - - tcg_temp_free_i32(Rd); - tcg_temp_free_i32(R); - return true; } @@ -419,9 +389,6 @@ static bool trans_SUB(DisasContext *ctx, arg_SUB *a) /* update output registers */ tcg_gen_mov_tl(Rd, R); - - tcg_temp_free_i32(R); - return true; } @@ -446,10 +413,6 @@ static bool trans_SUBI(DisasContext *ctx, arg_SUBI *a) /* update output registers */ tcg_gen_mov_tl(Rd, R); - - tcg_temp_free_i32(R); - tcg_temp_free_i32(Rr); - return true; } @@ 
-481,10 +444,6 @@ static bool trans_SBC(DisasContext *ctx, arg_SBC *a) /* update output registers */ tcg_gen_mov_tl(Rd, R); - - tcg_temp_free_i32(zero); - tcg_temp_free_i32(R); - return true; } @@ -515,11 +474,6 @@ static bool trans_SBCI(DisasContext *ctx, arg_SBCI *a) /* update output registers */ tcg_gen_mov_tl(Rd, R); - - tcg_temp_free_i32(zero); - tcg_temp_free_i32(R); - tcg_temp_free_i32(Rr); - return true; } @@ -558,10 +512,6 @@ static bool trans_SBIW(DisasContext *ctx, arg_SBIW *a) /* update output registers */ tcg_gen_andi_tl(RdL, R, 0xff); tcg_gen_shri_tl(RdH, R, 8); - - tcg_temp_free_i32(Rd); - tcg_temp_free_i32(R); - return true; } @@ -584,9 +534,6 @@ static bool trans_AND(DisasContext *ctx, arg_AND *a) /* update output registers */ tcg_gen_mov_tl(Rd, R); - - tcg_temp_free_i32(R); - return true; } @@ -626,9 +573,6 @@ static bool trans_OR(DisasContext *ctx, arg_OR *a) /* update output registers */ tcg_gen_mov_tl(Rd, R); - - tcg_temp_free_i32(R); - return true; } @@ -676,7 +620,6 @@ static bool trans_EOR(DisasContext *ctx, arg_EOR *a) static bool trans_COM(DisasContext *ctx, arg_COM *a) { TCGv Rd = cpu_r[a->rd]; - TCGv R = tcg_temp_new_i32(); tcg_gen_xori_tl(Rd, Rd, 0xff); @@ -684,9 +627,6 @@ static bool trans_COM(DisasContext *ctx, arg_COM *a) tcg_gen_movi_tl(cpu_Cf, 1); /* Cf = 1 */ tcg_gen_movi_tl(cpu_Vf, 0); /* Vf = 0 */ gen_ZNSf(Rd); - - tcg_temp_free_i32(R); - return true; } @@ -710,10 +650,6 @@ static bool trans_NEG(DisasContext *ctx, arg_NEG *a) /* update output registers */ tcg_gen_mov_tl(Rd, R); - - tcg_temp_free_i32(t0); - tcg_temp_free_i32(R); - return true; } @@ -783,9 +719,6 @@ static bool trans_MUL(DisasContext *ctx, arg_MUL *a) /* update status register */ tcg_gen_shri_tl(cpu_Cf, R, 15); /* Cf = R(15) */ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */ - - tcg_temp_free_i32(R); - return true; } @@ -816,11 +749,6 @@ static bool trans_MULS(DisasContext *ctx, arg_MULS *a) /* update status register */ tcg_gen_shri_tl(cpu_Cf, R, 15); /* Cf = R(15) */ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */ - - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t0); - tcg_temp_free_i32(R); - return true; } @@ -850,10 +778,6 @@ static bool trans_MULSU(DisasContext *ctx, arg_MULSU *a) /* update status register */ tcg_gen_shri_tl(cpu_Cf, R, 15); /* Cf = R(15) */ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_Zf, R, 0); /* Zf = R == 0 */ - - tcg_temp_free_i32(t0); - tcg_temp_free_i32(R); - return true; } @@ -884,10 +808,6 @@ static bool trans_FMUL(DisasContext *ctx, arg_FMUL *a) tcg_gen_andi_tl(R0, R, 0xff); tcg_gen_shri_tl(R1, R, 8); tcg_gen_andi_tl(R1, R1, 0xff); - - - tcg_temp_free_i32(R); - return true; } @@ -923,11 +843,6 @@ static bool trans_FMULS(DisasContext *ctx, arg_FMULS *a) tcg_gen_andi_tl(R0, R, 0xff); tcg_gen_shri_tl(R1, R, 8); tcg_gen_andi_tl(R1, R1, 0xff); - - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t0); - tcg_temp_free_i32(R); - return true; } @@ -961,10 +876,6 @@ static bool trans_FMULSU(DisasContext *ctx, arg_FMULSU *a) tcg_gen_andi_tl(R0, R, 0xff); tcg_gen_shri_tl(R1, R, 8); tcg_gen_andi_tl(R1, R1, 0xff); - - tcg_temp_free_i32(t0); - tcg_temp_free_i32(R); - return true; } @@ -1019,25 +930,17 @@ static void gen_jmp_z(DisasContext *ctx) static void gen_push_ret(DisasContext *ctx, int ret) { if (avr_feature(ctx->env, AVR_FEATURE_1_BYTE_PC)) { - TCGv t0 = tcg_const_i32((ret & 0x0000ff)); tcg_gen_qemu_st_tl(t0, cpu_sp, MMU_DATA_IDX, MO_UB); tcg_gen_subi_tl(cpu_sp, cpu_sp, 1); - - tcg_temp_free_i32(t0); } else if (avr_feature(ctx->env, 
AVR_FEATURE_2_BYTE_PC)) { - TCGv t0 = tcg_const_i32((ret & 0x00ffff)); tcg_gen_subi_tl(cpu_sp, cpu_sp, 1); tcg_gen_qemu_st_tl(t0, cpu_sp, MMU_DATA_IDX, MO_BEUW); tcg_gen_subi_tl(cpu_sp, cpu_sp, 1); - - tcg_temp_free_i32(t0); - } else if (avr_feature(ctx->env, AVR_FEATURE_3_BYTE_PC)) { - TCGv lo = tcg_const_i32((ret & 0x0000ff)); TCGv hi = tcg_const_i32((ret & 0xffff00) >> 8); @@ -1045,9 +948,6 @@ static void gen_push_ret(DisasContext *ctx, int ret) tcg_gen_subi_tl(cpu_sp, cpu_sp, 2); tcg_gen_qemu_st_tl(hi, cpu_sp, MMU_DATA_IDX, MO_BEUW); tcg_gen_subi_tl(cpu_sp, cpu_sp, 1); - - tcg_temp_free_i32(lo); - tcg_temp_free_i32(hi); } } @@ -1071,9 +971,6 @@ static void gen_pop_ret(DisasContext *ctx, TCGv ret) tcg_gen_qemu_ld_tl(lo, cpu_sp, MMU_DATA_IDX, MO_UB); tcg_gen_deposit_tl(ret, lo, hi, 8, 16); - - tcg_temp_free_i32(lo); - tcg_temp_free_i32(hi); } } @@ -1301,9 +1198,6 @@ static bool trans_CP(DisasContext *ctx, arg_CP *a) gen_sub_CHf(R, Rd, Rr); gen_sub_Vf(R, Rd, Rr); gen_ZNSf(R); - - tcg_temp_free_i32(R); - return true; } @@ -1332,10 +1226,6 @@ static bool trans_CPC(DisasContext *ctx, arg_CPC *a) * cleared otherwise. */ tcg_gen_movcond_tl(TCG_COND_EQ, cpu_Zf, R, zero, cpu_Zf, zero); - - tcg_temp_free_i32(zero); - tcg_temp_free_i32(R); - return true; } @@ -1358,10 +1248,6 @@ static bool trans_CPI(DisasContext *ctx, arg_CPI *a) gen_sub_CHf(R, Rd, Rr); gen_sub_Vf(R, Rd, Rr); gen_ZNSf(R); - - tcg_temp_free_i32(R); - tcg_temp_free_i32(Rr); - return true; } @@ -1375,7 +1261,6 @@ static bool trans_SBRC(DisasContext *ctx, arg_SBRC *a) ctx->skip_cond = TCG_COND_EQ; ctx->skip_var0 = tcg_temp_new(); - ctx->free_skip_var0 = true; tcg_gen_andi_tl(ctx->skip_var0, Rr, 1 << a->bit); return true; @@ -1391,7 +1276,6 @@ static bool trans_SBRS(DisasContext *ctx, arg_SBRS *a) ctx->skip_cond = TCG_COND_NE; ctx->skip_var0 = tcg_temp_new(); - ctx->free_skip_var0 = true; tcg_gen_andi_tl(ctx->skip_var0, Rr, 1 << a->bit); return true; @@ -1410,7 +1294,6 @@ static bool trans_SBIC(DisasContext *ctx, arg_SBIC *a) tcg_gen_andi_tl(temp, temp, 1 << a->bit); ctx->skip_cond = TCG_COND_EQ; ctx->skip_var0 = temp; - ctx->free_skip_var0 = true; return true; } @@ -1428,7 +1311,6 @@ static bool trans_SBIS(DisasContext *ctx, arg_SBIS *a) tcg_gen_andi_tl(temp, temp, 1 << a->bit); ctx->skip_cond = TCG_COND_NE; ctx->skip_var0 = temp; - ctx->free_skip_var0 = true; return true; } @@ -1697,9 +1579,6 @@ static bool trans_LDS(DisasContext *ctx, arg_LDS *a) tcg_gen_ori_tl(addr, addr, a->imm); gen_data_load(ctx, Rd, addr); - - tcg_temp_free_i32(addr); - return true; } @@ -1734,9 +1613,6 @@ static bool trans_LDX1(DisasContext *ctx, arg_LDX1 *a) TCGv addr = gen_get_xaddr(); gen_data_load(ctx, Rd, addr); - - tcg_temp_free_i32(addr); - return true; } @@ -1749,9 +1625,6 @@ static bool trans_LDX2(DisasContext *ctx, arg_LDX2 *a) tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */ gen_set_xaddr(addr); - - tcg_temp_free_i32(addr); - return true; } @@ -1763,9 +1636,6 @@ static bool trans_LDX3(DisasContext *ctx, arg_LDX3 *a) tcg_gen_subi_tl(addr, addr, 1); /* addr = addr - 1 */ gen_data_load(ctx, Rd, addr); gen_set_xaddr(addr); - - tcg_temp_free_i32(addr); - return true; } @@ -1803,9 +1673,6 @@ static bool trans_LDY2(DisasContext *ctx, arg_LDY2 *a) tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */ gen_set_yaddr(addr); - - tcg_temp_free_i32(addr); - return true; } @@ -1817,9 +1684,6 @@ static bool trans_LDY3(DisasContext *ctx, arg_LDY3 *a) tcg_gen_subi_tl(addr, addr, 1); /* addr = addr - 1 */ gen_data_load(ctx, Rd, addr); gen_set_yaddr(addr); - - 
tcg_temp_free_i32(addr); - return true; } @@ -1830,9 +1694,6 @@ static bool trans_LDDY(DisasContext *ctx, arg_LDDY *a) tcg_gen_addi_tl(addr, addr, a->imm); /* addr = addr + q */ gen_data_load(ctx, Rd, addr); - - tcg_temp_free_i32(addr); - return true; } @@ -1874,9 +1735,6 @@ static bool trans_LDZ2(DisasContext *ctx, arg_LDZ2 *a) tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */ gen_set_zaddr(addr); - - tcg_temp_free_i32(addr); - return true; } @@ -1889,9 +1747,6 @@ static bool trans_LDZ3(DisasContext *ctx, arg_LDZ3 *a) gen_data_load(ctx, Rd, addr); gen_set_zaddr(addr); - - tcg_temp_free_i32(addr); - return true; } @@ -1902,9 +1757,6 @@ static bool trans_LDDZ(DisasContext *ctx, arg_LDDZ *a) tcg_gen_addi_tl(addr, addr, a->imm); /* addr = addr + q */ gen_data_load(ctx, Rd, addr); - - tcg_temp_free_i32(addr); - return true; } @@ -1931,9 +1783,6 @@ static bool trans_STS(DisasContext *ctx, arg_STS *a) tcg_gen_shli_tl(addr, addr, 16); tcg_gen_ori_tl(addr, addr, a->imm); gen_data_store(ctx, Rd, addr); - - tcg_temp_free_i32(addr); - return true; } @@ -1964,9 +1813,6 @@ static bool trans_STX1(DisasContext *ctx, arg_STX1 *a) TCGv addr = gen_get_xaddr(); gen_data_store(ctx, Rd, addr); - - tcg_temp_free_i32(addr); - return true; } @@ -1978,9 +1824,6 @@ static bool trans_STX2(DisasContext *ctx, arg_STX2 *a) gen_data_store(ctx, Rd, addr); tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */ gen_set_xaddr(addr); - - tcg_temp_free_i32(addr); - return true; } @@ -1992,9 +1835,6 @@ static bool trans_STX3(DisasContext *ctx, arg_STX3 *a) tcg_gen_subi_tl(addr, addr, 1); /* addr = addr - 1 */ gen_data_store(ctx, Rd, addr); gen_set_xaddr(addr); - - tcg_temp_free_i32(addr); - return true; } @@ -2029,9 +1869,6 @@ static bool trans_STY2(DisasContext *ctx, arg_STY2 *a) gen_data_store(ctx, Rd, addr); tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */ gen_set_yaddr(addr); - - tcg_temp_free_i32(addr); - return true; } @@ -2043,9 +1880,6 @@ static bool trans_STY3(DisasContext *ctx, arg_STY3 *a) tcg_gen_subi_tl(addr, addr, 1); /* addr = addr - 1 */ gen_data_store(ctx, Rd, addr); gen_set_yaddr(addr); - - tcg_temp_free_i32(addr); - return true; } @@ -2056,9 +1890,6 @@ static bool trans_STDY(DisasContext *ctx, arg_STDY *a) tcg_gen_addi_tl(addr, addr, a->imm); /* addr = addr + q */ gen_data_store(ctx, Rd, addr); - - tcg_temp_free_i32(addr); - return true; } @@ -2094,9 +1925,6 @@ static bool trans_STZ2(DisasContext *ctx, arg_STZ2 *a) tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */ gen_set_zaddr(addr); - - tcg_temp_free_i32(addr); - return true; } @@ -2109,9 +1937,6 @@ static bool trans_STZ3(DisasContext *ctx, arg_STZ3 *a) gen_data_store(ctx, Rd, addr); gen_set_zaddr(addr); - - tcg_temp_free_i32(addr); - return true; } @@ -2122,9 +1947,6 @@ static bool trans_STDZ(DisasContext *ctx, arg_STDZ *a) tcg_gen_addi_tl(addr, addr, a->imm); /* addr = addr + q */ gen_data_store(ctx, Rd, addr); - - tcg_temp_free_i32(addr); - return true; } @@ -2156,9 +1978,6 @@ static bool trans_LPM1(DisasContext *ctx, arg_LPM1 *a) tcg_gen_shli_tl(addr, H, 8); /* addr = H:L */ tcg_gen_or_tl(addr, addr, L); tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */ - - tcg_temp_free_i32(addr); - return true; } @@ -2176,9 +1995,6 @@ static bool trans_LPM2(DisasContext *ctx, arg_LPM2 *a) tcg_gen_shli_tl(addr, H, 8); /* addr = H:L */ tcg_gen_or_tl(addr, addr, L); tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */ - - tcg_temp_free_i32(addr); - return true; } @@ -2200,9 +2016,6 @@ static bool trans_LPMX(DisasContext *ctx, 
arg_LPMX *a) tcg_gen_andi_tl(L, addr, 0xff); tcg_gen_shri_tl(addr, addr, 8); tcg_gen_andi_tl(H, addr, 0xff); - - tcg_temp_free_i32(addr); - return true; } @@ -2231,9 +2044,6 @@ static bool trans_ELPM1(DisasContext *ctx, arg_ELPM1 *a) TCGv addr = gen_get_zaddr(); tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */ - - tcg_temp_free_i32(addr); - return true; } @@ -2247,9 +2057,6 @@ static bool trans_ELPM2(DisasContext *ctx, arg_ELPM2 *a) TCGv addr = gen_get_zaddr(); tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */ - - tcg_temp_free_i32(addr); - return true; } @@ -2265,9 +2072,6 @@ static bool trans_ELPMX(DisasContext *ctx, arg_ELPMX *a) tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */ tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */ gen_set_zaddr(addr); - - tcg_temp_free_i32(addr); - return true; } @@ -2321,9 +2125,6 @@ static bool trans_IN(DisasContext *ctx, arg_IN *a) TCGv port = tcg_const_i32(a->imm); gen_helper_inb(Rd, cpu_env, port); - - tcg_temp_free_i32(port); - return true; } @@ -2337,9 +2138,6 @@ static bool trans_OUT(DisasContext *ctx, arg_OUT *a) TCGv port = tcg_const_i32(a->imm); gen_helper_outb(cpu_env, port, Rd); - - tcg_temp_free_i32(port); - return true; } @@ -2407,10 +2205,6 @@ static bool trans_XCH(DisasContext *ctx, arg_XCH *a) gen_data_load(ctx, t0, addr); gen_data_store(ctx, Rd, addr); tcg_gen_mov_tl(Rd, t0); - - tcg_temp_free_i32(t0); - tcg_temp_free_i32(addr); - return true; } @@ -2440,11 +2234,6 @@ static bool trans_LAS(DisasContext *ctx, arg_LAS *a) tcg_gen_or_tl(t1, t0, Rr); tcg_gen_mov_tl(Rr, t0); /* Rr = t0 */ gen_data_store(ctx, t1, addr); /* mem[addr] = t1 */ - - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t0); - tcg_temp_free_i32(addr); - return true; } @@ -2475,11 +2264,6 @@ static bool trans_LAC(DisasContext *ctx, arg_LAC *a) tcg_gen_andc_tl(t1, t0, Rr); /* t1 = t0 & (0xff - Rr) = t0 & ~Rr */ tcg_gen_mov_tl(Rr, t0); /* Rr = t0 */ gen_data_store(ctx, t1, addr); /* mem[addr] = t1 */ - - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t0); - tcg_temp_free_i32(addr); - return true; } @@ -2510,11 +2294,6 @@ static bool trans_LAT(DisasContext *ctx, arg_LAT *a) tcg_gen_xor_tl(t1, t0, Rd); tcg_gen_mov_tl(Rd, t0); /* Rd = t0 */ gen_data_store(ctx, t1, addr); /* mem[addr] = t1 */ - - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t0); - tcg_temp_free_i32(addr); - return true; } @@ -2573,9 +2352,6 @@ static bool trans_ROR(DisasContext *ctx, arg_ROR *a) /* update status register */ gen_rshift_ZNVSf(Rd); - - tcg_temp_free_i32(t0); - return true; } @@ -2600,9 +2376,6 @@ static bool trans_ASR(DisasContext *ctx, arg_ASR *a) /* update status register */ gen_rshift_ZNVSf(Rd); - - tcg_temp_free_i32(t0); - return true; } @@ -2620,10 +2393,6 @@ static bool trans_SWAP(DisasContext *ctx, arg_SWAP *a) tcg_gen_andi_tl(t1, Rd, 0xf0); tcg_gen_shri_tl(t1, t1, 4); tcg_gen_or_tl(Rd, t0, t1); - - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t0); - return true; } @@ -2639,10 +2408,6 @@ static bool trans_SBI(DisasContext *ctx, arg_SBI *a) gen_helper_inb(data, cpu_env, port); tcg_gen_ori_tl(data, data, 1 << a->bit); gen_helper_outb(cpu_env, port, data); - - tcg_temp_free_i32(port); - tcg_temp_free_i32(data); - return true; } @@ -2658,10 +2423,6 @@ static bool trans_CBI(DisasContext *ctx, arg_CBI *a) gen_helper_inb(data, cpu_env, port); tcg_gen_andi_tl(data, data, ~(1 << a->bit)); gen_helper_outb(cpu_env, port, data); - - tcg_temp_free_i32(data); - tcg_temp_free_i32(port); - return true; } @@ -2689,9 +2450,6 @@ static bool trans_BLD(DisasContext *ctx, arg_BLD 
*a) tcg_gen_andi_tl(Rd, Rd, ~(1u << a->bit)); /* clear bit */ tcg_gen_shli_tl(t1, cpu_Tf, a->bit); /* create mask */ tcg_gen_or_tl(Rd, Rd, t1); - - tcg_temp_free_i32(t1); - return true; } @@ -2886,10 +2644,6 @@ static bool canonicalize_skip(DisasContext *ctx) ctx->skip_cond = TCG_COND_NE; break; } - if (ctx->free_skip_var0) { - tcg_temp_free(ctx->skip_var0); - ctx->free_skip_var0 = false; - } ctx->skip_var0 = cpu_skip; return true; } @@ -2944,7 +2698,6 @@ static void avr_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) * This ensures that cpu_skip is non-zero after the label * if and only if the skipped insn itself sets a skip. */ - ctx->free_skip_var0 = true; ctx->skip_var0 = tcg_temp_new(); tcg_gen_mov_tl(ctx->skip_var0, cpu_skip); tcg_gen_movi_tl(cpu_skip, 0); @@ -2956,10 +2709,6 @@ static void avr_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) ctx->skip_var1, skip_label); ctx->skip_var1 = NULL; } - if (ctx->free_skip_var0) { - tcg_temp_free(ctx->skip_var0); - ctx->free_skip_var0 = false; - } ctx->skip_cond = TCG_COND_NEVER; ctx->skip_var0 = NULL; } diff --git a/target/cris/translate.c b/target/cris/translate.c index a959b27373..5172c9b9b2 100644 --- a/target/cris/translate.c +++ b/target/cris/translate.c @@ -178,7 +178,6 @@ static const int preg_sizes[] = { do { \ TCGv tc = tcg_const_tl(c); \ t_gen_mov_env_TN(member, tc); \ - tcg_temp_free(tc); \ } while (0) static inline void t_gen_mov_TN_preg(TCGv tn, int r) @@ -271,7 +270,6 @@ static inline void t_gen_raise_exception(uint32_t index) { TCGv_i32 tmp = tcg_const_i32(index); gen_helper_raise_exception(cpu_env, tmp); - tcg_temp_free_i32(tmp); } static void t_gen_lsl(TCGv d, TCGv a, TCGv b) @@ -286,8 +284,6 @@ static void t_gen_lsl(TCGv d, TCGv a, TCGv b) tcg_gen_sar_tl(t0, t0, t_31); tcg_gen_and_tl(t0, t0, d); tcg_gen_xor_tl(d, d, t0); - tcg_temp_free(t0); - tcg_temp_free(t_31); } static void t_gen_lsr(TCGv d, TCGv a, TCGv b) @@ -303,8 +299,6 @@ static void t_gen_lsr(TCGv d, TCGv a, TCGv b) tcg_gen_sar_tl(t0, t0, t_31); tcg_gen_and_tl(t0, t0, d); tcg_gen_xor_tl(d, d, t0); - tcg_temp_free(t0); - tcg_temp_free(t_31); } static void t_gen_asr(TCGv d, TCGv a, TCGv b) @@ -319,8 +313,6 @@ static void t_gen_asr(TCGv d, TCGv a, TCGv b) tcg_gen_sub_tl(t0, t_31, b); tcg_gen_sar_tl(t0, t0, t_31); tcg_gen_or_tl(d, d, t0); - tcg_temp_free(t0); - tcg_temp_free(t_31); } static void t_gen_cris_dstep(TCGv d, TCGv a, TCGv b) @@ -335,7 +327,6 @@ static void t_gen_cris_dstep(TCGv d, TCGv a, TCGv b) tcg_gen_shli_tl(d, a, 1); tcg_gen_sub_tl(t, d, b); tcg_gen_movcond_tl(TCG_COND_GEU, d, d, b, t, d); - tcg_temp_free(t); } static void t_gen_cris_mstep(TCGv d, TCGv a, TCGv b, TCGv ccs) @@ -353,7 +344,6 @@ static void t_gen_cris_mstep(TCGv d, TCGv a, TCGv b, TCGv ccs) tcg_gen_sari_tl(t, t, 31); tcg_gen_and_tl(t, t, b); tcg_gen_add_tl(d, d, t); - tcg_temp_free(t); } /* Extended arithmetics on CRIS. */ @@ -369,7 +359,6 @@ static inline void t_gen_add_flag(TCGv d, int flag) tcg_gen_shri_tl(c, c, flag); } tcg_gen_add_tl(d, d, c); - tcg_temp_free(c); } static inline void t_gen_addx_carry(DisasContext *dc, TCGv d) @@ -381,7 +370,6 @@ static inline void t_gen_addx_carry(DisasContext *dc, TCGv d) /* C flag is already at bit 0. */ tcg_gen_andi_tl(c, c, C_FLAG); tcg_gen_add_tl(d, d, c); - tcg_temp_free(c); } } @@ -394,7 +382,6 @@ static inline void t_gen_subx_carry(DisasContext *dc, TCGv d) /* C flag is already at bit 0. 
*/ tcg_gen_andi_tl(c, c, C_FLAG); tcg_gen_sub_tl(d, d, c); - tcg_temp_free(c); } } @@ -414,8 +401,6 @@ static inline void t_gen_swapb(TCGv d, TCGv s) tcg_gen_shri_tl(t, org_s, 8); tcg_gen_andi_tl(t, t, 0x00ff00ff); tcg_gen_or_tl(d, d, t); - tcg_temp_free(t); - tcg_temp_free(org_s); } /* Swap the halfwords of the s operand. */ @@ -428,7 +413,6 @@ static inline void t_gen_swapw(TCGv d, TCGv s) tcg_gen_shli_tl(d, t, 16); tcg_gen_shri_tl(t, t, 16); tcg_gen_or_tl(d, d, t); - tcg_temp_free(t); } /* Reverse the within each byte. @@ -475,8 +459,6 @@ static void t_gen_swapr(TCGv d, TCGv s) tcg_gen_andi_tl(t, t, bitrev[i].mask); tcg_gen_or_tl(d, d, t); } - tcg_temp_free(t); - tcg_temp_free(org_s); } static bool use_goto_tb(DisasContext *dc, target_ulong dest) @@ -778,9 +760,6 @@ static void cris_alu(DisasContext *dc, int op, } tcg_gen_or_tl(d, d, tmp); } - if (tmp != d) { - tcg_temp_free(tmp); - } } static int arith_cc(DisasContext *dc) @@ -919,8 +898,6 @@ static void gen_tst_cc (DisasContext *dc, TCGv cc, int cond) tcg_gen_shli_tl(cc, tmp, 2); tcg_gen_and_tl(cc, tmp, cc); tcg_gen_andi_tl(cc, cc, Z_FLAG); - - tcg_temp_free(tmp); } break; case CC_GE: @@ -959,9 +936,6 @@ static void gen_tst_cc (DisasContext *dc, TCGv cc, int cond) tcg_gen_xori_tl(n, n, 2); tcg_gen_and_tl(cc, z, n); tcg_gen_andi_tl(cc, cc, 2); - - tcg_temp_free(n); - tcg_temp_free(z); } break; case CC_LE: @@ -980,9 +954,6 @@ static void gen_tst_cc (DisasContext *dc, TCGv cc, int cond) tcg_gen_xor_tl(n, n, cpu_PR[PR_CCS]); tcg_gen_or_tl(cc, z, n); tcg_gen_andi_tl(cc, cc, 2); - - tcg_temp_free(n); - tcg_temp_free(z); } break; case CC_P: @@ -1282,7 +1253,6 @@ static int dec_addq(CPUCRISState *env, DisasContext *dc) c = tcg_const_tl(dc->op1); cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], cpu_R[dc->op2], c, 4); - tcg_temp_free(c); return 2; } static int dec_moveq(CPUCRISState *env, DisasContext *dc) @@ -1307,7 +1277,6 @@ static int dec_subq(CPUCRISState *env, DisasContext *dc) c = tcg_const_tl(dc->op1); cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], cpu_R[dc->op2], c, 4); - tcg_temp_free(c); return 2; } static int dec_cmpq(CPUCRISState *env, DisasContext *dc) @@ -1323,7 +1292,6 @@ static int dec_cmpq(CPUCRISState *env, DisasContext *dc) c = tcg_const_tl(imm); cris_alu(dc, CC_OP_CMP, cpu_R[dc->op2], cpu_R[dc->op2], c, 4); - tcg_temp_free(c); return 2; } static int dec_andq(CPUCRISState *env, DisasContext *dc) @@ -1339,7 +1307,6 @@ static int dec_andq(CPUCRISState *env, DisasContext *dc) c = tcg_const_tl(imm); cris_alu(dc, CC_OP_AND, cpu_R[dc->op2], cpu_R[dc->op2], c, 4); - tcg_temp_free(c); return 2; } static int dec_orq(CPUCRISState *env, DisasContext *dc) @@ -1354,7 +1321,6 @@ static int dec_orq(CPUCRISState *env, DisasContext *dc) c = tcg_const_tl(imm); cris_alu(dc, CC_OP_OR, cpu_R[dc->op2], cpu_R[dc->op2], c, 4); - tcg_temp_free(c); return 2; } static int dec_btstq(CPUCRISState *env, DisasContext *dc) @@ -1368,7 +1334,6 @@ static int dec_btstq(CPUCRISState *env, DisasContext *dc) cris_evaluate_flags(dc); gen_helper_btst(cpu_PR[PR_CCS], cpu_env, cpu_R[dc->op2], c, cpu_PR[PR_CCS]); - tcg_temp_free(c); cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op2], 4); cris_update_cc_op(dc, CC_OP_FLAGS, 4); @@ -1437,7 +1402,6 @@ static int dec_move_r(CPUCRISState *env, DisasContext *dc) cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], t0, size); - tcg_temp_free(t0); } return 2; } @@ -1467,14 +1431,6 @@ static inline void cris_alu_alloc_temps(DisasContext *dc, int size, TCGv *t) } } -static inline void cris_alu_free_temps(DisasContext 
*dc, int size, TCGv *t) -{ - if (size != 4) { - tcg_temp_free(t[0]); - tcg_temp_free(t[1]); - } -} - static int dec_and_r(CPUCRISState *env, DisasContext *dc) { TCGv t[2]; @@ -1488,7 +1444,6 @@ static int dec_and_r(CPUCRISState *env, DisasContext *dc) cris_alu_alloc_temps(dc, size, t); dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]); cris_alu(dc, CC_OP_AND, cpu_R[dc->op2], t[0], t[1], size); - cris_alu_free_temps(dc, size, t); return 2; } @@ -1501,7 +1456,6 @@ static int dec_lz_r(CPUCRISState *env, DisasContext *dc) t0 = tcg_temp_new(); dec_prep_alu_r(dc, dc->op1, dc->op2, 4, 0, cpu_R[dc->op2], t0); cris_alu(dc, CC_OP_LZ, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4); - tcg_temp_free(t0); return 2; } @@ -1518,7 +1472,6 @@ static int dec_lsl_r(CPUCRISState *env, DisasContext *dc) dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]); tcg_gen_andi_tl(t[1], t[1], 63); cris_alu(dc, CC_OP_LSL, cpu_R[dc->op2], t[0], t[1], size); - cris_alu_free_temps(dc, size, t); return 2; } @@ -1535,7 +1488,6 @@ static int dec_lsr_r(CPUCRISState *env, DisasContext *dc) dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]); tcg_gen_andi_tl(t[1], t[1], 63); cris_alu(dc, CC_OP_LSR, cpu_R[dc->op2], t[0], t[1], size); - cris_alu_free_temps(dc, size, t); return 2; } @@ -1552,7 +1504,6 @@ static int dec_asr_r(CPUCRISState *env, DisasContext *dc) dec_prep_alu_r(dc, dc->op1, dc->op2, size, 1, t[0], t[1]); tcg_gen_andi_tl(t[1], t[1], 63); cris_alu(dc, CC_OP_ASR, cpu_R[dc->op2], t[0], t[1], size); - cris_alu_free_temps(dc, size, t); return 2; } @@ -1568,7 +1519,6 @@ static int dec_muls_r(CPUCRISState *env, DisasContext *dc) dec_prep_alu_r(dc, dc->op1, dc->op2, size, 1, t[0], t[1]); cris_alu(dc, CC_OP_MULS, cpu_R[dc->op2], t[0], t[1], 4); - cris_alu_free_temps(dc, size, t); return 2; } @@ -1584,7 +1534,6 @@ static int dec_mulu_r(CPUCRISState *env, DisasContext *dc) dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]); cris_alu(dc, CC_OP_MULU, cpu_R[dc->op2], t[0], t[1], 4); - cris_alu_free_temps(dc, size, t); return 2; } @@ -1610,7 +1559,6 @@ static int dec_xor_r(CPUCRISState *env, DisasContext *dc) dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]); cris_alu(dc, CC_OP_XOR, cpu_R[dc->op2], t[0], t[1], 4); - cris_alu_free_temps(dc, size, t); return 2; } @@ -1624,7 +1572,6 @@ static int dec_bound_r(CPUCRISState *env, DisasContext *dc) l0 = tcg_temp_new(); dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, l0); cris_alu(dc, CC_OP_BOUND, cpu_R[dc->op2], cpu_R[dc->op2], l0, 4); - tcg_temp_free(l0); return 2; } @@ -1639,7 +1586,6 @@ static int dec_cmp_r(CPUCRISState *env, DisasContext *dc) dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]); cris_alu(dc, CC_OP_CMP, cpu_R[dc->op2], t[0], t[1], size); - cris_alu_free_temps(dc, size, t); return 2; } @@ -1666,7 +1612,6 @@ static int dec_add_r(CPUCRISState *env, DisasContext *dc) dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]); cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], t[0], t[1], size); - cris_alu_free_temps(dc, size, t); return 2; } @@ -1741,7 +1686,6 @@ static int dec_swap_r(CPUCRISState *env, DisasContext *dc) t_gen_swapr(t0, t0); } cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op1], cpu_R[dc->op1], t0, 4); - tcg_temp_free(t0); return 2; } @@ -1755,7 +1699,6 @@ static int dec_or_r(CPUCRISState *env, DisasContext *dc) cris_alu_alloc_temps(dc, size, t); dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]); cris_alu(dc, CC_OP_OR, cpu_R[dc->op2], t[0], t[1], size); - cris_alu_free_temps(dc, size, t); return 2; } @@ -1768,7 +1711,6 @@ static int 
dec_addi_r(CPUCRISState *env, DisasContext *dc) t0 = tcg_temp_new(); tcg_gen_shli_tl(t0, cpu_R[dc->op2], dc->zzsize); tcg_gen_add_tl(cpu_R[dc->op1], cpu_R[dc->op1], t0); - tcg_temp_free(t0); return 2; } @@ -1781,7 +1723,6 @@ static int dec_addi_acr(CPUCRISState *env, DisasContext *dc) t0 = tcg_temp_new(); tcg_gen_shli_tl(t0, cpu_R[dc->op2], dc->zzsize); tcg_gen_add_tl(cpu_R[R_ACR], cpu_R[dc->op1], t0); - tcg_temp_free(t0); return 2; } @@ -1796,7 +1737,6 @@ static int dec_neg_r(CPUCRISState *env, DisasContext *dc) dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]); cris_alu(dc, CC_OP_NEG, cpu_R[dc->op2], t[0], t[1], size); - cris_alu_free_temps(dc, size, t); return 2; } @@ -1825,7 +1765,6 @@ static int dec_sub_r(CPUCRISState *env, DisasContext *dc) cris_alu_alloc_temps(dc, size, t); dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]); cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], t[0], t[1], size); - cris_alu_free_temps(dc, size, t); return 2; } @@ -1842,7 +1781,6 @@ static int dec_movu_r(CPUCRISState *env, DisasContext *dc) t0 = tcg_temp_new(); dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, t0); cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4); - tcg_temp_free(t0); return 2; } @@ -1861,7 +1799,6 @@ static int dec_movs_r(CPUCRISState *env, DisasContext *dc) t_gen_sext(t0, cpu_R[dc->op1], size); cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op1], t0, 4); - tcg_temp_free(t0); return 2; } @@ -1879,7 +1816,6 @@ static int dec_addu_r(CPUCRISState *env, DisasContext *dc) /* Size can only be qi or hi. */ t_gen_zext(t0, cpu_R[dc->op1], size); cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4); - tcg_temp_free(t0); return 2; } @@ -1898,7 +1834,6 @@ static int dec_adds_r(CPUCRISState *env, DisasContext *dc) t_gen_sext(t0, cpu_R[dc->op1], size); cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4); - tcg_temp_free(t0); return 2; } @@ -1917,7 +1852,6 @@ static int dec_subu_r(CPUCRISState *env, DisasContext *dc) t_gen_zext(t0, cpu_R[dc->op1], size); cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4); - tcg_temp_free(t0); return 2; } @@ -1936,7 +1870,6 @@ static int dec_subs_r(CPUCRISState *env, DisasContext *dc) t_gen_sext(t0, cpu_R[dc->op1], size); cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4); - tcg_temp_free(t0); return 2; } @@ -2016,8 +1949,6 @@ static int dec_move_rs(CPUCRISState *env, DisasContext *dc) c2 = tcg_const_tl(dc->op2); cris_cc_mask(dc, 0); gen_helper_movl_sreg_reg(cpu_env, c2, c1); - tcg_temp_free(c1); - tcg_temp_free(c2); return 2; } static int dec_move_sr(CPUCRISState *env, DisasContext *dc) @@ -2028,8 +1959,6 @@ static int dec_move_sr(CPUCRISState *env, DisasContext *dc) c2 = tcg_const_tl(dc->op2); cris_cc_mask(dc, 0); gen_helper_movl_reg_sreg(cpu_env, c1, c2); - tcg_temp_free(c1); - tcg_temp_free(c2); return 2; } @@ -2049,7 +1978,6 @@ static int dec_move_rp(CPUCRISState *env, DisasContext *dc) tcg_gen_andi_tl(t[0], t[0], 0x39f); tcg_gen_andi_tl(t[1], cpu_PR[PR_CCS], ~0x39f); tcg_gen_or_tl(t[0], t[1], t[0]); - tcg_temp_free(t[1]); } } else { tcg_gen_mov_tl(t[0], cpu_R[dc->op1]); @@ -2060,7 +1988,6 @@ static int dec_move_rp(CPUCRISState *env, DisasContext *dc) cris_update_cc_op(dc, CC_OP_FLAGS, 4); dc->flags_uptodate = 1; } - tcg_temp_free(t[0]); return 2; } static int dec_move_pr(CPUCRISState *env, DisasContext *dc) @@ -2081,7 +2008,6 @@ static int dec_move_pr(CPUCRISState *env, DisasContext *dc) cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op1], cpu_R[dc->op1], t0, preg_sizes[dc->op2]); - tcg_temp_free(t0); } return 
2; } @@ -2109,7 +2035,6 @@ static int dec_move_mr(CPUCRISState *env, DisasContext *dc) cris_cc_mask(dc, CC_MASK_NZ); cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], t0, memsize); - tcg_temp_free(t0); } do_postinc(dc, memsize); return insn_len; @@ -2121,12 +2046,6 @@ static inline void cris_alu_m_alloc_temps(TCGv *t) t[1] = tcg_temp_new(); } -static inline void cris_alu_m_free_temps(TCGv *t) -{ - tcg_temp_free(t[0]); - tcg_temp_free(t[1]); -} - static int dec_movs_m(CPUCRISState *env, DisasContext *dc) { TCGv t[2]; @@ -2144,7 +2063,6 @@ static int dec_movs_m(CPUCRISState *env, DisasContext *dc) cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4); do_postinc(dc, memsize); - cris_alu_m_free_temps(t); return insn_len; } @@ -2165,7 +2083,6 @@ static int dec_addu_m(CPUCRISState *env, DisasContext *dc) cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4); do_postinc(dc, memsize); - cris_alu_m_free_temps(t); return insn_len; } @@ -2185,7 +2102,6 @@ static int dec_adds_m(CPUCRISState *env, DisasContext *dc) cris_cc_mask(dc, CC_MASK_NZVC); cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4); do_postinc(dc, memsize); - cris_alu_m_free_temps(t); return insn_len; } @@ -2205,7 +2121,6 @@ static int dec_subu_m(CPUCRISState *env, DisasContext *dc) cris_cc_mask(dc, CC_MASK_NZVC); cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4); do_postinc(dc, memsize); - cris_alu_m_free_temps(t); return insn_len; } @@ -2225,7 +2140,6 @@ static int dec_subs_m(CPUCRISState *env, DisasContext *dc) cris_cc_mask(dc, CC_MASK_NZVC); cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4); do_postinc(dc, memsize); - cris_alu_m_free_temps(t); return insn_len; } @@ -2245,7 +2159,6 @@ static int dec_movu_m(CPUCRISState *env, DisasContext *dc) cris_cc_mask(dc, CC_MASK_NZ); cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4); do_postinc(dc, memsize); - cris_alu_m_free_temps(t); return insn_len; } @@ -2264,7 +2177,6 @@ static int dec_cmpu_m(CPUCRISState *env, DisasContext *dc) cris_cc_mask(dc, CC_MASK_NZVC); cris_alu(dc, CC_OP_CMP, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4); do_postinc(dc, memsize); - cris_alu_m_free_temps(t); return insn_len; } @@ -2285,7 +2197,6 @@ static int dec_cmps_m(CPUCRISState *env, DisasContext *dc) cpu_R[dc->op2], cpu_R[dc->op2], t[1], memsize_zz(dc)); do_postinc(dc, memsize); - cris_alu_m_free_temps(t); return insn_len; } @@ -2306,7 +2217,6 @@ static int dec_cmp_m(CPUCRISState *env, DisasContext *dc) cpu_R[dc->op2], cpu_R[dc->op2], t[1], memsize_zz(dc)); do_postinc(dc, memsize); - cris_alu_m_free_temps(t); return insn_len; } @@ -2330,9 +2240,7 @@ static int dec_test_m(CPUCRISState *env, DisasContext *dc) c = tcg_const_tl(0); cris_alu(dc, CC_OP_CMP, cpu_R[dc->op2], t[1], c, memsize_zz(dc)); - tcg_temp_free(c); do_postinc(dc, memsize); - cris_alu_m_free_temps(t); return insn_len; } @@ -2351,7 +2259,6 @@ static int dec_and_m(CPUCRISState *env, DisasContext *dc) cris_cc_mask(dc, CC_MASK_NZ); cris_alu(dc, CC_OP_AND, cpu_R[dc->op2], t[0], t[1], memsize_zz(dc)); do_postinc(dc, memsize); - cris_alu_m_free_temps(t); return insn_len; } @@ -2371,7 +2278,6 @@ static int dec_add_m(CPUCRISState *env, DisasContext *dc) cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], t[0], t[1], memsize_zz(dc)); do_postinc(dc, memsize); - cris_alu_m_free_temps(t); return insn_len; } @@ -2390,7 +2296,6 @@ static int dec_addo_m(CPUCRISState *env, DisasContext *dc) cris_cc_mask(dc, 0); cris_alu(dc, CC_OP_ADD, cpu_R[R_ACR], t[0], t[1], 4); do_postinc(dc, memsize); - 
cris_alu_m_free_temps(t); return insn_len; } @@ -2410,8 +2315,6 @@ static int dec_bound_m(CPUCRISState *env, DisasContext *dc) cris_cc_mask(dc, CC_MASK_NZ); cris_alu(dc, CC_OP_BOUND, cpu_R[dc->op2], l[0], l[1], 4); do_postinc(dc, memsize); - tcg_temp_free(l[0]); - tcg_temp_free(l[1]); return insn_len; } @@ -2433,7 +2336,6 @@ static int dec_addc_mr(CPUCRISState *env, DisasContext *dc) cris_cc_mask(dc, CC_MASK_NZVC); cris_alu(dc, CC_OP_ADDC, cpu_R[dc->op2], t[0], t[1], 4); do_postinc(dc, 4); - cris_alu_m_free_temps(t); return insn_len; } @@ -2452,7 +2354,6 @@ static int dec_sub_m(CPUCRISState *env, DisasContext *dc) cris_cc_mask(dc, CC_MASK_NZVC); cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], t[0], t[1], memsize); do_postinc(dc, memsize); - cris_alu_m_free_temps(t); return insn_len; } @@ -2472,7 +2373,6 @@ static int dec_or_m(CPUCRISState *env, DisasContext *dc) cris_alu(dc, CC_OP_OR, cpu_R[dc->op2], t[0], t[1], memsize_zz(dc)); do_postinc(dc, memsize); - cris_alu_m_free_temps(t); return insn_len; } @@ -2504,7 +2404,6 @@ static int dec_move_mp(CPUCRISState *env, DisasContext *dc) t_gen_mov_preg_TN(dc, dc->op2, t[1]); do_postinc(dc, memsize); - cris_alu_m_free_temps(t); return insn_len; } @@ -2527,7 +2426,6 @@ static int dec_move_pm(CPUCRISState *env, DisasContext *dc) t_gen_mov_TN_preg(t0, dc->op2); cris_flush_cc_state(dc); gen_store(dc, cpu_R[dc->op1], t0, memsize); - tcg_temp_free(t0); cris_cc_mask(dc, 0); if (dc->postinc) { @@ -2562,17 +2460,14 @@ static int dec_movem_mr(CPUCRISState *env, DisasContext *dc) } else { tmp32 = NULL; } - tcg_temp_free(addr); for (i = 0; i < (nr >> 1); i++) { tcg_gen_extrl_i64_i32(cpu_R[i * 2], tmp[i]); tcg_gen_shri_i64(tmp[i], tmp[i], 32); tcg_gen_extrl_i64_i32(cpu_R[i * 2 + 1], tmp[i]); - tcg_temp_free_i64(tmp[i]); } if (nr & 1) { tcg_gen_mov_tl(cpu_R[dc->op2], tmp32); - tcg_temp_free(tmp32); } /* writeback the updated pointer value. */ @@ -2610,8 +2505,6 @@ static int dec_movem_rm(CPUCRISState *env, DisasContext *dc) tcg_gen_mov_tl(cpu_R[dc->op1], addr); } cris_cc_mask(dc, 0); - tcg_temp_free(tmp); - tcg_temp_free(addr); return 2; } @@ -2691,7 +2584,6 @@ static int dec_jas_r(CPUCRISState *env, DisasContext *dc) } c = tcg_const_tl(dc->pc + 4); t_gen_mov_preg_TN(dc, dc->op2, c); - tcg_temp_free(c); cris_prepare_jmp(dc, JMP_INDIRECT); return 2; @@ -2709,7 +2601,6 @@ static int dec_jas_im(CPUCRISState *env, DisasContext *dc) c = tcg_const_tl(dc->pc + 8); /* Store the return address in Pd. */ t_gen_mov_preg_TN(dc, dc->op2, c); - tcg_temp_free(c); dc->jmp_pc = imm; cris_prepare_jmp(dc, JMP_DIRECT); @@ -2728,7 +2619,6 @@ static int dec_jasc_im(CPUCRISState *env, DisasContext *dc) c = tcg_const_tl(dc->pc + 8 + 4); /* Store the return address in Pd. */ t_gen_mov_preg_TN(dc, dc->op2, c); - tcg_temp_free(c); dc->jmp_pc = imm; cris_prepare_jmp(dc, JMP_DIRECT); @@ -2744,7 +2634,6 @@ static int dec_jasc_r(CPUCRISState *env, DisasContext *dc) tcg_gen_mov_tl(env_btarget, cpu_R[dc->op1]); c = tcg_const_tl(dc->pc + 4 + 4); t_gen_mov_preg_TN(dc, dc->op2, c); - tcg_temp_free(c); cris_prepare_jmp(dc, JMP_INDIRECT); return 2; } @@ -2778,7 +2667,6 @@ static int dec_bas_im(CPUCRISState *env, DisasContext *dc) c = tcg_const_tl(dc->pc + 8); /* Store the return address in Pd. */ t_gen_mov_preg_TN(dc, dc->op2, c); - tcg_temp_free(c); dc->jmp_pc = dc->pc + simm; cris_prepare_jmp(dc, JMP_DIRECT); @@ -2796,7 +2684,6 @@ static int dec_basc_im(CPUCRISState *env, DisasContext *dc) c = tcg_const_tl(dc->pc + 12); /* Store the return address in Pd. 
*/ t_gen_mov_preg_TN(dc, dc->op2, c); - tcg_temp_free(c); dc->jmp_pc = dc->pc + simm; cris_prepare_jmp(dc, JMP_DIRECT); diff --git a/target/cris/translate_v10.c.inc b/target/cris/translate_v10.c.inc index 9660f28584..b03b2ef746 100644 --- a/target/cris/translate_v10.c.inc +++ b/target/cris/translate_v10.c.inc @@ -90,9 +90,6 @@ static void gen_store_v10_conditional(DisasContext *dc, TCGv addr, TCGv val, gen_set_label(l1); tcg_gen_shri_tl(t1, t1, 1); /* shift F to P position */ tcg_gen_or_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], t1); /*P=F*/ - tcg_temp_free(t1); - tcg_temp_free(tval); - tcg_temp_free(taddr); } static void gen_store_v10(DisasContext *dc, TCGv addr, TCGv val, @@ -215,7 +212,6 @@ static int dec10_prep_move_m(CPUCRISState *env, DisasContext *dc, else t_gen_zext(dst, dst, memsize); insn_len += crisv10_post_memaddr(dc, memsize); - tcg_temp_free(addr); } if (dc->mode == CRISV10_MODE_INDIRECT && (dc->tb_flags & PFIX_FLAG)) { @@ -258,7 +254,6 @@ static unsigned int dec10_quick_imm(DisasContext *dc) c = tcg_const_tl(simm); cris_alu(dc, CC_OP_MOVE, cpu_R[dc->dst], cpu_R[dc->dst], c, 4); - tcg_temp_free(c); break; case CRISV10_QIMM_CMPQ: LOG_DIS("cmpq %d, $r%d\n", simm, dc->dst); @@ -267,7 +262,6 @@ static unsigned int dec10_quick_imm(DisasContext *dc) c = tcg_const_tl(simm); cris_alu(dc, CC_OP_CMP, cpu_R[dc->dst], cpu_R[dc->dst], c, 4); - tcg_temp_free(c); break; case CRISV10_QIMM_ADDQ: LOG_DIS("addq %d, $r%d\n", imm, dc->dst); @@ -276,7 +270,6 @@ static unsigned int dec10_quick_imm(DisasContext *dc) c = tcg_const_tl(imm); cris_alu(dc, CC_OP_ADD, cpu_R[dc->dst], cpu_R[dc->dst], c, 4); - tcg_temp_free(c); break; case CRISV10_QIMM_ANDQ: LOG_DIS("andq %d, $r%d\n", simm, dc->dst); @@ -285,7 +278,6 @@ static unsigned int dec10_quick_imm(DisasContext *dc) c = tcg_const_tl(simm); cris_alu(dc, CC_OP_AND, cpu_R[dc->dst], cpu_R[dc->dst], c, 4); - tcg_temp_free(c); break; case CRISV10_QIMM_ASHQ: LOG_DIS("ashq %d, $r%d\n", simm, dc->dst); @@ -303,7 +295,6 @@ static unsigned int dec10_quick_imm(DisasContext *dc) gen_helper_btst(cpu_PR[PR_CCS], cpu_env, cpu_R[dc->dst], c, cpu_PR[PR_CCS]); } - tcg_temp_free(c); break; case CRISV10_QIMM_LSHQ: LOG_DIS("lshq %d, $r%d\n", simm, dc->dst); @@ -317,7 +308,6 @@ static unsigned int dec10_quick_imm(DisasContext *dc) c = tcg_const_tl(imm); cris_alu(dc, op, cpu_R[dc->dst], cpu_R[dc->dst], c, 4); - tcg_temp_free(c); break; case CRISV10_QIMM_SUBQ: LOG_DIS("subq %d, $r%d\n", imm, dc->dst); @@ -326,7 +316,6 @@ static unsigned int dec10_quick_imm(DisasContext *dc) c = tcg_const_tl(imm); cris_alu(dc, CC_OP_SUB, cpu_R[dc->dst], cpu_R[dc->dst], c, 4); - tcg_temp_free(c); break; case CRISV10_QIMM_ORQ: LOG_DIS("andq %d, $r%d\n", simm, dc->dst); @@ -335,7 +324,6 @@ static unsigned int dec10_quick_imm(DisasContext *dc) c = tcg_const_tl(simm); cris_alu(dc, CC_OP_OR, cpu_R[dc->dst], cpu_R[dc->dst], c, 4); - tcg_temp_free(c); break; case CRISV10_QIMM_BCC_R0: @@ -426,8 +414,6 @@ static void dec10_reg_alu(DisasContext *dc, int op, int size, int sext) assert(dc->dst != 15); cris_alu(dc, op, cpu_R[dc->dst], t[0], t[1], size); - tcg_temp_free(t[0]); - tcg_temp_free(t[1]); } static void dec10_reg_bound(DisasContext *dc, int size) @@ -437,7 +423,6 @@ static void dec10_reg_bound(DisasContext *dc, int size) t = tcg_temp_new(); t_gen_zext(t, cpu_R[dc->src], size); cris_alu(dc, CC_OP_BOUND, cpu_R[dc->dst], cpu_R[dc->dst], t, 4); - tcg_temp_free(t); } static void dec10_reg_mul(DisasContext *dc, int size, int sext) @@ -451,9 +436,6 @@ static void dec10_reg_mul(DisasContext *dc, int size, int 
sext) t[0], t[1], cpu_R[dc->dst], cpu_R[dc->src]); cris_alu(dc, op, cpu_R[dc->dst], t[0], t[1], 4); - - tcg_temp_free(t[0]); - tcg_temp_free(t[1]); } @@ -472,7 +454,6 @@ static void dec10_reg_movs(DisasContext *dc) t_gen_zext(t, cpu_R[dc->src], size); cris_alu(dc, CC_OP_MOVE, cpu_R[dc->dst], cpu_R[dc->dst], t, 4); - tcg_temp_free(t); } static void dec10_reg_alux(DisasContext *dc, int op) @@ -490,7 +471,6 @@ static void dec10_reg_alux(DisasContext *dc, int op) t_gen_zext(t, cpu_R[dc->src], size); cris_alu(dc, op, cpu_R[dc->dst], cpu_R[dc->dst], t, 4); - tcg_temp_free(t); } static void dec10_reg_mov_pr(DisasContext *dc) @@ -522,7 +502,6 @@ static void dec10_reg_abs(DisasContext *dc) tcg_gen_sub_tl(t0, cpu_R[dc->dst], t0); cris_alu(dc, CC_OP_MOVE, cpu_R[dc->dst], cpu_R[dc->dst], t0, 4); - tcg_temp_free(t0); } static void dec10_reg_swap(DisasContext *dc) @@ -543,7 +522,6 @@ static void dec10_reg_swap(DisasContext *dc) if (dc->dst & 1) t_gen_swapr(t0, t0); cris_alu(dc, CC_OP_MOVE, cpu_R[dc->src], cpu_R[dc->src], t0, 4); - tcg_temp_free(t0); } static void dec10_reg_scc(DisasContext *dc) @@ -623,7 +601,6 @@ static unsigned int dec10_reg(DisasContext *dc) LOG_DIS("addi r%d r%d size=%d\n", dc->src, dc->dst, dc->size); tcg_gen_shli_tl(t, cpu_R[dc->dst], dc->size & 3); tcg_gen_add_tl(cpu_R[dc->src], cpu_R[dc->src], t); - tcg_temp_free(t); break; case CRISV10_REG_LSL: LOG_DIS("lsl $r%d, $r%d sz=%d\n", dc->src, dc->dst, size); @@ -669,7 +646,6 @@ static unsigned int dec10_reg(DisasContext *dc) } else { tcg_gen_add_tl(cpu_PR[PR_PREFIX], cpu_R[dc->src], t); } - tcg_temp_free(t); cris_set_prefix(dc); break; @@ -778,7 +754,6 @@ static unsigned int dec10_ind_move_m_r(CPUCRISState *env, DisasContext *dc, dc->delayed_branch = 1; } - tcg_temp_free(t); return insn_len; } @@ -792,7 +767,6 @@ static unsigned int dec10_ind_move_r_m(DisasContext *dc, unsigned int size) crisv10_prepare_memaddr(dc, addr, size); gen_store_v10(dc, addr, cpu_R[dc->dst], size); insn_len += crisv10_post_memaddr(dc, size); - tcg_temp_free(addr); return insn_len; } @@ -800,12 +774,11 @@ static unsigned int dec10_ind_move_r_m(DisasContext *dc, unsigned int size) static unsigned int dec10_ind_move_m_pr(CPUCRISState *env, DisasContext *dc) { unsigned int insn_len = 2, rd = dc->dst; - TCGv t, addr; + TCGv t; LOG_DIS("move.%d $p%d, [$r%d]\n", dc->size, dc->dst, dc->src); cris_lock_irq(dc); - addr = tcg_temp_new(); t = tcg_temp_new(); insn_len += dec10_prep_move_m(env, dc, 0, 4, t); if (rd == 15) { @@ -816,8 +789,6 @@ static unsigned int dec10_ind_move_m_pr(CPUCRISState *env, DisasContext *dc) tcg_gen_mov_tl(cpu_PR[rd], t); dc->cpustate_changed = 1; } - tcg_temp_free(addr); - tcg_temp_free(t); return insn_len; } @@ -835,12 +806,10 @@ static unsigned int dec10_ind_move_pr_m(DisasContext *dc) cris_evaluate_flags(dc); tcg_gen_andi_tl(t0, cpu_PR[PR_CCS], ~PFIX_FLAG); gen_store_v10(dc, addr, t0, size); - tcg_temp_free(t0); } else { gen_store_v10(dc, addr, cpu_PR[dc->dst], size); } insn_len += crisv10_post_memaddr(dc, size); - tcg_temp_free(addr); cris_lock_irq(dc); return insn_len; @@ -874,8 +843,6 @@ static void dec10_movem_r_m(DisasContext *dc) if (!pfix && dc->mode == CRISV10_MODE_AUTOINC) { tcg_gen_mov_tl(cpu_R[dc->src], addr); } - tcg_temp_free(addr); - tcg_temp_free(t0); } static void dec10_movem_m_r(DisasContext *dc) @@ -902,8 +869,6 @@ static void dec10_movem_m_r(DisasContext *dc) if (!pfix && dc->mode == CRISV10_MODE_AUTOINC) { tcg_gen_mov_tl(cpu_R[dc->src], addr); } - tcg_temp_free(addr); - tcg_temp_free(t0); } static int 
dec10_ind_alu(CPUCRISState *env, DisasContext *dc, @@ -922,9 +887,6 @@ static int dec10_ind_alu(CPUCRISState *env, DisasContext *dc, dc->delayed_branch = 1; return insn_len; } - - cris_alu_m_free_temps(t); - return insn_len; } @@ -944,7 +906,6 @@ static int dec10_ind_bound(CPUCRISState *env, DisasContext *dc, dc->delayed_branch = 1; } - tcg_temp_free(t); return insn_len; } @@ -969,7 +930,6 @@ static int dec10_alux_m(CPUCRISState *env, DisasContext *dc, int op) dc->delayed_branch = 1; } - tcg_temp_free(t); return insn_len; } @@ -1057,8 +1017,6 @@ static unsigned int dec10_ind(CPUCRISState *env, DisasContext *dc) c = tcg_const_tl(0); cris_alu(dc, CC_OP_CMP, cpu_R[dc->dst], t[0], c, size); - tcg_temp_free(c); - cris_alu_m_free_temps(t); break; case CRISV10_IND_ADD: LOG_DIS("add size=%d op=%d %d\n", size, dc->src, dc->dst); @@ -1155,7 +1113,6 @@ static unsigned int dec10_ind(CPUCRISState *env, DisasContext *dc) c = tcg_const_tl(dc->pc + insn_len); t_gen_mov_preg_TN(dc, dc->dst, c); - tcg_temp_free(c); dc->jmp_pc = imm; cris_prepare_jmp(dc, JMP_DIRECT); dc->delayed_branch--; /* v10 has no dslot here. */ @@ -1166,7 +1123,6 @@ static unsigned int dec10_ind(CPUCRISState *env, DisasContext *dc) tcg_gen_movi_tl(env_pc, dc->pc + 2); c = tcg_const_tl(dc->src + 2); t_gen_mov_env_TN(trap_vector, c); - tcg_temp_free(c); t_gen_raise_exception(EXCP_BREAK); dc->base.is_jmp = DISAS_NORETURN; return insn_len; @@ -1176,13 +1132,11 @@ static unsigned int dec10_ind(CPUCRISState *env, DisasContext *dc) t[0] = tcg_temp_new(); c = tcg_const_tl(dc->pc + insn_len); t_gen_mov_preg_TN(dc, dc->dst, c); - tcg_temp_free(c); crisv10_prepare_memaddr(dc, t[0], size); gen_load(dc, env_btarget, t[0], 4, 0); insn_len += crisv10_post_memaddr(dc, size); cris_prepare_jmp(dc, JMP_INDIRECT); dc->delayed_branch--; /* v10 has no dslot here. */ - tcg_temp_free(t[0]); } break; @@ -1201,7 +1155,6 @@ static unsigned int dec10_ind(CPUCRISState *env, DisasContext *dc) tcg_gen_mov_tl(env_btarget, cpu_R[dc->src]); c = tcg_const_tl(dc->pc + insn_len); t_gen_mov_preg_TN(dc, dc->dst, c); - tcg_temp_free(c); cris_prepare_jmp(dc, JMP_INDIRECT); dc->delayed_branch--; /* v10 has no dslot here. */ break; diff --git a/target/hexagon/README b/target/hexagon/README index 2e32639fb7..251960b862 100644 --- a/target/hexagon/README +++ b/target/hexagon/README @@ -88,7 +88,6 @@ tcg_funcs_generated.c.inc gen_helper_A2_add(RdV, cpu_env, RsV, RtV); gen_log_reg_write(RdN, RdV); ctx_log_reg_write(ctx, RdN); - tcg_temp_free(RdV); } helper_funcs_generated.c.inc @@ -160,12 +159,8 @@ istruction. 
tcg_gen_addi_ptr(VvV, cpu_env, VvV_off); TCGv slot = tcg_constant_tl(insn->slot); gen_helper_V6_vaddw(cpu_env, VdV, VuV, VvV, slot); - tcg_temp_free(slot); gen_log_vreg_write(ctx, VdV_off, VdN, EXT_DFL, insn->slot, false); ctx_log_vreg_write(ctx, VdN, EXT_DFL, false); - tcg_temp_free_ptr(VdV); - tcg_temp_free_ptr(VuV); - tcg_temp_free_ptr(VvV); } Notice that we also generate a variable named <operand>_off for each operand of diff --git a/target/hexagon/gen_tcg.h b/target/hexagon/gen_tcg.h index a219a7f5dd..b2e7880b5c 100644 --- a/target/hexagon/gen_tcg.h +++ b/target/hexagon/gen_tcg.h @@ -77,7 +77,6 @@ tcg_gen_mov_tl(EA, RxV); \ gen_read_ireg(ireg, MuV, (SHIFT)); \ gen_helper_fcircadd(RxV, RxV, ireg, MuV, hex_gpr[HEX_REG_CS0 + MuN]); \ - tcg_temp_free(ireg); \ } while (0) /* Instructions with multiple definitions */ @@ -116,7 +115,6 @@ gen_read_ireg(ireg, MuV, SHIFT); \ gen_helper_fcircadd(RxV, RxV, ireg, MuV, hex_gpr[HEX_REG_CS0 + MuN]); \ LOAD; \ - tcg_temp_free(ireg); \ } while (0) #define fGEN_TCG_L2_loadrub_pcr(SHORTCODE) \ @@ -168,8 +166,6 @@ for (int i = 0; i < 2; i++) { \ gen_set_half(i, RdV, gen_get_byte(byte, i, tmp, (SIGN))); \ } \ - tcg_temp_free(tmp); \ - tcg_temp_free(byte); \ } while (0) #define fGEN_TCG_L2_loadbzw2_io(SHORTCODE) \ @@ -222,8 +218,6 @@ for (int i = 0; i < 4; i++) { \ gen_set_half_i64(i, RddV, gen_get_byte(byte, i, tmp, (SIGN))); \ } \ - tcg_temp_free(tmp); \ - tcg_temp_free(byte); \ } while (0) #define fGEN_TCG_L2_loadbzw4_io(SHORTCODE) \ @@ -273,8 +267,6 @@ tcg_gen_extu_i32_i64(tmp_i64, tmp); \ tcg_gen_shri_i64(RyyV, RyyV, 16); \ tcg_gen_deposit_i64(RyyV, RyyV, tmp_i64, 48, 16); \ - tcg_temp_free(tmp); \ - tcg_temp_free_i64(tmp_i64); \ } while (0) #define fGEN_TCG_L4_loadalignh_ur(SHORTCODE) \ @@ -304,8 +296,6 @@ tcg_gen_extu_i32_i64(tmp_i64, tmp); \ tcg_gen_shri_i64(RyyV, RyyV, 8); \ tcg_gen_deposit_i64(RyyV, RyyV, tmp_i64, 56, 8); \ - tcg_temp_free(tmp); \ - tcg_temp_free_i64(tmp_i64); \ } while (0) #define fGEN_TCG_L2_loadalignb_io(SHORTCODE) \ @@ -347,7 +337,6 @@ tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, label); \ fLOAD(1, SIZE, SIGN, EA, RdV); \ gen_set_label(label); \ - tcg_temp_free(LSB); \ } while (0) #define fGEN_TCG_L2_ploadrubt_pi(SHORTCODE) \ @@ -407,7 +396,6 @@ tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, label); \ fLOAD(1, 8, u, EA, RddV); \ gen_set_label(label); \ - tcg_temp_free(LSB); \ } while (0) #define fGEN_TCG_L2_ploadrdt_pi(SHORTCODE) \ @@ -434,8 +422,6 @@ TCGv HALF = tcg_temp_new(); \ TCGv BYTE = tcg_temp_new(); \ SHORTCODE; \ - tcg_temp_free(HALF); \ - tcg_temp_free(BYTE); \ } while (0) #define fGEN_TCG_STORE_pcr(SHIFT, STORE) \ @@ -447,9 +433,6 @@ gen_read_ireg(ireg, MuV, SHIFT); \ gen_helper_fcircadd(RxV, RxV, ireg, MuV, hex_gpr[HEX_REG_CS0 + MuN]); \ STORE; \ - tcg_temp_free(ireg); \ - tcg_temp_free(HALF); \ - tcg_temp_free(BYTE); \ } while (0) #define fGEN_TCG_S2_storerb_pbr(SHORTCODE) \ @@ -531,7 +514,6 @@ gen_helper_sfrecipa(tmp, cpu_env, RsV, RtV); \ tcg_gen_extrh_i64_i32(RdV, tmp); \ tcg_gen_extrl_i64_i32(PeV, tmp); \ - tcg_temp_free_i64(tmp); \ } while (0) /* @@ -547,7 +529,6 @@ gen_helper_sfinvsqrta(tmp, cpu_env, RsV); \ tcg_gen_extrh_i64_i32(RdV, tmp); \ tcg_gen_extrl_i64_i32(PeV, tmp); \ - tcg_temp_free_i64(tmp); \ } while (0) /* @@ -565,7 +546,6 @@ tcg_gen_add2_i64(RddV, carry, RddV, carry, RttV, zero); \ tcg_gen_extrl_i64_i32(PxV, carry); \ gen_8bitsof(PxV, PxV); \ - tcg_temp_free_i64(carry); \ } while (0) /* r5:4 = sub(r1:0, r3:2, p1):carry */ @@ -581,8 +561,6 @@ tcg_gen_add2_i64(RddV, carry, RddV, carry, not_RttV, zero); \ 
tcg_gen_extrl_i64_i32(PxV, carry); \ gen_8bitsof(PxV, PxV); \ - tcg_temp_free_i64(carry); \ - tcg_temp_free_i64(not_RttV); \ } while (0) /* @@ -607,9 +585,6 @@ tcg_gen_umin_tl(tmp, left, right); \ gen_set_byte_i64(i, RddV, tmp); \ } \ - tcg_temp_free(left); \ - tcg_temp_free(right); \ - tcg_temp_free(tmp); \ } while (0) #define fGEN_TCG_J2_call(SHORTCODE) \ @@ -815,14 +790,12 @@ TCGv LSB = tcg_temp_new(); \ COND; \ gen_cond_jump(ctx, TCG_COND_EQ, LSB, riV); \ - tcg_temp_free(LSB); \ } while (0) #define fGEN_TCG_cond_jumpf(COND) \ do { \ TCGv LSB = tcg_temp_new(); \ COND; \ gen_cond_jump(ctx, TCG_COND_NE, LSB, riV); \ - tcg_temp_free(LSB); \ } while (0) #define fGEN_TCG_J2_jumpt(SHORTCODE) \ @@ -863,14 +836,12 @@ TCGv LSB = tcg_temp_new(); \ COND; \ gen_cond_jumpr(ctx, RsV, TCG_COND_EQ, LSB); \ - tcg_temp_free(LSB); \ } while (0) #define fGEN_TCG_cond_jumprf(COND) \ do { \ TCGv LSB = tcg_temp_new(); \ COND; \ gen_cond_jumpr(ctx, RsV, TCG_COND_NE, LSB); \ - tcg_temp_free(LSB); \ } while (0) #define fGEN_TCG_J2_jumprt(SHORTCODE) \ diff --git a/target/hexagon/gen_tcg_funcs.py b/target/hexagon/gen_tcg_funcs.py index dfc90712fb..02cb52c21e 100755 --- a/target/hexagon/gen_tcg_funcs.py +++ b/target/hexagon/gen_tcg_funcs.py @@ -26,10 +26,7 @@ import hex_common ## Helpers for gen_tcg_func ## def gen_decl_ea_tcg(f, tag): - f.write(" TCGv EA = tcg_temp_new();\n") - -def gen_free_ea_tcg(f): - f.write(" tcg_temp_free(EA);\n") + f.write(" TCGv EA G_GNUC_UNUSED = tcg_temp_new();\n") def genptr_decl_pair_writable(f, tag, regtype, regid, regno): regN="%s%sN" % (regtype,regid) @@ -269,73 +266,6 @@ def genptr_decl_imm(f,immlett): f.write(" int %s = insn->immed[%d];\n" % \ (hex_common.imm_name(immlett), i)) -def genptr_free(f, tag, regtype, regid, regno): - if (regtype == "R"): - if (regid in {"dd", "ss", "tt", "xx", "yy"}): - f.write(" tcg_temp_free_i64(%s%sV);\n" % (regtype, regid)) - elif (regid in {"d", "e", "x", "y"}): - f.write(" tcg_temp_free(%s%sV);\n" % (regtype, regid)) - elif (regid not in {"s", "t", "u", "v"}): - print("Bad register parse: ",regtype,regid) - elif (regtype == "P"): - if (regid in {"d", "e", "x"}): - f.write(" tcg_temp_free(%s%sV);\n" % (regtype, regid)) - elif (regid not in {"s", "t", "u", "v"}): - print("Bad register parse: ",regtype,regid) - elif (regtype == "C"): - if (regid in {"dd", "ss"}): - f.write(" tcg_temp_free_i64(%s%sV);\n" % (regtype, regid)) - elif (regid in {"d", "s"}): - f.write(" tcg_temp_free(%s%sV);\n" % (regtype, regid)) - else: - print("Bad register parse: ",regtype,regid) - elif (regtype == "M"): - if (regid != "u"): - print("Bad register parse: ", regtype, regid) - elif (regtype == "V"): - if (regid in {"dd", "uu", "vv", "xx", \ - "d", "s", "u", "v", "w", "x", "y"}): - if (not hex_common.skip_qemu_helper(tag)): - f.write(" tcg_temp_free_ptr(%s%sV);\n" % \ - (regtype, regid)) - else: - print("Bad register parse: ", regtype, regid) - elif (regtype == "Q"): - if (regid in {"d", "e", "s", "t", "u", "v", "x"}): - if (not hex_common.skip_qemu_helper(tag)): - f.write(" tcg_temp_free_ptr(%s%sV);\n" % \ - (regtype, regid)) - else: - print("Bad register parse: ", regtype, regid) - else: - print("Bad register parse: ", regtype, regid) - -def genptr_free_new(f, tag, regtype, regid, regno): - if (regtype == "N"): - if (regid not in {"s", "t"}): - print("Bad register parse: ", regtype, regid) - elif (regtype == "P"): - if (regid not in {"t", "u", "v"}): - print("Bad register parse: ", regtype, regid) - elif (regtype == "O"): - if (regid != "s"): - print("Bad register parse: 
", regtype, regid) - else: - print("Bad register parse: ", regtype, regid) - -def genptr_free_opn(f,regtype,regid,i,tag): - if (hex_common.is_pair(regid)): - genptr_free(f, tag, regtype, regid, i) - elif (hex_common.is_single(regid)): - if hex_common.is_old_val(regtype, regid, tag): - genptr_free(f, tag, regtype, regid, i) - elif hex_common.is_new_val(regtype, regid, tag): - genptr_free_new(f, tag, regtype, regid, i) - else: - print("Bad register parse: ",regtype,regid,toss,numregs) - else: - print("Bad register parse: ",regtype,regid,toss,numregs) - def genptr_src_read(f, tag, regtype, regid): if (regtype == "R"): if (regid in {"ss", "tt", "xx", "yy"}): @@ -578,7 +508,6 @@ def genptr_dst_write_opn(f,regtype, regid, tag): ## <GEN> ## gen_log_reg_write(RdN, RdV); ## ctx_log_reg_write(ctx, RdN); -## tcg_temp_free(RdV); ## } ## ## where <GEN> depends on hex_common.skip_qemu_helper(tag) @@ -692,12 +621,6 @@ def gen_tcg_func(f, tag, regs, imms): if (hex_common.is_written(regid)): genptr_dst_write_opn(f,regtype, regid, tag) - ## Free all the operands (regs and immediates) - if hex_common.need_ea(tag): gen_free_ea_tcg(f) - for regtype,regid,toss,numregs in regs: - genptr_free_opn(f,regtype,regid,i,tag) - i += 1 - f.write("}\n\n") def gen_def_tcg_func(f, tag, tagregs, tagimms): diff --git a/target/hexagon/gen_tcg_hvx.h b/target/hexagon/gen_tcg_hvx.h index 083f4d92c6..94f272e286 100644 --- a/target/hexagon/gen_tcg_hvx.h +++ b/target/hexagon/gen_tcg_hvx.h @@ -136,7 +136,6 @@ static inline void assert_vhist_tmp(DisasContext *ctx) TCGLabel *end_label = gen_new_label(); \ tcg_gen_andi_tl(lsb, PsV, 1); \ tcg_gen_brcondi_tl(TCG_COND_NE, lsb, PRED, false_label); \ - tcg_temp_free(lsb); \ tcg_gen_gvec_mov(MO_64, VdV_off, VuV_off, \ sizeof(MMVector), sizeof(MMVector)); \ tcg_gen_br(end_label); \ @@ -212,7 +211,6 @@ static inline void assert_vhist_tmp(DisasContext *ctx) tcg_gen_andi_tl(shift, RtV, 15); \ tcg_gen_gvec_sars(MO_16, VdV_off, VuV_off, shift, \ sizeof(MMVector), sizeof(MMVector)); \ - tcg_temp_free(shift); \ } while (0) #define fGEN_TCG_V6_vasrh_acc(SHORTCODE) \ @@ -224,7 +222,6 @@ static inline void assert_vhist_tmp(DisasContext *ctx) sizeof(MMVector), sizeof(MMVector)); \ tcg_gen_gvec_add(MO_16, VxV_off, VxV_off, tmpoff, \ sizeof(MMVector), sizeof(MMVector)); \ - tcg_temp_free(shift); \ } while (0) #define fGEN_TCG_V6_vasrw(SHORTCODE) \ @@ -233,7 +230,6 @@ static inline void assert_vhist_tmp(DisasContext *ctx) tcg_gen_andi_tl(shift, RtV, 31); \ tcg_gen_gvec_sars(MO_32, VdV_off, VuV_off, shift, \ sizeof(MMVector), sizeof(MMVector)); \ - tcg_temp_free(shift); \ } while (0) #define fGEN_TCG_V6_vasrw_acc(SHORTCODE) \ @@ -245,7 +241,6 @@ static inline void assert_vhist_tmp(DisasContext *ctx) sizeof(MMVector), sizeof(MMVector)); \ tcg_gen_gvec_add(MO_32, VxV_off, VxV_off, tmpoff, \ sizeof(MMVector), sizeof(MMVector)); \ - tcg_temp_free(shift); \ } while (0) #define fGEN_TCG_V6_vlsrb(SHORTCODE) \ @@ -254,7 +249,6 @@ static inline void assert_vhist_tmp(DisasContext *ctx) tcg_gen_andi_tl(shift, RtV, 7); \ tcg_gen_gvec_shrs(MO_8, VdV_off, VuV_off, shift, \ sizeof(MMVector), sizeof(MMVector)); \ - tcg_temp_free(shift); \ } while (0) #define fGEN_TCG_V6_vlsrh(SHORTCODE) \ @@ -263,7 +257,6 @@ static inline void assert_vhist_tmp(DisasContext *ctx) tcg_gen_andi_tl(shift, RtV, 15); \ tcg_gen_gvec_shrs(MO_16, VdV_off, VuV_off, shift, \ sizeof(MMVector), sizeof(MMVector)); \ - tcg_temp_free(shift); \ } while (0) #define fGEN_TCG_V6_vlsrw(SHORTCODE) \ @@ -272,7 +265,6 @@ static inline void 
assert_vhist_tmp(DisasContext *ctx) tcg_gen_andi_tl(shift, RtV, 31); \ tcg_gen_gvec_shrs(MO_32, VdV_off, VuV_off, shift, \ sizeof(MMVector), sizeof(MMVector)); \ - tcg_temp_free(shift); \ } while (0) /* Vector shift left - various forms */ @@ -282,7 +274,6 @@ static inline void assert_vhist_tmp(DisasContext *ctx) tcg_gen_andi_tl(shift, RtV, 7); \ tcg_gen_gvec_shls(MO_8, VdV_off, VuV_off, shift, \ sizeof(MMVector), sizeof(MMVector)); \ - tcg_temp_free(shift); \ } while (0) #define fGEN_TCG_V6_vaslh(SHORTCODE) \ @@ -291,7 +282,6 @@ static inline void assert_vhist_tmp(DisasContext *ctx) tcg_gen_andi_tl(shift, RtV, 15); \ tcg_gen_gvec_shls(MO_16, VdV_off, VuV_off, shift, \ sizeof(MMVector), sizeof(MMVector)); \ - tcg_temp_free(shift); \ } while (0) #define fGEN_TCG_V6_vaslh_acc(SHORTCODE) \ @@ -303,7 +293,6 @@ static inline void assert_vhist_tmp(DisasContext *ctx) sizeof(MMVector), sizeof(MMVector)); \ tcg_gen_gvec_add(MO_16, VxV_off, VxV_off, tmpoff, \ sizeof(MMVector), sizeof(MMVector)); \ - tcg_temp_free(shift); \ } while (0) #define fGEN_TCG_V6_vaslw(SHORTCODE) \ @@ -312,7 +301,6 @@ static inline void assert_vhist_tmp(DisasContext *ctx) tcg_gen_andi_tl(shift, RtV, 31); \ tcg_gen_gvec_shls(MO_32, VdV_off, VuV_off, shift, \ sizeof(MMVector), sizeof(MMVector)); \ - tcg_temp_free(shift); \ } while (0) #define fGEN_TCG_V6_vaslw_acc(SHORTCODE) \ @@ -324,7 +312,6 @@ static inline void assert_vhist_tmp(DisasContext *ctx) sizeof(MMVector), sizeof(MMVector)); \ tcg_gen_gvec_add(MO_32, VxV_off, VxV_off, tmpoff, \ sizeof(MMVector), sizeof(MMVector)); \ - tcg_temp_free(shift); \ } while (0) /* Vector max - various forms */ @@ -564,7 +551,6 @@ static inline void assert_vhist_tmp(DisasContext *ctx) GET_EA; \ PRED; \ tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, false_label); \ - tcg_temp_free(LSB); \ gen_vreg_load(ctx, DSTOFF, EA, true); \ INC; \ tcg_gen_br(end_label); \ @@ -735,7 +721,6 @@ static inline void assert_vhist_tmp(DisasContext *ctx) GET_EA; \ PRED; \ tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, false_label); \ - tcg_temp_free(LSB); \ gen_vreg_store(ctx, EA, SRCOFF, insn->slot, ALIGN); \ INC; \ tcg_gen_br(end_label); \ diff --git a/target/hexagon/genptr.c b/target/hexagon/genptr.c index 591461b043..86bd093ce8 100644 --- a/target/hexagon/genptr.c +++ b/target/hexagon/genptr.c @@ -65,8 +65,6 @@ static inline void gen_masked_reg_write(TCGv new_val, TCGv cur_val, tcg_gen_andi_tl(new_val, new_val, ~reg_mask); tcg_gen_andi_tl(tmp, cur_val, reg_mask); tcg_gen_or_tl(new_val, new_val, tmp); - - tcg_temp_free(tmp); } } @@ -90,8 +88,6 @@ static inline void gen_log_predicated_reg_write(int rnum, TCGv val, tcg_gen_setcond_tl(TCG_COND_EQ, slot_mask, slot_mask, zero); tcg_gen_or_tl(hex_reg_written[rnum], hex_reg_written[rnum], slot_mask); } - - tcg_temp_free(slot_mask); } void gen_log_reg_write(int rnum, TCGv val) @@ -137,9 +133,6 @@ static void gen_log_predicated_reg_write_pair(int rnum, TCGv_i64 val, tcg_gen_or_tl(hex_reg_written[rnum + 1], hex_reg_written[rnum + 1], slot_mask); } - - tcg_temp_free(val32); - tcg_temp_free(slot_mask); } static void gen_log_reg_write_pair(int rnum, TCGv_i64 val) @@ -165,8 +158,6 @@ static void gen_log_reg_write_pair(int rnum, TCGv_i64 val) /* Do this so HELPER(debug_commit_end) will know */ tcg_gen_movi_tl(hex_reg_written[rnum + 1], 1); } - - tcg_temp_free(val32); } void gen_log_pred_write(DisasContext *ctx, int pnum, TCGv val) @@ -189,8 +180,6 @@ void gen_log_pred_write(DisasContext *ctx, int pnum, TCGv val) hex_new_pred_value[pnum], base_val); } tcg_gen_ori_tl(hex_pred_written, 
hex_pred_written, 1 << pnum); - - tcg_temp_free(base_val); } static inline void gen_read_p3_0(TCGv control_reg) @@ -238,7 +227,6 @@ static inline void gen_read_ctrl_reg_pair(DisasContext *ctx, const int reg_num, TCGv p3_0 = tcg_temp_new(); gen_read_p3_0(p3_0); tcg_gen_concat_i32_i64(dest, p3_0, hex_gpr[reg_num + 1]); - tcg_temp_free(p3_0); } else if (reg_num == HEX_REG_PC - 1) { TCGv pc = tcg_constant_tl(ctx->base.pc_next); tcg_gen_concat_i32_i64(dest, hex_gpr[reg_num], pc); @@ -250,14 +238,11 @@ static inline void gen_read_ctrl_reg_pair(DisasContext *ctx, const int reg_num, tcg_gen_addi_tl(insn_cnt, hex_gpr[HEX_REG_QEMU_INSN_CNT], ctx->num_insns); tcg_gen_concat_i32_i64(dest, pkt_cnt, insn_cnt); - tcg_temp_free(pkt_cnt); - tcg_temp_free(insn_cnt); } else if (reg_num == HEX_REG_QEMU_HVX_CNT) { TCGv hvx_cnt = tcg_temp_new(); tcg_gen_addi_tl(hvx_cnt, hex_gpr[HEX_REG_QEMU_HVX_CNT], ctx->num_hvx_insns); tcg_gen_concat_i32_i64(dest, hvx_cnt, hex_gpr[reg_num + 1]); - tcg_temp_free(hvx_cnt); } else { tcg_gen_concat_i32_i64(dest, hex_gpr[reg_num], @@ -273,7 +258,6 @@ static void gen_write_p3_0(DisasContext *ctx, TCGv control_reg) gen_log_pred_write(ctx, i, hex_p8); ctx_log_pred_write(ctx, i); } - tcg_temp_free(hex_p8); } /* @@ -312,7 +296,6 @@ static inline void gen_write_ctrl_reg_pair(DisasContext *ctx, int reg_num, gen_write_p3_0(ctx, val32); tcg_gen_extrh_i64_i32(val32, val); gen_log_reg_write(reg_num + 1, val32); - tcg_temp_free(val32); ctx_log_reg_write(ctx, reg_num + 1); } else { gen_log_reg_write_pair(reg_num, val); @@ -346,7 +329,6 @@ TCGv gen_get_byte_i64(TCGv result, int N, TCGv_i64 src, bool sign) tcg_gen_extract_i64(res64, src, N * 8, 8); } tcg_gen_extrl_i64_i32(result, res64); - tcg_temp_free_i64(res64); return result; } @@ -371,7 +353,6 @@ void gen_set_half_i64(int N, TCGv_i64 result, TCGv src) TCGv_i64 src64 = tcg_temp_new_i64(); tcg_gen_extu_i32_i64(src64, src); tcg_gen_deposit_i64(result, result, src64, N * 16, 16); - tcg_temp_free_i64(src64); } void gen_set_byte_i64(int N, TCGv_i64 result, TCGv src) @@ -379,7 +360,6 @@ void gen_set_byte_i64(int N, TCGv_i64 result, TCGv src) TCGv_i64 src64 = tcg_temp_new_i64(); tcg_gen_extu_i32_i64(src64, src); tcg_gen_deposit_i64(result, result, src64, N * 8, 8); - tcg_temp_free_i64(src64); } static inline void gen_load_locked4u(TCGv dest, TCGv vaddr, int mem_index) @@ -412,7 +392,6 @@ static inline void gen_store_conditional4(DisasContext *ctx, ctx->mem_idx, MO_32); tcg_gen_movcond_tl(TCG_COND_EQ, pred, tmp, hex_llsc_val, one, zero); - tcg_temp_free(tmp); tcg_gen_br(done); gen_set_label(fail); @@ -439,7 +418,6 @@ static inline void gen_store_conditional8(DisasContext *ctx, tcg_gen_movcond_i64(TCG_COND_EQ, tmp, tmp, hex_llsc_val_i64, one, zero); tcg_gen_extrl_i64_i32(pred, tmp); - tcg_temp_free_i64(tmp); tcg_gen_br(done); gen_set_label(fail); @@ -607,12 +585,10 @@ static void gen_cmpnd_cmp_jmp(DisasContext *ctx, TCGv pred = tcg_temp_new(); gen_compare(cond1, pred, arg1, arg2); gen_log_pred_write(ctx, pnum, pred); - tcg_temp_free(pred); } else { TCGv pred = tcg_temp_new(); tcg_gen_mov_tl(pred, hex_new_pred_value[pnum]); gen_cond_jump(ctx, cond2, pred, pc_off); - tcg_temp_free(pred); } } @@ -666,12 +642,10 @@ static void gen_cmpnd_tstbit0_jmp(DisasContext *ctx, tcg_gen_andi_tl(pred, arg, 1); gen_8bitsof(pred, pred); gen_log_pred_write(ctx, pnum, pred); - tcg_temp_free(pred); } else { TCGv pred = tcg_temp_new(); tcg_gen_mov_tl(pred, hex_new_pred_value[pnum]); gen_cond_jump(ctx, cond, pred, pc_off); - tcg_temp_free(pred); } } @@ -681,7 +655,6 @@ 
static void gen_testbit0_jumpnv(DisasContext *ctx, TCGv pred = tcg_temp_new(); tcg_gen_andi_tl(pred, arg, 1); gen_cond_jump(ctx, cond, pred, pc_off); - tcg_temp_free(pred); } static void gen_jump(DisasContext *ctx, int pc_off) @@ -711,7 +684,6 @@ static void gen_cond_call(DisasContext *ctx, TCGv pred, tcg_gen_andi_tl(lsb, pred, 1); gen_write_new_pc_pcrel(ctx, pc_off, cond, lsb); tcg_gen_brcondi_tl(cond, lsb, 0, skip); - tcg_temp_free(lsb); next_PC = tcg_constant_tl(ctx->pkt->pc + ctx->pkt->encod_pkt_size_in_bytes); gen_log_reg_write(HEX_REG_LR, next_PC); @@ -771,8 +743,6 @@ static void gen_endloop0(DisasContext *ctx) } gen_set_label(label3); } - - tcg_temp_free(lpcfg); } static void gen_cmp_jumpnv(DisasContext *ctx, @@ -781,7 +751,6 @@ static void gen_cmp_jumpnv(DisasContext *ctx, TCGv pred = tcg_temp_new(); tcg_gen_setcond_tl(cond, pred, val, src); gen_cond_jump(ctx, TCG_COND_EQ, pred, pc_off); - tcg_temp_free(pred); } static void gen_cmpi_jumpnv(DisasContext *ctx, @@ -790,7 +759,6 @@ static void gen_cmpi_jumpnv(DisasContext *ctx, TCGv pred = tcg_temp_new(); tcg_gen_setcondi_tl(cond, pred, val, src); gen_cond_jump(ctx, TCG_COND_EQ, pred, pc_off); - tcg_temp_free(pred); } /* Shift left with saturation */ @@ -830,11 +798,6 @@ static void gen_shl_sat(TCGv dst, TCGv src, TCGv shift_amt) tcg_gen_or_tl(hex_new_value[HEX_REG_USR], hex_new_value[HEX_REG_USR], ovf); tcg_gen_movcond_tl(TCG_COND_EQ, dst, dst_sar, src, dst, satval); - - tcg_temp_free(sh32); - tcg_temp_free(dst_sar); - tcg_temp_free(ovf); - tcg_temp_free(satval); } static void gen_sar(TCGv dst, TCGv src, TCGv shift_amt) @@ -846,7 +809,6 @@ static void gen_sar(TCGv dst, TCGv src, TCGv shift_amt) TCGv tmp = tcg_temp_new(); tcg_gen_umin_tl(tmp, shift_amt, tcg_constant_tl(31)); tcg_gen_sar_tl(dst, src, tmp); - tcg_temp_free(tmp); } /* Bidirectional shift right with saturation */ @@ -869,8 +831,6 @@ static void gen_asr_r_r_sat(TCGv RdV, TCGv RsV, TCGv RtV) gen_sar(RdV, RsV, shift_amt); gen_set_label(done); - - tcg_temp_free(shift_amt); } /* Bidirectional shift left with saturation */ @@ -893,8 +853,6 @@ static void gen_asl_r_r_sat(TCGv RdV, TCGv RsV, TCGv RtV) gen_shl_sat(RdV, RsV, shift_amt); gen_set_label(done); - - tcg_temp_free(shift_amt); } static intptr_t vreg_src_off(DisasContext *ctx, int num) @@ -924,7 +882,6 @@ static void gen_log_vreg_write(DisasContext *ctx, intptr_t srcoff, int num, /* Don't do anything if the slot was cancelled */ tcg_gen_extract_tl(cancelled, hex_slot_cancelled, slot_num, 1); tcg_gen_brcondi_tl(TCG_COND_NE, cancelled, 0, label_end); - tcg_temp_free(cancelled); } if (type != EXT_TMP) { @@ -965,7 +922,6 @@ static void gen_log_qreg_write(intptr_t srcoff, int num, int vnew, /* Don't do anything if the slot was cancelled */ tcg_gen_extract_tl(cancelled, hex_slot_cancelled, slot_num, 1); tcg_gen_brcondi_tl(TCG_COND_NE, cancelled, 0, label_end); - tcg_temp_free(cancelled); } dstoff = offsetof(CPUHexagonState, future_QRegs[num]); @@ -989,7 +945,6 @@ static void gen_vreg_load(DisasContext *ctx, intptr_t dstoff, TCGv src, tcg_gen_addi_tl(src, src, 8); tcg_gen_st_i64(tmp, cpu_env, dstoff + i * 8); } - tcg_temp_free_i64(tmp); } static void gen_vreg_store(DisasContext *ctx, TCGv EA, intptr_t srcoff, @@ -1061,10 +1016,6 @@ static void vec_to_qvec(size_t size, intptr_t dstoff, intptr_t srcoff) tcg_gen_st8_i64(mask, cpu_env, dstoff + i); } - tcg_temp_free_i64(tmp); - tcg_temp_free_i64(word); - tcg_temp_free_i64(bits); - tcg_temp_free_i64(mask); } void probe_noshuf_load(TCGv va, int s, int mi) @@ -1088,7 +1039,6 @@ void 
gen_set_usr_field_if(int field, TCGv val) tcg_gen_or_tl(hex_new_value[HEX_REG_USR], hex_new_value[HEX_REG_USR], tmp); - tcg_temp_free(tmp); } else { TCGLabel *skip_label = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_EQ, val, 0, skip_label); @@ -1140,7 +1090,6 @@ void gen_sat_i64_ovfl(TCGv ovfl, TCGv_i64 dest, TCGv_i64 source, int width) ovfl_64 = tcg_temp_new_i64(); tcg_gen_setcond_i64(TCG_COND_NE, ovfl_64, dest, source); tcg_gen_trunc_i64_tl(ovfl, ovfl_64); - tcg_temp_free_i64(ovfl_64); } void gen_satu_i64(TCGv_i64 dest, TCGv_i64 source, int width) @@ -1158,7 +1107,6 @@ void gen_satu_i64_ovfl(TCGv ovfl, TCGv_i64 dest, TCGv_i64 source, int width) ovfl_64 = tcg_temp_new_i64(); tcg_gen_setcond_i64(TCG_COND_NE, ovfl_64, dest, source); tcg_gen_trunc_i64_tl(ovfl, ovfl_64); - tcg_temp_free_i64(ovfl_64); } /* Implements the fADDSAT64 macro in TCG */ @@ -1182,15 +1130,12 @@ void gen_add_sat_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b) /* if (xor & mask) */ tcg_gen_and_i64(cond1, xor, mask); - tcg_temp_free_i64(xor); tcg_gen_brcondi_i64(TCG_COND_NE, cond1, 0, no_ovfl_label); - tcg_temp_free_i64(cond1); /* else if ((a ^ sum) & mask) */ tcg_gen_xor_i64(cond2, a, sum); tcg_gen_and_i64(cond2, cond2, mask); tcg_gen_brcondi_i64(TCG_COND_NE, cond2, 0, ovfl_label); - tcg_temp_free_i64(cond2); /* fallthrough to no_ovfl_label branch */ /* if branch */ @@ -1201,10 +1146,7 @@ void gen_add_sat_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b) /* else if branch */ gen_set_label(ovfl_label); tcg_gen_and_i64(cond3, sum, mask); - tcg_temp_free_i64(mask); - tcg_temp_free_i64(sum); tcg_gen_movcond_i64(TCG_COND_NE, ret, cond3, zero, max_pos, max_neg); - tcg_temp_free_i64(cond3); SET_USR_FIELD(USR_OVF, 1); gen_set_label(ret_label); diff --git a/target/hexagon/idef-parser/README.rst b/target/hexagon/idef-parser/README.rst index c230fec124..debeddfde5 100644 --- a/target/hexagon/idef-parser/README.rst +++ b/target/hexagon/idef-parser/README.rst @@ -31,7 +31,6 @@ idef-parser will compile the above code into the following code: TCGv_i32 tmp_0 = tcg_temp_new_i32(); tcg_gen_add_i32(tmp_0, RsV, RtV); tcg_gen_mov_i32(RdV, tmp_0); - tcg_temp_free_i32(tmp_0); } The output of the compilation process will be a function, containing the @@ -102,12 +101,6 @@ The result of the addition is now stored in the temporary, we move it into the correct destination register. This code may seem inefficient, but QEMU will perform some optimizations on the tinycode, reducing the unnecessary copy. -:: - - tcg_temp_free_i32(tmp_0); - -Finally, we free the temporary we used to hold the addition result. - Parser Input ------------ @@ -524,7 +517,6 @@ instruction, TCGv_i32 tmp_0 = tcg_temp_new_i32(); tcg_gen_add_i32(tmp_0, RsV, RsV); tcg_gen_mov_i32(RdV, tmp_0); - tcg_temp_free_i32(tmp_0); } Here the bug, albeit hard to spot, is in ``tcg_gen_add_i32(tmp_0, RsV, RsV);`` diff --git a/target/hexagon/idef-parser/idef-parser.h b/target/hexagon/idef-parser/idef-parser.h index 5c49d4da3e..17d2ebfaf6 100644 --- a/target/hexagon/idef-parser/idef-parser.h +++ b/target/hexagon/idef-parser/idef-parser.h @@ -185,7 +185,6 @@ typedef struct HexValue { unsigned bit_width; /**< Bit width of the rvalue */ HexSignedness signedness; /**< Unsigned flag for the rvalue */ bool is_dotnew; /**< rvalue of predicate type is dotnew? 
*/ - bool is_manual; /**< Opt out of automatic freeing of params */ } HexValue; /** diff --git a/target/hexagon/idef-parser/idef-parser.y b/target/hexagon/idef-parser/idef-parser.y index c14cb39500..c784726d41 100644 --- a/target/hexagon/idef-parser/idef-parser.y +++ b/target/hexagon/idef-parser/idef-parser.y @@ -269,9 +269,6 @@ statements : statements statement statement : control_statement | var_decl ';' | rvalue ';' - { - gen_rvalue_free(c, &@1, &$1); - } | code_block | ';' ; @@ -347,7 +344,6 @@ assign_statement : lvalue '=' rvalue $3 = gen_rvalue_truncate(c, &@1, &$3); $3 = rvalue_materialize(c, &@1, &$3); OUT(c, &@1, "gen_write_new_pc(", &$3, ");\n"); - gen_rvalue_free(c, &@1, &$3); /* Free temporary value */ } | LOAD '(' IMM ',' IMM ',' SIGN ',' var ',' lvalue ')' { @@ -376,7 +372,6 @@ assign_statement : lvalue '=' rvalue $3 = gen_rvalue_truncate(c, &@1, &$3); $3 = rvalue_materialize(c, &@1, &$3); OUT(c, &@1, "SET_USR_FIELD(USR_LPCFG, ", &$3, ");\n"); - gen_rvalue_free(c, &@1, &$3); } | DEPOSIT '(' rvalue ',' rvalue ',' rvalue ')' { @@ -421,10 +416,6 @@ control_statement : frame_check ; frame_check : FCHK '(' rvalue ',' rvalue ')' ';' - { - gen_rvalue_free(c, &@1, &$3); - gen_rvalue_free(c, &@1, &$5); - } ; cancel_statement : LOAD_CANCEL @@ -543,7 +534,6 @@ rvalue : FAIL rvalue.imm.type = IMM_CONSTEXT; rvalue.signedness = UNSIGNED; rvalue.is_dotnew = false; - rvalue.is_manual = false; $$ = rvalue; } | var @@ -702,7 +692,6 @@ rvalue : FAIL } | rvalue '?' { - $1.is_manual = true; Ternary t = { 0 }; t.state = IN_LEFT; t.cond = $1; @@ -774,7 +763,6 @@ rvalue : FAIL @1.last_column = @6.last_column; $$ = gen_tmp(c, &@1, 32, UNSIGNED); OUT(c, &@1, "gen_read_ireg(", &$$, ", ", &$3, ", ", &$6, ");\n"); - gen_rvalue_free(c, &@1, &$3); } | CIRCADD '(' rvalue ',' rvalue ',' rvalue ')' { @@ -795,7 +783,7 @@ rvalue : FAIL } | LPCFG { - $$ = gen_tmp_value(c, &@1, "0", 32, UNSIGNED); + $$ = gen_tmp(c, &@1, 32, UNSIGNED); OUT(c, &@1, "GET_USR_FIELD(USR_LPCFG, ", &$$, ");\n"); } | EXTRACT '(' rvalue ',' rvalue ')' diff --git a/target/hexagon/idef-parser/parser-helpers.c b/target/hexagon/idef-parser/parser-helpers.c index 3025040640..e1a55412c8 100644 --- a/target/hexagon/idef-parser/parser-helpers.c +++ b/target/hexagon/idef-parser/parser-helpers.c @@ -278,7 +278,6 @@ static HexValue gen_constant(Context *c, rvalue.bit_width = bit_width; rvalue.signedness = signedness; rvalue.is_dotnew = false; - rvalue.is_manual = true; rvalue.tmp.index = c->inst.tmp_count; OUT(c, locp, "TCGv_i", &bit_width, " tmp_", &c->inst.tmp_count, " = tcg_constant_i", &bit_width, "(", value, ");\n"); @@ -299,7 +298,6 @@ HexValue gen_tmp(Context *c, rvalue.bit_width = bit_width; rvalue.signedness = signedness; rvalue.is_dotnew = false; - rvalue.is_manual = false; rvalue.tmp.index = c->inst.tmp_count; OUT(c, locp, "TCGv_i", &bit_width, " tmp_", &c->inst.tmp_count, " = tcg_temp_new_i", &bit_width, "();\n"); @@ -307,30 +305,9 @@ HexValue gen_tmp(Context *c, return rvalue; } -HexValue gen_tmp_value(Context *c, - YYLTYPE *locp, - const char *value, - unsigned bit_width, - HexSignedness signedness) -{ - HexValue rvalue; - assert(bit_width == 32 || bit_width == 64); - memset(&rvalue, 0, sizeof(HexValue)); - rvalue.type = TEMP; - rvalue.bit_width = bit_width; - rvalue.signedness = signedness; - rvalue.is_dotnew = false; - rvalue.is_manual = false; - rvalue.tmp.index = c->inst.tmp_count; - OUT(c, locp, "TCGv_i", &bit_width, " tmp_", &c->inst.tmp_count, - " = tcg_const_i", &bit_width, "(", value, ");\n"); - c->inst.tmp_count++; - return 
rvalue; -} - -static HexValue gen_tmp_value_from_imm(Context *c, - YYLTYPE *locp, - HexValue *value) +static HexValue gen_constant_from_imm(Context *c, + YYLTYPE *locp, + HexValue *value) { HexValue rvalue; assert(value->type == IMMEDIATE); @@ -339,14 +316,13 @@ static HexValue gen_tmp_value_from_imm(Context *c, rvalue.bit_width = value->bit_width; rvalue.signedness = value->signedness; rvalue.is_dotnew = false; - rvalue.is_manual = false; rvalue.tmp.index = c->inst.tmp_count; /* - * Here we output the call to `tcg_const_i<width>` in + * Here we output the call to `tcg_constant_i<width>` in * order to create the temporary value. Note, that we * add a cast * - * `tcg_const_i<width>`((int<width>_t) ...)` + * `tcg_constant_i<width>`((int<width>_t) ...)` * * This cast is required to avoid implicit integer * conversion warnings since all immediates are @@ -354,7 +330,7 @@ static HexValue gen_tmp_value_from_imm(Context *c, * integer is 32-bit. */ OUT(c, locp, "TCGv_i", &rvalue.bit_width, " tmp_", &c->inst.tmp_count); - OUT(c, locp, " = tcg_const_i", &rvalue.bit_width, + OUT(c, locp, " = tcg_constant_i", &rvalue.bit_width, "((int", &rvalue.bit_width, "_t) (", value, "));\n"); c->inst.tmp_count++; @@ -375,7 +351,6 @@ HexValue gen_imm_value(Context *c __attribute__((unused)), rvalue.bit_width = bit_width; rvalue.signedness = signedness; rvalue.is_dotnew = false; - rvalue.is_manual = false; rvalue.imm.type = VALUE; rvalue.imm.value = value; return rvalue; @@ -390,7 +365,6 @@ HexValue gen_imm_qemu_tmp(Context *c, YYLTYPE *locp, unsigned bit_width, memset(&rvalue, 0, sizeof(HexValue)); rvalue.type = IMMEDIATE; rvalue.is_dotnew = false; - rvalue.is_manual = false; rvalue.bit_width = bit_width; rvalue.signedness = signedness; rvalue.imm.type = QEMU_TMP; @@ -398,26 +372,10 @@ HexValue gen_imm_qemu_tmp(Context *c, YYLTYPE *locp, unsigned bit_width, return rvalue; } -void gen_rvalue_free(Context *c, YYLTYPE *locp, HexValue *rvalue) -{ - if (rvalue->type == TEMP && !rvalue->is_manual) { - const char *bit_suffix = (rvalue->bit_width == 64) ? "i64" : "i32"; - OUT(c, locp, "tcg_temp_free_", bit_suffix, "(", rvalue, ");\n"); - } -} - -static void gen_rvalue_free_manual(Context *c, YYLTYPE *locp, HexValue *rvalue) -{ - rvalue->is_manual = false; - gen_rvalue_free(c, locp, rvalue); -} - HexValue rvalue_materialize(Context *c, YYLTYPE *locp, HexValue *rvalue) { if (rvalue->type == IMMEDIATE) { - HexValue res = gen_tmp_value_from_imm(c, locp, rvalue); - gen_rvalue_free(c, locp, rvalue); - return res; + return gen_constant_from_imm(c, locp, rvalue); } return *rvalue; } @@ -445,7 +403,6 @@ HexValue gen_rvalue_extend(Context *c, YYLTYPE *locp, HexValue *rvalue) const char *sign_suffix = is_unsigned ? 
"u" : ""; OUT(c, locp, "tcg_gen_ext", sign_suffix, "_i32_i64(", &res, ", ", rvalue, ");\n"); - gen_rvalue_free(c, locp, rvalue); return res; } } @@ -460,7 +417,6 @@ HexValue gen_rvalue_truncate(Context *c, YYLTYPE *locp, HexValue *rvalue) if (rvalue->bit_width == 64) { HexValue res = gen_tmp(c, locp, 32, rvalue->signedness); OUT(c, locp, "tcg_gen_trunc_i64_tl(", &res, ", ", rvalue, ");\n"); - gen_rvalue_free(c, locp, rvalue); return res; } } @@ -587,11 +543,6 @@ HexValue gen_bin_cmp(Context *c, fprintf(stderr, "Error in evalutating immediateness!"); abort(); } - - /* Free operands */ - gen_rvalue_free(c, locp, &op1_m); - gen_rvalue_free(c, locp, &op2_m); - return res; } @@ -627,8 +578,6 @@ static void gen_simple_op(Context *c, YYLTYPE *locp, unsigned bit_width, "(", res, ", ", op1, ", ", op2, ");\n"); break; } - gen_rvalue_free(c, locp, op1); - gen_rvalue_free(c, locp, op2); } static void gen_sub_op(Context *c, YYLTYPE *locp, unsigned bit_width, @@ -658,8 +607,6 @@ static void gen_sub_op(Context *c, YYLTYPE *locp, unsigned bit_width, "(", res, ", ", op1, ", ", op2, ");\n"); } break; } - gen_rvalue_free(c, locp, op1); - gen_rvalue_free(c, locp, op2); } static void gen_asl_op(Context *c, YYLTYPE *locp, unsigned bit_width, @@ -711,10 +658,7 @@ static void gen_asl_op(Context *c, YYLTYPE *locp, unsigned bit_width, OUT(c, locp, "tcg_gen_movcond_i", &bit_width); OUT(c, locp, "(TCG_COND_GEU, ", res, ", ", &op2_m, ", ", &edge); OUT(c, locp, ", ", &zero, ", ", res, ");\n"); - gen_rvalue_free(c, locp, &edge); } - gen_rvalue_free(c, locp, &op1_m); - gen_rvalue_free(c, locp, &op2_m); } static void gen_asr_op(Context *c, YYLTYPE *locp, unsigned bit_width, @@ -769,11 +713,7 @@ static void gen_asr_op(Context *c, YYLTYPE *locp, unsigned bit_width, OUT(c, locp, "tcg_gen_movcond_i", &bit_width); OUT(c, locp, "(TCG_COND_GEU, ", res, ", ", &op2_m, ", ", &edge); OUT(c, locp, ", ", &tmp, ", ", res, ");\n"); - gen_rvalue_free(c, locp, &edge); - gen_rvalue_free(c, locp, &tmp); } - gen_rvalue_free(c, locp, &op1_m); - gen_rvalue_free(c, locp, &op2_m); } static void gen_lsr_op(Context *c, YYLTYPE *locp, unsigned bit_width, @@ -815,10 +755,7 @@ static void gen_lsr_op(Context *c, YYLTYPE *locp, unsigned bit_width, OUT(c, locp, "tcg_gen_movcond_i", &bit_width); OUT(c, locp, "(TCG_COND_GEU, ", res, ", ", &op2_m, ", ", &edge); OUT(c, locp, ", ", &zero, ", ", res, ");\n"); - gen_rvalue_free(c, locp, &edge); } - gen_rvalue_free(c, locp, &op1_m); - gen_rvalue_free(c, locp, &op2_m); } /* @@ -847,9 +784,6 @@ static void gen_andl_op(Context *c, YYLTYPE *locp, unsigned bit_width, tmp2 = gen_bin_cmp(c, locp, TCG_COND_NE, op2, &zero); OUT(c, locp, "tcg_gen_and_", bit_suffix, "(", res, ", ", &tmp1, ", ", &tmp2, ");\n"); - gen_rvalue_free_manual(c, locp, &zero); - gen_rvalue_free(c, locp, &tmp1); - gen_rvalue_free(c, locp, &tmp2); break; } } @@ -892,8 +826,6 @@ static void gen_minmax_op(Context *c, YYLTYPE *locp, unsigned bit_width, OUT(c, locp, res, ", ", op1, ", ", &op2_m, ");\n"); break; } - gen_rvalue_free(c, locp, &op1_m); - gen_rvalue_free(c, locp, &op2_m); } /* Code generation functions */ @@ -1055,7 +987,6 @@ HexValue gen_cast_op(Context *c, &res, ", ", src, ");\n"); } } - gen_rvalue_free(c, locp, src); return res; } } @@ -1115,8 +1046,6 @@ static HexValue gen_extend_imm_width_op(Context *c, if (need_guarding) { OUT(c, locp, "}\n"); } - - gen_rvalue_free(c, locp, value); return res; } else { /* @@ -1141,8 +1070,6 @@ static HexValue gen_extend_imm_width_op(Context *c, ", 0);\n"); OUT(c, locp, "}\n"); } - - gen_rvalue_free(c, 
locp, value); return res; } } @@ -1173,16 +1100,11 @@ static HexValue gen_extend_tcg_width_op(Context *c, OUT(c, locp, "tcg_gen_subfi_i", &dst_width); OUT(c, locp, "(", &shift, ", ", &dst_width, ", ", &src_width_m, ");\n"); if (signedness == UNSIGNED) { - const char *mask_str = (dst_width == 32) - ? "0xffffffff" - : "0xffffffffffffffff"; - HexValue mask = gen_tmp_value(c, locp, mask_str, - dst_width, UNSIGNED); + HexValue mask = gen_constant(c, locp, "-1", dst_width, UNSIGNED); OUT(c, locp, "tcg_gen_shr_i", &dst_width, "(", - &mask, ", ", &mask, ", ", &shift, ");\n"); + &res, ", ", &mask, ", ", &shift, ");\n"); OUT(c, locp, "tcg_gen_and_i", &dst_width, "(", - &res, ", ", value, ", ", &mask, ");\n"); - gen_rvalue_free(c, locp, &mask); + &res, ", ", &res, ", ", value, ");\n"); } else { OUT(c, locp, "tcg_gen_shl_i", &dst_width, "(", &res, ", ", value, ", ", &shift, ");\n"); @@ -1194,10 +1116,6 @@ static HexValue gen_extend_tcg_width_op(Context *c, OUT(c, locp, &src_width_m, ", ", &zero, ", ", &zero, ", ", &res, ");\n"); - gen_rvalue_free(c, locp, &src_width_m); - gen_rvalue_free(c, locp, value); - gen_rvalue_free(c, locp, &shift); - return res; } @@ -1293,15 +1211,12 @@ void gen_rdeposit_op(Context *c, */ k64 = gen_bin_op(c, locp, SUB_OP, &k64, &width_m); mask = gen_bin_op(c, locp, LSR_OP, &mask, &k64); - begin_m.is_manual = true; mask = gen_bin_op(c, locp, ASL_OP, &mask, &begin_m); - mask.is_manual = true; value_m = gen_bin_op(c, locp, ASL_OP, &value_m, &begin_m); value_m = gen_bin_op(c, locp, ANDB_OP, &value_m, &mask); OUT(c, locp, "tcg_gen_not_i", &dst->bit_width, "(", &mask, ", ", &mask, ");\n"); - mask.is_manual = false; res = gen_bin_op(c, locp, ANDB_OP, dst, &mask); res = gen_bin_op(c, locp, ORB_OP, &res, &value_m); @@ -1316,9 +1231,6 @@ void gen_rdeposit_op(Context *c, dst); OUT(c, locp, ", ", &width_m, ", ", &zero, ", ", &res, ", ", dst, ");\n"); - - gen_rvalue_free(c, locp, width); - gen_rvalue_free(c, locp, &res); } void gen_deposit_op(Context *c, @@ -1352,8 +1264,6 @@ void gen_deposit_op(Context *c, value_m = rvalue_materialize(c, locp, &value_m); OUT(c, locp, "tcg_gen_deposit_i", &bit_width, "(", dst, ", ", dst, ", "); OUT(c, locp, &value_m, ", ", index, " * ", &width, ", ", &width, ");\n"); - gen_rvalue_free(c, locp, index); - gen_rvalue_free(c, locp, &value_m); } HexValue gen_rextract_op(Context *c, @@ -1366,7 +1276,6 @@ HexValue gen_rextract_op(Context *c, HexValue res = gen_tmp(c, locp, bit_width, UNSIGNED); OUT(c, locp, "tcg_gen_extract_i", &bit_width, "(", &res); OUT(c, locp, ", ", src, ", ", &begin, ", ", &width, ");\n"); - gen_rvalue_free(c, locp, src); return res; } @@ -1399,12 +1308,8 @@ HexValue gen_extract_op(Context *c, const char *sign_suffix = (extract->signedness == UNSIGNED) ? "u" : ""; OUT(c, locp, "tcg_gen_ext", sign_suffix, "_i32_i64(", &tmp, ", ", &res, ");\n"); - gen_rvalue_free(c, locp, &res); res = tmp; } - - gen_rvalue_free(c, locp, src); - gen_rvalue_free(c, locp, index); return res; } @@ -1422,8 +1327,6 @@ void gen_write_reg(Context *c, YYLTYPE *locp, HexValue *reg, HexValue *value) locp, "ctx_log_reg_write(ctx, ", ®->reg.id, ");\n"); - gen_rvalue_free(c, locp, reg); - gen_rvalue_free(c, locp, &value_m); } void gen_assign(Context *c, @@ -1458,8 +1361,6 @@ void gen_assign(Context *c, const char *imm_suffix = (value_m.type == IMMEDIATE) ? 
"i" : ""; OUT(c, locp, "tcg_gen_mov", imm_suffix, "_i", &bit_width, "(", dst, ", ", &value_m, ");\n"); - - gen_rvalue_free(c, locp, &value_m); } HexValue gen_convround(Context *c, @@ -1475,8 +1376,6 @@ HexValue gen_convround(Context *c, HexValue and; HexValue src_p1; - src_m.is_manual = true; - and = gen_bin_op(c, locp, ANDB_OP, &src_m, &mask); src_p1 = gen_bin_op(c, locp, ADD_OP, &src_m, &one); @@ -1484,12 +1383,6 @@ HexValue gen_convround(Context *c, OUT(c, locp, ", ", &and, ", ", &mask, ", "); OUT(c, locp, &src_p1, ", ", &src_m, ");\n"); - /* Free src but use the original `is_manual` value */ - gen_rvalue_free(c, locp, src); - - /* Free the rest of the values */ - gen_rvalue_free(c, locp, &src_p1); - return res; } @@ -1515,9 +1408,6 @@ static HexValue gen_convround_n_b(Context *c, OUT(c, locp, "tcg_gen_add_i64(", &res); OUT(c, locp, ", ", &res, ", ", &tmp_64, ");\n"); - gen_rvalue_free(c, locp, &tmp); - gen_rvalue_free(c, locp, &tmp_64); - return res; } @@ -1540,10 +1430,6 @@ static HexValue gen_convround_n_c(Context *c, OUT(c, locp, "tcg_gen_add_i64(", &res); OUT(c, locp, ", ", &res, ", ", &tmp_64, ");\n"); - gen_rvalue_free(c, locp, &one); - gen_rvalue_free(c, locp, &tmp); - gen_rvalue_free(c, locp, &tmp_64); - return res; } @@ -1614,18 +1500,6 @@ HexValue gen_convround_n(Context *c, OUT(c, locp, "tcg_gen_shr_i64(", &res); OUT(c, locp, ", ", &res, ", ", &n_64, ");\n"); - gen_rvalue_free(c, locp, &src_casted); - gen_rvalue_free(c, locp, &pos_casted); - - gen_rvalue_free(c, locp, &r1); - gen_rvalue_free(c, locp, &r2); - gen_rvalue_free(c, locp, &r3); - - gen_rvalue_free(c, locp, &cond); - gen_rvalue_free(c, locp, &cond_64); - gen_rvalue_free(c, locp, &mask); - gen_rvalue_free(c, locp, &n_64); - res = gen_rvalue_truncate(c, locp, &res); return res; } @@ -1659,10 +1533,6 @@ HexValue gen_round(Context *c, b = gen_extend_op(c, locp, &src_width, 64, pos, UNSIGNED); b = rvalue_materialize(c, locp, &b); - /* Disable auto-free of values used more than once */ - a.is_manual = true; - b.is_manual = true; - n_m1 = gen_bin_op(c, locp, SUB_OP, &b, &one); shifted = gen_bin_op(c, locp, ASL_OP, &one, &n_m1); sum = gen_bin_op(c, locp, ADD_OP, &shifted, &a); @@ -1671,10 +1541,6 @@ HexValue gen_round(Context *c, OUT(c, locp, "(TCG_COND_EQ, ", &res, ", ", &b, ", ", &zero); OUT(c, locp, ", ", &a, ", ", &sum, ");\n"); - gen_rvalue_free_manual(c, locp, &a); - gen_rvalue_free_manual(c, locp, &b); - gen_rvalue_free(c, locp, &sum); - return res; } @@ -1700,9 +1566,6 @@ void gen_circ_op(Context *c, ", ", modifier); OUT(c, locp, ", ", &cs, ");\n"); - gen_rvalue_free(c, locp, &increment_m); - gen_rvalue_free(c, locp, modifier); - gen_rvalue_free(c, locp, &cs); } HexValue gen_locnt_op(Context *c, YYLTYPE *locp, HexValue *src) @@ -1718,7 +1581,6 @@ HexValue gen_locnt_op(Context *c, YYLTYPE *locp, HexValue *src) &res, ", ", &src_m, ");\n"); OUT(c, locp, "tcg_gen_clzi_i", bit_suffix, "(", &res, ", ", &res, ", "); OUT(c, locp, bit_suffix, ");\n"); - gen_rvalue_free(c, locp, &src_m); return res; } @@ -1732,7 +1594,6 @@ HexValue gen_ctpop_op(Context *c, YYLTYPE *locp, HexValue *src) src_m = rvalue_materialize(c, locp, &src_m); OUT(c, locp, "tcg_gen_ctpop_i", bit_suffix, "(", &res, ", ", &src_m, ");\n"); - gen_rvalue_free(c, locp, &src_m); return res; } @@ -1751,8 +1612,6 @@ HexValue gen_rotl(Context *c, YYLTYPE *locp, HexValue *src, HexValue *width) amount = rvalue_materialize(c, locp, &amount); OUT(c, locp, "tcg_gen_rotl_", suffix, "(", &res, ", ", src, ", ", &amount, ");\n"); - gen_rvalue_free(c, locp, src); - 
gen_rvalue_free(c, locp, &amount); return res; } @@ -1777,10 +1636,6 @@ HexValue gen_carry_from_add(Context *c, OUT(c, locp, "tcg_gen_add2_i64(", &res, ", ", &cf, ", ", &res, ", ", &cf); OUT(c, locp, ", ", &op2_m, ", ", &zero, ");\n"); - gen_rvalue_free(c, locp, &op1_m); - gen_rvalue_free(c, locp, &op2_m); - gen_rvalue_free(c, locp, &op3_m); - gen_rvalue_free(c, locp, &res); return cf; } @@ -1845,7 +1700,6 @@ void gen_inst_code(Context *c, YYLTYPE *locp) c->inst.name->str, c->inst.error_count); } else { - free_variables(c, locp); c->implemented_insn++; fprintf(c->enabled_file, "%s\n", c->inst.name->str); emit_footer(c); @@ -1865,7 +1719,7 @@ void gen_pred_assign(Context *c, YYLTYPE *locp, HexValue *left_pred, "Predicate assign not allowed in ternary!"); /* Extract predicate TCGv */ if (is_direct) { - *left_pred = gen_tmp_value(c, locp, "0", 32, UNSIGNED); + *left_pred = gen_tmp(c, locp, 32, UNSIGNED); } /* Extract first 8 bits, and store new predicate value */ OUT(c, locp, "tcg_gen_mov_i32(", left_pred, ", ", &r, ");\n"); @@ -1875,10 +1729,7 @@ void gen_pred_assign(Context *c, YYLTYPE *locp, HexValue *left_pred, OUT(c, locp, "gen_log_pred_write(ctx, ", pred_id, ", ", left_pred, ");\n"); OUT(c, locp, "ctx_log_pred_write(ctx, ", pred_id, ");\n"); - gen_rvalue_free(c, locp, left_pred); } - /* Free temporary value */ - gen_rvalue_free(c, locp, &r); } void gen_cancel(Context *c, YYLTYPE *locp) @@ -1928,8 +1779,6 @@ void gen_load(Context *c, YYLTYPE *locp, HexValue *width, OUT(c, locp, "(TCGv) "); } OUT(c, locp, dst, ", ", ea, ", ctx->mem_idx);\n"); - /* If the var in EA was truncated it is now a tmp HexValue, so free it. */ - gen_rvalue_free(c, locp, ea); } void gen_store(Context *c, YYLTYPE *locp, HexValue *width, HexValue *ea, @@ -1943,9 +1792,6 @@ void gen_store(Context *c, YYLTYPE *locp, HexValue *width, HexValue *ea, src_m = rvalue_materialize(c, locp, &src_m); OUT(c, locp, "gen_store", &mem_width, "(cpu_env, ", ea, ", ", &src_m); OUT(c, locp, ", insn->slot);\n"); - gen_rvalue_free(c, locp, &src_m); - /* If the var in ea was truncated it is now a tmp HexValue, so free it. */ - gen_rvalue_free(c, locp, ea); } void gen_sethalf(Context *c, YYLTYPE *locp, HexCast *sh, HexValue *n, @@ -1982,11 +1828,6 @@ void gen_setbits(Context *c, YYLTYPE *locp, HexValue *hi, HexValue *lo, OUT(c, locp, "tcg_gen_deposit_i32(", dst, ", ", dst, ", ", &tmp, ", "); OUT(c, locp, lo, ", ", &len, ");\n"); - - gen_rvalue_free(c, locp, &tmp); - gen_rvalue_free(c, locp, hi); - gen_rvalue_free(c, locp, lo); - gen_rvalue_free(c, locp, value); } unsigned gen_if_cond(Context *c, YYLTYPE *locp, HexValue *cond) @@ -1999,7 +1840,6 @@ unsigned gen_if_cond(Context *c, YYLTYPE *locp, HexValue *cond) bit_suffix = (cond->bit_width == 64) ? 
"i64" : "i32"; OUT(c, locp, "tcg_gen_brcondi_", bit_suffix, "(TCG_COND_EQ, ", cond, ", 0, if_label_", &c->inst.if_count, ");\n"); - gen_rvalue_free(c, locp, cond); return c->inst.if_count++; } @@ -2025,7 +1865,7 @@ HexValue gen_rvalue_pred(Context *c, YYLTYPE *locp, HexValue *pred) bool is_dotnew = pred->is_dotnew; char predicate_id[2] = { pred->pred.id, '\0' }; char *pred_str = (char *) &predicate_id; - *pred = gen_tmp_value(c, locp, "0", 32, UNSIGNED); + *pred = gen_tmp(c, locp, 32, UNSIGNED); if (is_dotnew) { OUT(c, locp, "tcg_gen_mov_i32(", pred, ", hex_new_pred_value["); @@ -2090,7 +1930,6 @@ static inline HexValue gen_rvalue_simple_unary(Context *c, YYLTYPE *locp, res = gen_tmp(c, locp, bit_width, value->signedness); OUT(c, locp, tcg_code, "_i", &bit_width, "(", &res, ", ", value, ");\n"); - gen_rvalue_free(c, locp, value); } return res; } @@ -2116,7 +1955,6 @@ HexValue gen_rvalue_notl(Context *c, YYLTYPE *locp, HexValue *value) OUT(c, locp, "tcg_gen_movcond_i", &bit_width); OUT(c, locp, "(TCG_COND_EQ, ", &res, ", ", value, ", ", &zero); OUT(c, locp, ", ", &one, ", ", &zero, ");\n"); - gen_rvalue_free(c, locp, value); } return res; } @@ -2147,7 +1985,6 @@ HexValue gen_rvalue_sat(Context *c, YYLTYPE *locp, HexSat *sat, OUT(c, locp, &ovfl, ", ", &res, ", ", value, ", ", &width->imm.value, ");\n"); OUT(c, locp, "gen_set_usr_field_if(USR_OVF,", &ovfl, ");\n"); - gen_rvalue_free(c, locp, value); return res; } @@ -2162,9 +1999,6 @@ HexValue gen_rvalue_fscr(Context *c, YYLTYPE *locp, HexValue *value) OUT(c, locp, "tcg_gen_concat_i32_i64(", &key, ", ", &frame_key, ", ", &frame_key, ");\n"); OUT(c, locp, "tcg_gen_xor_i64(", &res, ", ", value, ", ", &key, ");\n"); - gen_rvalue_free(c, locp, &key); - gen_rvalue_free(c, locp, &frame_key); - gen_rvalue_free(c, locp, value); return res; } @@ -2186,7 +2020,6 @@ HexValue gen_rvalue_brev(Context *c, YYLTYPE *locp, HexValue *value) res = gen_tmp(c, locp, value->bit_width, value->signedness); *value = rvalue_materialize(c, locp, value); OUT(c, locp, "gen_helper_fbrev(", &res, ", ", value, ");\n"); - gen_rvalue_free(c, locp, value); return res; } @@ -2198,7 +2031,6 @@ HexValue gen_rvalue_ternary(Context *c, YYLTYPE *locp, HexValue *cond, unsigned bit_width = (is_64bit) ? 64 : 32; HexValue zero = gen_constant(c, locp, "0", bit_width, UNSIGNED); HexValue res = gen_tmp(c, locp, bit_width, UNSIGNED); - Ternary *ternary = NULL; if (is_64bit) { *cond = gen_rvalue_extend(c, locp, cond); @@ -2216,13 +2048,8 @@ HexValue gen_rvalue_ternary(Context *c, YYLTYPE *locp, HexValue *cond, OUT(c, locp, ", ", true_branch, ", ", false_branch, ");\n"); assert(c->ternary->len > 0); - ternary = &g_array_index(c->ternary, Ternary, c->ternary->len - 1); - gen_rvalue_free_manual(c, locp, &ternary->cond); g_array_remove_index(c->ternary, c->ternary->len - 1); - gen_rvalue_free(c, locp, cond); - gen_rvalue_free(c, locp, true_branch); - gen_rvalue_free(c, locp, false_branch); return res; } @@ -2301,15 +2128,6 @@ void track_string(Context *c, GString *s) g_array_append_val(c->inst.strings, s); } -void free_variables(Context *c, YYLTYPE *locp) -{ - for (unsigned i = 0; i < c->inst.allocated->len; ++i) { - Var *var = &g_array_index(c->inst.allocated, Var, i); - const char *suffix = var->bit_width == 64 ? 
"i64" : "i32"; - OUT(c, locp, "tcg_temp_free_", suffix, "(", var->name->str, ");\n"); - } -} - void free_instruction(Context *c) { assert(!is_inside_ternary(c)); diff --git a/target/hexagon/idef-parser/parser-helpers.h b/target/hexagon/idef-parser/parser-helpers.h index 2766296417..1239d23a6a 100644 --- a/target/hexagon/idef-parser/parser-helpers.h +++ b/target/hexagon/idef-parser/parser-helpers.h @@ -154,12 +154,6 @@ HexValue gen_tmp(Context *c, unsigned bit_width, HexSignedness signedness); -HexValue gen_tmp_value(Context *c, - YYLTYPE *locp, - const char *value, - unsigned bit_width, - HexSignedness signedness); - HexValue gen_imm_value(Context *c __attribute__((unused)), YYLTYPE *locp, int value, @@ -169,8 +163,6 @@ HexValue gen_imm_value(Context *c __attribute__((unused)), HexValue gen_imm_qemu_tmp(Context *c, YYLTYPE *locp, unsigned bit_width, HexSignedness signedness); -void gen_rvalue_free(Context *c, YYLTYPE *locp, HexValue *rvalue); - HexValue rvalue_materialize(Context *c, YYLTYPE *locp, HexValue *rvalue); HexValue gen_rvalue_extend(Context *c, YYLTYPE *locp, HexValue *rvalue); @@ -365,8 +357,6 @@ void emit_footer(Context *c); void track_string(Context *c, GString *s); -void free_variables(Context *c, YYLTYPE *locp); - void free_instruction(Context *c); void assert_signedness(Context *c, diff --git a/target/hexagon/macros.h b/target/hexagon/macros.h index cd64bb8eec..17facadaad 100644 --- a/target/hexagon/macros.h +++ b/target/hexagon/macros.h @@ -220,8 +220,6 @@ static inline void gen_pred_cancel(TCGv pred, uint32_t slot_num) tcg_gen_andi_tl(tmp, pred, 1); tcg_gen_movcond_tl(TCG_COND_EQ, hex_slot_cancelled, tmp, zero, slot_mask, hex_slot_cancelled); - tcg_temp_free(slot_mask); - tcg_temp_free(tmp); } #define PRED_LOAD_CANCEL(PRED, EA) \ gen_pred_cancel(PRED, insn->is_endloop ? 
4 : insn->slot) @@ -376,10 +374,6 @@ static inline TCGv gen_read_ireg(TCGv result, TCGv val, int shift) tcg_gen_deposit_tl(result, msb, lsb, 0, 7); tcg_gen_shli_tl(result, result, shift); - - tcg_temp_free(msb); - tcg_temp_free(lsb); - return result; } #define fREAD_IREG(VAL, SHIFT) gen_read_ireg(ireg, (VAL), (SHIFT)) @@ -512,7 +506,6 @@ static inline TCGv gen_read_ireg(TCGv result, TCGv val, int shift) TCGv tmp = tcg_temp_new(); \ tcg_gen_shli_tl(tmp, REG2, SCALE); \ tcg_gen_add_tl(EA, REG, tmp); \ - tcg_temp_free(tmp); \ } while (0) #define fEA_IRs(IMM, REG, SCALE) \ do { \ diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c index 381fdaa3a8..93fd1b55e3 100644 --- a/target/hexagon/translate.c +++ b/target/hexagon/translate.c @@ -481,7 +481,6 @@ static void gen_pred_writes(DisasContext *ctx) hex_new_pred_value[pred_num], hex_pred[pred_num]); } - tcg_temp_free(pred_written); } else { for (i = 0; i < ctx->preg_log_idx; i++) { int pred_num = ctx->preg_log[i]; @@ -536,7 +535,6 @@ void process_store(DisasContext *ctx, int slot_num) /* Don't do anything if the slot was cancelled */ tcg_gen_extract_tl(cancelled, hex_slot_cancelled, slot_num, 1); tcg_gen_brcondi_tl(TCG_COND_NE, cancelled, 0, label_end); - tcg_temp_free(cancelled); } { TCGv address = tcg_temp_new(); @@ -586,7 +584,6 @@ void process_store(DisasContext *ctx, int slot_num) gen_helper_commit_store(cpu_env, slot); } } - tcg_temp_free(address); } if (is_predicated) { gen_set_label(label_end); @@ -627,8 +624,6 @@ static void process_dczeroa(DisasContext *ctx) tcg_gen_qemu_st64(zero, addr, ctx->mem_idx); tcg_gen_addi_tl(addr, addr, 8); tcg_gen_qemu_st64(zero, addr, ctx->mem_idx); - - tcg_temp_free(addr); } } @@ -673,7 +668,6 @@ static void gen_commit_hvx(DisasContext *ctx) tcg_gen_andi_tl(cmp, hex_VRegs_updated, 1 << rnum); tcg_gen_brcondi_tl(TCG_COND_EQ, cmp, 0, label_skip); - tcg_temp_free(cmp); tcg_gen_gvec_mov(MO_64, dstoff, srcoff, size, size); gen_set_label(label_skip); } else { @@ -706,7 +700,6 @@ static void gen_commit_hvx(DisasContext *ctx) tcg_gen_andi_tl(cmp, hex_QRegs_updated, 1 << rnum); tcg_gen_brcondi_tl(TCG_COND_EQ, cmp, 0, label_skip); - tcg_temp_free(cmp); tcg_gen_gvec_mov(MO_64, dstoff, srcoff, size, size); gen_set_label(label_skip); } else { diff --git a/target/hppa/translate.c b/target/hppa/translate.c index cee960949f..cb4fd1fd62 100644 --- a/target/hppa/translate.c +++ b/target/hppa/translate.c @@ -35,12 +35,10 @@ #undef TCGv #undef tcg_temp_new #undef tcg_global_mem_new -#undef tcg_temp_free #if TARGET_LONG_BITS == 64 #define TCGv_tl TCGv_i64 #define tcg_temp_new_tl tcg_temp_new_i64 -#define tcg_temp_free_tl tcg_temp_free_i64 #if TARGET_REGISTER_BITS == 64 #define tcg_gen_extu_reg_tl tcg_gen_mov_i64 #else @@ -49,7 +47,6 @@ #else #define TCGv_tl TCGv_i32 #define tcg_temp_new_tl tcg_temp_new_i32 -#define tcg_temp_free_tl tcg_temp_free_i32 #define tcg_gen_extu_reg_tl tcg_gen_mov_i32 #endif @@ -58,7 +55,6 @@ #define tcg_temp_new tcg_temp_new_i64 #define tcg_global_mem_new tcg_global_mem_new_i64 -#define tcg_temp_free tcg_temp_free_i64 #define tcg_gen_movi_reg tcg_gen_movi_i64 #define tcg_gen_mov_reg tcg_gen_mov_i64 @@ -153,7 +149,6 @@ #define TCGv_reg TCGv_i32 #define tcg_temp_new tcg_temp_new_i32 #define tcg_global_mem_new tcg_global_mem_new_i32 -#define tcg_temp_free tcg_temp_free_i32 #define tcg_gen_movi_reg tcg_gen_movi_i32 #define tcg_gen_mov_reg tcg_gen_mov_i32 @@ -488,10 +483,6 @@ static void cond_free(DisasCond *cond) { switch (cond->c) { default: - if (cond->a0 != cpu_psw_n) { - 
tcg_temp_free(cond->a0); - } - tcg_temp_free(cond->a1); cond->a0 = NULL; cond->a1 = NULL; /* fallthru */ @@ -1021,7 +1012,6 @@ static DisasCond do_unit_cond(unsigned cf, TCGv_reg res, tcg_gen_and_reg(tmp, in1, in2); tcg_gen_andc_reg(cb, cb, res); tcg_gen_or_reg(cb, cb, tmp); - tcg_temp_free(tmp); } switch (cf >> 1) { @@ -1040,7 +1030,6 @@ static DisasCond do_unit_cond(unsigned cf, TCGv_reg res, tcg_gen_andc_reg(tmp, tmp, res); tcg_gen_andi_reg(tmp, tmp, 0x80808080u); cond = cond_make_0(TCG_COND_NE, tmp); - tcg_temp_free(tmp); break; case 3: /* SHZ / NHZ */ @@ -1049,7 +1038,6 @@ static DisasCond do_unit_cond(unsigned cf, TCGv_reg res, tcg_gen_andc_reg(tmp, tmp, res); tcg_gen_andi_reg(tmp, tmp, 0x80008000u); cond = cond_make_0(TCG_COND_NE, tmp); - tcg_temp_free(tmp); break; case 4: /* SDC / NDC */ @@ -1070,9 +1058,6 @@ static DisasCond do_unit_cond(unsigned cf, TCGv_reg res, default: g_assert_not_reached(); } - if (cf & 8) { - tcg_temp_free(cb); - } if (cf & 1) { cond.c = tcg_invert_cond(cond.c); } @@ -1090,7 +1075,6 @@ static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res, tcg_gen_xor_reg(sv, res, in1); tcg_gen_xor_reg(tmp, in1, in2); tcg_gen_andc_reg(sv, sv, tmp); - tcg_temp_free(tmp); return sv; } @@ -1105,7 +1089,6 @@ static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res, tcg_gen_xor_reg(sv, res, in1); tcg_gen_xor_reg(tmp, in1, in2); tcg_gen_and_reg(sv, sv, tmp); - tcg_temp_free(tmp); return sv; } @@ -1163,7 +1146,6 @@ static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1, tmp = tcg_temp_new(); tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1); gen_helper_tcond(cpu_env, tmp); - tcg_temp_free(tmp); } /* Write back the result. */ @@ -1172,7 +1154,6 @@ static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1, save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb); } save_gpr(ctx, rt, dest); - tcg_temp_free(dest); /* Install the new nullification. */ cond_free(&ctx->null_cond); @@ -1257,16 +1238,12 @@ static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1, tmp = tcg_temp_new(); tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1); gen_helper_tcond(cpu_env, tmp); - tcg_temp_free(tmp); } /* Write back the result. */ save_or_nullify(ctx, cpu_psw_cb, cb); save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb); save_gpr(ctx, rt, dest); - tcg_temp_free(dest); - tcg_temp_free(cb); - tcg_temp_free(cb_msb); /* Install the new nullification. */ cond_free(&ctx->null_cond); @@ -1321,7 +1298,6 @@ static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1, /* Clear. */ tcg_gen_movi_reg(dest, 0); save_gpr(ctx, rt, dest); - tcg_temp_free(dest); /* Install the new nullification. 
*/ cond_free(&ctx->null_cond); @@ -1381,7 +1357,6 @@ static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1, TCGv_reg tmp = tcg_temp_new(); tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1); gen_helper_tcond(cpu_env, tmp); - tcg_temp_free(tmp); } save_gpr(ctx, rt, dest); @@ -1420,11 +1395,9 @@ static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base) tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5); tcg_gen_andi_reg(tmp, tmp, 030); tcg_gen_trunc_reg_ptr(ptr, tmp); - tcg_temp_free(tmp); tcg_gen_add_ptr(ptr, ptr, cpu_env); tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4])); - tcg_temp_free_ptr(ptr); return spc; } @@ -1582,7 +1555,6 @@ static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb, tmp = tcg_temp_new_i32(); do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL); save_frw_i32(rt, tmp); - tcg_temp_free_i32(tmp); if (rt == 0) { gen_helper_loaded_fr0(cpu_env); @@ -1608,7 +1580,6 @@ static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb, tmp = tcg_temp_new_i64(); do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ); save_frd(rt, tmp); - tcg_temp_free_i64(tmp); if (rt == 0) { gen_helper_loaded_fr0(cpu_env); @@ -1642,7 +1613,6 @@ static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb, tmp = load_frw_i32(rt); do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL); - tcg_temp_free_i32(tmp); return nullify_end(ctx); } @@ -1663,7 +1633,6 @@ static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb, tmp = load_frd(rt); do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ); - tcg_temp_free_i64(tmp); return nullify_end(ctx); } @@ -1685,7 +1654,6 @@ static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra, func(tmp, cpu_env, tmp); save_frw_i32(rt, tmp); - tcg_temp_free_i32(tmp); return nullify_end(ctx); } @@ -1701,9 +1669,7 @@ static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra, func(dst, cpu_env, src); - tcg_temp_free_i64(src); save_frw_i32(rt, dst); - tcg_temp_free_i32(dst); return nullify_end(ctx); } @@ -1718,7 +1684,6 @@ static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra, func(tmp, cpu_env, tmp); save_frd(rt, tmp); - tcg_temp_free_i64(tmp); return nullify_end(ctx); } @@ -1734,9 +1699,7 @@ static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra, func(dst, cpu_env, src); - tcg_temp_free_i32(src); save_frd(rt, dst); - tcg_temp_free_i64(dst); return nullify_end(ctx); } @@ -1752,9 +1715,7 @@ static bool do_fop_weww(DisasContext *ctx, unsigned rt, func(a, cpu_env, a, b); - tcg_temp_free_i32(b); save_frw_i32(rt, a); - tcg_temp_free_i32(a); return nullify_end(ctx); } @@ -1770,9 +1731,7 @@ static bool do_fop_dedd(DisasContext *ctx, unsigned rt, func(a, cpu_env, a, b); - tcg_temp_free_i64(b); save_frd(rt, a); - tcg_temp_free_i64(a); return nullify_end(ctx); } @@ -2098,8 +2057,6 @@ static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a) tcg_gen_trunc_i64_reg(t1, t0); save_gpr(ctx, rt, t1); - tcg_temp_free(t1); - tcg_temp_free_i64(t0); cond_free(&ctx->null_cond); return true; @@ -2176,7 +2133,6 @@ static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a) } else { tcg_gen_mov_i64(cpu_sr[rs], t64); } - tcg_temp_free_i64(t64); return nullify_end(ctx); } @@ -2192,7 +2148,6 @@ static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a) tmp = tcg_temp_new(); tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1); save_or_nullify(ctx, cpu_sar, tmp); - tcg_temp_free(tmp); cond_free(&ctx->null_cond); return true; @@ -2254,7 +2209,6 @@ static bool 
trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a) tcg_gen_not_reg(tmp, load_gpr(ctx, a->r)); tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1); save_or_nullify(ctx, cpu_sar, tmp); - tcg_temp_free(tmp); cond_free(&ctx->null_cond); return true; @@ -2273,8 +2227,6 @@ static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a) tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b))); tcg_gen_shri_i64(t0, t0, 32); tcg_gen_trunc_i64_reg(dest, t0); - - tcg_temp_free_i64(t0); #endif save_gpr(ctx, a->t, dest); @@ -2437,8 +2389,6 @@ static bool trans_probe(DisasContext *ctx, arg_probe *a) gen_helper_probe(dest, cpu_env, addr, level, want); - tcg_temp_free_i32(level); - save_gpr(ctx, a->t, dest); return nullify_end(ctx); } @@ -2530,8 +2480,6 @@ static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a) : offsetof(CPUHPPAState, cr[CR_IIAOQ])); tcg_gen_shli_i64(stl, stl, 32); tcg_gen_or_tl(addr, atl, stl); - tcg_temp_free_tl(atl); - tcg_temp_free_tl(stl); reg = load_gpr(ctx, a->r); if (a->addr) { @@ -2539,7 +2487,6 @@ static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a) } else { gen_helper_itlbp(cpu_env, addr, reg); } - tcg_temp_free_tl(addr); /* Exit TB for TLB change if mmu is enabled. */ if (ctx->tb_flags & PSW_C) { @@ -2568,7 +2515,6 @@ static bool trans_lpa(DisasContext *ctx, arg_ldst *a) save_gpr(ctx, a->b, ofs); } save_gpr(ctx, a->t, paddr); - tcg_temp_free(paddr); return nullify_end(ctx); #endif @@ -2819,8 +2765,6 @@ static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a) tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero); tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero); - tcg_temp_free(addc); - /* Write back the result register. */ save_gpr(ctx, a->t, dest); @@ -2842,10 +2786,6 @@ static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a) ctx->null_cond = do_cond(a->cf, dest, cpu_psw_cb_msb, sv); } - tcg_temp_free(add1); - tcg_temp_free(add2); - tcg_temp_free(dest); - return nullify_end(ctx); } @@ -3100,7 +3040,6 @@ static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1, cond = do_cond(c * 2 + f, dest, cb_msb, sv); save_gpr(ctx, r, dest); - tcg_temp_free(dest); return do_cbranch(ctx, disp, n, &cond); } @@ -3128,7 +3067,6 @@ static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a) tcg_gen_shl_reg(tmp, tcg_r, cpu_sar); cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp); - tcg_temp_free(tmp); return do_cbranch(ctx, a->disp, a->n, &cond); } @@ -3144,7 +3082,6 @@ static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a) tcg_gen_shli_reg(tmp, tcg_r, a->p); cond = cond_make_0(a->c ? 
TCG_COND_GE : TCG_COND_LT, tmp); - tcg_temp_free(tmp); return do_cbranch(ctx, a->disp, a->n, &cond); } @@ -3197,7 +3134,6 @@ static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a) tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2)); tcg_gen_rotr_i32(t32, t32, cpu_sar); tcg_gen_extu_i32_reg(dest, t32); - tcg_temp_free_i32(t32); } else { TCGv_i64 t = tcg_temp_new_i64(); TCGv_i64 s = tcg_temp_new_i64(); @@ -3206,9 +3142,6 @@ static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a) tcg_gen_extu_reg_i64(s, cpu_sar); tcg_gen_shr_i64(t, t, s); tcg_gen_trunc_i64_reg(dest, t); - - tcg_temp_free_i64(t); - tcg_temp_free_i64(s); } save_gpr(ctx, a->t, dest); @@ -3240,13 +3173,11 @@ static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a) tcg_gen_trunc_reg_i32(t32, t2); tcg_gen_rotri_i32(t32, t32, sa); tcg_gen_extu_i32_reg(dest, t32); - tcg_temp_free_i32(t32); } else { TCGv_i64 t64 = tcg_temp_new_i64(); tcg_gen_concat_reg_i64(t64, t2, cpu_gr[a->r1]); tcg_gen_shri_i64(t64, t64, sa); tcg_gen_trunc_i64_reg(dest, t64); - tcg_temp_free_i64(t64); } save_gpr(ctx, a->t, dest); @@ -3280,7 +3211,6 @@ static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a) tcg_gen_shr_reg(dest, src, tmp); tcg_gen_extract_reg(dest, dest, 0, len); } - tcg_temp_free(tmp); save_gpr(ctx, a->t, dest); /* Install the new nullification. */ @@ -3410,9 +3340,6 @@ static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c, } else { tcg_gen_shl_reg(dest, tmp, shift); } - tcg_temp_free(shift); - tcg_temp_free(mask); - tcg_temp_free(tmp); save_gpr(ctx, rt, dest); /* Install the new nullification. */ @@ -3487,7 +3414,6 @@ static bool trans_be(DisasContext *ctx, arg_be *a) tcg_gen_mov_i64(cpu_iasq_b, new_spc); nullify_set(ctx, a->n); } - tcg_temp_free_i64(new_spc); tcg_gen_lookup_and_goto_ptr(); ctx->base.is_jmp = DISAS_NORETURN; return nullify_end(ctx); @@ -3876,9 +3802,6 @@ static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a) gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc); - tcg_temp_free_i32(ta); - tcg_temp_free_i32(tb); - return nullify_end(ctx); } @@ -3896,9 +3819,6 @@ static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a) gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc); - tcg_temp_free_i64(ta); - tcg_temp_free_i64(tb); - return nullify_end(ctx); } @@ -3958,7 +3878,6 @@ static bool trans_ftest(DisasContext *ctx, arg_ftest *a) tcg_gen_extract_reg(t, t, 21 - cbit, 1); ctx->null_cond = cond_make_0(TCG_COND_NE, t); - tcg_temp_free(t); } done: @@ -4019,8 +3938,6 @@ static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a) y = load_frw0_i64(a->r2); tcg_gen_mul_i64(x, x, y); save_frd(a->t, x); - tcg_temp_free_i64(x); - tcg_temp_free_i64(y); return nullify_end(ctx); } @@ -4094,10 +4011,7 @@ static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a) gen_helper_fmpyfadd_s(x, cpu_env, x, y, z); } - tcg_temp_free_i32(y); - tcg_temp_free_i32(z); save_frw_i32(a->t, x); - tcg_temp_free_i32(x); return nullify_end(ctx); } @@ -4116,10 +4030,7 @@ static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a) gen_helper_fmpyfadd_d(x, cpu_env, x, y, z); } - tcg_temp_free_i64(y); - tcg_temp_free_i64(z); save_frd(a->t, x); - tcg_temp_free_i64(x); return nullify_end(ctx); } @@ -4234,13 +4145,11 @@ static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) } } - /* Free any temporaries allocated. */ + /* Forget any temporaries allocated. 
*/ for (i = 0, n = ctx->ntempr; i < n; ++i) { - tcg_temp_free(ctx->tempr[i]); ctx->tempr[i] = NULL; } for (i = 0, n = ctx->ntempl; i < n; ++i) { - tcg_temp_free_tl(ctx->templ[i]); ctx->templ[i] = NULL; } ctx->ntempr = 0; diff --git a/target/i386/tcg/decode-new.c.inc b/target/i386/tcg/decode-new.c.inc index d5fd8d965c..4fdd87750b 100644 --- a/target/i386/tcg/decode-new.c.inc +++ b/target/i386/tcg/decode-new.c.inc @@ -1567,20 +1567,6 @@ illegal: return false; } -static void decode_temp_free(X86DecodedOp *op) -{ - if (op->v_ptr) { - tcg_temp_free_ptr(op->v_ptr); - } -} - -static void decode_temps_free(X86DecodedInsn *decode) -{ - decode_temp_free(&decode->op[0]); - decode_temp_free(&decode->op[1]); - decode_temp_free(&decode->op[2]); -} - /* * Convert one instruction. s->base.is_jmp is set if the translation must * be stopped. @@ -1835,7 +1821,6 @@ static void disas_insn_new(DisasContext *s, CPUState *cpu, int b) decode.e.gen(s, env, &decode); gen_writeback(s, &decode, 0, s->T0); } - decode_temps_free(&decode); return; illegal_op: gen_illegal_opcode(s); diff --git a/target/i386/tcg/emit.c.inc b/target/i386/tcg/emit.c.inc index 0d01e13002..95fb4f52fa 100644 --- a/target/i386/tcg/emit.c.inc +++ b/target/i386/tcg/emit.c.inc @@ -629,7 +629,6 @@ static inline void gen_ternary_sse(DisasContext *s, CPUX86State *env, X86Decoded /* The format of the fourth input is Lx */ tcg_gen_addi_ptr(ptr3, cpu_env, ZMM_OFFSET(op3)); fn(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2, ptr3); - tcg_temp_free_ptr(ptr3); } #define TERNARY_SSE(uname, uvname, lname) \ static void gen_##uvname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \ @@ -1001,7 +1000,6 @@ static inline void gen_vsib_avx(DisasContext *s, CPUX86State *env, X86DecodedIns int ymmh_ofs = vector_elem_offset(&decode->op[1], MO_128, 1); tcg_gen_gvec_dup_imm(MO_64, ymmh_ofs, 16, 16, 0); } - tcg_temp_free_ptr(index); } #define VSIB_AVX(uname, lname) \ static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \ @@ -1627,7 +1625,6 @@ static void gen_PMOVMSKB(DisasContext *s, CPUX86State *env, X86DecodedInsn *deco tcg_gen_deposit_tl(s->T0, t, s->T0, 8, TARGET_LONG_BITS - 8); } } - tcg_temp_free(t); } static void gen_PSHUFW(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) @@ -1762,7 +1759,6 @@ static void gen_PSRLDQ_i(DisasContext *s, CPUX86State *env, X86DecodedInsn *deco } else { gen_helper_psrldq_xmm(cpu_env, OP_PTR0, OP_PTR1, imm_vec); } - tcg_temp_free_ptr(imm_vec); } static void gen_PSLLDQ_i(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) @@ -1775,7 +1771,6 @@ static void gen_PSLLDQ_i(DisasContext *s, CPUX86State *env, X86DecodedInsn *deco } else { gen_helper_pslldq_xmm(cpu_env, OP_PTR0, OP_PTR1, imm_vec); } - tcg_temp_free_ptr(imm_vec); } static void gen_RORX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) @@ -2293,7 +2288,6 @@ static void gen_VZEROALL(DisasContext *s, CPUX86State *env, X86DecodedInsn *deco tcg_gen_addi_ptr(ptr, cpu_env, offsetof(CPUX86State, xmm_t0)); gen_helper_memset(ptr, ptr, tcg_constant_i32(0), tcg_constant_ptr(CPU_NB_REGS * sizeof(ZMMReg))); - tcg_temp_free_ptr(ptr); } static void gen_VZEROUPPER(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c index defbc43deb..fa422ebd0b 100644 --- a/target/i386/tcg/translate.c +++ b/target/i386/tcg/translate.c @@ -899,10 +899,6 @@ static void gen_compute_eflags(DisasContext *s) gen_update_cc_op(s); gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, 
cpu_cc_op); set_cc_op(s, CC_OP_EFLAGS); - - if (dead) { - tcg_temp_free(zero); - } } typedef struct CCPrepare { @@ -1650,7 +1646,6 @@ static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result, } else { tcg_gen_mov_tl(cpu_cc_src, shm1); } - tcg_temp_free(z_tl); /* Get the two potential CC_OP values into temporaries. */ tcg_gen_movi_i32(s->tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot); @@ -1666,8 +1661,6 @@ static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result, s32 = tcg_temp_new_i32(); tcg_gen_trunc_tl_i32(s32, count); tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, s->tmp2_i32, oldop); - tcg_temp_free_i32(z32); - tcg_temp_free_i32(s32); /* The CC_OP value is no longer predictable. */ set_cc_op(s, CC_OP_DYNAMIC); @@ -1827,8 +1820,6 @@ static void gen_rot_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right) tcg_gen_movi_i32(s->tmp3_i32, CC_OP_EFLAGS); tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0, s->tmp2_i32, s->tmp3_i32); - tcg_temp_free_i32(t0); - tcg_temp_free_i32(t1); /* The CC_OP value is no longer predictable. */ set_cc_op(s, CC_OP_DYNAMIC); @@ -2049,7 +2040,6 @@ static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot, int op1, gen_op_st_rm_T0_A0(s, ot, op1); gen_shift_flags(s, ot, s->T0, s->tmp0, count, is_right); - tcg_temp_free(count); } static void gen_shift(DisasContext *s1, int op, MemOp ot, int d, int s) @@ -2513,13 +2503,6 @@ static void gen_cmovcc1(CPUX86State *env, DisasContext *s, MemOp ot, int b, tcg_gen_movcond_tl(cc.cond, s->T0, cc.reg, cc.reg2, s->T0, cpu_regs[reg]); gen_op_mov_reg_v(s, ot, reg, s->T0); - - if (cc.mask != -1) { - tcg_temp_free(cc.reg); - } - if (!cc.use_reg2) { - tcg_temp_free(cc.reg2); - } } static inline void gen_op_movl_T0_seg(DisasContext *s, X86Seg seg_reg) @@ -2748,7 +2731,6 @@ static void gen_set_hflag(DisasContext *s, uint32_t mask) tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags)); tcg_gen_ori_i32(t, t, mask); tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags)); - tcg_temp_free_i32(t); s->flags |= mask; } } @@ -2760,7 +2742,6 @@ static void gen_reset_hflag(DisasContext *s, uint32_t mask) tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags)); tcg_gen_andi_i32(t, t, ~mask); tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags)); - tcg_temp_free_i32(t); s->flags &= ~mask; } } @@ -2772,7 +2753,6 @@ static void gen_set_eflags(DisasContext *s, target_ulong mask) tcg_gen_ld_tl(t, cpu_env, offsetof(CPUX86State, eflags)); tcg_gen_ori_tl(t, t, mask); tcg_gen_st_tl(t, cpu_env, offsetof(CPUX86State, eflags)); - tcg_temp_free(t); } static void gen_reset_eflags(DisasContext *s, target_ulong mask) @@ -2782,7 +2762,6 @@ static void gen_reset_eflags(DisasContext *s, target_ulong mask) tcg_gen_ld_tl(t, cpu_env, offsetof(CPUX86State, eflags)); tcg_gen_andi_tl(t, t, ~mask); tcg_gen_st_tl(t, cpu_env, offsetof(CPUX86State, eflags)); - tcg_temp_free(t); } /* Clear BND registers during legacy branches. */ @@ -3015,13 +2994,11 @@ static void gen_cmpxchg8b(DisasContext *s, CPUX86State *env, int modrm) tcg_gen_nonatomic_cmpxchg_i64(old, s->A0, cmp, val, s->mem_index, MO_TEUQ); } - tcg_temp_free_i64(val); /* Set tmp0 to match the required value of Z. */ tcg_gen_setcond_i64(TCG_COND_EQ, cmp, old, cmp); Z = tcg_temp_new(); tcg_gen_trunc_i64_tl(Z, cmp); - tcg_temp_free_i64(cmp); /* * Extract the result values for the register pair. 
@@ -3042,12 +3019,10 @@ static void gen_cmpxchg8b(DisasContext *s, CPUX86State *env, int modrm) tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EDX], Z, zero, s->T1, cpu_regs[R_EDX]); } - tcg_temp_free_i64(old); /* Update Z. */ gen_compute_eflags(s); tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, Z, ctz32(CC_Z), 1); - tcg_temp_free(Z); } #ifdef TARGET_X86_64 @@ -3072,8 +3047,6 @@ static void gen_cmpxchg16b(DisasContext *s, CPUX86State *env, int modrm) } tcg_gen_extr_i128_i64(s->T0, s->T1, val); - tcg_temp_free_i128(cmp); - tcg_temp_free_i128(val); /* Determine success after the fact. */ t0 = tcg_temp_new_i64(); @@ -3081,13 +3054,11 @@ static void gen_cmpxchg16b(DisasContext *s, CPUX86State *env, int modrm) tcg_gen_xor_i64(t0, s->T0, cpu_regs[R_EAX]); tcg_gen_xor_i64(t1, s->T1, cpu_regs[R_EDX]); tcg_gen_or_i64(t0, t0, t1); - tcg_temp_free_i64(t1); /* Update Z. */ gen_compute_eflags(s); tcg_gen_setcondi_i64(TCG_COND_EQ, t0, t0, 0); tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, t0, ctz32(CC_Z), 1); - tcg_temp_free_i64(t0); /* * Extract the result values for the register pair. We may do this @@ -3437,10 +3408,8 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) tcg_gen_neg_tl(t1, t0); tcg_gen_atomic_cmpxchg_tl(t0, a0, t0, t1, s->mem_index, ot | MO_LE); - tcg_temp_free(t1); tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1); - tcg_temp_free(t2); tcg_gen_neg_tl(s->T0, t0); } else { tcg_gen_neg_tl(s->T0, s->T0); @@ -3927,9 +3896,6 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) tcg_gen_mov_tl(s->cc_srcT, cmpv); tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv); set_cc_op(s, CC_OP_SUBB + ot); - tcg_temp_free(oldv); - tcg_temp_free(newv); - tcg_temp_free(cmpv); } break; case 0x1c7: /* cmpxchg8b */ @@ -4380,7 +4346,6 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) if (shift) { TCGv imm = tcg_const_tl(x86_ldub_code(env, s)); gen_shiftd_rm_T1(s, ot, opreg, op, imm); - tcg_temp_free(imm); } else { gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]); } @@ -4614,7 +4579,6 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) tcg_gen_st_tl(last_addr, cpu_env, offsetof(CPUX86State, fpdp)); } - tcg_temp_free(last_addr); } else { /* register float ops */ opreg = rm; @@ -5262,52 +5226,19 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) case 0x9d: /* popf */ gen_svm_check_intercept(s, SVM_EXIT_POPF); if (check_vm86_iopl(s)) { - ot = gen_pop_T0(s); + int mask = TF_MASK | AC_MASK | ID_MASK | NT_MASK; + if (CPL(s) == 0) { - if (dflag != MO_16) { - gen_helper_write_eflags(cpu_env, s->T0, - tcg_const_i32((TF_MASK | AC_MASK | - ID_MASK | NT_MASK | - IF_MASK | - IOPL_MASK))); - } else { - gen_helper_write_eflags(cpu_env, s->T0, - tcg_const_i32((TF_MASK | AC_MASK | - ID_MASK | NT_MASK | - IF_MASK | IOPL_MASK) - & 0xffff)); - } - } else { - if (CPL(s) <= IOPL(s)) { - if (dflag != MO_16) { - gen_helper_write_eflags(cpu_env, s->T0, - tcg_const_i32((TF_MASK | - AC_MASK | - ID_MASK | - NT_MASK | - IF_MASK))); - } else { - gen_helper_write_eflags(cpu_env, s->T0, - tcg_const_i32((TF_MASK | - AC_MASK | - ID_MASK | - NT_MASK | - IF_MASK) - & 0xffff)); - } - } else { - if (dflag != MO_16) { - gen_helper_write_eflags(cpu_env, s->T0, - tcg_const_i32((TF_MASK | AC_MASK | - ID_MASK | NT_MASK))); - } else { - gen_helper_write_eflags(cpu_env, s->T0, - tcg_const_i32((TF_MASK | AC_MASK | - ID_MASK | NT_MASK) - & 0xffff)); - } - } + mask |= IF_MASK | IOPL_MASK; + } else if (CPL(s) <= IOPL(s)) { + mask |= IF_MASK; + } + if (dflag == MO_16) { + mask &= 0xffff; } + + ot = gen_pop_T0(s); + gen_helper_write_eflags(cpu_env, 
s->T0, tcg_constant_i32(mask)); gen_pop_update(s, ot); set_cc_op(s, CC_OP_EFLAGS); /* abort translation because TF/AC flag may change */ @@ -6279,9 +6210,6 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) gen_compute_eflags(s); tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z); tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2); - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(t2); } break; case 0x102: /* lar */ @@ -6308,7 +6236,6 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) gen_op_mov_reg_v(s, ot, reg, t0); gen_set_label(label1); set_cc_op(s, CC_OP_EFLAGS); - tcg_temp_free(t0); } break; case 0x118: @@ -6353,7 +6280,6 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) TCGv_i64 notu = tcg_temp_new_i64(); tcg_gen_not_i64(notu, cpu_bndu[reg]); gen_bndck(env, s, modrm, TCG_COND_GTU, notu); - tcg_temp_free_i64(notu); } else if (prefixes & PREFIX_DATA) { /* bndmov -- from reg/mem */ if (reg >= 4 || s->aflag == MO_16) { diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c index d6513f2d9d..97e6579f6a 100644 --- a/target/loongarch/cpu.c +++ b/target/loongarch/cpu.c @@ -546,6 +546,8 @@ static void loongarch_qemu_write(void *opaque, hwaddr addr, static uint64_t loongarch_qemu_read(void *opaque, hwaddr addr, unsigned size) { switch (addr) { + case VERSION_REG: + return 0x11ULL; case FEATURE_REG: return 1ULL << IOCSRF_MSI | 1ULL << IOCSRF_EXTIOI | 1ULL << IOCSRF_CSRIPI; diff --git a/target/loongarch/cpu.h b/target/loongarch/cpu.h index d60693fafe..e11c875188 100644 --- a/target/loongarch/cpu.h +++ b/target/loongarch/cpu.h @@ -28,6 +28,7 @@ #define IOCSRF_GMOD 9 #define IOCSRF_VM 11 +#define VERSION_REG 0x0 #define FEATURE_REG 0x8 #define VENDOR_REG 0x10 #define CPUNAME_REG 0x20 diff --git a/target/loongarch/insn_trans/trans_arith.c.inc b/target/loongarch/insn_trans/trans_arith.c.inc index 8e45eadbc8..43d6cf261d 100644 --- a/target/loongarch/insn_trans/trans_arith.c.inc +++ b/target/loongarch/insn_trans/trans_arith.c.inc @@ -100,14 +100,12 @@ static void gen_mulh_d(TCGv dest, TCGv src1, TCGv src2) { TCGv discard = tcg_temp_new(); tcg_gen_muls2_tl(discard, dest, src1, src2); - tcg_temp_free(discard); } static void gen_mulh_du(TCGv dest, TCGv src1, TCGv src2) { TCGv discard = tcg_temp_new(); tcg_gen_mulu2_tl(discard, dest, src1, src2); - tcg_temp_free(discard); } static void prep_divisor_d(TCGv ret, TCGv src1, TCGv src2) @@ -129,9 +127,6 @@ static void prep_divisor_d(TCGv ret, TCGv src1, TCGv src2) tcg_gen_and_tl(ret, ret, t0); tcg_gen_or_tl(ret, ret, t1); tcg_gen_movcond_tl(TCG_COND_NE, ret, ret, zero, ret, src2); - - tcg_temp_free(t0); - tcg_temp_free(t1); } static void prep_divisor_du(TCGv ret, TCGv src2) @@ -152,7 +147,6 @@ static void gen_div_d(TCGv dest, TCGv src1, TCGv src2) TCGv t0 = tcg_temp_new(); prep_divisor_d(t0, src1, src2); tcg_gen_div_tl(dest, src1, t0); - tcg_temp_free(t0); } static void gen_rem_d(TCGv dest, TCGv src1, TCGv src2) @@ -160,7 +154,6 @@ static void gen_rem_d(TCGv dest, TCGv src1, TCGv src2) TCGv t0 = tcg_temp_new(); prep_divisor_d(t0, src1, src2); tcg_gen_rem_tl(dest, src1, t0); - tcg_temp_free(t0); } static void gen_div_du(TCGv dest, TCGv src1, TCGv src2) @@ -168,7 +161,6 @@ static void gen_div_du(TCGv dest, TCGv src1, TCGv src2) TCGv t0 = tcg_temp_new(); prep_divisor_du(t0, src2); tcg_gen_divu_tl(dest, src1, t0); - tcg_temp_free(t0); } static void gen_rem_du(TCGv dest, TCGv src1, TCGv src2) @@ -176,7 +168,6 @@ static void gen_rem_du(TCGv dest, TCGv src1, TCGv src2) TCGv t0 = tcg_temp_new(); prep_divisor_du(t0, src2); tcg_gen_remu_tl(dest, 
src1, t0); - tcg_temp_free(t0); } static void gen_div_w(TCGv dest, TCGv src1, TCGv src2) @@ -185,7 +176,6 @@ static void gen_div_w(TCGv dest, TCGv src1, TCGv src2) /* We need not check for integer overflow for div_w. */ prep_divisor_du(t0, src2); tcg_gen_div_tl(dest, src1, t0); - tcg_temp_free(t0); } static void gen_rem_w(TCGv dest, TCGv src1, TCGv src2) @@ -194,7 +184,6 @@ static void gen_rem_w(TCGv dest, TCGv src1, TCGv src2) /* We need not check for integer overflow for rem_w. */ prep_divisor_du(t0, src2); tcg_gen_rem_tl(dest, src1, t0); - tcg_temp_free(t0); } static void gen_alsl(TCGv dest, TCGv src1, TCGv src2, target_long sa) @@ -202,7 +191,6 @@ static void gen_alsl(TCGv dest, TCGv src1, TCGv src2, target_long sa) TCGv t0 = tcg_temp_new(); tcg_gen_shli_tl(t0, src1, sa); tcg_gen_add_tl(dest, t0, src2); - tcg_temp_free(t0); } static bool trans_lu32i_d(DisasContext *ctx, arg_lu32i_d *a) diff --git a/target/loongarch/insn_trans/trans_atomic.c.inc b/target/loongarch/insn_trans/trans_atomic.c.inc index 6763c1c301..612709f2a7 100644 --- a/target/loongarch/insn_trans/trans_atomic.c.inc +++ b/target/loongarch/insn_trans/trans_atomic.c.inc @@ -14,7 +14,6 @@ static bool gen_ll(DisasContext *ctx, arg_rr_i *a, MemOp mop) tcg_gen_st_tl(t0, cpu_env, offsetof(CPULoongArchState, lladdr)); tcg_gen_st_tl(dest, cpu_env, offsetof(CPULoongArchState, llval)); gen_set_gpr(a->rd, dest, EXT_NONE); - tcg_temp_free(t0); return true; } @@ -43,8 +42,6 @@ static bool gen_sc(DisasContext *ctx, arg_rr_i *a, MemOp mop) tcg_gen_setcond_tl(TCG_COND_EQ, dest, t0, cpu_llval); gen_set_label(done); gen_set_gpr(a->rd, dest, EXT_NONE); - tcg_temp_free(t0); - tcg_temp_free(val); return true; } diff --git a/target/loongarch/insn_trans/trans_bit.c.inc b/target/loongarch/insn_trans/trans_bit.c.inc index b01e4aeb23..25b4d7858b 100644 --- a/target/loongarch/insn_trans/trans_bit.c.inc +++ b/target/loongarch/insn_trans/trans_bit.c.inc @@ -122,9 +122,6 @@ static void gen_revb_2h(TCGv dest, TCGv src1) tcg_gen_and_tl(t1, src1, mask); tcg_gen_shli_tl(t1, t1, 8); tcg_gen_or_tl(dest, t0, t1); - - tcg_temp_free(t0); - tcg_temp_free(t1); } static void gen_revb_4h(TCGv dest, TCGv src1) @@ -138,9 +135,6 @@ static void gen_revb_4h(TCGv dest, TCGv src1) tcg_gen_and_tl(t1, src1, mask); tcg_gen_shli_tl(t1, t1, 8); tcg_gen_or_tl(dest, t0, t1); - - tcg_temp_free(t0); - tcg_temp_free(t1); } static void gen_revh_2w(TCGv dest, TCGv src1) @@ -154,9 +148,6 @@ static void gen_revh_2w(TCGv dest, TCGv src1) tcg_gen_and_i64(t0, t0, mask); tcg_gen_shli_i64(t1, t1, 16); tcg_gen_or_i64(dest, t1, t0); - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } static void gen_revh_d(TCGv dest, TCGv src1) @@ -171,9 +162,6 @@ static void gen_revh_d(TCGv dest, TCGv src1) tcg_gen_shli_tl(t0, t0, 16); tcg_gen_or_tl(t0, t0, t1); tcg_gen_rotri_tl(dest, t0, 32); - - tcg_temp_free(t0); - tcg_temp_free(t1); } static void gen_maskeqz(TCGv dest, TCGv src1, TCGv src2) diff --git a/target/loongarch/insn_trans/trans_fcmp.c.inc b/target/loongarch/insn_trans/trans_fcmp.c.inc index 2ccf646ccb..3b0da2b9f4 100644 --- a/target/loongarch/insn_trans/trans_fcmp.c.inc +++ b/target/loongarch/insn_trans/trans_fcmp.c.inc @@ -38,7 +38,6 @@ static bool trans_fcmp_cond_s(DisasContext *ctx, arg_fcmp_cond_s *a) fn(var, cpu_env, cpu_fpr[a->fj], cpu_fpr[a->fk], tcg_constant_i32(flags)); tcg_gen_st8_tl(var, cpu_env, offsetof(CPULoongArchState, cf[a->cd])); - tcg_temp_free(var); return true; } @@ -57,7 +56,5 @@ static bool trans_fcmp_cond_d(DisasContext *ctx, arg_fcmp_cond_d *a) fn(var, cpu_env, 
cpu_fpr[a->fj], cpu_fpr[a->fk], tcg_constant_i32(flags)); tcg_gen_st8_tl(var, cpu_env, offsetof(CPULoongArchState, cf[a->cd])); - - tcg_temp_free(var); return true; } diff --git a/target/loongarch/insn_trans/trans_fmemory.c.inc b/target/loongarch/insn_trans/trans_fmemory.c.inc index 3025a1d3e9..0d11843873 100644 --- a/target/loongarch/insn_trans/trans_fmemory.c.inc +++ b/target/loongarch/insn_trans/trans_fmemory.c.inc @@ -13,12 +13,11 @@ static void maybe_nanbox_load(TCGv freg, MemOp mop) static bool gen_fload_i(DisasContext *ctx, arg_fr_i *a, MemOp mop) { TCGv addr = gpr_src(ctx, a->rj, EXT_NONE); - TCGv temp = NULL; CHECK_FPE; if (a->imm) { - temp = tcg_temp_new(); + TCGv temp = tcg_temp_new(); tcg_gen_addi_tl(temp, addr, a->imm); addr = temp; } @@ -26,31 +25,22 @@ static bool gen_fload_i(DisasContext *ctx, arg_fr_i *a, MemOp mop) tcg_gen_qemu_ld_tl(cpu_fpr[a->fd], addr, ctx->mem_idx, mop); maybe_nanbox_load(cpu_fpr[a->fd], mop); - if (temp) { - tcg_temp_free(temp); - } - return true; } static bool gen_fstore_i(DisasContext *ctx, arg_fr_i *a, MemOp mop) { TCGv addr = gpr_src(ctx, a->rj, EXT_NONE); - TCGv temp = NULL; CHECK_FPE; if (a->imm) { - temp = tcg_temp_new(); + TCGv temp = tcg_temp_new(); tcg_gen_addi_tl(temp, addr, a->imm); addr = temp; } tcg_gen_qemu_st_tl(cpu_fpr[a->fd], addr, ctx->mem_idx, mop); - - if (temp) { - tcg_temp_free(temp); - } return true; } @@ -66,7 +56,6 @@ static bool gen_floadx(DisasContext *ctx, arg_frr *a, MemOp mop) tcg_gen_add_tl(addr, src1, src2); tcg_gen_qemu_ld_tl(cpu_fpr[a->fd], addr, ctx->mem_idx, mop); maybe_nanbox_load(cpu_fpr[a->fd], mop); - tcg_temp_free(addr); return true; } @@ -82,7 +71,6 @@ static bool gen_fstorex(DisasContext *ctx, arg_frr *a, MemOp mop) addr = tcg_temp_new(); tcg_gen_add_tl(addr, src1, src2); tcg_gen_qemu_st_tl(cpu_fpr[a->fd], addr, ctx->mem_idx, mop); - tcg_temp_free(addr); return true; } @@ -100,7 +88,6 @@ static bool gen_fload_gt(DisasContext *ctx, arg_frr *a, MemOp mop) tcg_gen_add_tl(addr, src1, src2); tcg_gen_qemu_ld_tl(cpu_fpr[a->fd], addr, ctx->mem_idx, mop); maybe_nanbox_load(cpu_fpr[a->fd], mop); - tcg_temp_free(addr); return true; } @@ -117,7 +104,6 @@ static bool gen_fstore_gt(DisasContext *ctx, arg_frr *a, MemOp mop) gen_helper_asrtgt_d(cpu_env, src1, src2); tcg_gen_add_tl(addr, src1, src2); tcg_gen_qemu_st_tl(cpu_fpr[a->fd], addr, ctx->mem_idx, mop); - tcg_temp_free(addr); return true; } @@ -135,7 +121,6 @@ static bool gen_fload_le(DisasContext *ctx, arg_frr *a, MemOp mop) tcg_gen_add_tl(addr, src1, src2); tcg_gen_qemu_ld_tl(cpu_fpr[a->fd], addr, ctx->mem_idx, mop); maybe_nanbox_load(cpu_fpr[a->fd], mop); - tcg_temp_free(addr); return true; } @@ -152,7 +137,6 @@ static bool gen_fstore_le(DisasContext *ctx, arg_frr *a, MemOp mop) gen_helper_asrtle_d(cpu_env, src1, src2); tcg_gen_add_tl(addr, src1, src2); tcg_gen_qemu_st_tl(cpu_fpr[a->fd], addr, ctx->mem_idx, mop); - tcg_temp_free(addr); return true; } diff --git a/target/loongarch/insn_trans/trans_fmov.c.inc b/target/loongarch/insn_trans/trans_fmov.c.inc index 8e5106db4e..069c941665 100644 --- a/target/loongarch/insn_trans/trans_fmov.c.inc +++ b/target/loongarch/insn_trans/trans_fmov.c.inc @@ -18,7 +18,6 @@ static bool trans_fsel(DisasContext *ctx, arg_fsel *a) tcg_gen_ld8u_tl(cond, cpu_env, offsetof(CPULoongArchState, cf[a->ca])); tcg_gen_movcond_tl(TCG_COND_EQ, cpu_fpr[a->fd], cond, zero, cpu_fpr[a->fj], cpu_fpr[a->fk]); - tcg_temp_free(cond); return true; } @@ -82,9 +81,6 @@ static bool trans_movgr2fcsr(DisasContext *ctx, arg_movgr2fcsr *a) 
tcg_gen_andi_i32(fcsr0, fcsr0, ~mask); tcg_gen_or_i32(fcsr0, fcsr0, temp); tcg_gen_st_i32(fcsr0, cpu_env, offsetof(CPULoongArchState, fcsr0)); - - tcg_temp_free_i32(temp); - tcg_temp_free_i32(fcsr0); } /* @@ -134,7 +130,6 @@ static bool trans_movfr2cf(DisasContext *ctx, arg_movfr2cf *a) t0 = tcg_temp_new(); tcg_gen_andi_tl(t0, cpu_fpr[a->fj], 0x1); tcg_gen_st8_tl(t0, cpu_env, offsetof(CPULoongArchState, cf[a->cd & 0x7])); - tcg_temp_free(t0); return true; } @@ -157,7 +152,6 @@ static bool trans_movgr2cf(DisasContext *ctx, arg_movgr2cf *a) t0 = tcg_temp_new(); tcg_gen_andi_tl(t0, gpr_src(ctx, a->rj, EXT_NONE), 0x1); tcg_gen_st8_tl(t0, cpu_env, offsetof(CPULoongArchState, cf[a->cd & 0x7])); - tcg_temp_free(t0); return true; } diff --git a/target/loongarch/insn_trans/trans_memory.c.inc b/target/loongarch/insn_trans/trans_memory.c.inc index d5eb31147c..75cfdf59ad 100644 --- a/target/loongarch/insn_trans/trans_memory.c.inc +++ b/target/loongarch/insn_trans/trans_memory.c.inc @@ -7,21 +7,15 @@ static bool gen_load(DisasContext *ctx, arg_rr_i *a, MemOp mop) { TCGv dest = gpr_dst(ctx, a->rd, EXT_NONE); TCGv addr = gpr_src(ctx, a->rj, EXT_NONE); - TCGv temp = NULL; if (a->imm) { - temp = tcg_temp_new(); + TCGv temp = tcg_temp_new(); tcg_gen_addi_tl(temp, addr, a->imm); addr = temp; } tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, mop); gen_set_gpr(a->rd, dest, EXT_NONE); - - if (temp) { - tcg_temp_free(temp); - } - return true; } @@ -29,20 +23,14 @@ static bool gen_store(DisasContext *ctx, arg_rr_i *a, MemOp mop) { TCGv data = gpr_src(ctx, a->rd, EXT_NONE); TCGv addr = gpr_src(ctx, a->rj, EXT_NONE); - TCGv temp = NULL; if (a->imm) { - temp = tcg_temp_new(); + TCGv temp = tcg_temp_new(); tcg_gen_addi_tl(temp, addr, a->imm); addr = temp; } tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, mop); - - if (temp) { - tcg_temp_free(temp); - } - return true; } @@ -56,7 +44,6 @@ static bool gen_loadx(DisasContext *ctx, arg_rrr *a, MemOp mop) tcg_gen_add_tl(addr, src1, src2); tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, mop); gen_set_gpr(a->rd, dest, EXT_NONE); - tcg_temp_free(addr); return true; } @@ -70,7 +57,6 @@ static bool gen_storex(DisasContext *ctx, arg_rrr *a, MemOp mop) tcg_gen_add_tl(addr, src1, src2); tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, mop); - tcg_temp_free(addr); return true; } @@ -146,21 +132,15 @@ static bool gen_ldptr(DisasContext *ctx, arg_rr_i *a, MemOp mop) { TCGv dest = gpr_dst(ctx, a->rd, EXT_NONE); TCGv addr = gpr_src(ctx, a->rj, EXT_NONE); - TCGv temp = NULL; if (a->imm) { - temp = tcg_temp_new(); + TCGv temp = tcg_temp_new(); tcg_gen_addi_tl(temp, addr, a->imm); addr = temp; } tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, mop); gen_set_gpr(a->rd, dest, EXT_NONE); - - if (temp) { - tcg_temp_free(temp); - } - return true; } @@ -168,20 +148,14 @@ static bool gen_stptr(DisasContext *ctx, arg_rr_i *a, MemOp mop) { TCGv data = gpr_src(ctx, a->rd, EXT_NONE); TCGv addr = gpr_src(ctx, a->rj, EXT_NONE); - TCGv temp = NULL; if (a->imm) { - temp = tcg_temp_new(); + TCGv temp = tcg_temp_new(); tcg_gen_addi_tl(temp, addr, a->imm); addr = temp; } tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, mop); - - if (temp) { - tcg_temp_free(temp); - } - return true; } diff --git a/target/loongarch/insn_trans/trans_privileged.c.inc b/target/loongarch/insn_trans/trans_privileged.c.inc index 40f82becb0..5a04352b01 100644 --- a/target/loongarch/insn_trans/trans_privileged.c.inc +++ b/target/loongarch/insn_trans/trans_privileged.c.inc @@ -243,7 +243,7 @@ static bool trans_csrwr(DisasContext *ctx, arg_csrwr *a) dest 
= gpr_dst(ctx, a->rd, EXT_NONE); csr->writefn(dest, cpu_env, src1); } else { - dest = temp_new(ctx); + dest = tcg_temp_new(); tcg_gen_ld_tl(dest, cpu_env, csr->offset); tcg_gen_st_tl(src1, cpu_env, csr->offset); } @@ -291,10 +291,6 @@ static bool trans_csrxchg(DisasContext *ctx, arg_csrxchg *a) tcg_gen_st_tl(newv, cpu_env, csr->offset); } gen_set_gpr(a->rd, oldv, EXT_NONE); - - tcg_temp_free(temp); - tcg_temp_free(newv); - tcg_temp_free(oldv); return true; } diff --git a/target/loongarch/insn_trans/trans_shift.c.inc b/target/loongarch/insn_trans/trans_shift.c.inc index 5260af2337..bf5428a2ba 100644 --- a/target/loongarch/insn_trans/trans_shift.c.inc +++ b/target/loongarch/insn_trans/trans_shift.c.inc @@ -8,7 +8,6 @@ static void gen_sll_w(TCGv dest, TCGv src1, TCGv src2) TCGv t0 = tcg_temp_new(); tcg_gen_andi_tl(t0, src2, 0x1f); tcg_gen_shl_tl(dest, src1, t0); - tcg_temp_free(t0); } static void gen_srl_w(TCGv dest, TCGv src1, TCGv src2) @@ -16,7 +15,6 @@ static void gen_srl_w(TCGv dest, TCGv src1, TCGv src2) TCGv t0 = tcg_temp_new(); tcg_gen_andi_tl(t0, src2, 0x1f); tcg_gen_shr_tl(dest, src1, t0); - tcg_temp_free(t0); } static void gen_sra_w(TCGv dest, TCGv src1, TCGv src2) @@ -24,7 +22,6 @@ static void gen_sra_w(TCGv dest, TCGv src1, TCGv src2) TCGv t0 = tcg_temp_new(); tcg_gen_andi_tl(t0, src2, 0x1f); tcg_gen_sar_tl(dest, src1, t0); - tcg_temp_free(t0); } static void gen_sll_d(TCGv dest, TCGv src1, TCGv src2) @@ -32,7 +29,6 @@ static void gen_sll_d(TCGv dest, TCGv src1, TCGv src2) TCGv t0 = tcg_temp_new(); tcg_gen_andi_tl(t0, src2, 0x3f); tcg_gen_shl_tl(dest, src1, t0); - tcg_temp_free(t0); } static void gen_srl_d(TCGv dest, TCGv src1, TCGv src2) @@ -40,7 +36,6 @@ static void gen_srl_d(TCGv dest, TCGv src1, TCGv src2) TCGv t0 = tcg_temp_new(); tcg_gen_andi_tl(t0, src2, 0x3f); tcg_gen_shr_tl(dest, src1, t0); - tcg_temp_free(t0); } static void gen_sra_d(TCGv dest, TCGv src1, TCGv src2) @@ -48,7 +43,6 @@ static void gen_sra_d(TCGv dest, TCGv src1, TCGv src2) TCGv t0 = tcg_temp_new(); tcg_gen_andi_tl(t0, src2, 0x3f); tcg_gen_sar_tl(dest, src1, t0); - tcg_temp_free(t0); } static void gen_rotr_w(TCGv dest, TCGv src1, TCGv src2) @@ -64,10 +58,6 @@ static void gen_rotr_w(TCGv dest, TCGv src1, TCGv src2) tcg_gen_rotr_i32(t1, t1, t2); tcg_gen_ext_i32_tl(dest, t1); - - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t2); - tcg_temp_free(t0); } static void gen_rotr_d(TCGv dest, TCGv src1, TCGv src2) @@ -75,7 +65,6 @@ static void gen_rotr_d(TCGv dest, TCGv src1, TCGv src2) TCGv t0 = tcg_temp_new(); tcg_gen_andi_tl(t0, src2, 0x3f); tcg_gen_rotr_tl(dest, src1, t0); - tcg_temp_free(t0); } static bool trans_srai_w(DisasContext *ctx, arg_srai_w *a) diff --git a/target/loongarch/translate.c b/target/loongarch/translate.c index 2a43ab0201..f443b5822f 100644 --- a/target/loongarch/translate.c +++ b/target/loongarch/translate.c @@ -85,9 +85,6 @@ static void loongarch_tr_init_disas_context(DisasContextBase *dcbase, bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4; ctx->base.max_insns = MIN(ctx->base.max_insns, bound); - ctx->ntemp = 0; - memset(ctx->temp, 0, sizeof(ctx->temp)); - ctx->zero = tcg_constant_tl(0); } @@ -110,12 +107,6 @@ static void loongarch_tr_insn_start(DisasContextBase *dcbase, CPUState *cs) * * Further, we may provide an extension for word operations. 
*/ -static TCGv temp_new(DisasContext *ctx) -{ - assert(ctx->ntemp < ARRAY_SIZE(ctx->temp)); - return ctx->temp[ctx->ntemp++] = tcg_temp_new(); -} - static TCGv gpr_src(DisasContext *ctx, int reg_num, DisasExtend src_ext) { TCGv t; @@ -128,11 +119,11 @@ static TCGv gpr_src(DisasContext *ctx, int reg_num, DisasExtend src_ext) case EXT_NONE: return cpu_gpr[reg_num]; case EXT_SIGN: - t = temp_new(ctx); + t = tcg_temp_new(); tcg_gen_ext32s_tl(t, cpu_gpr[reg_num]); return t; case EXT_ZERO: - t = temp_new(ctx); + t = tcg_temp_new(); tcg_gen_ext32u_tl(t, cpu_gpr[reg_num]); return t; } @@ -142,7 +133,7 @@ static TCGv gpr_src(DisasContext *ctx, int reg_num, DisasExtend src_ext) static TCGv gpr_dst(DisasContext *ctx, int reg_num, DisasExtend dst_ext) { if (reg_num == 0 || dst_ext) { - return temp_new(ctx); + return tcg_temp_new(); } return cpu_gpr[reg_num]; } @@ -195,12 +186,6 @@ static void loongarch_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) generate_exception(ctx, EXCCODE_INE); } - for (int i = ctx->ntemp - 1; i >= 0; --i) { - tcg_temp_free(ctx->temp[i]); - ctx->temp[i] = NULL; - } - ctx->ntemp = 0; - ctx->base.pc_next += 4; } diff --git a/target/loongarch/translate.h b/target/loongarch/translate.h index 6d2e382e8b..67bc74c05b 100644 --- a/target/loongarch/translate.h +++ b/target/loongarch/translate.h @@ -32,9 +32,6 @@ typedef struct DisasContext { uint16_t mem_idx; uint16_t plv; TCGv zero; - /* Space for 3 operands plus 1 extra for address computation. */ - TCGv temp[4]; - uint8_t ntemp; } DisasContext; void generate_exception(DisasContext *ctx, int excp); diff --git a/target/m68k/translate.c b/target/m68k/translate.c index 157c2cbb8f..3055d2d246 100644 --- a/target/m68k/translate.c +++ b/target/m68k/translate.c @@ -121,35 +121,9 @@ typedef struct DisasContext { int done_mac; int writeback_mask; TCGv writeback[8]; -#define MAX_TO_RELEASE 8 - int release_count; - TCGv release[MAX_TO_RELEASE]; bool ss_active; } DisasContext; -static void init_release_array(DisasContext *s) -{ -#ifdef CONFIG_DEBUG_TCG - memset(s->release, 0, sizeof(s->release)); -#endif - s->release_count = 0; -} - -static void do_release(DisasContext *s) -{ - int i; - for (i = 0; i < s->release_count; i++) { - tcg_temp_free(s->release[i]); - } - init_release_array(s); -} - -static TCGv mark_to_release(DisasContext *s, TCGv tmp) -{ - g_assert(s->release_count < MAX_TO_RELEASE); - return s->release[s->release_count++] = tmp; -} - static TCGv get_areg(DisasContext *s, unsigned regno) { if (s->writeback_mask & (1 << regno)) { @@ -164,7 +138,6 @@ static void delay_set_areg(DisasContext *s, unsigned regno, { if (s->writeback_mask & (1 << regno)) { if (give_temp) { - tcg_temp_free(s->writeback[regno]); s->writeback[regno] = val; } else { tcg_gen_mov_i32(s->writeback[regno], val); @@ -189,7 +162,6 @@ static void do_writebacks(DisasContext *s) do { unsigned regno = ctz32(mask); tcg_gen_mov_i32(cpu_aregs[regno], s->writeback[regno]); - tcg_temp_free(s->writeback[regno]); mask &= mask - 1; } while (mask); } @@ -296,7 +268,6 @@ static void gen_raise_exception(int nr) tmp = tcg_const_i32(nr); gen_helper_raise_exception(cpu_env, tmp); - tcg_temp_free_i32(tmp); } static void gen_raise_exception_format2(DisasContext *s, int nr, @@ -396,8 +367,7 @@ static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val, gen_store(s, opsize, addr, val, index); return store_dummy; } else { - return mark_to_release(s, gen_load(s, opsize, addr, - what == EA_LOADS, index)); + return gen_load(s, opsize, addr, what == EA_LOADS, index); } } 
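[Pattern note] Nearly every hunk in the LoongArch and m68k translator files above, and in the microblaze, mips, nios2 and openrisc files that follow, applies the same mechanical change: explicit tcg_temp_free()/tcg_temp_free_i32()/tcg_temp_free_i64() calls are deleted, and the per-DisasContext bookkeeping that existed only to track what to free (temp[]/ntemp on LoongArch, release[]/mark_to_release() and the DisasCompare g1/g2 flags on m68k) is removed, since TCG now reclaims translator temporaries automatically at the end of each translated instruction. The condensed before/after sketch below, based on the gen_load() hunk from trans_memory.c.inc earlier in this patch, shows the pattern. It assumes QEMU's TCG translator environment (tcg/tcg-op.h plus the gpr_src/gpr_dst/gen_set_gpr helpers seen in these hunks); the _old/_new suffixes are added here purely for illustration and the snippet is not meant to build outside that tree.

    /* Before: the temporary had to be hoisted out of the "if" block solely
     * so that it could be passed to tcg_temp_free() at the end. */
    static bool gen_load_old(DisasContext *ctx, arg_rr_i *a, MemOp mop)
    {
        TCGv dest = gpr_dst(ctx, a->rd, EXT_NONE);
        TCGv addr = gpr_src(ctx, a->rj, EXT_NONE);
        TCGv temp = NULL;

        if (a->imm) {
            temp = tcg_temp_new();
            tcg_gen_addi_tl(temp, addr, a->imm);
            addr = temp;
        }

        tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, mop);
        gen_set_gpr(a->rd, dest, EXT_NONE);

        if (temp) {
            tcg_temp_free(temp);    /* manual cleanup, now redundant */
        }
        return true;
    }

    /* After: the temporary is scoped to the block that needs it and is left
     * for TCG to reclaim automatically once the instruction is translated. */
    static bool gen_load_new(DisasContext *ctx, arg_rr_i *a, MemOp mop)
    {
        TCGv dest = gpr_dst(ctx, a->rd, EXT_NONE);
        TCGv addr = gpr_src(ctx, a->rj, EXT_NONE);

        if (a->imm) {
            TCGv temp = tcg_temp_new();
            tcg_gen_addi_tl(temp, addr, a->imm);
            addr = temp;
        }

        tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, mop);
        gen_set_gpr(a->rd, dest, EXT_NONE);
        return true;
    }

A related cleanup visible in the microblaze hunks further down swaps tcg_const_i32() for tcg_constant_i32() where the value is a true constant: interned constants never needed freeing, so the paired tcg_temp_free_i32() disappears along with the mutable allocation.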
@@ -491,7 +461,7 @@ static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base) } else { bd = 0; } - tmp = mark_to_release(s, tcg_temp_new()); + tmp = tcg_temp_new(); if ((ext & 0x44) == 0) { /* pre-index */ add = gen_addr_index(s, ext, tmp); @@ -501,7 +471,7 @@ static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base) if ((ext & 0x80) == 0) { /* base not suppressed */ if (IS_NULL_QREG(base)) { - base = mark_to_release(s, tcg_const_i32(offset + bd)); + base = tcg_const_i32(offset + bd); bd = 0; } if (!IS_NULL_QREG(add)) { @@ -517,11 +487,11 @@ static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base) add = tmp; } } else { - add = mark_to_release(s, tcg_const_i32(bd)); + add = tcg_const_i32(bd); } if ((ext & 3) != 0) { /* memory indirect */ - base = mark_to_release(s, gen_load(s, OS_LONG, add, 0, IS_USER(s))); + base = gen_load(s, OS_LONG, add, 0, IS_USER(s)); if ((ext & 0x44) == 4) { add = gen_addr_index(s, ext, tmp); tcg_gen_add_i32(tmp, add, base); @@ -546,7 +516,7 @@ static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base) } } else { /* brief extension word format */ - tmp = mark_to_release(s, tcg_temp_new()); + tmp = tcg_temp_new(); add = gen_addr_index(s, ext, tmp); if (!IS_NULL_QREG(base)) { tcg_gen_add_i32(tmp, add, base); @@ -609,9 +579,7 @@ static void gen_flush_flags(DisasContext *s) gen_ext(t0, t0, s->cc_op - CC_OP_ADDB, 1); tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V); tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0); - tcg_temp_free(t0); tcg_gen_andc_i32(QREG_CC_V, t1, QREG_CC_V); - tcg_temp_free(t1); break; case CC_OP_SUBB: @@ -626,9 +594,7 @@ static void gen_flush_flags(DisasContext *s) gen_ext(t0, t0, s->cc_op - CC_OP_SUBB, 1); tcg_gen_xor_i32(t1, QREG_CC_N, t0); tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0); - tcg_temp_free(t0); tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t1); - tcg_temp_free(t1); break; case CC_OP_CMPB: @@ -642,7 +608,6 @@ static void gen_flush_flags(DisasContext *s) tcg_gen_xor_i32(t0, QREG_CC_Z, QREG_CC_N); tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, QREG_CC_N); tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t0); - tcg_temp_free(t0); tcg_gen_mov_i32(QREG_CC_N, QREG_CC_Z); break; @@ -660,7 +625,6 @@ static void gen_flush_flags(DisasContext *s) default: t0 = tcg_const_i32(s->cc_op); gen_helper_flush_flags(cpu_env, t0); - tcg_temp_free(t0); s->cc_op_synced = 1; break; } @@ -676,7 +640,7 @@ static inline TCGv gen_extend(DisasContext *s, TCGv val, int opsize, int sign) if (opsize == OS_LONG) { tmp = val; } else { - tmp = mark_to_release(s, tcg_temp_new()); + tmp = tcg_temp_new(); gen_ext(tmp, val, opsize, sign); } @@ -756,14 +720,12 @@ static void gen_partset_reg(int opsize, TCGv reg, TCGv val) tmp = tcg_temp_new(); tcg_gen_ext8u_i32(tmp, val); tcg_gen_or_i32(reg, reg, tmp); - tcg_temp_free(tmp); break; case OS_WORD: tcg_gen_andi_i32(reg, reg, 0xffff0000); tmp = tcg_temp_new(); tcg_gen_ext16u_i32(tmp, val); tcg_gen_or_i32(reg, reg, tmp); - tcg_temp_free(tmp); break; case OS_LONG: case OS_SINGLE: @@ -802,7 +764,7 @@ static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s, return NULL_QREG; } reg = get_areg(s, reg0); - tmp = mark_to_release(s, tcg_temp_new()); + tmp = tcg_temp_new(); if (reg0 == 7 && opsize == OS_BYTE && m68k_feature(s->env, M68K_FEATURE_M68K)) { tcg_gen_subi_i32(tmp, reg, 2); @@ -812,7 +774,7 @@ static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s, return tmp; case 5: /* Indirect displacement. 
*/ reg = get_areg(s, reg0); - tmp = mark_to_release(s, tcg_temp_new()); + tmp = tcg_temp_new(); ext = read_im16(env, s); tcg_gen_addi_i32(tmp, reg, (int16_t)ext); return tmp; @@ -823,14 +785,14 @@ static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s, switch (reg0) { case 0: /* Absolute short. */ offset = (int16_t)read_im16(env, s); - return mark_to_release(s, tcg_const_i32(offset)); + return tcg_const_i32(offset); case 1: /* Absolute long. */ offset = read_im32(env, s); - return mark_to_release(s, tcg_const_i32(offset)); + return tcg_const_i32(offset); case 2: /* pc displacement */ offset = s->pc; offset += (int16_t)read_im16(env, s); - return mark_to_release(s, tcg_const_i32(offset)); + return tcg_const_i32(offset); case 3: /* pc index+displacement. */ return gen_lea_indexed(env, s, NULL_QREG); case 4: /* Immediate. */ @@ -958,7 +920,7 @@ static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0, default: g_assert_not_reached(); } - return mark_to_release(s, tcg_const_i32(offset)); + return tcg_const_i32(offset); default: return NULL_QREG; } @@ -997,12 +959,10 @@ static void gen_fp_move(TCGv_ptr dest, TCGv_ptr src) t32 = tcg_temp_new(); tcg_gen_ld16u_i32(t32, src, offsetof(FPReg, l.upper)); tcg_gen_st16_i32(t32, dest, offsetof(FPReg, l.upper)); - tcg_temp_free(t32); t64 = tcg_temp_new_i64(); tcg_gen_ld_i64(t64, src, offsetof(FPReg, l.lower)); tcg_gen_st_i64(t64, dest, offsetof(FPReg, l.lower)); - tcg_temp_free_i64(t64); } static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp, @@ -1056,8 +1016,6 @@ static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp, default: g_assert_not_reached(); } - tcg_temp_free(tmp); - tcg_temp_free_i64(t64); } static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp, @@ -1111,8 +1069,6 @@ static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp, default: g_assert_not_reached(); } - tcg_temp_free(tmp); - tcg_temp_free_i64(t64); } static void gen_ldst_fp(DisasContext *s, int opsize, TCGv addr, @@ -1168,7 +1124,6 @@ static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode, default: g_assert_not_reached(); } - tcg_temp_free(tmp); } return 0; case 1: /* Address register direct. 
*/ @@ -1214,27 +1169,22 @@ static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode, case OS_BYTE: tmp = tcg_const_i32((int8_t)read_im8(env, s)); gen_helper_exts32(cpu_env, fp, tmp); - tcg_temp_free(tmp); break; case OS_WORD: tmp = tcg_const_i32((int16_t)read_im16(env, s)); gen_helper_exts32(cpu_env, fp, tmp); - tcg_temp_free(tmp); break; case OS_LONG: tmp = tcg_const_i32(read_im32(env, s)); gen_helper_exts32(cpu_env, fp, tmp); - tcg_temp_free(tmp); break; case OS_SINGLE: tmp = tcg_const_i32(read_im32(env, s)); gen_helper_extf32(cpu_env, fp, tmp); - tcg_temp_free(tmp); break; case OS_DOUBLE: t64 = tcg_const_i64(read_im64(env, s)); gen_helper_extf64(cpu_env, fp, t64); - tcg_temp_free_i64(t64); break; case OS_EXTENDED: if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) { @@ -1243,10 +1193,8 @@ static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode, } tmp = tcg_const_i32(read_im32(env, s) >> 16); tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper)); - tcg_temp_free(tmp); t64 = tcg_const_i64(read_im64(env, s)); tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower)); - tcg_temp_free_i64(t64); break; case OS_PACKED: /* @@ -1276,8 +1224,6 @@ static int gen_ea_fp(CPUM68KState *env, DisasContext *s, uint16_t insn, typedef struct { TCGCond tcond; - bool g1; - bool g2; TCGv v1; TCGv v2; } DisasCompare; @@ -1290,7 +1236,6 @@ static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond) /* The CC_OP_CMP form can handle most normal comparisons directly. */ if (op == CC_OP_CMPB || op == CC_OP_CMPW || op == CC_OP_CMPL) { - c->g1 = c->g2 = 1; c->v1 = QREG_CC_N; c->v2 = QREG_CC_V; switch (cond) { @@ -1308,7 +1253,6 @@ static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond) goto done; case 10: /* PL */ case 11: /* MI */ - c->g1 = c->g2 = 0; c->v2 = tcg_const_i32(0); c->v1 = tmp = tcg_temp_new(); tcg_gen_sub_i32(tmp, QREG_CC_N, QREG_CC_V); @@ -1325,8 +1269,6 @@ static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond) } } - c->g1 = 1; - c->g2 = 0; c->v2 = tcg_const_i32(0); switch (cond) { @@ -1410,7 +1352,6 @@ static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond) case 2: /* HI (!C && !Z) -> !(C || Z)*/ case 3: /* LS (C || Z) */ c->v1 = tmp = tcg_temp_new(); - c->g1 = 0; tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2); tcg_gen_or_i32(tmp, tmp, QREG_CC_C); tcond = TCG_COND_NE; @@ -1438,20 +1379,17 @@ static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond) case 12: /* GE (!(N ^ V)) */ case 13: /* LT (N ^ V) */ c->v1 = tmp = tcg_temp_new(); - c->g1 = 0; tcg_gen_xor_i32(tmp, QREG_CC_N, QREG_CC_V); tcond = TCG_COND_LT; break; case 14: /* GT (!(Z || (N ^ V))) */ case 15: /* LE (Z || (N ^ V)) */ c->v1 = tmp = tcg_temp_new(); - c->g1 = 0; tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2); tcg_gen_neg_i32(tmp, tmp); tmp2 = tcg_temp_new(); tcg_gen_xor_i32(tmp2, QREG_CC_N, QREG_CC_V); tcg_gen_or_i32(tmp, tmp, tmp2); - tcg_temp_free(tmp2); tcond = TCG_COND_LT; break; } @@ -1463,16 +1401,6 @@ static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond) c->tcond = tcond; } -static void free_cond(DisasCompare *c) -{ - if (!c->g1) { - tcg_temp_free(c->v1); - } - if (!c->g2) { - tcg_temp_free(c->v2); - } -} - static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1) { DisasCompare c; @@ -1480,7 +1408,6 @@ static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1) gen_cc_cond(&c, s, cond); update_cc_op(s); tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1); - free_cond(&c); } /* Force a TB lookup after an instruction that changes 
the CPU state. */ @@ -1539,11 +1466,9 @@ DISAS_INSN(scc) tmp = tcg_temp_new(); tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2); - free_cond(&c); tcg_gen_neg_i32(tmp, tmp); DEST_EA(env, insn, OS_BYTE, tmp, NULL); - tcg_temp_free(tmp); } DISAS_INSN(dbcc) @@ -1610,7 +1535,6 @@ DISAS_INSN(mulw) tcg_gen_mul_i32(tmp, tmp, src); tcg_gen_mov_i32(reg, tmp); gen_logic_cc(s, tmp, OS_LONG); - tcg_temp_free(tmp); } DISAS_INSN(divw) @@ -1741,7 +1665,6 @@ static void bcd_add(TCGv dest, TCGv src) tcg_gen_andi_i32(t0, t0, 0x22); tcg_gen_add_i32(dest, t0, t0); tcg_gen_add_i32(dest, dest, t0); - tcg_temp_free(t0); /* * remove the exceeding 0x6 @@ -1749,7 +1672,6 @@ static void bcd_add(TCGv dest, TCGv src) */ tcg_gen_sub_i32(dest, t1, dest); - tcg_temp_free(t1); } static void bcd_sub(TCGv dest, TCGv src) @@ -1798,13 +1720,10 @@ static void bcd_sub(TCGv dest, TCGv src) tcg_gen_andi_i32(t2, t2, 0x22); tcg_gen_add_i32(t0, t2, t2); tcg_gen_add_i32(t0, t0, t2); - tcg_temp_free(t2); /* return t1 - t0 */ tcg_gen_sub_i32(dest, t1, t0); - tcg_temp_free(t0); - tcg_temp_free(t1); } static void bcd_flags(TCGv val) @@ -1905,8 +1824,6 @@ DISAS_INSN(nbcd) DEST_EA(env, insn, OS_BYTE, dest, &addr); bcd_flags(dest); - - tcg_temp_free(dest); } DISAS_INSN(addsub) @@ -1945,7 +1862,6 @@ DISAS_INSN(addsub) } else { gen_partset_reg(opsize, DREG(insn, 9), dest); } - tcg_temp_free(dest); } /* Reverse the order of the bits in REG. */ @@ -1982,7 +1898,6 @@ DISAS_INSN(bitop_reg) tmp = tcg_const_i32(1); tcg_gen_shl_i32(tmp, tmp, src2); - tcg_temp_free(src2); tcg_gen_and_i32(QREG_CC_Z, src1, tmp); @@ -2000,11 +1915,9 @@ DISAS_INSN(bitop_reg) default: /* btst */ break; } - tcg_temp_free(tmp); if (op) { DEST_EA(env, insn, opsize, dest, &addr); } - tcg_temp_free(dest); } DISAS_INSN(sats) @@ -2024,7 +1937,6 @@ static void gen_push(DisasContext *s, TCGv val) tcg_gen_subi_i32(tmp, QREG_SP, 4); gen_store(s, OS_LONG, tmp, val, IS_USER(s)); tcg_gen_mov_i32(QREG_SP, tmp); - tcg_temp_free(tmp); } static TCGv mreg(int reg) @@ -2100,7 +2012,6 @@ DISAS_INSN(movem) for (i = 0; i < 16; i++) { if (mask & (1 << i)) { tcg_gen_mov_i32(mreg(i), r[i]); - tcg_temp_free(r[i]); } } if (mode == 3) { @@ -2127,7 +2038,6 @@ DISAS_INSN(movem) tmp = tcg_temp_new(); tcg_gen_sub_i32(tmp, cpu_aregs[reg0], incr); gen_store(s, opsize, addr, tmp, IS_USER(s)); - tcg_temp_free(tmp); } else { gen_store(s, opsize, addr, mreg(i), IS_USER(s)); } @@ -2143,9 +2053,6 @@ DISAS_INSN(movem) } } } - - tcg_temp_free(incr); - tcg_temp_free(addr); } DISAS_INSN(movep) @@ -2189,8 +2096,6 @@ DISAS_INSN(movep) } } } - tcg_temp_free(abuf); - tcg_temp_free(dbuf); } DISAS_INSN(bitop_im) @@ -2249,7 +2154,6 @@ DISAS_INSN(bitop_im) break; } DEST_EA(env, insn, opsize, tmp, &addr); - tcg_temp_free(tmp); } } @@ -2272,7 +2176,6 @@ static TCGv gen_get_sr(DisasContext *s) sr = tcg_temp_new(); tcg_gen_andi_i32(sr, QREG_SR, 0xffe0); tcg_gen_or_i32(sr, sr, ccr); - tcg_temp_free(ccr); return sr; } @@ -2421,8 +2324,6 @@ DISAS_INSN(arith_im) default: abort(); } - tcg_temp_free(im); - tcg_temp_free(dest); } DISAS_INSN(cas) @@ -2478,8 +2379,6 @@ DISAS_INSN(cas) gen_update_cc_cmp(s, load, cmp, opsize); gen_partset_reg(opsize, DREG(ext, 0), load); - tcg_temp_free(load); - switch (extract32(insn, 3, 3)) { case 3: /* Indirect postincrement. */ tcg_gen_addi_i32(AREG(insn, 0), addr, opsize_bytes(opsize)); @@ -2535,7 +2434,6 @@ DISAS_INSN(cas2w) } else { gen_helper_cas2w(cpu_env, regs, addr1, addr2); } - tcg_temp_free(regs); /* Note that cas2w also assigned to env->cc_op. 
*/ s->cc_op = CC_OP_CMPW; @@ -2586,7 +2484,6 @@ DISAS_INSN(cas2l) } else { gen_helper_cas2l(cpu_env, regs, addr1, addr2); } - tcg_temp_free(regs); /* Note that cas2l also assigned to env->cc_op. */ s->cc_op = CC_OP_CMPL; @@ -2658,7 +2555,6 @@ DISAS_INSN(negx) z = tcg_const_i32(0); tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, z, QREG_CC_X, z); tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, z, z, QREG_CC_N, QREG_CC_X); - tcg_temp_free(z); gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1); tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1); @@ -2706,7 +2602,6 @@ DISAS_INSN(clr) opsize = insn_opsize(insn); DEST_EA(env, insn, opsize, zero, NULL); gen_logic_cc(s, zero, opsize); - tcg_temp_free(zero); } DISAS_INSN(move_from_ccr) @@ -2732,7 +2627,6 @@ DISAS_INSN(neg) gen_update_cc_add(dest, src1, opsize); tcg_gen_setcondi_i32(TCG_COND_NE, QREG_CC_X, dest, 0); DEST_EA(env, insn, opsize, dest, &addr); - tcg_temp_free(dest); } DISAS_INSN(move_to_ccr) @@ -2767,8 +2661,6 @@ DISAS_INSN(swap) tcg_gen_shli_i32(src1, reg, 16); tcg_gen_shri_i32(src2, reg, 16); tcg_gen_or_i32(reg, src1, src2); - tcg_temp_free(src2); - tcg_temp_free(src1); gen_logic_cc(s, reg, OS_LONG); } @@ -2811,7 +2703,6 @@ DISAS_INSN(ext) else tcg_gen_mov_i32(reg, tmp); gen_logic_cc(s, tmp, OS_LONG); - tcg_temp_free(tmp); } DISAS_INSN(tst) @@ -2856,7 +2747,6 @@ DISAS_INSN(tas) tcg_gen_atomic_fetch_or_tl(src1, addr, tcg_constant_tl(0x80), IS_USER(s), MO_SB); gen_logic_cc(s, src1, OS_BYTE); - tcg_temp_free(src1); switch (mode) { case 3: /* Indirect postincrement. */ @@ -2945,7 +2835,6 @@ static void gen_link(DisasContext *s, uint16_t insn, int32_t offset) tcg_gen_mov_i32(reg, tmp); } tcg_gen_addi_i32(QREG_SP, tmp, offset); - tcg_temp_free(tmp); } DISAS_INSN(link) @@ -2976,8 +2865,6 @@ DISAS_INSN(unlk) tmp = gen_load(s, OS_LONG, src, 0, IS_USER(s)); tcg_gen_mov_i32(reg, tmp); tcg_gen_addi_i32(QREG_SP, src, 4); - tcg_temp_free(src); - tcg_temp_free(tmp); } #if defined(CONFIG_SOFTMMU) @@ -3017,10 +2904,8 @@ DISAS_INSN(rtr) tcg_gen_addi_i32(sp, QREG_SP, 2); tmp = gen_load(s, OS_LONG, sp, 0, IS_USER(s)); tcg_gen_addi_i32(QREG_SP, sp, 4); - tcg_temp_free(sp); gen_set_sr(s, ccr, true); - tcg_temp_free(ccr); gen_jmp(s, tmp); } @@ -3099,9 +2984,7 @@ DISAS_INSN(addsubq) } gen_update_cc_add(dest, val, opsize); } - tcg_temp_free(val); DEST_EA(env, insn, opsize, dest, &addr); - tcg_temp_free(dest); } DISAS_INSN(branch) @@ -3179,7 +3062,6 @@ DISAS_INSN(or) gen_partset_reg(opsize, DREG(insn, 9), dest); } gen_logic_cc(s, dest, opsize); - tcg_temp_free(dest); } DISAS_INSN(suba) @@ -3214,7 +3096,6 @@ static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize) tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, dest); tcg_gen_xor_i32(tmp, dest, src); tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, tmp); - tcg_temp_free(tmp); /* Copy the rest of the results into place. 
*/ tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */ @@ -3262,9 +3143,6 @@ DISAS_INSN(subx_mem) gen_subx(s, src, dest, opsize); gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s)); - - tcg_temp_free(dest); - tcg_temp_free(src); } DISAS_INSN(mov3q) @@ -3278,7 +3156,6 @@ DISAS_INSN(mov3q) src = tcg_const_i32(val); gen_logic_cc(s, src, OS_LONG); DEST_EA(env, insn, OS_LONG, src, NULL); - tcg_temp_free(src); } DISAS_INSN(cmp) @@ -3338,7 +3215,6 @@ DISAS_INSN(eor) tcg_gen_xor_i32(dest, src, DREG(insn, 9)); gen_logic_cc(s, dest, opsize); DEST_EA(env, insn, opsize, dest, &addr); - tcg_temp_free(dest); } static void do_exg(TCGv reg1, TCGv reg2) @@ -3347,7 +3223,6 @@ static void do_exg(TCGv reg1, TCGv reg2) tcg_gen_mov_i32(temp, reg1); tcg_gen_mov_i32(reg1, reg2); tcg_gen_mov_i32(reg2, temp); - tcg_temp_free(temp); } DISAS_INSN(exg_dd) @@ -3390,7 +3265,6 @@ DISAS_INSN(and) gen_partset_reg(opsize, reg, dest); } gen_logic_cc(s, dest, opsize); - tcg_temp_free(dest); } DISAS_INSN(adda) @@ -3424,7 +3298,6 @@ static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize) tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src); tcg_gen_xor_i32(tmp, dest, src); tcg_gen_andc_i32(QREG_CC_V, QREG_CC_V, tmp); - tcg_temp_free(tmp); /* Copy the rest of the results into place. */ tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */ @@ -3472,9 +3345,6 @@ DISAS_INSN(addx_mem) gen_addx(s, src, dest, opsize); gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s)); - - tcg_temp_free(dest); - tcg_temp_free(src); } static inline void shift_im(DisasContext *s, uint16_t insn, int opsize) @@ -3508,7 +3378,6 @@ static inline void shift_im(DisasContext *s, uint16_t insn, int opsize) tcg_gen_sari_i32(QREG_CC_V, reg, bits - 1); tcg_gen_sari_i32(t0, reg, bits - count - 1); tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, t0); - tcg_temp_free(t0); } tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V); } @@ -3566,7 +3435,6 @@ static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize) tcg_gen_shri_i32(QREG_CC_C, QREG_CC_N, bits); tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C, s32, zero, zero, QREG_CC_C); - tcg_temp_free(zero); } tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1); @@ -3587,7 +3455,6 @@ static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize) TCGv_i64 tt = tcg_const_i64(32); /* if shift is greater than 32, use 32 */ tcg_gen_movcond_i64(TCG_COND_GT, s64, s64, tt, tt, s64); - tcg_temp_free_i64(tt); /* Sign extend the input to 64 bits; re-do the shift. */ tcg_gen_ext_i32_i64(t64, reg); tcg_gen_shl_i64(s64, t64, s64); @@ -3619,10 +3486,6 @@ static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize) gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1); tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N); - tcg_temp_free(s32); - tcg_temp_free_i64(s64); - tcg_temp_free_i64(t64); - /* Write back the result. */ gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N); set_cc_op(s, CC_OP_FLAGS); @@ -3783,25 +3646,20 @@ static TCGv rotate_x(TCGv reg, TCGv shift, int left, int size) /* shx = shx < 0 ? 
size : shx; */ zero = tcg_const_i32(0); tcg_gen_movcond_i32(TCG_COND_LT, shx, shx, zero, sz, shx); - tcg_temp_free(zero); } else { tcg_gen_mov_i32(shr, shift); /* shr = shift */ tcg_gen_movi_i32(shl, size + 1); tcg_gen_sub_i32(shl, shl, shift); /* shl = size + 1 - shift */ tcg_gen_sub_i32(shx, sz, shift); /* shx = size - shift */ } - tcg_temp_free_i32(sz); /* reg = (reg << shl) | (reg >> shr) | (x << shx); */ tcg_gen_shl_i32(shl, reg, shl); tcg_gen_shr_i32(shr, reg, shr); tcg_gen_or_i32(reg, shl, shr); - tcg_temp_free(shl); - tcg_temp_free(shr); tcg_gen_shl_i32(shx, QREG_CC_X, shx); tcg_gen_or_i32(reg, reg, shx); - tcg_temp_free(shx); /* X = (reg >> size) & 1 */ @@ -3835,7 +3693,6 @@ static TCGv rotate32_x(TCGv reg, TCGv shift, int left) /* rotate */ tcg_gen_rotl_i64(t0, t0, shift64); - tcg_temp_free_i64(shift64); /* result is [reg:..:reg:X] */ @@ -3849,7 +3706,6 @@ static TCGv rotate32_x(TCGv reg, TCGv shift, int left) tcg_gen_concat_i32_i64(t0, reg, QREG_CC_X); tcg_gen_rotr_i64(t0, t0, shift64); - tcg_temp_free_i64(shift64); /* result is value: [X:reg:..:reg] */ @@ -3863,17 +3719,13 @@ static TCGv rotate32_x(TCGv reg, TCGv shift, int left) tcg_gen_shli_i32(hi, hi, 1); } - tcg_temp_free_i64(t0); tcg_gen_or_i32(lo, lo, hi); - tcg_temp_free(hi); /* if shift == 0, register and X are not affected */ zero = tcg_const_i32(0); tcg_gen_movcond_i32(TCG_COND_EQ, X, shift, zero, QREG_CC_X, X); tcg_gen_movcond_i32(TCG_COND_EQ, reg, shift, zero, reg, lo); - tcg_temp_free(zero); - tcg_temp_free(lo); return X; } @@ -3895,9 +3747,7 @@ DISAS_INSN(rotate_im) } else { TCGv X = rotate32_x(DREG(insn, 0), shift, left); rotate_x_flags(DREG(insn, 0), X, 32); - tcg_temp_free(X); } - tcg_temp_free(shift); set_cc_op(s, CC_OP_FLAGS); } @@ -3922,9 +3772,7 @@ DISAS_INSN(rotate8_im) } else { TCGv X = rotate_x(reg, shift, left, 8); rotate_x_flags(reg, X, 8); - tcg_temp_free(X); } - tcg_temp_free(shift); gen_partset_reg(OS_BYTE, DREG(insn, 0), reg); set_cc_op(s, CC_OP_FLAGS); } @@ -3948,9 +3796,7 @@ DISAS_INSN(rotate16_im) } else { TCGv X = rotate_x(reg, shift, left, 16); rotate_x_flags(reg, X, 16); - tcg_temp_free(X); } - tcg_temp_free(shift); gen_partset_reg(OS_WORD, DREG(insn, 0), reg); set_cc_op(s, CC_OP_FLAGS); } @@ -3982,10 +3828,7 @@ DISAS_INSN(rotate_reg) tcg_gen_remu_i32(t1, t0, t1); X = rotate32_x(DREG(insn, 0), t1, left); rotate_x_flags(DREG(insn, 0), X, 32); - tcg_temp_free(X); } - tcg_temp_free(t1); - tcg_temp_free(t0); set_cc_op(s, CC_OP_FLAGS); } @@ -4016,10 +3859,7 @@ DISAS_INSN(rotate8_reg) tcg_gen_remu_i32(t1, t0, t1); X = rotate_x(reg, t1, left, 8); rotate_x_flags(reg, X, 8); - tcg_temp_free(X); } - tcg_temp_free(t1); - tcg_temp_free(t0); gen_partset_reg(OS_BYTE, DREG(insn, 0), reg); set_cc_op(s, CC_OP_FLAGS); } @@ -4051,10 +3891,7 @@ DISAS_INSN(rotate16_reg) tcg_gen_remu_i32(t1, t0, t1); X = rotate_x(reg, t1, left, 16); rotate_x_flags(reg, X, 16); - tcg_temp_free(X); } - tcg_temp_free(t1); - tcg_temp_free(t0); gen_partset_reg(OS_WORD, DREG(insn, 0), reg); set_cc_op(s, CC_OP_FLAGS); } @@ -4074,9 +3911,7 @@ DISAS_INSN(rotate_mem) } else { TCGv X = rotate_x(src, shift, left, 16); rotate_x_flags(src, X, 16); - tcg_temp_free(X); } - tcg_temp_free(shift); DEST_EA(env, insn, OS_WORD, src, &addr); set_cc_op(s, CC_OP_FLAGS); } @@ -4117,7 +3952,6 @@ DISAS_INSN(bfext_reg) } else { tcg_gen_shr_i32(dst, tmp, shift); } - tcg_temp_free(shift); } else { /* Immediate width. 
*/ if (ext & 0x800) { @@ -4146,7 +3980,6 @@ DISAS_INSN(bfext_reg) } } - tcg_temp_free(tmp); set_cc_op(s, CC_OP_LOGIC); } @@ -4181,16 +4014,8 @@ DISAS_INSN(bfext_mem) TCGv_i64 tmp = tcg_temp_new_i64(); gen_helper_bfextu_mem(tmp, cpu_env, addr, ofs, len); tcg_gen_extr_i64_i32(dest, QREG_CC_N, tmp); - tcg_temp_free_i64(tmp); } set_cc_op(s, CC_OP_LOGIC); - - if (!(ext & 0x20)) { - tcg_temp_free(len); - } - if (!(ext & 0x800)) { - tcg_temp_free(ofs); - } } DISAS_INSN(bfop_reg) @@ -4258,7 +4083,6 @@ DISAS_INSN(bfop_reg) tcg_gen_movi_i32(tofs, ofs); } } - tcg_temp_free(tmp); } set_cc_op(s, CC_OP_LOGIC); @@ -4271,8 +4095,6 @@ DISAS_INSN(bfop_reg) break; case 0x0d00: /* bfffo */ gen_helper_bfffo_reg(DREG(ext, 12), QREG_CC_N, tofs, tlen); - tcg_temp_free(tlen); - tcg_temp_free(tofs); break; case 0x0e00: /* bfset */ tcg_gen_orc_i32(src, src, mask); @@ -4283,7 +4105,6 @@ DISAS_INSN(bfop_reg) default: g_assert_not_reached(); } - tcg_temp_free(mask); } DISAS_INSN(bfop_mem) @@ -4320,7 +4141,6 @@ DISAS_INSN(bfop_mem) t64 = tcg_temp_new_i64(); gen_helper_bfffo_mem(t64, cpu_env, addr, ofs, len); tcg_gen_extr_i64_i32(DREG(ext, 12), QREG_CC_N, t64); - tcg_temp_free_i64(t64); break; case 0x0e00: /* bfset */ gen_helper_bfset_mem(QREG_CC_N, cpu_env, addr, ofs, len); @@ -4332,13 +4152,6 @@ DISAS_INSN(bfop_mem) g_assert_not_reached(); } set_cc_op(s, CC_OP_LOGIC); - - if (!(ext & 0x20)) { - tcg_temp_free(len); - } - if (!(ext & 0x800)) { - tcg_temp_free(ofs); - } } DISAS_INSN(bfins_reg) @@ -4408,11 +4221,7 @@ DISAS_INSN(bfins_reg) tcg_gen_rotr_i32(tmp, tmp, rot); tcg_gen_and_i32(dst, dst, mask); tcg_gen_or_i32(dst, dst, tmp); - - tcg_temp_free(rot); - tcg_temp_free(mask); } - tcg_temp_free(tmp); } DISAS_INSN(bfins_mem) @@ -4440,13 +4249,6 @@ DISAS_INSN(bfins_mem) gen_helper_bfins_mem(QREG_CC_N, cpu_env, addr, src, ofs, len); set_cc_op(s, CC_OP_LOGIC); - - if (!(ext & 0x20)) { - tcg_temp_free(len); - } - if (!(ext & 0x800)) { - tcg_temp_free(ofs); - } } DISAS_INSN(ff1) @@ -4515,9 +4317,7 @@ DISAS_INSN(chk2) tcg_gen_addi_i32(addr2, addr1, opsize_bytes(opsize)); bound1 = gen_load(s, opsize, addr1, 1, IS_USER(s)); - tcg_temp_free(addr1); bound2 = gen_load(s, opsize, addr2, 1, IS_USER(s)); - tcg_temp_free(addr2); reg = tcg_temp_new(); if (ext & 0x8000) { @@ -4528,9 +4328,6 @@ DISAS_INSN(chk2) gen_flush_flags(s); gen_helper_chk2(cpu_env, reg, bound1, bound2); - tcg_temp_free(reg); - tcg_temp_free(bound1); - tcg_temp_free(bound2); } static void m68k_copy_line(TCGv dst, TCGv src, int index) @@ -4552,10 +4349,6 @@ static void m68k_copy_line(TCGv dst, TCGv src, int index) tcg_gen_qemu_st64(t0, addr, index); tcg_gen_addi_i32(addr, addr, 8); tcg_gen_qemu_st64(t1, addr, index); - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - tcg_temp_free(addr); } DISAS_INSN(move16_reg) @@ -4576,7 +4369,6 @@ DISAS_INSN(move16_reg) tcg_gen_mov_i32(tmp, AREG(ext, 12)); tcg_gen_addi_i32(AREG(insn, 0), AREG(insn, 0), 16); tcg_gen_addi_i32(AREG(ext, 12), tmp, 16); - tcg_temp_free(tmp); } DISAS_INSN(move16_mem) @@ -4595,8 +4387,6 @@ DISAS_INSN(move16_mem) m68k_copy_line(addr, reg, index); } - tcg_temp_free(addr); - if (((insn >> 3) & 2) == 0) { /* (Ay)+ */ tcg_gen_addi_i32(reg, reg, 16); @@ -4681,7 +4471,6 @@ DISAS_INSN(moves) } else { gen_partset_reg(opsize, reg, tmp); } - tcg_temp_free(tmp); } switch (extract32(insn, 3, 3)) { case 3: /* Indirect postincrement. 
*/ @@ -4855,7 +4644,6 @@ DISAS_INSN(pflush) opmode = tcg_const_i32((insn >> 3) & 3); gen_helper_pflush(cpu_env, AREG(insn, 0), opmode); - tcg_temp_free(opmode); } DISAS_INSN(ptest) @@ -4868,7 +4656,6 @@ DISAS_INSN(ptest) } is_read = tcg_const_i32((insn >> 5) & 1); gen_helper_ptest(cpu_env, AREG(insn, 0), is_read); - tcg_temp_free(is_read); } #endif @@ -4914,7 +4701,6 @@ static void do_trapcc(DisasContext *s, DisasCompare *c) s->base.is_jmp = DISAS_NEXT; } } - free_cond(c); } DISAS_INSN(trapcc) @@ -4985,7 +4771,6 @@ static void gen_qemu_store_fcr(DisasContext *s, TCGv addr, int reg) tmp = tcg_temp_new(); gen_load_fcr(s, tmp, reg); tcg_gen_qemu_st32(tmp, addr, index); - tcg_temp_free(tmp); } static void gen_qemu_load_fcr(DisasContext *s, TCGv addr, int reg) @@ -4996,7 +4781,6 @@ static void gen_qemu_load_fcr(DisasContext *s, TCGv addr, int reg) tmp = tcg_temp_new(); tcg_gen_qemu_ld32u(tmp, addr, index); gen_store_fcr(s, tmp, reg); - tcg_temp_free(tmp); } @@ -5042,7 +4826,6 @@ static void gen_op_fmove_fcr(CPUM68KState *env, DisasContext *s, } tmp = tcg_const_i32(read_im32(env, s)); gen_store_fcr(s, tmp, mask); - tcg_temp_free(tmp); return; } break; @@ -5095,7 +4878,6 @@ static void gen_op_fmove_fcr(CPUM68KState *env, DisasContext *s, tcg_gen_mov_i32(AREG(insn, 0), addr); } } - tcg_temp_free_i32(addr); } static void gen_op_fmovem(CPUM68KState *env, DisasContext *s, @@ -5156,7 +4938,6 @@ static void gen_op_fmovem(CPUM68KState *env, DisasContext *s, if ((insn & 070) == 030 || (insn & 070) == 040) { tcg_gen_mov_i32(AREG(insn, 0), tmp); } - tcg_temp_free(tmp); } /* @@ -5183,8 +4964,6 @@ DISAS_INSN(fpu) TCGv rom_offset = tcg_const_i32(opmode); cpu_dest = gen_fp_ptr(REG(ext, 7)); gen_helper_fconst(cpu_env, cpu_dest, rom_offset); - tcg_temp_free_ptr(cpu_dest); - tcg_temp_free(rom_offset); return; } break; @@ -5196,7 +4975,6 @@ DISAS_INSN(fpu) gen_addr_fault(s); } gen_helper_ftst(cpu_env, cpu_src); - tcg_temp_free_ptr(cpu_src); return; case 4: /* fmove to control register. */ case 5: /* fmove from control register. */ @@ -5384,7 +5162,6 @@ DISAS_INSN(fpu) case 0x36: case 0x37: { TCGv_ptr cpu_dest2 = gen_fp_ptr(REG(ext, 0)); gen_helper_fsincos(cpu_env, cpu_dest, cpu_dest2, cpu_src); - tcg_temp_free_ptr(cpu_dest2); } break; case 0x38: /* fcmp */ @@ -5396,9 +5173,7 @@ DISAS_INSN(fpu) default: goto undef; } - tcg_temp_free_ptr(cpu_src); gen_helper_ftst(cpu_env, cpu_dest); - tcg_temp_free_ptr(cpu_dest); return; undef: /* FIXME: Is this right for offset addressing modes? */ @@ -5410,9 +5185,7 @@ static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond) { TCGv fpsr; - c->g1 = 1; c->v2 = tcg_const_i32(0); - c->g2 = 0; /* TODO: Raise BSUN exception. 
*/ fpsr = tcg_temp_new(); gen_load_fcr(s, fpsr, M68K_FPSR); @@ -5425,14 +5198,12 @@ static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond) case 1: /* EQual Z */ case 17: /* Signaling EQual Z */ c->v1 = tcg_temp_new(); - c->g1 = 0; tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z); c->tcond = TCG_COND_NE; break; case 2: /* Ordered Greater Than !(A || Z || N) */ case 18: /* Greater Than !(A || Z || N) */ c->v1 = tcg_temp_new(); - c->g1 = 0; tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N); c->tcond = TCG_COND_EQ; @@ -5440,7 +5211,6 @@ static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond) case 3: /* Ordered Greater than or Equal Z || !(A || N) */ case 19: /* Greater than or Equal Z || !(A || N) */ c->v1 = tcg_temp_new(); - c->g1 = 0; tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A); tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A)); tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_Z | FPSR_CC_N); @@ -5451,7 +5221,6 @@ static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond) case 4: /* Ordered Less Than !(!N || A || Z); */ case 20: /* Less Than !(!N || A || Z); */ c->v1 = tcg_temp_new(); - c->g1 = 0; tcg_gen_xori_i32(c->v1, fpsr, FPSR_CC_N); tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_N | FPSR_CC_A | FPSR_CC_Z); c->tcond = TCG_COND_EQ; @@ -5459,7 +5228,6 @@ static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond) case 5: /* Ordered Less than or Equal Z || (N && !A) */ case 21: /* Less than or Equal Z || (N && !A) */ c->v1 = tcg_temp_new(); - c->g1 = 0; tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A); tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A)); tcg_gen_andc_i32(c->v1, fpsr, c->v1); @@ -5469,35 +5237,30 @@ static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond) case 6: /* Ordered Greater or Less than !(A || Z) */ case 22: /* Greater or Less than !(A || Z) */ c->v1 = tcg_temp_new(); - c->g1 = 0; tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z); c->tcond = TCG_COND_EQ; break; case 7: /* Ordered !A */ case 23: /* Greater, Less or Equal !A */ c->v1 = tcg_temp_new(); - c->g1 = 0; tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A); c->tcond = TCG_COND_EQ; break; case 8: /* Unordered A */ case 24: /* Not Greater, Less or Equal A */ c->v1 = tcg_temp_new(); - c->g1 = 0; tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A); c->tcond = TCG_COND_NE; break; case 9: /* Unordered or Equal A || Z */ case 25: /* Not Greater or Less then A || Z */ c->v1 = tcg_temp_new(); - c->g1 = 0; tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z); c->tcond = TCG_COND_NE; break; case 10: /* Unordered or Greater Than A || !(N || Z)) */ case 26: /* Not Less or Equal A || !(N || Z)) */ c->v1 = tcg_temp_new(); - c->g1 = 0; tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z); tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z)); tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_A | FPSR_CC_N); @@ -5508,7 +5271,6 @@ static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond) case 11: /* Unordered or Greater or Equal A || Z || !N */ case 27: /* Not Less Than A || Z || !N */ c->v1 = tcg_temp_new(); - c->g1 = 0; tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N); tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N); c->tcond = TCG_COND_NE; @@ -5516,7 +5278,6 @@ static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond) case 12: /* Unordered or Less Than A || (N && !Z) */ case 28: /* Not Greater than or Equal A || (N && !Z) */ c->v1 = tcg_temp_new(); - c->g1 = 0; tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z); tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - 
ctz32(FPSR_CC_Z)); tcg_gen_andc_i32(c->v1, fpsr, c->v1); @@ -5526,14 +5287,12 @@ static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond) case 13: /* Unordered or Less or Equal A || Z || N */ case 29: /* Not Greater Than A || Z || N */ c->v1 = tcg_temp_new(); - c->g1 = 0; tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N); c->tcond = TCG_COND_NE; break; case 14: /* Not Equal !Z */ case 30: /* Signaling Not Equal !Z */ c->v1 = tcg_temp_new(); - c->g1 = 0; tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z); c->tcond = TCG_COND_EQ; break; @@ -5543,7 +5302,6 @@ static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond) c->tcond = TCG_COND_ALWAYS; break; } - tcg_temp_free(fpsr); } static void gen_fjmpcc(DisasContext *s, int cond, TCGLabel *l1) @@ -5553,7 +5311,6 @@ static void gen_fjmpcc(DisasContext *s, int cond, TCGLabel *l1) gen_fcc_cond(&c, s, cond); update_cc_op(s); tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1); - free_cond(&c); } DISAS_INSN(fbcc) @@ -5589,11 +5346,9 @@ DISAS_INSN(fscc) tmp = tcg_temp_new(); tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2); - free_cond(&c); tcg_gen_neg_i32(tmp, tmp); DEST_EA(env, insn, OS_BYTE, tmp, NULL); - tcg_temp_free(tmp); } DISAS_INSN(ftrapcc) @@ -5652,7 +5407,6 @@ DISAS_INSN(fsave) /* always write IDLE */ TCGv idle = tcg_const_i32(0x41000000); DEST_EA(env, insn, OS_LONG, idle, NULL); - tcg_temp_free(idle); } else { disas_undef(env, s, insn); } @@ -5852,7 +5606,6 @@ DISAS_INSN(mac) case 4: /* Pre-decrement. */ tcg_gen_mov_i32(AREG(insn, 0), addr); } - tcg_temp_free(loadval); } } @@ -5926,7 +5679,6 @@ DISAS_INSN(macsr_to_ccr) /* Note that X and C are always cleared. */ tcg_gen_andi_i32(tmp, QREG_MACSR, CCF_N | CCF_Z | CCF_V); gen_helper_set_ccr(cpu_env, tmp); - tcg_temp_free(tmp); set_cc_op(s, CC_OP_FLAGS); } @@ -6287,7 +6039,6 @@ static void m68k_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) dc->cc_op_synced = 1; dc->done_mac = 0; dc->writeback_mask = 0; - init_release_array(dc); dc->ss_active = (M68K_SR_TRACE(env->sr) == M68K_SR_TRACE_ANY_INS); /* If architectural single step active, limit to 1 */ @@ -6314,7 +6065,6 @@ static void m68k_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) opcode_table[insn](env, dc, insn); do_writebacks(dc); - do_release(dc); dc->pc_prev = dc->base.pc_next; dc->base.pc_next = dc->pc; diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c index 037a652cb9..ee0d7b81ad 100644 --- a/target/microblaze/translate.c +++ b/target/microblaze/translate.c @@ -101,10 +101,7 @@ static void t_sync_flags(DisasContext *dc) static void gen_raise_exception(DisasContext *dc, uint32_t index) { - TCGv_i32 tmp = tcg_const_i32(index); - - gen_helper_raise_exception(cpu_env, tmp); - tcg_temp_free_i32(tmp); + gen_helper_raise_exception(cpu_env, tcg_constant_i32(index)); dc->base.is_jmp = DISAS_NORETURN; } @@ -117,9 +114,8 @@ static void gen_raise_exception_sync(DisasContext *dc, uint32_t index) static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec) { - TCGv_i32 tmp = tcg_const_i32(esr_ec); + TCGv_i32 tmp = tcg_constant_i32(esr_ec); tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, esr)); - tcg_temp_free_i32(tmp); gen_raise_exception_sync(dc, EXCP_HW_EXCP); } @@ -262,11 +258,9 @@ static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects, rd = reg_for_write(dc, arg->rd); ra = reg_for_read(dc, arg->ra); - imm = tcg_const_i32(arg->imm); + imm = tcg_constant_i32(arg->imm); fn(rd, ra, imm); - - tcg_temp_free_i32(imm); return true; } @@ -309,24 +303,19 @@ 
static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects, /* No input carry, but output carry. */ static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) { - TCGv_i32 zero = tcg_const_i32(0); + TCGv_i32 zero = tcg_constant_i32(0); tcg_gen_add2_i32(out, cpu_msr_c, ina, zero, inb, zero); - - tcg_temp_free_i32(zero); } /* Input and output carry. */ static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) { - TCGv_i32 zero = tcg_const_i32(0); + TCGv_i32 zero = tcg_constant_i32(0); TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_add2_i32(tmp, cpu_msr_c, ina, zero, cpu_msr_c, zero); tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero); - - tcg_temp_free_i32(tmp); - tcg_temp_free_i32(zero); } /* Input carry, but no output carry. */ @@ -361,7 +350,6 @@ static void gen_bsra(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_andi_i32(tmp, inb, 31); tcg_gen_sar_i32(out, ina, tmp); - tcg_temp_free_i32(tmp); } static void gen_bsrl(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) @@ -369,7 +357,6 @@ static void gen_bsrl(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_andi_i32(tmp, inb, 31); tcg_gen_shr_i32(out, ina, tmp); - tcg_temp_free_i32(tmp); } static void gen_bsll(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) @@ -377,7 +364,6 @@ static void gen_bsll(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_andi_i32(tmp, inb, 31); tcg_gen_shl_i32(out, ina, tmp); - tcg_temp_free_i32(tmp); } static void gen_bsefi(TCGv_i32 out, TCGv_i32 ina, int32_t imm) @@ -436,7 +422,6 @@ static void gen_cmp(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) tcg_gen_setcond_i32(TCG_COND_LT, lt, inb, ina); tcg_gen_sub_i32(out, inb, ina); tcg_gen_deposit_i32(out, out, lt, 31, 1); - tcg_temp_free_i32(lt); } static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) @@ -446,7 +431,6 @@ static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) tcg_gen_setcond_i32(TCG_COND_LTU, lt, inb, ina); tcg_gen_sub_i32(out, inb, ina); tcg_gen_deposit_i32(out, out, lt, 31, 1); - tcg_temp_free_i32(lt); } DO_TYPEA(cmp, false, gen_cmp) @@ -513,21 +497,18 @@ static void gen_mulh(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_muls2_i32(tmp, out, ina, inb); - tcg_temp_free_i32(tmp); } static void gen_mulhu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_mulu2_i32(tmp, out, ina, inb); - tcg_temp_free_i32(tmp); } static void gen_mulhsu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_mulsu2_i32(tmp, out, ina, inb); - tcg_temp_free_i32(tmp); } DO_TYPEA_CFG(mul, use_hw_mul, false, tcg_gen_mul_i32) @@ -563,15 +544,12 @@ static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) /* Input and output carry. */ static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) { - TCGv_i32 zero = tcg_const_i32(0); + TCGv_i32 zero = tcg_constant_i32(0); TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_not_i32(tmp, ina); tcg_gen_add2_i32(tmp, cpu_msr_c, tmp, zero, cpu_msr_c, zero); tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero); - - tcg_temp_free_i32(zero); - tcg_temp_free_i32(tmp); } /* No input or output carry. 
*/ @@ -588,8 +566,6 @@ static void gen_rsubkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) tcg_gen_not_i32(nota, ina); tcg_gen_add_i32(out, inb, nota); tcg_gen_add_i32(out, out, cpu_msr_c); - - tcg_temp_free_i32(nota); } DO_TYPEA(rsub, true, gen_rsub) @@ -618,8 +594,6 @@ static void gen_src(TCGv_i32 out, TCGv_i32 ina) tcg_gen_mov_i32(tmp, cpu_msr_c); tcg_gen_andi_i32(cpu_msr_c, ina, 1); tcg_gen_extract2_i32(out, ina, tmp, 1); - - tcg_temp_free_i32(tmp); } static void gen_srl(TCGv_i32 out, TCGv_i32 ina) @@ -659,7 +633,6 @@ static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb) TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]); tcg_gen_extu_i32_tl(ret, tmp); - tcg_temp_free_i32(tmp); } else if (ra) { tcg_gen_extu_i32_tl(ret, cpu_R[ra]); } else if (rb) { @@ -683,7 +656,6 @@ static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm) TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_addi_i32(tmp, cpu_R[ra], imm); tcg_gen_extu_i32_tl(ret, tmp); - tcg_temp_free_i32(tmp); } else { tcg_gen_movi_tl(ret, (uint32_t)imm); } @@ -772,8 +744,6 @@ static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop, #endif tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop); - - tcg_temp_free(addr); return true; } @@ -879,7 +849,6 @@ static bool trans_lwx(DisasContext *dc, arg_typea *arg) tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index, MO_TEUL); tcg_gen_mov_tl(cpu_res_addr, addr); - tcg_temp_free(addr); if (arg->rd) { tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val); @@ -925,8 +894,6 @@ static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop, #endif tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop); - - tcg_temp_free(addr); return true; } @@ -1040,7 +1007,6 @@ static bool trans_swx(DisasContext *dc, arg_typea *arg) * In either case, addr is no longer needed. */ tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail); - tcg_temp_free(addr); /* * Compare the value loaded during lwx with current contents of @@ -1053,7 +1019,6 @@ static bool trans_swx(DisasContext *dc, arg_typea *arg) dc->mem_index, MO_TEUL); tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail); - tcg_temp_free_i32(tval); /* Success */ tcg_gen_movi_i32(cpu_msr_c, 0); @@ -1150,13 +1115,11 @@ static bool do_bcc(DisasContext *dc, int dest_rb, int dest_imm, } /* Compute the final destination into btarget. */ - zero = tcg_const_i32(0); - next = tcg_const_i32(dc->base.pc_next + (delay + 1) * 4); + zero = tcg_constant_i32(0); + next = tcg_constant_i32(dc->base.pc_next + (delay + 1) * 4); tcg_gen_movcond_i32(dc->jmp_cond, cpu_btarget, reg_for_read(dc, ra), zero, cpu_btarget, next); - tcg_temp_free_i32(zero); - tcg_temp_free_i32(next); return true; } @@ -1261,8 +1224,6 @@ static bool trans_mbar(DisasContext *dc, arg_mbar *arg) /* Sleep. */ if (mbar_imm & 16) { - TCGv_i32 tmp_1; - if (trap_userspace(dc, true)) { /* Sleep is a privileged instruction. 
*/ return true; @@ -1270,11 +1231,9 @@ static bool trans_mbar(DisasContext *dc, arg_mbar *arg) t_sync_flags(dc); - tmp_1 = tcg_const_i32(1); - tcg_gen_st_i32(tmp_1, cpu_env, + tcg_gen_st_i32(tcg_constant_i32(1), cpu_env, -offsetof(MicroBlazeCPU, env) +offsetof(CPUState, halted)); - tcg_temp_free_i32(tmp_1); tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4); @@ -1345,7 +1304,6 @@ static void msr_read(DisasContext *dc, TCGv_i32 d) t = tcg_temp_new_i32(); tcg_gen_muli_i32(t, cpu_msr_c, MSR_C | MSR_CC); tcg_gen_or_i32(d, cpu_msr, t); - tcg_temp_free_i32(t); } static bool do_msrclrset(DisasContext *dc, arg_type_msr *arg, bool set) @@ -1438,12 +1396,10 @@ static bool trans_mts(DisasContext *dc, arg_mts *arg) case 0x1004: /* TLBHI */ case 0x1005: /* TLBSX */ { - TCGv_i32 tmp_ext = tcg_const_i32(arg->e); - TCGv_i32 tmp_reg = tcg_const_i32(arg->rs & 7); + TCGv_i32 tmp_ext = tcg_constant_i32(arg->e); + TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7); gen_helper_mmu_write(cpu_env, tmp_ext, tmp_reg, src); - tcg_temp_free_i32(tmp_reg); - tcg_temp_free_i32(tmp_ext); } break; @@ -1467,7 +1423,6 @@ static bool trans_mfs(DisasContext *dc, arg_mfs *arg) TCGv_i64 t64 = tcg_temp_new_i64(); tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear)); tcg_gen_extrh_i64_i32(dest, t64); - tcg_temp_free_i64(t64); } return true; #ifndef CONFIG_USER_ONLY @@ -1498,7 +1453,6 @@ static bool trans_mfs(DisasContext *dc, arg_mfs *arg) TCGv_i64 t64 = tcg_temp_new_i64(); tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear)); tcg_gen_extrl_i64_i32(dest, t64); - tcg_temp_free_i64(t64); } break; case SR_ESR: @@ -1528,12 +1482,10 @@ static bool trans_mfs(DisasContext *dc, arg_mfs *arg) case 0x1004: /* TLBHI */ case 0x1005: /* TLBSX */ { - TCGv_i32 tmp_ext = tcg_const_i32(arg->e); - TCGv_i32 tmp_reg = tcg_const_i32(arg->rs & 7); + TCGv_i32 tmp_ext = tcg_constant_i32(arg->e); + TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7); gen_helper_mmu_read(dest, cpu_env, tmp_ext, tmp_reg); - tcg_temp_free_i32(tmp_reg); - tcg_temp_free_i32(tmp_ext); } break; #endif @@ -1559,8 +1511,6 @@ static void do_rti(DisasContext *dc) tcg_gen_andi_i32(tmp, tmp, MSR_VM | MSR_UM); tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM)); tcg_gen_or_i32(cpu_msr, cpu_msr, tmp); - - tcg_temp_free_i32(tmp); } static void do_rtb(DisasContext *dc) @@ -1571,8 +1521,6 @@ static void do_rtb(DisasContext *dc) tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_BIP)); tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM)); tcg_gen_or_i32(cpu_msr, cpu_msr, tmp); - - tcg_temp_free_i32(tmp); } static void do_rte(DisasContext *dc) @@ -1584,8 +1532,6 @@ static void do_rte(DisasContext *dc) tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM)); tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_EIP)); tcg_gen_or_i32(cpu_msr, cpu_msr, tmp); - - tcg_temp_free_i32(tmp); } /* Insns connected to FSL or AXI stream attached devices. 
*/ @@ -1604,10 +1550,8 @@ static bool do_get(DisasContext *dc, int rd, int rb, int imm, int ctrl) tcg_gen_movi_i32(t_id, imm); } - t_ctrl = tcg_const_i32(ctrl); + t_ctrl = tcg_constant_i32(ctrl); gen_helper_get(reg_for_write(dc, rd), t_id, t_ctrl); - tcg_temp_free_i32(t_id); - tcg_temp_free_i32(t_ctrl); return true; } @@ -1636,10 +1580,8 @@ static bool do_put(DisasContext *dc, int ra, int rb, int imm, int ctrl) tcg_gen_movi_i32(t_id, imm); } - t_ctrl = tcg_const_i32(ctrl); + t_ctrl = tcg_constant_i32(ctrl); gen_helper_put(t_id, t_ctrl, reg_for_read(dc, ra)); - tcg_temp_free_i32(t_id); - tcg_temp_free_i32(t_ctrl); return true; } @@ -1704,7 +1646,6 @@ static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs) } if (dc->r0) { - tcg_temp_free_i32(dc->r0); dc->r0 = NULL; dc->r0_set = false; } diff --git a/target/mips/tcg/mips16e_translate.c.inc b/target/mips/tcg/mips16e_translate.c.inc index 918b15d55c..602f5f0c02 100644 --- a/target/mips/tcg/mips16e_translate.c.inc +++ b/target/mips/tcg/mips16e_translate.c.inc @@ -280,9 +280,6 @@ static void gen_mips16_save(DisasContext *ctx, tcg_gen_movi_tl(t2, -framesize); gen_op_addr_add(ctx, cpu_gpr[29], cpu_gpr[29], t2); - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(t2); } static void gen_mips16_restore(DisasContext *ctx, @@ -386,9 +383,6 @@ static void gen_mips16_restore(DisasContext *ctx, tcg_gen_movi_tl(t2, framesize); gen_op_addr_add(ctx, cpu_gpr[29], cpu_gpr[29], t2); - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(t2); } #if defined(TARGET_MIPS64) diff --git a/target/mips/tcg/vr54xx_translate.c b/target/mips/tcg/vr54xx_translate.c index 3e2c98f2c6..a7d241e4e7 100644 --- a/target/mips/tcg/vr54xx_translate.c +++ b/target/mips/tcg/vr54xx_translate.c @@ -53,7 +53,7 @@ static bool trans_mult_acc(DisasContext *ctx, arg_r *a, tcg_temp_free(t0); tcg_temp_free(t1); - return false; + return true; } TRANS(MACC, trans_mult_acc, gen_helper_macc); diff --git a/target/nios2/translate.c b/target/nios2/translate.c index 140bc31017..6610e22236 100644 --- a/target/nios2/translate.c +++ b/target/nios2/translate.c @@ -233,7 +233,6 @@ static void gen_jumpr(DisasContext *dc, int regno, bool is_call) tcg_gen_andi_tl(test, dest, 3); tcg_gen_brcondi_tl(TCG_COND_NE, test, 0, l); - tcg_temp_free(test); tcg_gen_mov_tl(cpu_pc, dest); if (is_call) { @@ -300,7 +299,6 @@ static void gen_ldx(DisasContext *dc, uint32_t code, uint32_t flags) tcg_gen_addi_tl(addr, load_gpr(dc, instr.a), instr.imm16.s); tcg_gen_qemu_ld_tl(data, addr, dc->mem_idx, flags); - tcg_temp_free(addr); } /* Store instructions */ @@ -312,7 +310,6 @@ static void gen_stx(DisasContext *dc, uint32_t code, uint32_t flags) TCGv addr = tcg_temp_new(); tcg_gen_addi_tl(addr, load_gpr(dc, instr.a), instr.imm16.s); tcg_gen_qemu_st_tl(val, addr, dc->mem_idx, flags); - tcg_temp_free(addr); } /* Branch instructions */ @@ -500,7 +497,6 @@ static void eret(DisasContext *dc, uint32_t code, uint32_t flags) TCGv tmp = tcg_temp_new(); tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPUNios2State, ctrl[CR_ESTATUS])); gen_helper_eret(cpu_env, tmp, load_gpr(dc, R_EA)); - tcg_temp_free(tmp); } else { gen_helper_eret(cpu_env, load_gpr(dc, R_SSTATUS), load_gpr(dc, R_EA)); } @@ -530,7 +526,6 @@ static void bret(DisasContext *dc, uint32_t code, uint32_t flags) TCGv tmp = tcg_temp_new(); tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPUNios2State, ctrl[CR_BSTATUS])); gen_helper_eret(cpu_env, tmp, load_gpr(dc, R_BA)); - tcg_temp_free(tmp); dc->base.is_jmp = DISAS_NORETURN; #endif @@ -597,8 +592,6 @@ static void rdctl(DisasContext 
*dc, uint32_t code, uint32_t flags) tcg_gen_ld_tl(t1, cpu_env, offsetof(CPUNios2State, ctrl[CR_IPENDING])); tcg_gen_ld_tl(t2, cpu_env, offsetof(CPUNios2State, ctrl[CR_IENABLE])); tcg_gen_and_tl(dest, t1, t2); - tcg_temp_free(t1); - tcg_temp_free(t2); break; default: tcg_gen_ld_tl(dest, cpu_env, @@ -662,11 +655,9 @@ static void wrctl(DisasContext *dc, uint32_t code, uint32_t flags) tcg_gen_ld_tl(o, cpu_env, ofs); tcg_gen_andi_tl(o, o, ro); tcg_gen_or_tl(n, n, o); - tcg_temp_free(o); } tcg_gen_st_tl(n, cpu_env, ofs); - tcg_temp_free(n); } break; } @@ -753,7 +744,6 @@ static void do_rr_mul_high(DisasContext *dc, uint32_t insn, GenFn4 *fn) fn(discard, dest_gpr(dc, instr.c), load_gpr(dc, instr.a), load_gpr(dc, instr.b)); - tcg_temp_free(discard); } #define gen_rr_mul_high(fname, insn) \ @@ -771,7 +761,6 @@ static void do_rr_shift(DisasContext *dc, uint32_t insn, GenFn3 *fn) tcg_gen_andi_tl(sh, load_gpr(dc, instr.b), 31); fn(dest_gpr(dc, instr.c), load_gpr(dc, instr.a), sh); - tcg_temp_free(sh); } #define gen_rr_shift(fname, insn) \ @@ -990,10 +979,6 @@ static void nios2_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) instr = &i_type_instructions[op]; instr->handler(dc, code, instr->flags); - - if (dc->sink) { - tcg_temp_free(dc->sink); - } } static void nios2_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c index b8cd8e0964..76e53c78d4 100644 --- a/target/openrisc/translate.c +++ b/target/openrisc/translate.c @@ -206,10 +206,8 @@ static void gen_add(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) tcg_gen_xor_tl(cpu_sr_ov, srca, srcb); tcg_gen_xor_tl(t0, res, srcb); tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov); - tcg_temp_free(t0); tcg_gen_mov_tl(dest, res); - tcg_temp_free(res); gen_ove_cyov(dc); } @@ -224,10 +222,8 @@ static void gen_addc(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) tcg_gen_xor_tl(cpu_sr_ov, srca, srcb); tcg_gen_xor_tl(t0, res, srcb); tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov); - tcg_temp_free(t0); tcg_gen_mov_tl(dest, res); - tcg_temp_free(res); gen_ove_cyov(dc); } @@ -243,7 +239,6 @@ static void gen_sub(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_cy, srca, srcb); tcg_gen_mov_tl(dest, res); - tcg_temp_free(res); gen_ove_cyov(dc); } @@ -255,7 +250,6 @@ static void gen_mul(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) tcg_gen_muls2_tl(dest, cpu_sr_ov, srca, srcb); tcg_gen_sari_tl(t0, dest, TARGET_LONG_BITS - 1); tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_ov, cpu_sr_ov, t0); - tcg_temp_free(t0); tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov); gen_ove_ov(dc); @@ -278,7 +272,6 @@ static void gen_div(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) Supress the host-side exception by dividing by 1. */ tcg_gen_or_tl(t0, srcb, cpu_sr_ov); tcg_gen_div_tl(dest, srca, t0); - tcg_temp_free(t0); tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov); gen_ove_ov(dc); @@ -293,7 +286,6 @@ static void gen_divu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) Supress the host-side exception by dividing by 1. 
*/ tcg_gen_or_tl(t0, srcb, cpu_sr_cy); tcg_gen_divu_tl(dest, srca, t0); - tcg_temp_free(t0); gen_ove_cy(dc); } @@ -314,14 +306,11 @@ static void gen_muld(DisasContext *dc, TCGv srca, TCGv srcb) tcg_gen_muls2_i64(cpu_mac, high, t1, t2); tcg_gen_sari_i64(t1, cpu_mac, 63); tcg_gen_setcond_i64(TCG_COND_NE, t1, t1, high); - tcg_temp_free_i64(high); tcg_gen_trunc_i64_tl(cpu_sr_ov, t1); tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov); gen_ove_ov(dc); } - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); } static void gen_muldu(DisasContext *dc, TCGv srca, TCGv srcb) @@ -340,12 +329,9 @@ static void gen_muldu(DisasContext *dc, TCGv srca, TCGv srcb) tcg_gen_mulu2_i64(cpu_mac, high, t1, t2); tcg_gen_setcondi_i64(TCG_COND_NE, high, high, 0); tcg_gen_trunc_i64_tl(cpu_sr_cy, high); - tcg_temp_free_i64(high); gen_ove_cy(dc); } - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); } static void gen_mac(DisasContext *dc, TCGv srca, TCGv srcb) @@ -362,14 +348,12 @@ static void gen_mac(DisasContext *dc, TCGv srca, TCGv srcb) tcg_gen_add_i64(cpu_mac, cpu_mac, t1); tcg_gen_xor_i64(t1, t1, cpu_mac); tcg_gen_andc_i64(t1, t1, t2); - tcg_temp_free_i64(t2); #if TARGET_LONG_BITS == 32 tcg_gen_extrh_i64_i32(cpu_sr_ov, t1); #else tcg_gen_mov_i64(cpu_sr_ov, t1); #endif - tcg_temp_free_i64(t1); gen_ove_ov(dc); } @@ -382,13 +366,11 @@ static void gen_macu(DisasContext *dc, TCGv srca, TCGv srcb) tcg_gen_extu_tl_i64(t1, srca); tcg_gen_extu_tl_i64(t2, srcb); tcg_gen_mul_i64(t1, t1, t2); - tcg_temp_free_i64(t2); /* Note that overflow is only computed during addition stage. */ tcg_gen_add_i64(cpu_mac, cpu_mac, t1); tcg_gen_setcond_i64(TCG_COND_LTU, t1, cpu_mac, t1); tcg_gen_trunc_i64_tl(cpu_sr_cy, t1); - tcg_temp_free_i64(t1); gen_ove_cy(dc); } @@ -407,14 +389,12 @@ static void gen_msb(DisasContext *dc, TCGv srca, TCGv srcb) tcg_gen_sub_i64(cpu_mac, cpu_mac, t1); tcg_gen_xor_i64(t1, t1, cpu_mac); tcg_gen_and_i64(t1, t1, t2); - tcg_temp_free_i64(t2); #if TARGET_LONG_BITS == 32 tcg_gen_extrh_i64_i32(cpu_sr_ov, t1); #else tcg_gen_mov_i64(cpu_sr_ov, t1); #endif - tcg_temp_free_i64(t1); gen_ove_ov(dc); } @@ -432,8 +412,6 @@ static void gen_msbu(DisasContext *dc, TCGv srca, TCGv srcb) tcg_gen_setcond_i64(TCG_COND_LTU, t2, cpu_mac, t1); tcg_gen_sub_i64(cpu_mac, cpu_mac, t1); tcg_gen_trunc_i64_tl(cpu_sr_cy, t2); - tcg_temp_free_i64(t2); - tcg_temp_free_i64(t1); gen_ove_cy(dc); } @@ -672,7 +650,6 @@ static bool trans_l_lwa(DisasContext *dc, arg_load *a) tcg_gen_qemu_ld_tl(cpu_R(dc, a->d), ea, dc->mem_idx, MO_TEUL); tcg_gen_mov_tl(cpu_lock_addr, ea); tcg_gen_mov_tl(cpu_lock_value, cpu_R(dc, a->d)); - tcg_temp_free(ea); return true; } @@ -684,7 +661,6 @@ static void do_load(DisasContext *dc, arg_load *a, MemOp mop) ea = tcg_temp_new(); tcg_gen_addi_tl(ea, cpu_R(dc, a->a), a->i); tcg_gen_qemu_ld_tl(cpu_R(dc, a->d), ea, dc->mem_idx, mop); - tcg_temp_free(ea); } static bool trans_l_lwz(DisasContext *dc, arg_load *a) @@ -734,13 +710,11 @@ static bool trans_l_swa(DisasContext *dc, arg_store *a) lab_fail = gen_new_label(); lab_done = gen_new_label(); tcg_gen_brcond_tl(TCG_COND_NE, ea, cpu_lock_addr, lab_fail); - tcg_temp_free(ea); val = tcg_temp_new(); tcg_gen_atomic_cmpxchg_tl(val, cpu_lock_addr, cpu_lock_value, cpu_R(dc, a->b), dc->mem_idx, MO_TEUL); tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, val, cpu_lock_value); - tcg_temp_free(val); tcg_gen_br(lab_done); @@ -757,7 +731,6 @@ static void do_store(DisasContext *dc, arg_store *a, MemOp mop) TCGv t0 = tcg_temp_new(); tcg_gen_addi_tl(t0, cpu_R(dc, a->a), a->i); tcg_gen_qemu_st_tl(cpu_R(dc, a->b), t0, 
dc->mem_idx, mop); - tcg_temp_free(t0); } static bool trans_l_sw(DisasContext *dc, arg_store *a) @@ -866,7 +839,6 @@ static bool trans_l_mfspr(DisasContext *dc, arg_l_mfspr *a) tcg_gen_ori_tl(spr, cpu_R(dc, a->a), a->k); gen_helper_mfspr(cpu_R(dc, a->d), cpu_env, cpu_R(dc, a->d), spr); - tcg_temp_free(spr); } return true; } @@ -897,7 +869,6 @@ static bool trans_l_mtspr(DisasContext *dc, arg_l_mtspr *a) spr = tcg_temp_new(); tcg_gen_ori_tl(spr, cpu_R(dc, a->a), a->k); gen_helper_mtspr(cpu_env, spr, cpu_R(dc, a->b)); - tcg_temp_free(spr); } return true; } @@ -1349,8 +1320,6 @@ static bool do_dp3(DisasContext *dc, arg_dab_pair *a, load_pair(dc, t1, a->b, a->bp); fn(t0, cpu_env, t0, t1); save_pair(dc, t0, a->d, a->dp); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); gen_helper_update_fpcsr(cpu_env); return true; @@ -1372,7 +1341,6 @@ static bool do_dp2(DisasContext *dc, arg_da_pair *a, load_pair(dc, t0, a->a, a->ap); fn(t0, cpu_env, t0); save_pair(dc, t0, a->d, a->dp); - tcg_temp_free_i64(t0); gen_helper_update_fpcsr(cpu_env); return true; @@ -1399,8 +1367,6 @@ static bool do_dpcmp(DisasContext *dc, arg_ab_pair *a, } else { fn(cpu_sr_f, cpu_env, t0, t1); } - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); if (inv) { tcg_gen_xori_tl(cpu_sr_f, cpu_sr_f, 1); @@ -1457,7 +1423,6 @@ static bool trans_lf_stod_d(DisasContext *dc, arg_lf_stod_d *a) t0 = tcg_temp_new_i64(); gen_helper_stod(t0, cpu_env, cpu_R(dc, a->a)); save_pair(dc, t0, a->d, a->dp); - tcg_temp_free_i64(t0); gen_helper_update_fpcsr(cpu_env); return true; @@ -1476,7 +1441,6 @@ static bool trans_lf_dtos_d(DisasContext *dc, arg_lf_dtos_d *a) t0 = tcg_temp_new_i64(); load_pair(dc, t0, a->a, a->ap); gen_helper_dtos(cpu_R(dc, a->d), cpu_env, t0); - tcg_temp_free_i64(t0); gen_helper_update_fpcsr(cpu_env); return true; @@ -1502,9 +1466,6 @@ static bool trans_lf_madd_d(DisasContext *dc, arg_dab_pair *a) load_pair(dc, t2, a->b, a->bp); gen_helper_float_madd_d(t0, cpu_env, t0, t1, t2); save_pair(dc, t0, a->d, a->dp); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); gen_helper_update_fpcsr(cpu_env); return true; diff --git a/target/ppc/power8-pmu-regs.c.inc b/target/ppc/power8-pmu-regs.c.inc index c3cc919ee4..42f2cd04a1 100644 --- a/target/ppc/power8-pmu-regs.c.inc +++ b/target/ppc/power8-pmu-regs.c.inc @@ -58,8 +58,6 @@ static bool spr_groupA_write_allowed(DisasContext *ctx) /* * Helper function to avoid code repetition between MMCR0 and * MMCR2 problem state write functions. - * - * 'ret' must be tcg_temp_freed() by the caller. 
*/ static TCGv masked_gprn_for_spr_write(int gprn, int sprn, uint64_t spr_mask) @@ -77,8 +75,6 @@ static TCGv masked_gprn_for_spr_write(int gprn, int sprn, /* Add the masked gprn bits into 'ret' */ tcg_gen_or_tl(ret, ret, t0); - tcg_temp_free(t0); - return ret; } @@ -100,8 +96,6 @@ void spr_read_MMCR0_ureg(DisasContext *ctx, int gprn, int sprn) gen_load_spr(t0, SPR_POWER_MMCR0); tcg_gen_andi_tl(t0, t0, MMCR0_UREG_MASK); tcg_gen_mov_tl(cpu_gpr[gprn], t0); - - tcg_temp_free(t0); } static void write_MMCR0_common(DisasContext *ctx, TCGv val) @@ -137,8 +131,6 @@ void spr_write_MMCR0_ureg(DisasContext *ctx, int sprn, int gprn) masked_gprn = masked_gprn_for_spr_write(gprn, SPR_POWER_MMCR0, MMCR0_UREG_MASK); write_MMCR0_common(ctx, masked_gprn); - - tcg_temp_free(masked_gprn); } void spr_read_MMCR2_ureg(DisasContext *ctx, int gprn, int sprn) @@ -164,8 +156,6 @@ void spr_read_MMCR2_ureg(DisasContext *ctx, int gprn, int sprn) gen_load_spr(t0, SPR_POWER_MMCR2); tcg_gen_andi_tl(t0, t0, MMCR2_UREG_MASK); tcg_gen_mov_tl(cpu_gpr[gprn], t0); - - tcg_temp_free(t0); } void spr_write_MMCR2_ureg(DisasContext *ctx, int sprn, int gprn) @@ -183,8 +173,6 @@ void spr_write_MMCR2_ureg(DisasContext *ctx, int sprn, int gprn) masked_gprn = masked_gprn_for_spr_write(gprn, SPR_POWER_MMCR2, MMCR2_UREG_MASK); gen_store_spr(SPR_POWER_MMCR2, masked_gprn); - - tcg_temp_free(masked_gprn); } void spr_read_PMC(DisasContext *ctx, int gprn, int sprn) @@ -193,8 +181,6 @@ void spr_read_PMC(DisasContext *ctx, int gprn, int sprn) gen_icount_io_start(ctx); gen_helper_read_pmc(cpu_gpr[gprn], cpu_env, t_sprn); - - tcg_temp_free_i32(t_sprn); } void spr_read_PMC14_ureg(DisasContext *ctx, int gprn, int sprn) @@ -228,8 +214,6 @@ void spr_write_PMC(DisasContext *ctx, int sprn, int gprn) gen_icount_io_start(ctx); gen_helper_store_pmc(cpu_env, t_sprn, cpu_gpr[gprn]); - - tcg_temp_free_i32(t_sprn); } void spr_write_PMC14_ureg(DisasContext *ctx, int sprn, int gprn) diff --git a/target/ppc/translate.c b/target/ppc/translate.c index 2956021e89..df324fc7ff 100644 --- a/target/ppc/translate.c +++ b/target/ppc/translate.c @@ -266,8 +266,6 @@ static void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error) t0 = tcg_const_i32(excp); t1 = tcg_const_i32(error); gen_helper_raise_exception_err(cpu_env, t0, t1); - tcg_temp_free_i32(t0); - tcg_temp_free_i32(t1); ctx->base.is_jmp = DISAS_NORETURN; } @@ -282,7 +280,6 @@ static void gen_exception(DisasContext *ctx, uint32_t excp) gen_update_nip(ctx, ctx->cia); t0 = tcg_const_i32(excp); gen_helper_raise_exception(cpu_env, t0); - tcg_temp_free_i32(t0); ctx->base.is_jmp = DISAS_NORETURN; } @@ -294,7 +291,6 @@ static void gen_exception_nip(DisasContext *ctx, uint32_t excp, gen_update_nip(ctx, nip); t0 = tcg_const_i32(excp); gen_helper_raise_exception(cpu_env, t0); - tcg_temp_free_i32(t0); ctx->base.is_jmp = DISAS_NORETURN; } @@ -341,7 +337,6 @@ static uint32_t gen_prep_dbgex(DisasContext *ctx) gen_load_spr(t0, SPR_BOOKE_DBSR); tcg_gen_ori_tl(t0, t0, dbsr); gen_store_spr(SPR_BOOKE_DBSR, t0); - tcg_temp_free(t0); return POWERPC_EXCP_DEBUG; } else { return POWERPC_EXCP_TRACE; @@ -393,7 +388,6 @@ static void spr_load_dump_spr(int sprn) #ifdef PPC_DUMP_SPR_ACCESSES TCGv_i32 t0 = tcg_const_i32(sprn); gen_helper_load_dump_spr(cpu_env, t0); - tcg_temp_free_i32(t0); #endif } @@ -408,7 +402,6 @@ static void spr_store_dump_spr(int sprn) #ifdef PPC_DUMP_SPR_ACCESSES TCGv_i32 t0 = tcg_const_i32(sprn); gen_helper_store_dump_spr(cpu_env, t0); - tcg_temp_free_i32(t0); #endif } @@ -437,7 +430,6 @@ void 
spr_write_generic32(DisasContext *ctx, int sprn, int gprn) TCGv t0 = tcg_temp_new(); tcg_gen_ext32u_tl(t0, cpu_gpr[gprn]); gen_store_spr(sprn, t0); - tcg_temp_free(t0); spr_store_dump_spr(sprn); #else spr_write_generic(ctx, sprn, gprn); @@ -452,8 +444,6 @@ void spr_write_clear(DisasContext *ctx, int sprn, int gprn) tcg_gen_neg_tl(t1, cpu_gpr[gprn]); tcg_gen_and_tl(t0, t0, t1); gen_store_spr(sprn, t0); - tcg_temp_free(t0); - tcg_temp_free(t1); } void spr_access_nop(DisasContext *ctx, int sprn, int gprn) @@ -483,9 +473,6 @@ void spr_read_xer(DisasContext *ctx, int gprn, int sprn) tcg_gen_shli_tl(t0, cpu_ca32, XER_CA32); tcg_gen_or_tl(dst, dst, t0); } - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(t2); } void spr_write_xer(DisasContext *ctx, int sprn, int gprn) @@ -687,28 +674,24 @@ void spr_write_ibatu(DisasContext *ctx, int sprn, int gprn) { TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0U) / 2); gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]); - tcg_temp_free_i32(t0); } void spr_write_ibatu_h(DisasContext *ctx, int sprn, int gprn) { TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_IBAT4U) / 2) + 4); gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]); - tcg_temp_free_i32(t0); } void spr_write_ibatl(DisasContext *ctx, int sprn, int gprn) { TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0L) / 2); gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]); - tcg_temp_free_i32(t0); } void spr_write_ibatl_h(DisasContext *ctx, int sprn, int gprn) { TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_IBAT4L) / 2) + 4); gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]); - tcg_temp_free_i32(t0); } /* DBAT0U...DBAT7U */ @@ -731,28 +714,24 @@ void spr_write_dbatu(DisasContext *ctx, int sprn, int gprn) { TCGv_i32 t0 = tcg_const_i32((sprn - SPR_DBAT0U) / 2); gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]); - tcg_temp_free_i32(t0); } void spr_write_dbatu_h(DisasContext *ctx, int sprn, int gprn) { TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_DBAT4U) / 2) + 4); gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]); - tcg_temp_free_i32(t0); } void spr_write_dbatl(DisasContext *ctx, int sprn, int gprn) { TCGv_i32 t0 = tcg_const_i32((sprn - SPR_DBAT0L) / 2); gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]); - tcg_temp_free_i32(t0); } void spr_write_dbatl_h(DisasContext *ctx, int sprn, int gprn) { TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_DBAT4L) / 2) + 4); gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]); - tcg_temp_free_i32(t0); } /* SDR1 */ @@ -784,7 +763,6 @@ void spr_write_hior(DisasContext *ctx, int sprn, int gprn) TCGv t0 = tcg_temp_new(); tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0x3FFFFF00000ULL); tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_prefix)); - tcg_temp_free(t0); } void spr_write_ptcr(DisasContext *ctx, int sprn, int gprn) { @@ -855,7 +833,6 @@ void spr_write_40x_pid(DisasContext *ctx, int sprn, int gprn) TCGv t0 = tcg_temp_new(); tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0xFF); gen_helper_store_40x_pid(cpu_env, t0); - tcg_temp_free(t0); } void spr_write_booke_tcr(DisasContext *ctx, int sprn, int gprn) @@ -878,7 +855,6 @@ void spr_write_pir(DisasContext *ctx, int sprn, int gprn) TCGv t0 = tcg_temp_new(); tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0xF); gen_store_spr(SPR_PIR, t0); - tcg_temp_free(t0); } #endif @@ -888,7 +864,6 @@ void spr_read_spefscr(DisasContext *ctx, int gprn, int sprn) TCGv_i32 t0 = tcg_temp_new_i32(); tcg_gen_ld_i32(t0, cpu_env, offsetof(CPUPPCState, spe_fscr)); tcg_gen_extu_i32_tl(cpu_gpr[gprn], t0); - tcg_temp_free_i32(t0); } void spr_write_spefscr(DisasContext *ctx, int sprn, int gprn) @@ -896,7 
+871,6 @@ void spr_write_spefscr(DisasContext *ctx, int sprn, int gprn) TCGv_i32 t0 = tcg_temp_new_i32(); tcg_gen_trunc_tl_i32(t0, cpu_gpr[gprn]); tcg_gen_st_i32(t0, cpu_env, offsetof(CPUPPCState, spe_fscr)); - tcg_temp_free_i32(t0); } #if !defined(CONFIG_USER_ONLY) @@ -908,7 +882,6 @@ void spr_write_excp_prefix(DisasContext *ctx, int sprn, int gprn) tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]); tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_prefix)); gen_store_spr(sprn, t0); - tcg_temp_free(t0); } void spr_write_excp_vector(DisasContext *ctx, int sprn, int gprn) @@ -933,7 +906,6 @@ void spr_write_excp_vector(DisasContext *ctx, int sprn, int gprn) tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]); tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_vectors[sprn_offs])); gen_store_spr(sprn, t0); - tcg_temp_free(t0); } #endif @@ -968,10 +940,6 @@ void spr_write_amr(DisasContext *ctx, int sprn, int gprn) tcg_gen_or_tl(t0, t0, t2); gen_store_spr(SPR_AMR, t0); spr_store_dump_spr(SPR_AMR); - - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(t2); } void spr_write_uamor(DisasContext *ctx, int sprn, int gprn) @@ -999,10 +967,6 @@ void spr_write_uamor(DisasContext *ctx, int sprn, int gprn) tcg_gen_or_tl(t0, t0, t2); gen_store_spr(SPR_UAMOR, t0); spr_store_dump_spr(SPR_UAMOR); - - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(t2); } void spr_write_iamr(DisasContext *ctx, int sprn, int gprn) @@ -1030,10 +994,6 @@ void spr_write_iamr(DisasContext *ctx, int sprn, int gprn) tcg_gen_or_tl(t0, t0, t2); gen_store_spr(SPR_IAMR, t0); spr_store_dump_spr(SPR_IAMR); - - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(t2); } #endif #endif @@ -1054,7 +1014,6 @@ void spr_write_e500_l1csr0(DisasContext *ctx, int sprn, int gprn) tcg_gen_andi_tl(t0, cpu_gpr[gprn], L1CSR0_DCE | L1CSR0_CPE); gen_store_spr(sprn, t0); - tcg_temp_free(t0); } void spr_write_e500_l1csr1(DisasContext *ctx, int sprn, int gprn) @@ -1063,7 +1022,6 @@ void spr_write_e500_l1csr1(DisasContext *ctx, int sprn, int gprn) tcg_gen_andi_tl(t0, cpu_gpr[gprn], L1CSR1_ICE | L1CSR1_CPE); gen_store_spr(sprn, t0); - tcg_temp_free(t0); } void spr_write_e500_l2csr0(DisasContext *ctx, int sprn, int gprn) @@ -1073,7 +1031,6 @@ void spr_write_e500_l2csr0(DisasContext *ctx, int sprn, int gprn) tcg_gen_andi_tl(t0, cpu_gpr[gprn], ~(E500_L2CSR0_L2FI | E500_L2CSR0_L2FL | E500_L2CSR0_L2LFC)); gen_store_spr(sprn, t0); - tcg_temp_free(t0); } void spr_write_booke206_mmucsr0(DisasContext *ctx, int sprn, int gprn) @@ -1085,7 +1042,6 @@ void spr_write_booke_pid(DisasContext *ctx, int sprn, int gprn) { TCGv_i32 t0 = tcg_const_i32(sprn); gen_helper_booke_setpid(cpu_env, t0, cpu_gpr[gprn]); - tcg_temp_free_i32(t0); } void spr_write_eplc(DisasContext *ctx, int sprn, int gprn) { @@ -1106,7 +1062,6 @@ void spr_write_mas73(DisasContext *ctx, int sprn, int gprn) gen_store_spr(SPR_BOOKE_MAS3, val); tcg_gen_shri_tl(val, cpu_gpr[gprn], 32); gen_store_spr(SPR_BOOKE_MAS7, val); - tcg_temp_free(val); } void spr_read_mas73(DisasContext *ctx, int gprn, int sprn) @@ -1117,8 +1072,6 @@ void spr_read_mas73(DisasContext *ctx, int gprn, int sprn) tcg_gen_shli_tl(mas7, mas7, 32); gen_load_spr(mas3, SPR_BOOKE_MAS3); tcg_gen_or_tl(cpu_gpr[gprn], mas3, mas7); - tcg_temp_free(mas3); - tcg_temp_free(mas7); } #endif @@ -1132,10 +1085,6 @@ static void gen_fscr_facility_check(DisasContext *ctx, int facility_sprn, TCGv_i32 t3 = tcg_const_i32(cause); gen_helper_fscr_facility_check(cpu_env, t1, t2, t3); - - tcg_temp_free_i32(t3); - tcg_temp_free_i32(t2); - tcg_temp_free_i32(t1); } static void 
gen_msr_facility_check(DisasContext *ctx, int facility_sprn, @@ -1146,10 +1095,6 @@ static void gen_msr_facility_check(DisasContext *ctx, int facility_sprn, TCGv_i32 t3 = tcg_const_i32(cause); gen_helper_msr_facility_check(cpu_env, t1, t2, t3); - - tcg_temp_free_i32(t3); - tcg_temp_free_i32(t2); - tcg_temp_free_i32(t1); } void spr_read_prev_upper32(DisasContext *ctx, int gprn, int sprn) @@ -1160,9 +1105,6 @@ void spr_read_prev_upper32(DisasContext *ctx, int gprn, int sprn) gen_load_spr(spr, sprn - 1); tcg_gen_shri_tl(spr_up, spr, 32); tcg_gen_ext32u_tl(cpu_gpr[gprn], spr_up); - - tcg_temp_free(spr); - tcg_temp_free(spr_up); } void spr_write_prev_upper32(DisasContext *ctx, int sprn, int gprn) @@ -1172,8 +1114,6 @@ void spr_write_prev_upper32(DisasContext *ctx, int sprn, int gprn) gen_load_spr(spr, sprn - 1); tcg_gen_deposit_tl(spr, spr, cpu_gpr[gprn], 32, 32); gen_store_spr(sprn - 1, spr); - - tcg_temp_free(spr); } #if !defined(CONFIG_USER_ONLY) @@ -1185,7 +1125,6 @@ void spr_write_hmer(DisasContext *ctx, int sprn, int gprn) tcg_gen_and_tl(hmer, cpu_gpr[gprn], hmer); gen_store_spr(sprn, hmer); spr_store_dump_spr(sprn); - tcg_temp_free(hmer); } void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn) @@ -1269,8 +1208,6 @@ void spr_read_dexcr_ureg(DisasContext *ctx, int gprn, int sprn) gen_load_spr(t0, sprn + 16); tcg_gen_ext32u_tl(cpu_gpr[gprn], t0); - - tcg_temp_free(t0); } #endif @@ -1447,17 +1384,12 @@ static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf) tcg_gen_trunc_tl_i32(t, t0); tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so); tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t); - - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free_i32(t); } static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf) { TCGv t0 = tcg_const_tl(arg1); gen_op_cmp(arg0, t0, s, crf); - tcg_temp_free(t0); } static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf) @@ -1473,15 +1405,12 @@ static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf) tcg_gen_ext32u_tl(t1, arg1); } gen_op_cmp(t0, t1, s, crf); - tcg_temp_free(t1); - tcg_temp_free(t0); } static inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf) { TCGv t0 = tcg_const_tl(arg1); gen_op_cmp32(arg0, t0, s, crf); - tcg_temp_free(t0); } static inline void gen_set_Rc0(DisasContext *ctx, TCGv reg) @@ -1525,10 +1454,6 @@ static void gen_cmprb(DisasContext *ctx) tcg_gen_or_i32(crf, crf, src2lo); } tcg_gen_shli_i32(crf, crf, CRF_GT_BIT); - tcg_temp_free_i32(src1); - tcg_temp_free_i32(src2); - tcg_temp_free_i32(src2lo); - tcg_temp_free_i32(src2hi); } #if defined(TARGET_PPC64) @@ -1555,8 +1480,6 @@ static void gen_isel(DisasContext *ctx) tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[rD(ctx->opcode)], t0, zr, rA(ctx->opcode) ? 
cpu_gpr[rA(ctx->opcode)] : zr, cpu_gpr[rB(ctx->opcode)]); - tcg_temp_free(zr); - tcg_temp_free(t0); } /* cmpb: PowerPC 2.05 specification */ @@ -1580,7 +1503,6 @@ static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0, } else { tcg_gen_andc_tl(cpu_ov, cpu_ov, t0); } - tcg_temp_free(t0); if (NARROW_MODE(ctx)) { tcg_gen_extract_tl(cpu_ov, cpu_ov, 31, 1); if (is_isa300(ctx)) { @@ -1613,7 +1535,6 @@ static inline void gen_op_arith_compute_ca32(DisasContext *ctx, } tcg_gen_xor_tl(t0, t0, res); tcg_gen_extract_tl(ca32, t0, 32, 1); - tcg_temp_free(t0); } /* Common add function */ @@ -1642,7 +1563,6 @@ static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1, tcg_gen_add_tl(t0, t0, ca); } tcg_gen_xor_tl(ca, t0, t1); /* bits changed w/ carry */ - tcg_temp_free(t1); tcg_gen_extract_tl(ca, ca, 32, 1); if (is_isa300(ctx)) { tcg_gen_mov_tl(ca32, ca); @@ -1656,7 +1576,6 @@ static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1, tcg_gen_add2_tl(t0, ca, arg1, zero, arg2, zero); } gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, ca32, 0); - tcg_temp_free(zero); } } else { tcg_gen_add_tl(t0, arg1, arg2); @@ -1674,7 +1593,6 @@ static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1, if (t0 != ret) { tcg_gen_mov_tl(ret, t0); - tcg_temp_free(t0); } } /* Add functions with two operands */ @@ -1696,7 +1614,6 @@ static void glue(gen_, name)(DisasContext *ctx) \ cpu_gpr[rA(ctx->opcode)], t0, \ ca, glue(ca, 32), \ add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \ - tcg_temp_free(t0); \ } /* add add. addo addo. */ @@ -1722,7 +1639,6 @@ static inline void gen_op_addic(DisasContext *ctx, bool compute_rc0) TCGv c = tcg_const_tl(SIMM(ctx->opcode)); gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], c, cpu_ca, cpu_ca32, 0, 1, 0, compute_rc0); - tcg_temp_free(c); } static void gen_addic(DisasContext *ctx) @@ -1769,10 +1685,6 @@ static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1, } tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov); } - tcg_temp_free_i32(t0); - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t2); - tcg_temp_free_i32(t3); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, ret); @@ -1800,7 +1712,6 @@ static void gen_##name(DisasContext *ctx) \ TCGv_i32 t0 = tcg_const_i32(compute_ov); \ gen_helper_##hlpr(cpu_gpr[rD(ctx->opcode)], cpu_env, \ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); \ - tcg_temp_free_i32(t0); \ if (unlikely(Rc(ctx->opcode) != 0)) { \ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); \ } \ @@ -1844,10 +1755,6 @@ static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1, } tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov); } - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); - tcg_temp_free_i64(t3); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, ret); @@ -1894,19 +1801,13 @@ static inline void gen_op_arith_modw(DisasContext *ctx, TCGv ret, TCGv arg1, tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1); tcg_gen_rem_i32(t3, t0, t1); tcg_gen_ext_i32_tl(ret, t3); - tcg_temp_free_i32(t2); - tcg_temp_free_i32(t3); } else { TCGv_i32 t2 = tcg_const_i32(1); TCGv_i32 t3 = tcg_const_i32(0); tcg_gen_movcond_i32(TCG_COND_EQ, t1, t1, t3, t2, t1); tcg_gen_remu_i32(t3, t0, t1); tcg_gen_extu_i32_tl(ret, t3); - tcg_temp_free_i32(t2); - tcg_temp_free_i32(t3); } - tcg_temp_free_i32(t0); - tcg_temp_free_i32(t1); } #define GEN_INT_ARITH_MODW(name, opc3, sign) \ @@ -1940,18 +1841,12 @@ static inline void gen_op_arith_modd(DisasContext *ctx, TCGv ret, TCGv arg1, 
tcg_gen_movi_i64(t3, 0); tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1); tcg_gen_rem_i64(ret, t0, t1); - tcg_temp_free_i64(t2); - tcg_temp_free_i64(t3); } else { TCGv_i64 t2 = tcg_const_i64(1); TCGv_i64 t3 = tcg_const_i64(0); tcg_gen_movcond_i64(TCG_COND_EQ, t1, t1, t3, t2, t1); tcg_gen_remu_i64(ret, t0, t1); - tcg_temp_free_i64(t2); - tcg_temp_free_i64(t3); } - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } #define GEN_INT_ARITH_MODD(name, opc3, sign) \ @@ -1976,8 +1871,6 @@ static void gen_mulhw(DisasContext *ctx) tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); tcg_gen_muls2_i32(t0, t1, t0, t1); tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1); - tcg_temp_free_i32(t0); - tcg_temp_free_i32(t1); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } @@ -1993,8 +1886,6 @@ static void gen_mulhwu(DisasContext *ctx) tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); tcg_gen_mulu2_i32(t0, t1, t0, t1); tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1); - tcg_temp_free_i32(t0); - tcg_temp_free_i32(t1); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } @@ -2010,8 +1901,6 @@ static void gen_mullw(DisasContext *ctx) tcg_gen_ext32s_tl(t0, cpu_gpr[rA(ctx->opcode)]); tcg_gen_ext32s_tl(t1, cpu_gpr[rB(ctx->opcode)]); tcg_gen_mul_i64(cpu_gpr[rD(ctx->opcode)], t0, t1); - tcg_temp_free(t0); - tcg_temp_free(t1); #else tcg_gen_mul_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); @@ -2044,8 +1933,6 @@ static void gen_mullwo(DisasContext *ctx) } tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov); - tcg_temp_free_i32(t0); - tcg_temp_free_i32(t1); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } @@ -2065,7 +1952,6 @@ static void gen_mulhd(DisasContext *ctx) TCGv lo = tcg_temp_new(); tcg_gen_muls2_tl(lo, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); - tcg_temp_free(lo); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } @@ -2077,7 +1963,6 @@ static void gen_mulhdu(DisasContext *ctx) TCGv lo = tcg_temp_new(); tcg_gen_mulu2_tl(lo, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); - tcg_temp_free(lo); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } @@ -2110,9 +1995,6 @@ static void gen_mulldo(DisasContext *ctx) } tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } @@ -2148,9 +2030,7 @@ static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1, } tcg_gen_xor_tl(t1, arg2, inv1); /* add without carry */ tcg_gen_add_tl(t0, t0, inv1); - tcg_temp_free(inv1); tcg_gen_xor_tl(cpu_ca, t0, t1); /* bits changes w/ carry */ - tcg_temp_free(t1); tcg_gen_extract_tl(cpu_ca, cpu_ca, 32, 1); if (is_isa300(ctx)) { tcg_gen_mov_tl(cpu_ca32, cpu_ca); @@ -2162,8 +2042,6 @@ static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1, tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero); tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero); gen_op_arith_compute_ca32(ctx, t0, inv1, arg2, cpu_ca32, 0); - tcg_temp_free(zero); - tcg_temp_free(inv1); } else { tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1); tcg_gen_sub_tl(t0, arg2, arg1); @@ -2190,7 +2068,6 @@ static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1, if (t0 != ret) { tcg_gen_mov_tl(ret, t0); - tcg_temp_free(t0); } } /* Sub functions with Two 
operands functions */ @@ -2210,7 +2087,6 @@ static void glue(gen_, name)(DisasContext *ctx) \ gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \ cpu_gpr[rA(ctx->opcode)], t0, \ add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \ - tcg_temp_free(t0); \ } /* subf subf. subfo subfo. */ GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0) @@ -2234,7 +2110,6 @@ static void gen_subfic(DisasContext *ctx) TCGv c = tcg_const_tl(SIMM(ctx->opcode)); gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], c, 0, 1, 0, 0); - tcg_temp_free(c); } /* neg neg. nego nego. */ @@ -2243,7 +2118,6 @@ static inline void gen_op_arith_neg(DisasContext *ctx, bool compute_ov) TCGv zero = tcg_const_tl(0); gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], zero, 0, 0, compute_ov, Rc(ctx->opcode)); - tcg_temp_free(zero); } static void gen_neg(DisasContext *ctx) @@ -2306,7 +2180,6 @@ static void gen_cntlzw(DisasContext *ctx) tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]); tcg_gen_clzi_i32(t, t, 32); tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t); - tcg_temp_free_i32(t); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); @@ -2321,7 +2194,6 @@ static void gen_cnttzw(DisasContext *ctx) tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]); tcg_gen_ctzi_i32(t, t, 32); tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t); - tcg_temp_free_i32(t); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); @@ -2345,7 +2217,6 @@ static void gen_pause(DisasContext *ctx) TCGv_i32 t0 = tcg_const_i32(0); tcg_gen_st_i32(t0, cpu_env, -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted)); - tcg_temp_free_i32(t0); /* Stop translation, this gives other CPUs a chance to run */ gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); @@ -2424,7 +2295,6 @@ static void gen_or(DisasContext *ctx) tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL); tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50); gen_store_spr(SPR_PPR, t0); - tcg_temp_free(t0); } #if !defined(CONFIG_USER_ONLY) /* @@ -2539,7 +2409,6 @@ static void gen_prtyw(DisasContext *ctx) tcg_gen_shri_tl(t0, ra, 8); tcg_gen_xor_tl(ra, ra, t0); tcg_gen_andi_tl(ra, ra, (target_ulong)0x100000001ULL); - tcg_temp_free(t0); } #if defined(TARGET_PPC64) @@ -2556,7 +2425,6 @@ static void gen_prtyd(DisasContext *ctx) tcg_gen_shri_tl(t0, ra, 8); tcg_gen_xor_tl(ra, ra, t0); tcg_gen_andi_tl(ra, ra, 1); - tcg_temp_free(t0); } #endif @@ -2645,7 +2513,6 @@ static void gen_rlwimi(DisasContext *ctx) tcg_gen_trunc_tl_i32(t0, t_rs); tcg_gen_rotli_i32(t0, t0, sh); tcg_gen_extu_i32_tl(t1, t0); - tcg_temp_free_i32(t0); } else { #if defined(TARGET_PPC64) tcg_gen_deposit_i64(t1, t_rs, t_rs, 32, 32); @@ -2658,7 +2525,6 @@ static void gen_rlwimi(DisasContext *ctx) tcg_gen_andi_tl(t1, t1, mask); tcg_gen_andi_tl(t_ra, t_ra, ~mask); tcg_gen_or_tl(t_ra, t_ra, t1); - tcg_temp_free(t1); } if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, t_ra); @@ -2702,7 +2568,6 @@ static void gen_rlwinm(DisasContext *ctx) tcg_gen_rotli_i32(t0, t0, sh); tcg_gen_andi_i32(t0, t0, mask); tcg_gen_extu_i32_tl(t_ra, t0); - tcg_temp_free_i32(t0); } } else { #if defined(TARGET_PPC64) @@ -2749,15 +2614,12 @@ static void gen_rlwnm(DisasContext *ctx) tcg_gen_andi_i32(t0, t0, 0x1f); tcg_gen_rotl_i32(t1, t1, t0); tcg_gen_extu_i32_tl(t_ra, t1); - tcg_temp_free_i32(t0); - tcg_temp_free_i32(t1); } else { #if defined(TARGET_PPC64) TCGv_i64 t0 = tcg_temp_new_i64(); tcg_gen_andi_i64(t0, t_rb, 0x1f); tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32); tcg_gen_rotl_i64(t_ra, t_ra, t0); 
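The change repeated across these translator hunks is uniform: TCG temporaries allocated with tcg_temp_new()/tcg_temp_new_i32()/tcg_temp_new_i64() no longer need a matching tcg_temp_free*() call, so the explicit frees are simply deleted while the surrounding code generation is left untouched. As a minimal before/after sketch (gen_example_* is a hypothetical helper, not part of this patch, and it assumes temporaries are now reclaimed automatically once the current instruction has been translated):

/* Before this series: every translator temp had to be released by hand. */
static void gen_example_old(TCGv dest, TCGv src)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, src, 0xff);
    tcg_gen_add_tl(dest, dest, t0);
    tcg_temp_free(t0);              /* explicit cleanup */
}

/* After: the temp simply goes out of scope; no free call is emitted. */
static void gen_example_new(TCGv dest, TCGv src)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, src, 0xff);
    tcg_gen_add_tl(dest, dest, t0);
}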
- tcg_temp_free_i64(t0); #else g_assert_not_reached(); #endif @@ -2865,7 +2727,6 @@ static void gen_rldnm(DisasContext *ctx, int mb, int me) t0 = tcg_temp_new(); tcg_gen_andi_tl(t0, t_rb, 0x3f); tcg_gen_rotl_tl(t_ra, t_rs, t0); - tcg_temp_free(t0); tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me)); if (unlikely(Rc(ctx->opcode) != 0)) { @@ -2912,7 +2773,6 @@ static void gen_rldimi(DisasContext *ctx, int mbn, int shn) tcg_gen_andi_tl(t1, t1, mask); tcg_gen_andi_tl(t_ra, t_ra, ~mask); tcg_gen_or_tl(t_ra, t_ra, t1); - tcg_temp_free(t1); } if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, t_ra); @@ -2941,8 +2801,6 @@ static void gen_slw(DisasContext *ctx) t1 = tcg_temp_new(); tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f); tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1); - tcg_temp_free(t1); - tcg_temp_free(t0); tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); @@ -2978,7 +2836,6 @@ static void gen_srawi(DisasContext *ctx) t0 = tcg_temp_new(); tcg_gen_sari_tl(t0, dst, TARGET_LONG_BITS - 1); tcg_gen_and_tl(cpu_ca, cpu_ca, t0); - tcg_temp_free(t0); tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0); if (is_isa300(ctx)) { tcg_gen_mov_tl(cpu_ca32, cpu_ca); @@ -3009,8 +2866,6 @@ static void gen_srw(DisasContext *ctx) t1 = tcg_temp_new(); tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f); tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1); - tcg_temp_free(t1); - tcg_temp_free(t0); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } @@ -3030,8 +2885,6 @@ static void gen_sld(DisasContext *ctx) t1 = tcg_temp_new(); tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f); tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1); - tcg_temp_free(t1); - tcg_temp_free(t0); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } @@ -3064,7 +2917,6 @@ static inline void gen_sradi(DisasContext *ctx, int n) t0 = tcg_temp_new(); tcg_gen_sari_tl(t0, src, TARGET_LONG_BITS - 1); tcg_gen_and_tl(cpu_ca, cpu_ca, t0); - tcg_temp_free(t0); tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0); if (is_isa300(ctx)) { tcg_gen_mov_tl(cpu_ca32, cpu_ca); @@ -3123,8 +2975,6 @@ static void gen_srd(DisasContext *ctx) t1 = tcg_temp_new(); tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f); tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1); - tcg_temp_free(t1); - tcg_temp_free(t0); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } @@ -3296,7 +3146,6 @@ static void glue(gen_, name##x)(DisasContext *ctx) \ EA = tcg_temp_new(); \ gen_addr_reg_index(ctx, EA); \ gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \ - tcg_temp_free(EA); \ } #define GEN_LDX(name, ldop, opc2, opc3, type) \ @@ -3314,7 +3163,6 @@ static void glue(gen_, name##epx)(DisasContext *ctx) \ EA = tcg_temp_new(); \ gen_addr_reg_index(ctx, EA); \ tcg_gen_qemu_ld_tl(cpu_gpr[rD(ctx->opcode)], EA, PPC_TLB_EPID_LOAD, ldop);\ - tcg_temp_free(EA); \ } GEN_LDEPX(lb, DEF_MEMOP(MO_UB), 0x1F, 0x02) @@ -3342,7 +3190,6 @@ static void glue(gen_, name##x)(DisasContext *ctx) \ EA = tcg_temp_new(); \ gen_addr_reg_index(ctx, EA); \ gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \ - tcg_temp_free(EA); \ } #define GEN_STX(name, stop, opc2, opc3, type) \ GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_NONE) @@ -3360,7 +3207,6 @@ static void glue(gen_, name##epx)(DisasContext *ctx) \ gen_addr_reg_index(ctx, EA); \ tcg_gen_qemu_st_tl( \ cpu_gpr[rD(ctx->opcode)], EA, PPC_TLB_EPID_STORE, stop); \ 
- tcg_temp_free(EA); \ } GEN_STEPX(stb, DEF_MEMOP(MO_UB), 0x1F, 0x06) @@ -3413,8 +3259,6 @@ static void gen_lmw(DisasContext *ctx) t1 = tcg_const_i32(rD(ctx->opcode)); gen_addr_imm_index(ctx, t0, 0); gen_helper_lmw(cpu_env, t0, t1); - tcg_temp_free(t0); - tcg_temp_free_i32(t1); } /* stmw */ @@ -3432,8 +3276,6 @@ static void gen_stmw(DisasContext *ctx) t1 = tcg_const_i32(rS(ctx->opcode)); gen_addr_imm_index(ctx, t0, 0); gen_helper_stmw(cpu_env, t0, t1); - tcg_temp_free(t0); - tcg_temp_free_i32(t1); } /*** Integer load and store strings ***/ @@ -3472,9 +3314,6 @@ static void gen_lswi(DisasContext *ctx) t1 = tcg_const_i32(nb); t2 = tcg_const_i32(start); gen_helper_lsw(cpu_env, t0, t1, t2); - tcg_temp_free(t0); - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t2); } /* lswx */ @@ -3494,10 +3333,6 @@ static void gen_lswx(DisasContext *ctx) t2 = tcg_const_i32(rA(ctx->opcode)); t3 = tcg_const_i32(rB(ctx->opcode)); gen_helper_lswx(cpu_env, t0, t1, t2, t3); - tcg_temp_free(t0); - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t2); - tcg_temp_free_i32(t3); } /* stswi */ @@ -3520,9 +3355,6 @@ static void gen_stswi(DisasContext *ctx) t1 = tcg_const_i32(nb); t2 = tcg_const_i32(rS(ctx->opcode)); gen_helper_stsw(cpu_env, t0, t1, t2); - tcg_temp_free(t0); - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t2); } /* stswx */ @@ -3543,9 +3375,6 @@ static void gen_stswx(DisasContext *ctx) tcg_gen_andi_i32(t1, t1, 0x7F); t2 = tcg_const_i32(rS(ctx->opcode)); gen_helper_stsw(cpu_env, t0, t1, t2); - tcg_temp_free(t0); - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t2); } /*** Memory synchronisation ***/ @@ -3620,7 +3449,6 @@ static inline void gen_check_tlb_flush(DisasContext *ctx, bool global) gen_helper_check_tlb_flush_local(cpu_env); } gen_set_label(l); - tcg_temp_free_i32(t); } #else static inline void gen_check_tlb_flush(DisasContext *ctx, bool global) { } @@ -3653,7 +3481,6 @@ static void gen_load_locked(DisasContext *ctx, MemOp memop) tcg_gen_mov_tl(cpu_reserve, t0); tcg_gen_mov_tl(cpu_reserve_val, gpr); tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ); - tcg_temp_free(t0); } #define LARX(name, memop) \ @@ -3687,10 +3514,6 @@ static void gen_fetch_inc_conditional(DisasContext *ctx, MemOp memop, /* RT = (t != t2 ? t : u = 1<<(s*8-1)) */ tcg_gen_movi_tl(u, 1 << (MEMOP_GET_SIZE(memop) * 8 - 1)); tcg_gen_movcond_tl(cond, cpu_gpr[rD(ctx->opcode)], t, t2, t, u); - - tcg_temp_free(t); - tcg_temp_free(t2); - tcg_temp_free(u); } static void gen_ld_atomic(DisasContext *ctx, MemOp memop) @@ -3753,9 +3576,6 @@ static void gen_ld_atomic(DisasContext *ctx, MemOp memop) cpu_gpr[(rt + 2) & 31], t0); tcg_gen_qemu_st_tl(t1, EA, ctx->mem_idx, memop); tcg_gen_mov_tl(dst, t0); - - tcg_temp_free(t0); - tcg_temp_free(t1); } break; @@ -3785,7 +3605,6 @@ static void gen_ld_atomic(DisasContext *ctx, MemOp memop) /* invoke data storage error handler */ gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL); } - tcg_temp_free(EA); if (need_serial) { /* Restart with exclusive lock. 
*/ @@ -3861,20 +3680,12 @@ static void gen_st_atomic(DisasContext *ctx, MemOp memop) tcg_gen_movcond_tl(TCG_COND_EQ, s2, t, t2, src, t2); tcg_gen_qemu_st_tl(s, EA, ctx->mem_idx, memop); tcg_gen_qemu_st_tl(s2, ea_plus_s, ctx->mem_idx, memop); - - tcg_temp_free(ea_plus_s); - tcg_temp_free(s2); - tcg_temp_free(s); - tcg_temp_free(t2); - tcg_temp_free(t); } break; default: /* invoke data storage error handler */ gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL); } - tcg_temp_free(discard); - tcg_temp_free(EA); } static void gen_stwat(DisasContext *ctx) @@ -3899,7 +3710,6 @@ static void gen_conditional_store(DisasContext *ctx, MemOp memop) gen_set_access_type(ctx, ACCESS_RES); gen_addr_reg_index(ctx, t0); tcg_gen_brcond_tl(TCG_COND_NE, t0, cpu_reserve, l1); - tcg_temp_free(t0); t0 = tcg_temp_new(); tcg_gen_atomic_cmpxchg_tl(t0, cpu_reserve, cpu_reserve_val, @@ -3909,7 +3719,6 @@ static void gen_conditional_store(DisasContext *ctx, MemOp memop) tcg_gen_shli_tl(t0, t0, CRF_EQ_BIT); tcg_gen_or_tl(t0, t0, cpu_so); tcg_gen_trunc_tl_i32(cpu_crf[0], t0); - tcg_temp_free(t0); tcg_gen_br(l2); gen_set_label(l1); @@ -3973,13 +3782,11 @@ static void gen_lqarx(DisasContext *ctx) ctx->mem_idx)); gen_helper_lq_be_parallel(lo, cpu_env, EA, oi); } - tcg_temp_free_i32(oi); tcg_gen_ld_i64(hi, cpu_env, offsetof(CPUPPCState, retxh)); } else { /* Restart with exclusive lock. */ gen_helper_exit_atomic(cpu_env); ctx->base.is_jmp = DISAS_NORETURN; - tcg_temp_free(EA); return; } } else if (ctx->le_mode) { @@ -3993,7 +3800,6 @@ static void gen_lqarx(DisasContext *ctx) gen_addr_add(ctx, EA, EA, 8); tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_BEUQ); } - tcg_temp_free(EA); tcg_gen_st_tl(hi, cpu_env, offsetof(CPUPPCState, reserve_val)); tcg_gen_st_tl(lo, cpu_env, offsetof(CPUPPCState, reserve_val2)); @@ -4020,7 +3826,6 @@ static void gen_stqcx_(DisasContext *ctx) gen_addr_reg_index(ctx, EA); tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, lab_fail); - tcg_temp_free(EA); cmp = tcg_temp_new_i128(); val = tcg_temp_new_i128(); @@ -4032,23 +3837,19 @@ static void gen_stqcx_(DisasContext *ctx) tcg_gen_atomic_cmpxchg_i128(val, cpu_reserve, cmp, val, ctx->mem_idx, DEF_MEMOP(MO_128 | MO_ALIGN)); - tcg_temp_free_i128(cmp); t0 = tcg_temp_new(); t1 = tcg_temp_new(); tcg_gen_extr_i128_i64(t1, t0, val); - tcg_temp_free_i128(val); tcg_gen_xor_tl(t1, t1, cpu_reserve_val2); tcg_gen_xor_tl(t0, t0, cpu_reserve_val); tcg_gen_or_tl(t0, t0, t1); - tcg_temp_free(t1); tcg_gen_setcondi_tl(TCG_COND_EQ, t0, t0, 0); tcg_gen_shli_tl(t0, t0, CRF_EQ_BIT); tcg_gen_or_tl(t0, t0, cpu_so); tcg_gen_trunc_tl_i32(cpu_crf[0], t0); - tcg_temp_free(t0); tcg_gen_br(lab_over); gen_set_label(lab_fail); @@ -4145,7 +3946,6 @@ static void gen_wait(DisasContext *ctx) TCGv_i32 t0 = tcg_const_i32(1); tcg_gen_st_i32(t0, cpu_env, -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted)); - tcg_temp_free_i32(t0); /* Stop translation, as the CPU is supposed to sleep from now */ gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); } @@ -4191,7 +3991,6 @@ static void gen_doze(DisasContext *ctx) CHK_HV(ctx); t = tcg_const_i32(PPC_PM_DOZE); gen_helper_pminsn(cpu_env, t); - tcg_temp_free_i32(t); /* Stop translation, as the CPU is supposed to sleep from now */ gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); #endif /* defined(CONFIG_USER_ONLY) */ @@ -4207,7 +4006,6 @@ static void gen_nap(DisasContext *ctx) CHK_HV(ctx); t = tcg_const_i32(PPC_PM_NAP); gen_helper_pminsn(cpu_env, t); - tcg_temp_free_i32(t); /* Stop translation, as the CPU is supposed to sleep from now */ 
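A second pattern in this series is the handling of constants. Some call sites (for example the microblaze do_get/do_put hunks earlier) switch from tcg_const_i32() to tcg_constant_i32(); others keep tcg_const_i32() and only drop the free. As a hedged sketch of the difference (the values and destination temp below are made up for illustration, not taken from this patch): tcg_const_i32() yields a fresh, writable temporary initialised to the value, whereas tcg_constant_i32() yields a pooled, read-only constant that is never freed and is only ever valid as a source operand.

/* Illustration only -- not code from this patch. */
static void gen_const_example(void)
{
    TCGv_i32 dst = tcg_temp_new_i32();

    /* Older style: a writable temp holding 5; formerly needed
     * tcg_temp_free_i32(), now reclaimed automatically. */
    TCGv_i32 t_old = tcg_const_i32(5);

    /* Preferred style: an interned, read-only constant; never freed
     * and never used as the output of a TCG op. */
    TCGv_i32 t_new = tcg_constant_i32(7);

    tcg_gen_add_i32(dst, t_old, t_new);
}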
gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); #endif /* defined(CONFIG_USER_ONLY) */ @@ -4223,7 +4021,6 @@ static void gen_stop(DisasContext *ctx) CHK_HV(ctx); t = tcg_const_i32(PPC_PM_STOP); gen_helper_pminsn(cpu_env, t); - tcg_temp_free_i32(t); /* Stop translation, as the CPU is supposed to sleep from now */ gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); #endif /* defined(CONFIG_USER_ONLY) */ @@ -4239,7 +4036,6 @@ static void gen_sleep(DisasContext *ctx) CHK_HV(ctx); t = tcg_const_i32(PPC_PM_SLEEP); gen_helper_pminsn(cpu_env, t); - tcg_temp_free_i32(t); /* Stop translation, as the CPU is supposed to sleep from now */ gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); #endif /* defined(CONFIG_USER_ONLY) */ @@ -4255,7 +4051,6 @@ static void gen_rvwinkle(DisasContext *ctx) CHK_HV(ctx); t = tcg_const_i32(PPC_PM_RVWINKLE); gen_helper_pminsn(cpu_env, t); - tcg_temp_free_i32(t); /* Stop translation, as the CPU is supposed to sleep from now */ gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); #endif /* defined(CONFIG_USER_ONLY) */ @@ -4309,7 +4104,6 @@ static void pmu_count_insns(DisasContext *ctx) } gen_set_label(l); - tcg_temp_free(t0); } else { gen_helper_insns_inc(cpu_env, tcg_constant_i32(ctx->base.num_insns)); } @@ -4324,8 +4118,6 @@ static void pmu_count_insns(DisasContext *ctx) gen_load_spr(t0, SPR_POWER_PMC5); tcg_gen_addi_tl(t0, t0, ctx->base.num_insns); gen_store_spr(SPR_POWER_PMC5, t0); - - tcg_temp_free(t0); #endif /* #if !defined(CONFIG_USER_ONLY) */ } #else @@ -4451,8 +4243,6 @@ static void gen_bcond(DisasContext *ctx, int type) */ if (unlikely(!is_book3s_arch2x(ctx))) { gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); - tcg_temp_free(temp); - tcg_temp_free(target); return; } @@ -4480,7 +4270,6 @@ static void gen_bcond(DisasContext *ctx, int type) tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1); } } - tcg_temp_free(temp); } if ((bo & 0x10) == 0) { /* Test CR */ @@ -4495,7 +4284,6 @@ static void gen_bcond(DisasContext *ctx, int type) tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask); tcg_gen_brcondi_i32(TCG_COND_NE, temp, 0, l1); } - tcg_temp_free_i32(temp); } gen_update_cfar(ctx, ctx->cia); if (type == BCOND_IM) { @@ -4512,7 +4300,6 @@ static void gen_bcond(DisasContext *ctx, int type) tcg_gen_andi_tl(cpu_nip, target, ~3); } gen_lookup_and_goto_ptr(ctx); - tcg_temp_free(target); } if ((bo & 0x14) != 0x14) { /* fallthrough case */ @@ -4570,8 +4357,6 @@ static void glue(gen_, name)(DisasContext *ctx) \ tcg_gen_andi_i32(t0, t0, bitmask); \ tcg_gen_andi_i32(t1, cpu_crf[crbD(ctx->opcode) >> 2], ~bitmask); \ tcg_gen_or_i32(cpu_crf[crbD(ctx->opcode) >> 2], t0, t1); \ - tcg_temp_free_i32(t0); \ - tcg_temp_free_i32(t1); \ } /* crand */ @@ -4724,7 +4509,6 @@ static void gen_tw(DisasContext *ctx) t0 = tcg_const_i32(TO(ctx->opcode)); gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); - tcg_temp_free_i32(t0); } /* twi */ @@ -4739,8 +4523,6 @@ static void gen_twi(DisasContext *ctx) t0 = tcg_const_tl(SIMM(ctx->opcode)); t1 = tcg_const_i32(TO(ctx->opcode)); gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1); - tcg_temp_free(t0); - tcg_temp_free_i32(t1); } #if defined(TARGET_PPC64) @@ -4755,7 +4537,6 @@ static void gen_td(DisasContext *ctx) t0 = tcg_const_i32(TO(ctx->opcode)); gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); - tcg_temp_free_i32(t0); } /* tdi */ @@ -4770,8 +4551,6 @@ static void gen_tdi(DisasContext *ctx) t0 = tcg_const_tl(SIMM(ctx->opcode)); t1 = tcg_const_i32(TO(ctx->opcode)); gen_helper_td(cpu_env, 
cpu_gpr[rA(ctx->opcode)], t0, t1); - tcg_temp_free(t0); - tcg_temp_free_i32(t1); } #endif @@ -4792,8 +4571,6 @@ static void gen_mcrxr(DisasContext *ctx) tcg_gen_shli_i32(dst, dst, 1); tcg_gen_or_i32(dst, dst, t0); tcg_gen_or_i32(dst, dst, t1); - tcg_temp_free_i32(t0); - tcg_temp_free_i32(t1); tcg_gen_movi_tl(cpu_so, 0); tcg_gen_movi_tl(cpu_ov, 0); @@ -4817,8 +4594,6 @@ static void gen_mcrxrx(DisasContext *ctx) tcg_gen_or_tl(t1, t1, cpu_ca32); tcg_gen_or_tl(t0, t0, t1); tcg_gen_trunc_tl_i32(dst, t0); - tcg_temp_free(t0); - tcg_temp_free(t1); } #endif @@ -4853,7 +4628,6 @@ static void gen_mfcr(DisasContext *ctx) tcg_gen_shli_i32(t0, t0, 4); tcg_gen_or_i32(t0, t0, cpu_crf[7]); tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); - tcg_temp_free_i32(t0); } } @@ -4950,7 +4724,6 @@ static void gen_mtcrf(DisasContext *ctx) tcg_gen_trunc_tl_i32(temp, cpu_gpr[rS(ctx->opcode)]); tcg_gen_shri_i32(temp, temp, crn * 4); tcg_gen_andi_i32(cpu_crf[7 - crn], temp, 0xf); - tcg_temp_free_i32(temp); } } else { TCGv_i32 temp = tcg_temp_new_i32(); @@ -4961,7 +4734,6 @@ static void gen_mtcrf(DisasContext *ctx) tcg_gen_andi_i32(cpu_crf[7 - crn], cpu_crf[7 - crn], 0xf); } } - tcg_temp_free_i32(temp); } } @@ -5008,9 +4780,6 @@ static void gen_mtmsrd(DisasContext *ctx) /* Must stop the translation as machine state (may have) changed */ ctx->base.is_jmp = DISAS_EXIT_UPDATE; - - tcg_temp_free(t0); - tcg_temp_free(t1); #endif /* !defined(CONFIG_USER_ONLY) */ } #endif /* defined(TARGET_PPC64) */ @@ -5050,9 +4819,6 @@ static void gen_mtmsr(DisasContext *ctx) /* Must stop the translation as machine state (may have) changed */ ctx->base.is_jmp = DISAS_EXIT_UPDATE; - - tcg_temp_free(t0); - tcg_temp_free(t1); #endif } @@ -5125,8 +4891,6 @@ static void gen_setb(DisasContext *ctx) tcg_gen_setcondi_i32(TCG_COND_GEU, t0, cpu_crf[crf], 4); tcg_gen_movcond_i32(TCG_COND_GEU, t0, cpu_crf[crf], t8, tm1, t0); tcg_gen_ext_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); - - tcg_temp_free_i32(t0); } #endif @@ -5141,7 +4905,6 @@ static void gen_dcbf(DisasContext *ctx) t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); gen_qemu_ld8u(ctx, t0, t0); - tcg_temp_free(t0); } /* dcbfep (external PID dcbf) */ @@ -5154,7 +4917,6 @@ static void gen_dcbfep(DisasContext *ctx) t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); tcg_gen_qemu_ld_tl(t0, t0, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UB)); - tcg_temp_free(t0); } /* dcbi (Supervisor only) */ @@ -5173,8 +4935,6 @@ static void gen_dcbi(DisasContext *ctx) /* XXX: specification says this should be treated as a store by the MMU */ gen_qemu_ld8u(ctx, val, EA); gen_qemu_st8(ctx, val, EA); - tcg_temp_free(val); - tcg_temp_free(EA); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -5187,7 +4947,6 @@ static void gen_dcbst(DisasContext *ctx) t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); gen_qemu_ld8u(ctx, t0, t0); - tcg_temp_free(t0); } /* dcbstep (dcbstep External PID version) */ @@ -5199,7 +4958,6 @@ static void gen_dcbstep(DisasContext *ctx) t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); tcg_gen_qemu_ld_tl(t0, t0, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UB)); - tcg_temp_free(t0); } /* dcbt */ @@ -5250,7 +5008,14 @@ static void gen_dcbtls(DisasContext *ctx) gen_load_spr(t0, SPR_Exxx_L1CSR0); tcg_gen_ori_tl(t0, t0, L1CSR0_CUL); gen_store_spr(SPR_Exxx_L1CSR0, t0); - tcg_temp_free(t0); +} + +/* dcblc */ +static void gen_dcblc(DisasContext *ctx) +{ + /* + * interpreted as no-op + */ } /* dcbz */ @@ -5264,8 +5029,6 @@ static void gen_dcbz(DisasContext *ctx) tcgv_op = tcg_const_i32(ctx->opcode & 0x03FF000); gen_addr_reg_index(ctx, 
tcgv_addr); gen_helper_dcbz(cpu_env, tcgv_addr, tcgv_op); - tcg_temp_free(tcgv_addr); - tcg_temp_free_i32(tcgv_op); } /* dcbzep */ @@ -5279,8 +5042,6 @@ static void gen_dcbzep(DisasContext *ctx) tcgv_op = tcg_const_i32(ctx->opcode & 0x03FF000); gen_addr_reg_index(ctx, tcgv_addr); gen_helper_dcbzep(cpu_env, tcgv_addr, tcgv_op); - tcg_temp_free(tcgv_addr); - tcg_temp_free_i32(tcgv_op); } /* dst / dstt */ @@ -5318,7 +5079,6 @@ static void gen_icbi(DisasContext *ctx) t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); gen_helper_icbi(cpu_env, t0); - tcg_temp_free(t0); } /* icbiep */ @@ -5329,7 +5089,6 @@ static void gen_icbiep(DisasContext *ctx) t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); gen_helper_icbiep(cpu_env, t0); - tcg_temp_free(t0); } /* Optional: */ @@ -5357,7 +5116,6 @@ static void gen_mfsr(DisasContext *ctx) CHK_SV(ctx); t0 = tcg_const_tl(SR(ctx->opcode)); gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); - tcg_temp_free(t0); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -5373,7 +5131,6 @@ static void gen_mfsrin(DisasContext *ctx) t0 = tcg_temp_new(); tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4); gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); - tcg_temp_free(t0); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -5388,7 +5145,6 @@ static void gen_mtsr(DisasContext *ctx) CHK_SV(ctx); t0 = tcg_const_tl(SR(ctx->opcode)); gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]); - tcg_temp_free(t0); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -5404,7 +5160,6 @@ static void gen_mtsrin(DisasContext *ctx) t0 = tcg_temp_new(); tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4); gen_helper_store_sr(cpu_env, t0, cpu_gpr[rD(ctx->opcode)]); - tcg_temp_free(t0); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -5422,7 +5177,6 @@ static void gen_mfsr_64b(DisasContext *ctx) CHK_SV(ctx); t0 = tcg_const_tl(SR(ctx->opcode)); gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); - tcg_temp_free(t0); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -5438,7 +5192,6 @@ static void gen_mfsrin_64b(DisasContext *ctx) t0 = tcg_temp_new(); tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4); gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); - tcg_temp_free(t0); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -5453,7 +5206,6 @@ static void gen_mtsr_64b(DisasContext *ctx) CHK_SV(ctx); t0 = tcg_const_tl(SR(ctx->opcode)); gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]); - tcg_temp_free(t0); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -5469,7 +5221,6 @@ static void gen_mtsrin_64b(DisasContext *ctx) t0 = tcg_temp_new(); tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4); gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]); - tcg_temp_free(t0); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -5523,7 +5274,6 @@ static void gen_eciwx(DisasContext *ctx) gen_addr_reg_index(ctx, t0); tcg_gen_qemu_ld_tl(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx, DEF_MEMOP(MO_UL | MO_ALIGN)); - tcg_temp_free(t0); } /* ecowx */ @@ -5536,7 +5286,6 @@ static void gen_ecowx(DisasContext *ctx) gen_addr_reg_index(ctx, t0); tcg_gen_qemu_st_tl(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx, DEF_MEMOP(MO_UL | MO_ALIGN)); - tcg_temp_free(t0); } /* 602 - 603 - G2 TLB management */ @@ -5584,7 +5333,6 @@ static void gen_tlbiva(DisasContext *ctx) t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); gen_helper_tlbiva(cpu_env, cpu_gpr[rB(ctx->opcode)]); - tcg_temp_free(t0); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -5702,8 +5450,6 @@ static inline void gen_405_mulladd_insn(DisasContext *ctx, int opc2, 
int opc3, } else { tcg_gen_mul_tl(cpu_gpr[rt], t0, t1); } - tcg_temp_free(t0); - tcg_temp_free(t1); if (unlikely(Rc) != 0) { /* Update Rc0 */ gen_set_Rc0(ctx, cpu_gpr[rt]); @@ -5814,7 +5560,6 @@ static void gen_mfdcr(DisasContext *ctx) CHK_SV(ctx); dcrn = tcg_const_tl(SPR(ctx->opcode)); gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env, dcrn); - tcg_temp_free(dcrn); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -5829,7 +5574,6 @@ static void gen_mtdcr(DisasContext *ctx) CHK_SV(ctx); dcrn = tcg_const_tl(SPR(ctx->opcode)); gen_helper_store_dcr(cpu_env, dcrn, cpu_gpr[rS(ctx->opcode)]); - tcg_temp_free(dcrn); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -5882,9 +5626,7 @@ static void gen_dcread(DisasContext *ctx) gen_addr_reg_index(ctx, EA); val = tcg_temp_new(); gen_qemu_ld32u(ctx, val, EA); - tcg_temp_free(val); tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], EA); - tcg_temp_free(EA); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -6002,7 +5744,6 @@ static void gen_tlbsx_40x(DisasContext *ctx) t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); gen_helper_4xx_tlbsx(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); - tcg_temp_free(t0); if (Rc(ctx->opcode)) { TCGLabel *l1 = gen_new_label(); tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so); @@ -6055,7 +5796,6 @@ static void gen_tlbre_440(DisasContext *ctx) TCGv_i32 t0 = tcg_const_i32(rB(ctx->opcode)); gen_helper_440_tlbre(cpu_gpr[rD(ctx->opcode)], cpu_env, t0, cpu_gpr[rA(ctx->opcode)]); - tcg_temp_free_i32(t0); } break; default: @@ -6077,7 +5817,6 @@ static void gen_tlbsx_440(DisasContext *ctx) t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); gen_helper_440_tlbsx(cpu_gpr[rD(ctx->opcode)], cpu_env, t0); - tcg_temp_free(t0); if (Rc(ctx->opcode)) { TCGLabel *l1 = gen_new_label(); tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so); @@ -6103,7 +5842,6 @@ static void gen_tlbwe_440(DisasContext *ctx) TCGv_i32 t0 = tcg_const_i32(rB(ctx->opcode)); gen_helper_440_tlbwe(cpu_env, t0, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); - tcg_temp_free_i32(t0); } break; default: @@ -6144,7 +5882,6 @@ static void gen_tlbsx_booke206(DisasContext *ctx) tcg_gen_add_tl(t0, t0, cpu_gpr[rB(ctx->opcode)]); gen_helper_booke206_tlbsx(cpu_env, t0); - tcg_temp_free(t0); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -6170,7 +5907,6 @@ static void gen_tlbivax_booke206(DisasContext *ctx) t0 = tcg_temp_new(); gen_addr_reg_index(ctx, t0); gen_helper_booke206_tlbivax(cpu_env, t0); - tcg_temp_free(t0); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -6199,8 +5935,6 @@ static void gen_tlbilx_booke206(DisasContext *ctx) gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); break; } - - tcg_temp_free(t0); #endif /* defined(CONFIG_USER_ONLY) */ } @@ -6218,7 +5952,6 @@ static void gen_wrtee(DisasContext *ctx) tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 << MSR_EE)); tcg_gen_or_tl(cpu_msr, cpu_msr, t0); gen_ppc_maybe_interrupt(ctx); - tcg_temp_free(t0); /* * Stop translation to have a chance to raise an exception if we * just set msr_ee to 1 @@ -6253,7 +5986,6 @@ static void gen_dlmzb(DisasContext *ctx) TCGv_i32 t0 = tcg_const_i32(Rc(ctx->opcode)); gen_helper_dlmzb(cpu_gpr[rA(ctx->opcode)], cpu_env, cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); - tcg_temp_free_i32(t0); } /* mbar replaces eieio on 440 */ @@ -6290,7 +6022,6 @@ static void gen_maddld(DisasContext *ctx) tcg_gen_mul_i64(t1, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); tcg_gen_add_i64(cpu_gpr[rD(ctx->opcode)], t1, cpu_gpr[rC(ctx->opcode)]); - tcg_temp_free_i64(t1); } /* maddhd maddhdu */ @@ -6311,9 +6042,6 @@ static void gen_maddhd_maddhdu(DisasContext 
*ctx) } tcg_gen_add2_i64(t1, cpu_gpr[rD(ctx->opcode)], lo, hi, cpu_gpr[rC(ctx->opcode)], t1); - tcg_temp_free_i64(lo); - tcg_temp_free_i64(hi); - tcg_temp_free_i64(t1); } #endif /* defined(TARGET_PPC64) */ @@ -6664,9 +6392,6 @@ static void gen_brh(DisasContext *ctx) tcg_gen_and_i64(t1, cpu_gpr[rS(ctx->opcode)], mask); tcg_gen_shli_i64(t1, t1, 8); tcg_gen_or_i64(cpu_gpr[rA(ctx->opcode)], t1, t2); - - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); } #endif @@ -6824,6 +6549,7 @@ GEN_HANDLER_E(dcbtep, 0x1F, 0x1F, 0x09, 0x00000001, PPC_NONE, PPC2_BOOKE206), GEN_HANDLER(dcbtst, 0x1F, 0x16, 0x07, 0x00000001, PPC_CACHE), GEN_HANDLER_E(dcbtstep, 0x1F, 0x1F, 0x07, 0x00000001, PPC_NONE, PPC2_BOOKE206), GEN_HANDLER_E(dcbtls, 0x1F, 0x06, 0x05, 0x02000001, PPC_BOOKE, PPC2_BOOKE206), +GEN_HANDLER_E(dcblc, 0x1F, 0x06, 0x0c, 0x02000001, PPC_BOOKE, PPC2_BOOKE206), GEN_HANDLER(dcbz, 0x1F, 0x16, 0x1F, 0x03C00001, PPC_CACHE_DCBZ), GEN_HANDLER_E(dcbzep, 0x1F, 0x1F, 0x1F, 0x03C00001, PPC_NONE, PPC2_BOOKE206), GEN_HANDLER(dst, 0x1F, 0x16, 0x0A, 0x01800001, PPC_ALTIVEC), @@ -7618,8 +7344,6 @@ static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) if (ctx->base.is_jmp == DISAS_NEXT && !(pc & ~TARGET_PAGE_MASK)) { ctx->base.is_jmp = DISAS_TOO_MANY; } - - translator_loop_temp_check(&ctx->base); } static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) diff --git a/target/ppc/translate/dfp-impl.c.inc b/target/ppc/translate/dfp-impl.c.inc index f9f1d58d44..62911e04c7 100644 --- a/target/ppc/translate/dfp-impl.c.inc +++ b/target/ppc/translate/dfp-impl.c.inc @@ -20,9 +20,6 @@ static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ if (unlikely(a->rc)) { \ gen_set_cr1_from_fpscr(ctx); \ } \ - tcg_temp_free_ptr(rt); \ - tcg_temp_free_ptr(ra); \ - tcg_temp_free_ptr(rb); \ return true; \ } @@ -36,8 +33,6 @@ static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ rb = gen_fprp_ptr(a->rb); \ gen_helper_##NAME(cpu_crf[a->bf], \ cpu_env, ra, rb); \ - tcg_temp_free_ptr(ra); \ - tcg_temp_free_ptr(rb); \ return true; \ } @@ -50,7 +45,6 @@ static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ rb = gen_fprp_ptr(a->rb); \ gen_helper_##NAME(cpu_crf[a->bf], \ cpu_env, tcg_constant_i32(a->uim), rb);\ - tcg_temp_free_ptr(rb); \ return true; \ } @@ -63,7 +57,6 @@ static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ ra = gen_fprp_ptr(a->fra); \ gen_helper_##NAME(cpu_crf[a->bf], \ cpu_env, ra, tcg_constant_i32(a->dm)); \ - tcg_temp_free_ptr(ra); \ return true; \ } @@ -81,8 +74,6 @@ static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ if (unlikely(a->rc)) { \ gen_set_cr1_from_fpscr(ctx); \ } \ - tcg_temp_free_ptr(rt); \ - tcg_temp_free_ptr(rb); \ return true; \ } @@ -100,9 +91,6 @@ static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ if (unlikely(a->rc)) { \ gen_set_cr1_from_fpscr(ctx); \ } \ - tcg_temp_free_ptr(rt); \ - tcg_temp_free_ptr(ra); \ - tcg_temp_free_ptr(rb); \ return true; \ } @@ -118,8 +106,6 @@ static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ if (unlikely(a->rc)) { \ gen_set_cr1_from_fpscr(ctx); \ } \ - tcg_temp_free_ptr(rt); \ - tcg_temp_free_ptr(rb); \ return true; \ } @@ -136,8 +122,6 @@ static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ if (unlikely(a->rc)) { \ gen_set_cr1_from_fpscr(ctx); \ } \ - tcg_temp_free_ptr(rt); \ - tcg_temp_free_ptr(rx); \ return true; \ } @@ -205,8 +189,6 @@ static bool trans_DCFFIXQQ(DisasContext *ctx, arg_DCFFIXQQ *a) rt = gen_fprp_ptr(a->frtp); rb = gen_avr_ptr(a->vrb); gen_helper_DCFFIXQQ(cpu_env, rt, 
rb); - tcg_temp_free_ptr(rt); - tcg_temp_free_ptr(rb); return true; } @@ -222,8 +204,6 @@ static bool trans_DCTFIXQQ(DisasContext *ctx, arg_DCTFIXQQ *a) rt = gen_avr_ptr(a->vrt); rb = gen_fprp_ptr(a->frbp); gen_helper_DCTFIXQQ(cpu_env, rt, rb); - tcg_temp_free_ptr(rt); - tcg_temp_free_ptr(rb); return true; } diff --git a/target/ppc/translate/fixedpoint-impl.c.inc b/target/ppc/translate/fixedpoint-impl.c.inc index 1ba56cbed5..20ea484c3d 100644 --- a/target/ppc/translate/fixedpoint-impl.c.inc +++ b/target/ppc/translate/fixedpoint-impl.c.inc @@ -42,8 +42,6 @@ static bool do_ldst(DisasContext *ctx, int rt, int ra, TCGv displ, bool update, if (update) { tcg_gen_mov_tl(cpu_gpr[ra], ea); } - tcg_temp_free(ea); - return true; } @@ -149,7 +147,6 @@ static bool do_ldst_quad(DisasContext *ctx, arg_D *a, bool store, bool prefixed) tcg_gen_qemu_ld_i64(high_addr_gpr, ea, ctx->mem_idx, mop); } } - tcg_temp_free(ea); #else qemu_build_not_reached(); #endif @@ -389,8 +386,6 @@ static bool do_set_bool_cond(DisasContext *ctx, arg_X_bi *a, bool neg, bool rev) if (neg) { tcg_gen_neg_tl(cpu_gpr[a->rt], cpu_gpr[a->rt]); } - tcg_temp_free(temp); - return true; } @@ -437,9 +432,6 @@ static void do_cntzdm(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 mask, int64_t trail) } tcg_gen_ctpop_i64(dst, t0); - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } static bool trans_CNTLZDM(DisasContext *ctx, arg_X *a) @@ -519,11 +511,6 @@ static bool trans_ADDG6S(DisasContext *ctx, arg_X *a) tcg_gen_xori_tl(carry, carry, (target_long)carry_bits); tcg_gen_muli_tl(cpu_gpr[a->rt], carry, 6); - - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(carry); - return true; } @@ -564,9 +551,6 @@ static bool do_hash(DisasContext *ctx, arg_X *a, bool priv, ea = do_ea_calc(ctx, a->ra, tcg_constant_tl(a->rt)); helper(cpu_env, ea, cpu_gpr[a->ra], cpu_gpr[a->rb]); - - tcg_temp_free(ea); - return true; } diff --git a/target/ppc/translate/fp-impl.c.inc b/target/ppc/translate/fp-impl.c.inc index 8d5cf0f982..d5d88e7d49 100644 --- a/target/ppc/translate/fp-impl.c.inc +++ b/target/ppc/translate/fp-impl.c.inc @@ -21,7 +21,6 @@ static void gen_set_cr1_from_fpscr(DisasContext *ctx) TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_trunc_tl_i32(tmp, cpu_fpscr); tcg_gen_shri_i32(cpu_crf[1], tmp, 28); - tcg_temp_free_i32(tmp); } #else static void gen_set_cr1_from_fpscr(DisasContext *ctx) @@ -58,10 +57,6 @@ static void gen_f##name(DisasContext *ctx) \ if (unlikely(Rc(ctx->opcode) != 0)) { \ gen_set_cr1_from_fpscr(ctx); \ } \ - tcg_temp_free_i64(t0); \ - tcg_temp_free_i64(t1); \ - tcg_temp_free_i64(t2); \ - tcg_temp_free_i64(t3); \ } #define GEN_FLOAT_ACB(name, op2, set_fprf, type) \ @@ -92,9 +87,6 @@ static void gen_f##name(DisasContext *ctx) \ if (unlikely(Rc(ctx->opcode) != 0)) { \ gen_set_cr1_from_fpscr(ctx); \ } \ - tcg_temp_free_i64(t0); \ - tcg_temp_free_i64(t1); \ - tcg_temp_free_i64(t2); \ } #define GEN_FLOAT_AB(name, op2, inval, set_fprf, type) \ _GEN_FLOAT_AB(name, 0x3F, op2, inval, set_fprf, type); \ @@ -124,9 +116,6 @@ static void gen_f##name(DisasContext *ctx) \ if (unlikely(Rc(ctx->opcode) != 0)) { \ gen_set_cr1_from_fpscr(ctx); \ } \ - tcg_temp_free_i64(t0); \ - tcg_temp_free_i64(t1); \ - tcg_temp_free_i64(t2); \ } #define GEN_FLOAT_AC(name, op2, inval, set_fprf, type) \ _GEN_FLOAT_AC(name, 0x3F, op2, inval, set_fprf, type); \ @@ -154,8 +143,6 @@ static void gen_f##name(DisasContext *ctx) \ if (unlikely(Rc(ctx->opcode) != 0)) { \ gen_set_cr1_from_fpscr(ctx); \ } \ - tcg_temp_free_i64(t0); \ - tcg_temp_free_i64(t1); \ } #define GEN_FLOAT_BS(name, op1, 
op2, set_fprf, type) \ @@ -179,8 +166,6 @@ static void gen_f##name(DisasContext *ctx) \ if (unlikely(Rc(ctx->opcode) != 0)) { \ gen_set_cr1_from_fpscr(ctx); \ } \ - tcg_temp_free_i64(t0); \ - tcg_temp_free_i64(t1); \ } /* fadd - fadds */ @@ -218,8 +203,6 @@ static void gen_frsqrtes(DisasContext *ctx) if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_cr1_from_fpscr(ctx); } - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } static bool trans_FSEL(DisasContext *ctx, arg_A *a) @@ -242,11 +225,6 @@ static bool trans_FSEL(DisasContext *ctx, arg_A *a) if (a->rc) { gen_set_cr1_from_fpscr(ctx); } - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); - return true; } @@ -273,10 +251,6 @@ static bool do_helper_fsqrt(DisasContext *ctx, arg_A_tb *a, if (unlikely(a->rc != 0)) { gen_set_cr1_from_fpscr(ctx); } - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - return true; } @@ -343,8 +317,6 @@ static void gen_ftdiv(DisasContext *ctx) get_fpr(t0, rA(ctx->opcode)); get_fpr(t1, rB(ctx->opcode)); gen_helper_ftdiv(cpu_crf[crfD(ctx->opcode)], t0, t1); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } static void gen_ftsqrt(DisasContext *ctx) @@ -357,7 +329,6 @@ static void gen_ftsqrt(DisasContext *ctx) t0 = tcg_temp_new_i64(); get_fpr(t0, rB(ctx->opcode)); gen_helper_ftsqrt(cpu_crf[crfD(ctx->opcode)], t0); - tcg_temp_free_i64(t0); } @@ -381,10 +352,7 @@ static void gen_fcmpo(DisasContext *ctx) get_fpr(t0, rA(ctx->opcode)); get_fpr(t1, rB(ctx->opcode)); gen_helper_fcmpo(cpu_env, t0, t1, crf); - tcg_temp_free_i32(crf); gen_helper_float_check_status(cpu_env); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } /* fcmpu */ @@ -404,10 +372,7 @@ static void gen_fcmpu(DisasContext *ctx) get_fpr(t0, rA(ctx->opcode)); get_fpr(t1, rB(ctx->opcode)); gen_helper_fcmpu(cpu_env, t0, t1, crf); - tcg_temp_free_i32(crf); gen_helper_float_check_status(cpu_env); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } /*** Floating-point move ***/ @@ -429,8 +394,6 @@ static void gen_fabs(DisasContext *ctx) if (unlikely(Rc(ctx->opcode))) { gen_set_cr1_from_fpscr(ctx); } - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } /* fmr - fmr. 
*/ @@ -448,7 +411,6 @@ static void gen_fmr(DisasContext *ctx) if (unlikely(Rc(ctx->opcode))) { gen_set_cr1_from_fpscr(ctx); } - tcg_temp_free_i64(t0); } /* fnabs */ @@ -469,8 +431,6 @@ static void gen_fnabs(DisasContext *ctx) if (unlikely(Rc(ctx->opcode))) { gen_set_cr1_from_fpscr(ctx); } - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } /* fneg */ @@ -491,8 +451,6 @@ static void gen_fneg(DisasContext *ctx) if (unlikely(Rc(ctx->opcode))) { gen_set_cr1_from_fpscr(ctx); } - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } /* fcpsgn: PowerPC 2.05 specification */ @@ -516,9 +474,6 @@ static void gen_fcpsgn(DisasContext *ctx) if (unlikely(Rc(ctx->opcode))) { gen_set_cr1_from_fpscr(ctx); } - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); } static void gen_fmrgew(DisasContext *ctx) @@ -538,9 +493,6 @@ static void gen_fmrgew(DisasContext *ctx) get_fpr(t0, rA(ctx->opcode)); tcg_gen_deposit_i64(t1, t0, b0, 0, 32); set_fpr(rD(ctx->opcode), t1); - tcg_temp_free_i64(b0); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } static void gen_fmrgow(DisasContext *ctx) @@ -559,9 +511,6 @@ static void gen_fmrgow(DisasContext *ctx) get_fpr(t1, rA(ctx->opcode)); tcg_gen_deposit_i64(t2, t0, t1, 32, 32); set_fpr(rD(ctx->opcode), t2); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); } /*** Floating-Point status & ctrl register ***/ @@ -587,7 +536,6 @@ static void gen_mcrfs(DisasContext *ctx) tcg_gen_trunc_tl_i32(cpu_crf[crfD(ctx->opcode)], tmp); tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], 0xf); - tcg_temp_free(tmp); tcg_gen_extu_tl_i64(tnew_fpscr, cpu_fpscr); /* Only the exception bits (including FX) should be cleared if read */ tcg_gen_andi_i64(tnew_fpscr, tnew_fpscr, @@ -595,8 +543,6 @@ static void gen_mcrfs(DisasContext *ctx) /* FEX and VX need to be updated, so don't set fpscr directly */ tmask = tcg_const_i32(1 << nibble); gen_helper_store_fpscr(cpu_env, tnew_fpscr, tmask); - tcg_temp_free_i32(tmask); - tcg_temp_free_i64(tnew_fpscr); } static TCGv_i64 place_from_fpscr(int rt, uint64_t mask) @@ -608,8 +554,6 @@ static TCGv_i64 place_from_fpscr(int rt, uint64_t mask) tcg_gen_andi_i64(fpscr_masked, fpscr, mask); set_fpr(rt, fpscr_masked); - tcg_temp_free_i64(fpscr_masked); - return fpscr; } @@ -622,24 +566,17 @@ static void store_fpscr_masked(TCGv_i64 fpscr, uint64_t clear_mask, tcg_gen_andi_i64(fpscr_masked, fpscr, ~clear_mask); tcg_gen_or_i64(fpscr_masked, fpscr_masked, set_mask); gen_helper_store_fpscr(cpu_env, fpscr_masked, st_mask); - - tcg_temp_free_i64(fpscr_masked); } static bool trans_MFFS(DisasContext *ctx, arg_X_t_rc *a) { - TCGv_i64 fpscr; - REQUIRE_FPU(ctx); gen_reset_fpstatus(); - fpscr = place_from_fpscr(a->rt, UINT64_MAX); + place_from_fpscr(a->rt, UINT64_MAX); if (a->rc) { gen_set_cr1_from_fpscr(ctx); } - - tcg_temp_free_i64(fpscr); - return true; } @@ -653,9 +590,6 @@ static bool trans_MFFSCE(DisasContext *ctx, arg_X_t *a) gen_reset_fpstatus(); fpscr = place_from_fpscr(a->rt, UINT64_MAX); store_fpscr_masked(fpscr, FP_ENABLES, tcg_constant_i64(0), 0x0003); - - tcg_temp_free_i64(fpscr); - return true; } @@ -673,10 +607,6 @@ static bool trans_MFFSCRN(DisasContext *ctx, arg_X_tb *a) gen_reset_fpstatus(); fpscr = place_from_fpscr(a->rt, FP_DRN | FP_ENABLES | FP_NI | FP_RN); store_fpscr_masked(fpscr, FP_RN, t1, 0x0001); - - tcg_temp_free_i64(t1); - tcg_temp_free_i64(fpscr); - return true; } @@ -694,10 +624,6 @@ static bool trans_MFFSCDRN(DisasContext *ctx, arg_X_tb *a) gen_reset_fpstatus(); fpscr = 
place_from_fpscr(a->rt, FP_DRN | FP_ENABLES | FP_NI | FP_RN); store_fpscr_masked(fpscr, FP_DRN, t1, 0x0100); - - tcg_temp_free_i64(t1); - tcg_temp_free_i64(fpscr); - return true; } @@ -714,10 +640,6 @@ static bool trans_MFFSCRNI(DisasContext *ctx, arg_X_imm2 *a) gen_reset_fpstatus(); fpscr = place_from_fpscr(a->rt, FP_DRN | FP_ENABLES | FP_NI | FP_RN); store_fpscr_masked(fpscr, FP_RN, t1, 0x0001); - - tcg_temp_free_i64(t1); - tcg_temp_free_i64(fpscr); - return true; } @@ -734,26 +656,16 @@ static bool trans_MFFSCDRNI(DisasContext *ctx, arg_X_imm3 *a) gen_reset_fpstatus(); fpscr = place_from_fpscr(a->rt, FP_DRN | FP_ENABLES | FP_NI | FP_RN); store_fpscr_masked(fpscr, FP_DRN, t1, 0x0100); - - tcg_temp_free_i64(t1); - tcg_temp_free_i64(fpscr); - return true; } static bool trans_MFFSL(DisasContext *ctx, arg_X_t *a) { - TCGv_i64 fpscr; - REQUIRE_INSNS_FLAGS2(ctx, ISA300); REQUIRE_FPU(ctx); gen_reset_fpstatus(); - fpscr = place_from_fpscr(a->rt, - FP_DRN | FP_STATUS | FP_ENABLES | FP_NI | FP_RN); - - tcg_temp_free_i64(fpscr); - + place_from_fpscr(a->rt, FP_DRN | FP_STATUS | FP_ENABLES | FP_NI | FP_RN); return true; } @@ -772,7 +684,6 @@ static void gen_mtfsb0(DisasContext *ctx) TCGv_i32 t0; t0 = tcg_const_i32(crb); gen_helper_fpscr_clrbit(cpu_env, t0); - tcg_temp_free_i32(t0); } if (unlikely(Rc(ctx->opcode) != 0)) { tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr); @@ -795,7 +706,6 @@ static void gen_mtfsb1(DisasContext *ctx) TCGv_i32 t0; t0 = tcg_const_i32(crb); gen_helper_fpscr_setbit(cpu_env, t0); - tcg_temp_free_i32(t0); } if (unlikely(Rc(ctx->opcode) != 0)) { tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr); @@ -831,14 +741,12 @@ static void gen_mtfsf(DisasContext *ctx) t1 = tcg_temp_new_i64(); get_fpr(t1, rB(ctx->opcode)); gen_helper_store_fpscr(cpu_env, t1, t0); - tcg_temp_free_i32(t0); if (unlikely(Rc(ctx->opcode) != 0)) { tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr); tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX); } /* We can raise a deferred exception */ gen_helper_fpscr_check_status(cpu_env); - tcg_temp_free_i64(t1); } /* mtfsfi */ @@ -862,8 +770,6 @@ static void gen_mtfsfi(DisasContext *ctx) t0 = tcg_const_i64(((uint64_t)FPIMM(ctx->opcode)) << (4 * sh)); t1 = tcg_const_i32(1 << sh); gen_helper_store_fpscr(cpu_env, t0, t1); - tcg_temp_free_i64(t0); - tcg_temp_free_i32(t1); if (unlikely(Rc(ctx->opcode) != 0)) { tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr); tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX); @@ -877,7 +783,6 @@ static void gen_qemu_ld32fs(DisasContext *ctx, TCGv_i64 dest, TCGv addr) TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_qemu_ld_i32(tmp, addr, ctx->mem_idx, DEF_MEMOP(MO_UL)); gen_helper_todouble(dest, tmp); - tcg_temp_free_i32(tmp); } /* lfdepx (external PID lfdx) */ @@ -896,8 +801,6 @@ static void gen_lfdepx(DisasContext *ctx) gen_addr_reg_index(ctx, EA); tcg_gen_qemu_ld_i64(t0, EA, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UQ)); set_fpr(rD(ctx->opcode), t0); - tcg_temp_free(EA); - tcg_temp_free_i64(t0); } /* lfdp */ @@ -930,8 +833,6 @@ static void gen_lfdp(DisasContext *ctx) gen_qemu_ld64_i64(ctx, t0, EA); set_fpr(rD(ctx->opcode) + 1, t0); } - tcg_temp_free(EA); - tcg_temp_free_i64(t0); } /* lfdpx */ @@ -964,8 +865,6 @@ static void gen_lfdpx(DisasContext *ctx) gen_qemu_ld64_i64(ctx, t0, EA); set_fpr(rD(ctx->opcode) + 1, t0); } - tcg_temp_free(EA); - tcg_temp_free_i64(t0); } /* lfiwax */ @@ -986,9 +885,6 @@ static void gen_lfiwax(DisasContext *ctx) gen_qemu_ld32s(ctx, t0, EA); tcg_gen_ext_tl_i64(t1, t0); set_fpr(rD(ctx->opcode), t1); - tcg_temp_free(EA); - tcg_temp_free(t0); - 
tcg_temp_free_i64(t1); } /* lfiwzx */ @@ -1006,8 +902,6 @@ static void gen_lfiwzx(DisasContext *ctx) gen_addr_reg_index(ctx, EA); gen_qemu_ld32u_i64(ctx, t0, EA); set_fpr(rD(ctx->opcode), t0); - tcg_temp_free(EA); - tcg_temp_free_i64(t0); } #define GEN_STXF(name, stop, opc2, opc3, type) \ @@ -1025,8 +919,6 @@ static void glue(gen_, name##x)(DisasContext *ctx) \ gen_addr_reg_index(ctx, EA); \ get_fpr(t0, rS(ctx->opcode)); \ gen_qemu_##stop(ctx, t0, EA); \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(t0); \ } static void gen_qemu_st32fs(DisasContext *ctx, TCGv_i64 src, TCGv addr) @@ -1034,7 +926,6 @@ static void gen_qemu_st32fs(DisasContext *ctx, TCGv_i64 src, TCGv addr) TCGv_i32 tmp = tcg_temp_new_i32(); gen_helper_tosingle(tmp, src); tcg_gen_qemu_st_i32(tmp, addr, ctx->mem_idx, DEF_MEMOP(MO_UL)); - tcg_temp_free_i32(tmp); } /* stfdepx (external PID lfdx) */ @@ -1053,8 +944,6 @@ static void gen_stfdepx(DisasContext *ctx) gen_addr_reg_index(ctx, EA); get_fpr(t0, rD(ctx->opcode)); tcg_gen_qemu_st_i64(t0, EA, PPC_TLB_EPID_STORE, DEF_MEMOP(MO_UQ)); - tcg_temp_free(EA); - tcg_temp_free_i64(t0); } /* stfdp */ @@ -1087,8 +976,6 @@ static void gen_stfdp(DisasContext *ctx) get_fpr(t0, rD(ctx->opcode) + 1); gen_qemu_st64_i64(ctx, t0, EA); } - tcg_temp_free(EA); - tcg_temp_free_i64(t0); } /* stfdpx */ @@ -1121,8 +1008,6 @@ static void gen_stfdpx(DisasContext *ctx) get_fpr(t0, rD(ctx->opcode) + 1); gen_qemu_st64_i64(ctx, t0, EA); } - tcg_temp_free(EA); - tcg_temp_free_i64(t0); } /* Optional: */ @@ -1131,7 +1016,6 @@ static inline void gen_qemu_st32fiw(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2) TCGv t0 = tcg_temp_new(); tcg_gen_trunc_i64_tl(t0, arg1), gen_qemu_st32(ctx, t0, arg2); - tcg_temp_free(t0); } /* stfiwx */ GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX); @@ -1169,8 +1053,6 @@ static bool do_lsfpsd(DisasContext *ctx, int rt, int ra, TCGv displ, if (update) { tcg_gen_mov_tl(cpu_gpr[ra], ea); } - tcg_temp_free_i64(t0); - tcg_temp_free(ea); return true; } diff --git a/target/ppc/translate/spe-impl.c.inc b/target/ppc/translate/spe-impl.c.inc index bd8963db2b..f4a858487d 100644 --- a/target/ppc/translate/spe-impl.c.inc +++ b/target/ppc/translate/spe-impl.c.inc @@ -23,7 +23,6 @@ static inline void gen_evmra(DisasContext *ctx) /* spe_acc := tmp */ tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUPPCState, spe_acc)); - tcg_temp_free_i64(tmp); /* rD := rA */ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); @@ -96,8 +95,6 @@ static inline void gen_##name(DisasContext *ctx) \ tcg_gen_trunc_tl_i32(t0, cpu_gprh[rA(ctx->opcode)]); \ tcg_opi(t0, t0, rB(ctx->opcode)); \ tcg_gen_extu_i32_tl(cpu_gprh[rD(ctx->opcode)], t0); \ - \ - tcg_temp_free_i32(t0); \ } GEN_SPEOP_TCG_LOGIC_IMM2(evslwi, tcg_gen_shli_i32); GEN_SPEOP_TCG_LOGIC_IMM2(evsrwiu, tcg_gen_shri_i32); @@ -122,8 +119,6 @@ static inline void gen_##name(DisasContext *ctx) \ tcg_gen_trunc_tl_i32(t0, cpu_gprh[rA(ctx->opcode)]); \ tcg_op(t0, t0); \ tcg_gen_extu_i32_tl(cpu_gprh[rD(ctx->opcode)], t0); \ - \ - tcg_temp_free_i32(t0); \ } GEN_SPEOP_ARITH1(evabs, tcg_gen_abs_i32); @@ -159,9 +154,6 @@ static inline void gen_##name(DisasContext *ctx) \ tcg_gen_trunc_tl_i32(t1, cpu_gprh[rB(ctx->opcode)]); \ tcg_op(t0, t0, t1); \ tcg_gen_extu_i32_tl(cpu_gprh[rD(ctx->opcode)], t0); \ - \ - tcg_temp_free_i32(t0); \ - tcg_temp_free_i32(t1); \ } static inline void gen_op_evsrwu(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) @@ -178,7 +170,6 @@ static inline void gen_op_evsrwu(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) gen_set_label(l1); 
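/*
 * This label is reached when the masked shift count compares as 32 or larger
 * earlier in the helper; the tcg_gen_movi_i32() below then forces a zero
 * result instead of performing the shift, and the TCGv_i32 count temporary is
 * simply dropped now that translator temporaries are reclaimed automatically.
 */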
tcg_gen_movi_i32(ret, 0); gen_set_label(l2); - tcg_temp_free_i32(t0); } GEN_SPEOP_ARITH2(evsrwu, gen_op_evsrwu); static inline void gen_op_evsrws(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) @@ -195,7 +186,6 @@ static inline void gen_op_evsrws(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) gen_set_label(l1); tcg_gen_movi_i32(ret, 0); gen_set_label(l2); - tcg_temp_free_i32(t0); } GEN_SPEOP_ARITH2(evsrws, gen_op_evsrws); static inline void gen_op_evslw(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) @@ -212,7 +202,6 @@ static inline void gen_op_evslw(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) gen_set_label(l1); tcg_gen_movi_i32(ret, 0); gen_set_label(l2); - tcg_temp_free_i32(t0); } GEN_SPEOP_ARITH2(evslw, gen_op_evslw); static inline void gen_op_evrlw(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) @@ -220,7 +209,6 @@ static inline void gen_op_evrlw(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) TCGv_i32 t0 = tcg_temp_new_i32(); tcg_gen_andi_i32(t0, arg2, 0x1F); tcg_gen_rotl_i32(ret, arg1, t0); - tcg_temp_free_i32(t0); } GEN_SPEOP_ARITH2(evrlw, gen_op_evrlw); static inline void gen_evmergehi(DisasContext *ctx) @@ -257,8 +245,6 @@ static inline void gen_##name(DisasContext *ctx) \ tcg_gen_trunc_tl_i32(t0, cpu_gprh[rB(ctx->opcode)]); \ tcg_op(t0, t0, rA(ctx->opcode)); \ tcg_gen_extu_i32_tl(cpu_gprh[rD(ctx->opcode)], t0); \ - \ - tcg_temp_free_i32(t0); \ } GEN_SPEOP_ARITH_IMM2(evaddiw, tcg_gen_addi_i32); GEN_SPEOP_ARITH_IMM2(evsubifw, tcg_gen_subi_i32); @@ -341,7 +327,6 @@ static inline void gen_evmergelohi(DisasContext *ctx) tcg_gen_mov_tl(tmp, cpu_gpr[rA(ctx->opcode)]); tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]); tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], tmp); - tcg_temp_free(tmp); } else { tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]); tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); @@ -394,7 +379,6 @@ static inline void gen_evsel(DisasContext *ctx) gen_set_label(l3); tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); gen_set_label(l4); - tcg_temp_free_i32(t0); } static void gen_evsel0(DisasContext *ctx) @@ -456,9 +440,6 @@ static inline void gen_evmwumi(DisasContext *ctx) tcg_gen_mul_i64(t0, t0, t1); /* t0 := rA * rB */ gen_store_gpr64(rD(ctx->opcode), t0); /* rD := t0 */ - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } static inline void gen_evmwumia(DisasContext *ctx) @@ -477,7 +458,6 @@ static inline void gen_evmwumia(DisasContext *ctx) /* acc := rD */ gen_load_gpr64(tmp, rD(ctx->opcode)); tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUPPCState, spe_acc)); - tcg_temp_free_i64(tmp); } static inline void gen_evmwumiaa(DisasContext *ctx) @@ -509,9 +489,6 @@ static inline void gen_evmwumiaa(DisasContext *ctx) /* rD := acc */ gen_store_gpr64(rD(ctx->opcode), acc); - - tcg_temp_free_i64(acc); - tcg_temp_free_i64(tmp); } static inline void gen_evmwsmi(DisasContext *ctx) @@ -535,9 +512,6 @@ static inline void gen_evmwsmi(DisasContext *ctx) tcg_gen_mul_i64(t0, t0, t1); /* t0 := rA * rB */ gen_store_gpr64(rD(ctx->opcode), t0); /* rD := t0 */ - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } static inline void gen_evmwsmia(DisasContext *ctx) @@ -556,8 +530,6 @@ static inline void gen_evmwsmia(DisasContext *ctx) /* acc := rD */ gen_load_gpr64(tmp, rD(ctx->opcode)); tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUPPCState, spe_acc)); - - tcg_temp_free_i64(tmp); } static inline void gen_evmwsmiaa(DisasContext *ctx) @@ -589,9 +561,6 @@ static inline void gen_evmwsmiaa(DisasContext *ctx) /* rD := acc */ gen_store_gpr64(rD(ctx->opcode), 
acc); - - tcg_temp_free_i64(acc); - tcg_temp_free_i64(tmp); } GEN_SPE(evaddw, speundef, 0x00, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); //// @@ -644,7 +613,6 @@ static inline void gen_op_evldd(DisasContext *ctx, TCGv addr) TCGv_i64 t0 = tcg_temp_new_i64(); gen_qemu_ld64_i64(ctx, t0, addr); gen_store_gpr64(rD(ctx->opcode), t0); - tcg_temp_free_i64(t0); } static inline void gen_op_evldw(DisasContext *ctx, TCGv addr) @@ -668,7 +636,6 @@ static inline void gen_op_evldh(DisasContext *ctx, TCGv addr) gen_addr_add(ctx, addr, addr, 2); gen_qemu_ld16u(ctx, t0, addr); tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0); - tcg_temp_free(t0); } static inline void gen_op_evlhhesplat(DisasContext *ctx, TCGv addr) @@ -678,7 +645,6 @@ static inline void gen_op_evlhhesplat(DisasContext *ctx, TCGv addr) tcg_gen_shli_tl(t0, t0, 16); tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0); tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0); - tcg_temp_free(t0); } static inline void gen_op_evlhhousplat(DisasContext *ctx, TCGv addr) @@ -687,7 +653,6 @@ static inline void gen_op_evlhhousplat(DisasContext *ctx, TCGv addr) gen_qemu_ld16u(ctx, t0, addr); tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0); tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0); - tcg_temp_free(t0); } static inline void gen_op_evlhhossplat(DisasContext *ctx, TCGv addr) @@ -696,7 +661,6 @@ static inline void gen_op_evlhhossplat(DisasContext *ctx, TCGv addr) gen_qemu_ld16s(ctx, t0, addr); tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0); tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0); - tcg_temp_free(t0); } static inline void gen_op_evlwhe(DisasContext *ctx, TCGv addr) @@ -707,7 +671,6 @@ static inline void gen_op_evlwhe(DisasContext *ctx, TCGv addr) gen_addr_add(ctx, addr, addr, 2); gen_qemu_ld16u(ctx, t0, addr); tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 16); - tcg_temp_free(t0); } static inline void gen_op_evlwhou(DisasContext *ctx, TCGv addr) @@ -730,7 +693,6 @@ static inline void gen_op_evlwwsplat(DisasContext *ctx, TCGv addr) gen_qemu_ld32u(ctx, t0, addr); tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0); tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0); - tcg_temp_free(t0); } static inline void gen_op_evlwhsplat(DisasContext *ctx, TCGv addr) @@ -743,7 +705,6 @@ static inline void gen_op_evlwhsplat(DisasContext *ctx, TCGv addr) gen_qemu_ld16u(ctx, t0, addr); tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 16); tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gprh[rD(ctx->opcode)], t0); - tcg_temp_free(t0); } static inline void gen_op_evstdd(DisasContext *ctx, TCGv addr) @@ -751,7 +712,6 @@ static inline void gen_op_evstdd(DisasContext *ctx, TCGv addr) TCGv_i64 t0 = tcg_temp_new_i64(); gen_load_gpr64(t0, rS(ctx->opcode)); gen_qemu_st64_i64(ctx, t0, addr); - tcg_temp_free_i64(t0); } static inline void gen_op_evstdw(DisasContext *ctx, TCGv addr) @@ -771,7 +731,6 @@ static inline void gen_op_evstdh(DisasContext *ctx, TCGv addr) gen_addr_add(ctx, addr, addr, 2); tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 16); gen_qemu_st16(ctx, t0, addr); - tcg_temp_free(t0); gen_addr_add(ctx, addr, addr, 2); gen_qemu_st16(ctx, cpu_gpr[rS(ctx->opcode)], addr); } @@ -784,7 +743,6 @@ static inline void gen_op_evstwhe(DisasContext *ctx, TCGv addr) gen_addr_add(ctx, addr, addr, 2); tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 16); gen_qemu_st16(ctx, t0, addr); - tcg_temp_free(t0); } static inline void gen_op_evstwho(DisasContext *ctx, TCGv addr) @@ -820,7 +778,6 @@ static void glue(gen_, name)(DisasContext *ctx) \ gen_addr_reg_index(ctx, t0); \ } \ gen_op_##name(ctx, t0); \ - 
tcg_temp_free(t0); \ } GEN_SPEOP_LDST(evldd, 0x00, 3); @@ -923,7 +880,6 @@ static inline void gen_##name(DisasContext *ctx) \ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rB(ctx->opcode)]); \ gen_helper_##name(t0, cpu_env, t0); \ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); \ - tcg_temp_free_i32(t0); \ } #define GEN_SPEFPUOP_CONV_32_64(name) \ static inline void gen_##name(DisasContext *ctx) \ @@ -939,8 +895,6 @@ static inline void gen_##name(DisasContext *ctx) \ gen_load_gpr64(t0, rB(ctx->opcode)); \ gen_helper_##name(t1, cpu_env, t0); \ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1); \ - tcg_temp_free_i64(t0); \ - tcg_temp_free_i32(t1); \ } #define GEN_SPEFPUOP_CONV_64_32(name) \ static inline void gen_##name(DisasContext *ctx) \ @@ -956,8 +910,6 @@ static inline void gen_##name(DisasContext *ctx) \ tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \ gen_helper_##name(t0, cpu_env, t1); \ gen_store_gpr64(rD(ctx->opcode), t0); \ - tcg_temp_free_i64(t0); \ - tcg_temp_free_i32(t1); \ } #define GEN_SPEFPUOP_CONV_64_64(name) \ static inline void gen_##name(DisasContext *ctx) \ @@ -971,7 +923,6 @@ static inline void gen_##name(DisasContext *ctx) \ gen_load_gpr64(t0, rB(ctx->opcode)); \ gen_helper_##name(t0, cpu_env, t0); \ gen_store_gpr64(rD(ctx->opcode), t0); \ - tcg_temp_free_i64(t0); \ } #define GEN_SPEFPUOP_ARITH2_32_32(name) \ static inline void gen_##name(DisasContext *ctx) \ @@ -982,9 +933,6 @@ static inline void gen_##name(DisasContext *ctx) \ tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \ gen_helper_##name(t0, cpu_env, t0, t1); \ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); \ - \ - tcg_temp_free_i32(t0); \ - tcg_temp_free_i32(t1); \ } #define GEN_SPEFPUOP_ARITH2_64_64(name) \ static inline void gen_##name(DisasContext *ctx) \ @@ -1000,8 +948,6 @@ static inline void gen_##name(DisasContext *ctx) \ gen_load_gpr64(t1, rB(ctx->opcode)); \ gen_helper_##name(t0, cpu_env, t0, t1); \ gen_store_gpr64(rD(ctx->opcode), t0); \ - tcg_temp_free_i64(t0); \ - tcg_temp_free_i64(t1); \ } #define GEN_SPEFPUOP_COMP_32(name) \ static inline void gen_##name(DisasContext *ctx) \ @@ -1012,9 +958,6 @@ static inline void gen_##name(DisasContext *ctx) \ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); \ tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \ gen_helper_##name(cpu_crf[crfD(ctx->opcode)], cpu_env, t0, t1); \ - \ - tcg_temp_free_i32(t0); \ - tcg_temp_free_i32(t1); \ } #define GEN_SPEFPUOP_COMP_64(name) \ static inline void gen_##name(DisasContext *ctx) \ @@ -1029,8 +972,6 @@ static inline void gen_##name(DisasContext *ctx) \ gen_load_gpr64(t0, rA(ctx->opcode)); \ gen_load_gpr64(t1, rB(ctx->opcode)); \ gen_helper_##name(cpu_crf[crfD(ctx->opcode)], cpu_env, t0, t1); \ - tcg_temp_free_i64(t0); \ - tcg_temp_free_i64(t1); \ } /* Single precision floating-point vectors operations */ diff --git a/target/ppc/translate/storage-ctrl-impl.c.inc b/target/ppc/translate/storage-ctrl-impl.c.inc index 6ea1d22ef9..faa7b04bbc 100644 --- a/target/ppc/translate/storage-ctrl-impl.c.inc +++ b/target/ppc/translate/storage-ctrl-impl.c.inc @@ -212,7 +212,6 @@ static bool do_tlbie(DisasContext *ctx, arg_X_tlbie *a, bool local) TCGv t0 = tcg_temp_new(); tcg_gen_ext32u_tl(t0, cpu_gpr[rb]); gen_helper_tlbie(cpu_env, t0); - tcg_temp_free(t0); #if defined(TARGET_PPC64) /* @@ -240,7 +239,6 @@ static bool do_tlbie(DisasContext *ctx, arg_X_tlbie *a, bool local) tcg_gen_ld_i32(t1, cpu_env, offsetof(CPUPPCState, tlb_need_flush)); tcg_gen_ori_i32(t1, t1, TLB_NEED_GLOBAL_FLUSH); tcg_gen_st_i32(t1, cpu_env, 
offsetof(CPUPPCState, tlb_need_flush)); - tcg_temp_free_i32(t1); return true; #endif diff --git a/target/ppc/translate/vmx-impl.c.inc b/target/ppc/translate/vmx-impl.c.inc index 2dd17ab106..05ba9c9492 100644 --- a/target/ppc/translate/vmx-impl.c.inc +++ b/target/ppc/translate/vmx-impl.c.inc @@ -45,8 +45,6 @@ static void glue(gen_, name)(DisasContext *ctx) \ gen_qemu_ld64_i64(ctx, avr, EA); \ set_avr64(rD(ctx->opcode), avr, false); \ } \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(avr); \ } #define GEN_VR_STX(name, opc2, opc3) \ @@ -80,8 +78,6 @@ static void gen_st##name(DisasContext *ctx) \ get_avr64(avr, rD(ctx->opcode), false); \ gen_qemu_st64_i64(ctx, avr, EA); \ } \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(avr); \ } #define GEN_VR_LVE(name, opc2, opc3, size) \ @@ -101,8 +97,6 @@ static void gen_lve##name(DisasContext *ctx) \ } \ rs = gen_avr_ptr(rS(ctx->opcode)); \ gen_helper_lve##name(cpu_env, rs, EA); \ - tcg_temp_free(EA); \ - tcg_temp_free_ptr(rs); \ } #define GEN_VR_STVE(name, opc2, opc3, size) \ @@ -122,8 +116,6 @@ static void gen_stve##name(DisasContext *ctx) \ } \ rs = gen_avr_ptr(rS(ctx->opcode)); \ gen_helper_stve##name(cpu_env, rs, EA); \ - tcg_temp_free(EA); \ - tcg_temp_free_ptr(rs); \ } GEN_VR_LDX(lvx, 0x07, 0x03); @@ -157,8 +149,6 @@ static void gen_mfvscr(DisasContext *ctx) gen_helper_mfvscr(t, cpu_env); tcg_gen_extu_i32_i64(avr, t); set_avr64(rD(ctx->opcode), avr, false); - tcg_temp_free_i32(t); - tcg_temp_free_i64(avr); } static void gen_mtvscr(DisasContext *ctx) @@ -179,7 +169,6 @@ static void gen_mtvscr(DisasContext *ctx) tcg_gen_ld_i32(val, cpu_env, bofs); gen_helper_mtvscr(cpu_env, val); - tcg_temp_free_i32(val); } #define GEN_VX_VMUL10(name, add_cin, ret_carry) \ @@ -228,13 +217,6 @@ static void glue(gen_, name)(DisasContext *ctx) \ tcg_gen_add_i64(avr, t0, t2); \ set_avr64(rD(ctx->opcode), avr, true); \ } \ - \ - tcg_temp_free_i64(t0); \ - tcg_temp_free_i64(t1); \ - tcg_temp_free_i64(t2); \ - tcg_temp_free_i64(avr); \ - tcg_temp_free_i64(ten); \ - tcg_temp_free_i64(z); \ } \ GEN_VX_VMUL10(vmul10uq, 0, 0); @@ -279,9 +261,6 @@ static void glue(gen_, name)(DisasContext *ctx) \ rb = gen_avr_ptr(rB(ctx->opcode)); \ rd = gen_avr_ptr(rD(ctx->opcode)); \ gen_helper_##name(rd, ra, rb); \ - tcg_temp_free_ptr(ra); \ - tcg_temp_free_ptr(rb); \ - tcg_temp_free_ptr(rd); \ } #define GEN_VXFORM_TRANS(name, opc2, opc3) \ @@ -306,9 +285,6 @@ static void glue(gen_, name)(DisasContext *ctx) \ rb = gen_avr_ptr(rB(ctx->opcode)); \ rd = gen_avr_ptr(rD(ctx->opcode)); \ gen_helper_##name(cpu_env, rd, ra, rb); \ - tcg_temp_free_ptr(ra); \ - tcg_temp_free_ptr(rb); \ - tcg_temp_free_ptr(rd); \ } #define GEN_VXFORM3(name, opc2, opc3) \ @@ -324,10 +300,6 @@ static void glue(gen_, name)(DisasContext *ctx) \ rc = gen_avr_ptr(rC(ctx->opcode)); \ rd = gen_avr_ptr(rD(ctx->opcode)); \ gen_helper_##name(rd, ra, rb, rc); \ - tcg_temp_free_ptr(ra); \ - tcg_temp_free_ptr(rb); \ - tcg_temp_free_ptr(rc); \ - tcg_temp_free_ptr(rd); \ } /* @@ -400,7 +372,6 @@ static void glue(gen_, name)(DisasContext *ctx) \ } \ rb = gen_avr_ptr(rB(ctx->opcode)); \ gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], rb); \ - tcg_temp_free_ptr(rb); \ } GEN_VXFORM_V(vaddubm, MO_8, tcg_gen_gvec_add, 0, 0); @@ -457,9 +428,6 @@ static void trans_vmrgew(DisasContext *ctx) get_avr64(avr, VA, false); tcg_gen_deposit_i64(avr, avr, tmp, 0, 32); set_avr64(VT, avr, false); - - tcg_temp_free_i64(tmp); - tcg_temp_free_i64(avr); } static void trans_vmrgow(DisasContext *ctx) @@ -480,10 +448,6 @@ static void 
trans_vmrgow(DisasContext *ctx) get_avr64(t1, VA, false); tcg_gen_deposit_i64(avr, t0, t1, 32, 32); set_avr64(VT, avr, false); - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(avr); } /* @@ -518,10 +482,6 @@ static void trans_lvsl(DisasContext *ctx) */ tcg_gen_addi_i64(result, sh, 0x08090a0b0c0d0e0fULL); set_avr64(VT, result, false); - - tcg_temp_free_i64(result); - tcg_temp_free_i64(sh); - tcg_temp_free(EA); } /* @@ -557,10 +517,6 @@ static void trans_lvsr(DisasContext *ctx) */ tcg_gen_subfi_i64(result, 0x18191a1b1c1d1e1fULL, sh); set_avr64(VT, result, false); - - tcg_temp_free_i64(result); - tcg_temp_free_i64(sh); - tcg_temp_free(EA); } /* @@ -603,11 +559,6 @@ static void trans_vsl(DisasContext *ctx) tcg_gen_shl_i64(avr, avr, sh); tcg_gen_or_i64(avr, avr, carry); set_avr64(VT, avr, true); - - tcg_temp_free_i64(avr); - tcg_temp_free_i64(sh); - tcg_temp_free_i64(carry); - tcg_temp_free_i64(tmp); } /* @@ -649,11 +600,6 @@ static void trans_vsr(DisasContext *ctx) tcg_gen_shr_i64(avr, avr, sh); tcg_gen_or_i64(avr, avr, carry); set_avr64(VT, avr, false); - - tcg_temp_free_i64(avr); - tcg_temp_free_i64(sh); - tcg_temp_free_i64(carry); - tcg_temp_free_i64(tmp); } /* @@ -722,13 +668,6 @@ static void trans_vgbbd(DisasContext *ctx) for (j = 0; j < 2; j++) { set_avr64(VT, result[j], j); } - - tcg_temp_free_i64(tmp); - tcg_temp_free_i64(tcg_mask); - tcg_temp_free_i64(result[0]); - tcg_temp_free_i64(result[1]); - tcg_temp_free_i64(avr[0]); - tcg_temp_free_i64(avr[1]); } /* @@ -753,8 +692,6 @@ static void trans_vclzw(DisasContext *ctx) tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUPPCState, vsr[32 + VT].u64[0]) + i * 4); } - - tcg_temp_free_i32(tmp); } /* @@ -779,8 +716,6 @@ static void trans_vclzd(DisasContext *ctx) get_avr64(avr, VB, false); tcg_gen_clzi_i64(avr, avr, 64); set_avr64(VT, avr, false); - - tcg_temp_free_i64(avr); } GEN_VXFORM_V(vmuluwm, MO_32, tcg_gen_gvec_mul, 4, 2); @@ -849,9 +784,6 @@ static TCGv_vec do_vrl_mask_vec(unsigned vece, TCGv_vec vrb) /* negate the mask */ tcg_gen_xor_vec(vece, t0, t0, t2); - tcg_temp_free_vec(t1); - tcg_temp_free_vec(t2); - return t0; } @@ -870,9 +802,6 @@ static void gen_vrlnm_vec(unsigned vece, TCGv_vec vrt, TCGv_vec vra, /* Rotate and mask */ tcg_gen_rotlv_vec(vece, vrt, vra, n); tcg_gen_and_vec(vece, vrt, vrt, mask); - - tcg_temp_free_vec(n); - tcg_temp_free_vec(mask); } static bool do_vrlnm(DisasContext *ctx, arg_VX *a, int vece) @@ -926,10 +855,6 @@ static void gen_vrlmi_vec(unsigned vece, TCGv_vec vrt, TCGv_vec vra, /* Rotate and insert */ tcg_gen_rotlv_vec(vece, tmp, vra, n); tcg_gen_bitsel_vec(vece, vrt, mask, tmp, vrt); - - tcg_temp_free_vec(n); - tcg_temp_free_vec(tmp); - tcg_temp_free_vec(mask); } static bool do_vrlmi(DisasContext *ctx, arg_VX *a, int vece) @@ -1024,13 +949,6 @@ static bool do_vector_shift_quad(DisasContext *ctx, arg_VX *a, bool right, } tcg_gen_or_i64(hi, hi, lo); set_avr64(a->vrt, hi, !right); - - tcg_temp_free_i64(hi); - tcg_temp_free_i64(lo); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(n); - return true; } @@ -1083,11 +1001,6 @@ static void do_vrlq_mask(TCGv_i64 mh, TCGv_i64 ml, TCGv_i64 b, TCGv_i64 e) tcg_gen_xor_i64(mh, mh, t0); tcg_gen_xor_i64(ml, ml, t0); - - tcg_temp_free_i64(th); - tcg_temp_free_i64(tl); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } static bool do_vector_rotl_quad(DisasContext *ctx, arg_VX *a, bool mask, @@ -1149,14 +1062,6 @@ static bool do_vector_rotl_quad(DisasContext *ctx, arg_VX *a, bool mask, set_avr64(a->vrt, t0, true); set_avr64(a->vrt, t1, 
false); - - tcg_temp_free_i64(ah); - tcg_temp_free_i64(al); - tcg_temp_free_i64(vrb); - tcg_temp_free_i64(n); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - return true; } @@ -1174,7 +1079,6 @@ static void glue(glue(gen_, NAME), _vec)(unsigned vece, TCGv_vec t, \ glue(glue(tcg_gen_, SAT), _vec)(VECE, t, a, b); \ tcg_gen_cmp_vec(TCG_COND_NE, VECE, x, x, t); \ tcg_gen_or_vec(VECE, sat, sat, x); \ - tcg_temp_free_vec(x); \ } \ static void glue(gen_, NAME)(DisasContext *ctx) \ { \ @@ -1266,9 +1170,6 @@ static void glue(gen_, name)(DisasContext *ctx) \ rb = gen_avr_ptr(rB(ctx->opcode)); \ rd = gen_avr_ptr(rD(ctx->opcode)); \ gen_helper_##opname(cpu_env, rd, ra, rb); \ - tcg_temp_free_ptr(ra); \ - tcg_temp_free_ptr(rb); \ - tcg_temp_free_ptr(rd); \ } #define GEN_VXRFORM(name, opc2, opc3) \ @@ -1325,10 +1226,6 @@ static void do_vcmp_rc(int vrt) tcg_gen_or_i64(tmp, set, clr); tcg_gen_extrl_i64_i32(cpu_crf[6], tmp); - - tcg_temp_free_i64(tmp); - tcg_temp_free_i64(set); - tcg_temp_free_i64(clr); } static bool do_vcmp(DisasContext *ctx, arg_VC *a, TCGCond cond, int vece) @@ -1377,9 +1274,6 @@ static void gen_vcmpnez_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b) tcg_gen_or_vec(vece, t, t, t0); tcg_gen_or_vec(vece, t, t, t1); - - tcg_temp_free_vec(t0); - tcg_temp_free_vec(t1); } static bool do_vcmpnez(DisasContext *ctx, arg_VC *a, int vece) @@ -1453,11 +1347,6 @@ static bool trans_VCMPEQUQ(DisasContext *ctx, arg_VC *a) tcg_gen_andi_i32(cpu_crf[6], cpu_crf[6], 0xa); tcg_gen_xori_i32(cpu_crf[6], cpu_crf[6], 0x2); } - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); - return true; } @@ -1489,11 +1378,6 @@ static bool do_vcmpgtq(DisasContext *ctx, arg_VC *a, bool sign) tcg_gen_andi_i32(cpu_crf[6], cpu_crf[6], 0xa); tcg_gen_xori_i32(cpu_crf[6], cpu_crf[6], 0x2); } - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); - return true; } @@ -1536,9 +1420,6 @@ static bool do_vcmpq(DisasContext *ctx, arg_VX_bf *a, bool sign) tcg_gen_br(done); gen_set_label(done); - tcg_temp_free_i64(vra); - tcg_temp_free_i64(vrb); - return true; } @@ -1581,8 +1462,6 @@ static void glue(gen_, name)(DisasContext *ctx) \ rb = gen_avr_ptr(rB(ctx->opcode)); \ rd = gen_avr_ptr(rD(ctx->opcode)); \ gen_helper_##name(rd, rb); \ - tcg_temp_free_ptr(rb); \ - tcg_temp_free_ptr(rd); \ } #define GEN_VXFORM_NOA_ENV(name, opc2, opc3) \ @@ -1597,8 +1476,6 @@ static void glue(gen_, name)(DisasContext *ctx) \ rb = gen_avr_ptr(rB(ctx->opcode)); \ rd = gen_avr_ptr(rD(ctx->opcode)); \ gen_helper_##name(cpu_env, rd, rb); \ - tcg_temp_free_ptr(rb); \ - tcg_temp_free_ptr(rd); \ } #define GEN_VXFORM_NOA_2(name, opc2, opc3, opc4) \ @@ -1612,8 +1489,6 @@ static void glue(gen_, name)(DisasContext *ctx) \ rb = gen_avr_ptr(rB(ctx->opcode)); \ rd = gen_avr_ptr(rD(ctx->opcode)); \ gen_helper_##name(rd, rb); \ - tcg_temp_free_ptr(rb); \ - tcg_temp_free_ptr(rd); \ } #define GEN_VXFORM_NOA_3(name, opc2, opc3, opc4) \ @@ -1626,7 +1501,6 @@ static void glue(gen_, name)(DisasContext *ctx) \ } \ rb = gen_avr_ptr(rB(ctx->opcode)); \ gen_helper_##name(cpu_gpr[rD(ctx->opcode)], rb); \ - tcg_temp_free_ptr(rb); \ } GEN_VXFORM_NOA(vupkhsb, 7, 8); GEN_VXFORM_NOA(vupkhsh, 7, 9); @@ -1655,7 +1529,6 @@ static void gen_vprtyb_vec(unsigned vece, TCGv_vec t, TCGv_vec b) tcg_gen_xor_vec(vece, b, tmp, b); } tcg_gen_and_vec(vece, t, b, tcg_constant_vec_matching(t, vece, 1)); - tcg_temp_free_vec(tmp); } /* vprtybw */ @@ -1750,9 +1623,6 @@ static void glue(gen_, name)(DisasContext *ctx) \ rb = gen_avr_ptr(rB(ctx->opcode)); \ 
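/*
 * gen_avr_ptr() used on either side of this point yields a TCGv_ptr
 * temporary aimed at an Altivec register inside CPUPPCState, roughly
 * (assumed shape, not shown in this patch):
 *
 *     TCGv_ptr r = tcg_temp_new_ptr();
 *     tcg_gen_addi_ptr(r, cpu_env, avr_full_offset(reg));
 *     return r;
 *
 * Pointer temporaries follow the same rule as every other temporary in this
 * patch: the explicit tcg_temp_free_ptr() calls are no longer needed.
 */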
rd = gen_avr_ptr(rD(ctx->opcode)); \ gen_helper_##name(cpu_env, rd, rb, uimm); \ - tcg_temp_free_i32(uimm); \ - tcg_temp_free_ptr(rb); \ - tcg_temp_free_ptr(rd); \ } #define GEN_VXFORM_UIMM_SPLAT(name, opc2, opc3, splat_max) \ @@ -1773,9 +1643,6 @@ static void glue(gen_, name)(DisasContext *ctx) \ rb = gen_avr_ptr(rB(ctx->opcode)); \ rd = gen_avr_ptr(rD(ctx->opcode)); \ gen_helper_##name(rd, rb, t0); \ - tcg_temp_free_i32(t0); \ - tcg_temp_free_ptr(rb); \ - tcg_temp_free_ptr(rd); \ } GEN_VXFORM_VSPLT(vspltb, MO_8, 6, 8); @@ -1922,12 +1789,6 @@ static bool trans_VGNB(DisasContext *ctx, arg_VX_n *a) tcg_gen_shri_i64(lo, lo, nbits); tcg_gen_or_i64(hi, hi, lo); tcg_gen_trunc_i64_tl(cpu_gpr[a->rt], hi); - - tcg_temp_free_i64(hi); - tcg_temp_free_i64(lo); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - return true; } @@ -1950,11 +1811,6 @@ static bool do_vextdx(DisasContext *ctx, arg_VA *a, int size, bool right, tcg_gen_subfi_tl(rc, 32 - size, rc); } gen_helper(cpu_env, vrt, vra, vrb, rc); - - tcg_temp_free_ptr(vrt); - tcg_temp_free_ptr(vra); - tcg_temp_free_ptr(vrb); - tcg_temp_free(rc); return true; } @@ -1983,31 +1839,22 @@ static bool do_vinsx(DisasContext *ctx, int vrt, int size, bool right, TCGv ra, } gen_helper(cpu_env, t, rb, idx); - - tcg_temp_free_ptr(t); - tcg_temp_free(idx); - return true; } static bool do_vinsvx(DisasContext *ctx, int vrt, int size, bool right, TCGv ra, int vrb, void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv)) { - bool ok; TCGv_i64 val; val = tcg_temp_new_i64(); get_avr64(val, vrb, true); - ok = do_vinsx(ctx, vrt, size, right, ra, val, gen_helper); - - tcg_temp_free_i64(val); - return ok; + return do_vinsx(ctx, vrt, size, right, ra, val, gen_helper); } static bool do_vinsx_VX(DisasContext *ctx, arg_VX *a, int size, bool right, void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv)) { - bool ok; TCGv_i64 val; REQUIRE_INSNS_FLAGS2(ctx, ISA310); @@ -2016,10 +1863,7 @@ static bool do_vinsx_VX(DisasContext *ctx, arg_VX *a, int size, bool right, val = tcg_temp_new_i64(); tcg_gen_extu_tl_i64(val, cpu_gpr[a->vrb]); - ok = do_vinsx(ctx, a->vrt, size, right, cpu_gpr[a->vra], val, gen_helper); - - tcg_temp_free_i64(val); - return ok; + return do_vinsx(ctx, a->vrt, size, right, cpu_gpr[a->vra], val, gen_helper); } static bool do_vinsvx_VX(DisasContext *ctx, arg_VX *a, int size, bool right, @@ -2035,7 +1879,6 @@ static bool do_vinsvx_VX(DisasContext *ctx, arg_VX *a, int size, bool right, static bool do_vins_VX_uim4(DisasContext *ctx, arg_VX_uim4 *a, int size, void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv)) { - bool ok; TCGv_i64 val; REQUIRE_INSNS_FLAGS2(ctx, ISA310); @@ -2059,11 +1902,8 @@ static bool do_vins_VX_uim4(DisasContext *ctx, arg_VX_uim4 *a, int size, val = tcg_temp_new_i64(); tcg_gen_extu_tl_i64(val, cpu_gpr[a->vrb]); - ok = do_vinsx(ctx, a->vrt, size, false, tcg_constant_tl(a->uim), val, - gen_helper); - - tcg_temp_free_i64(val); - return ok; + return do_vinsx(ctx, a->vrt, size, false, tcg_constant_tl(a->uim), val, + gen_helper); } static bool do_vinsert_VX_uim4(DisasContext *ctx, arg_VX_uim4 *a, int size, @@ -2122,10 +1962,6 @@ static void gen_vsldoi(DisasContext *ctx) rd = gen_avr_ptr(rD(ctx->opcode)); sh = tcg_const_i32(VSH(ctx->opcode)); gen_helper_vsldoi(rd, ra, rb, sh); - tcg_temp_free_ptr(ra); - tcg_temp_free_ptr(rb); - tcg_temp_free_ptr(rd); - tcg_temp_free_i32(sh); } static bool trans_VSLDBI(DisasContext *ctx, arg_VN *a) @@ -2148,16 +1984,10 @@ static bool trans_VSLDBI(DisasContext *ctx, arg_VN *a) tcg_gen_extract2_i64(t0, t1, t0, 64 - 
a->sh); tcg_gen_extract2_i64(t1, t2, t1, 64 - a->sh); - - tcg_temp_free_i64(t2); } set_avr64(a->vrt, t0, true); set_avr64(a->vrt, t1, false); - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - return true; } @@ -2181,16 +2011,10 @@ static bool trans_VSRDBI(DisasContext *ctx, arg_VN *a) tcg_gen_extract2_i64(t0, t0, t1, a->sh); tcg_gen_extract2_i64(t1, t1, t2, a->sh); - - tcg_temp_free_i64(t2); } set_avr64(a->vrt, t0, false); set_avr64(a->vrt, t1, true); - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - return true; } @@ -2223,8 +2047,6 @@ static bool trans_VEXPANDQM(DisasContext *ctx, arg_VX_tb *a) tcg_gen_sari_i64(tmp, tmp, 63); set_avr64(a->vrt, tmp, false); set_avr64(a->vrt, tmp, true); - - tcg_temp_free_i64(tmp); return true; } @@ -2278,12 +2100,6 @@ static bool do_vextractm(DisasContext *ctx, arg_VX_tb *a, unsigned vece) tcg_gen_shri_i64(hi, hi, 64 - elem_count_half); tcg_gen_extract2_i64(lo, lo, hi, 64 - elem_count_half); tcg_gen_trunc_i64_tl(cpu_gpr[a->vrt], lo); - - tcg_temp_free_i64(hi); - tcg_temp_free_i64(lo); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - return true; } @@ -2304,9 +2120,6 @@ static bool trans_VEXTRACTQM(DisasContext *ctx, arg_VX_tb *a) get_avr64(tmp, a->vrb, true); tcg_gen_shri_i64(tmp, tmp, 63); tcg_gen_trunc_i64_tl(cpu_gpr[a->vrt], tmp); - - tcg_temp_free_i64(tmp); - return true; } @@ -2367,12 +2180,6 @@ static bool do_mtvsrm(DisasContext *ctx, arg_VX_tb *a, unsigned vece) set_avr64(a->vrt, lo, false); set_avr64(a->vrt, hi, true); - - tcg_temp_free_i64(hi); - tcg_temp_free_i64(lo); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - return true; } @@ -2394,9 +2201,6 @@ static bool trans_MTVSRQM(DisasContext *ctx, arg_VX_tb *a) tcg_gen_sextract_i64(tmp, tmp, 0, 1); set_avr64(a->vrt, tmp, false); set_avr64(a->vrt, tmp, true); - - tcg_temp_free_i64(tmp); - return true; } @@ -2445,10 +2249,6 @@ static bool do_vcntmb(DisasContext *ctx, arg_VX_mp *a, int vece) tcg_gen_shli_i64(rt, rt, TARGET_LONG_BITS - 8 + vece); tcg_gen_trunc_i64_tl(cpu_gpr[a->rt], rt); - - tcg_temp_free_i64(vrb); - tcg_temp_free_i64(rt); - return true; } @@ -2473,12 +2273,7 @@ static bool do_vstri(DisasContext *ctx, arg_VX_tb_rc *a, } else { TCGv_i32 discard = tcg_temp_new_i32(); gen_helper(discard, vrt, vrb); - tcg_temp_free_i32(discard); } - - tcg_temp_free_ptr(vrt); - tcg_temp_free_ptr(vrb); - return true; } @@ -2531,12 +2326,6 @@ static bool do_vclrb(DisasContext *ctx, arg_VX *a, bool right) get_avr64(tmp, a->vra, false); tcg_gen_and_i64(tmp, tmp, ml); set_avr64(a->vrt, tmp, false); - - tcg_temp_free_i64(rb); - tcg_temp_free_i64(mh); - tcg_temp_free_i64(ml); - tcg_temp_free_i64(tmp); - return true; } @@ -2560,10 +2349,6 @@ static void glue(gen_, name0##_##name1)(DisasContext *ctx) \ } else { \ gen_helper_##name0(cpu_env, rd, ra, rb, rc); \ } \ - tcg_temp_free_ptr(ra); \ - tcg_temp_free_ptr(rb); \ - tcg_temp_free_ptr(rc); \ - tcg_temp_free_ptr(rd); \ } GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23) @@ -2579,11 +2364,6 @@ static bool do_va_helper(DisasContext *ctx, arg_VA *a, vrb = gen_avr_ptr(a->vrb); vrc = gen_avr_ptr(a->rc); gen_helper(vrt, vra, vrb, vrc); - tcg_temp_free_ptr(vrt); - tcg_temp_free_ptr(vra); - tcg_temp_free_ptr(vrb); - tcg_temp_free_ptr(vrc); - return true; } @@ -2654,11 +2434,6 @@ static bool do_va_env_helper(DisasContext *ctx, arg_VA *a, vrb = gen_avr_ptr(a->vrb); vrc = gen_avr_ptr(a->rc); gen_helper(cpu_env, vrt, vra, vrb, vrc); - tcg_temp_free_ptr(vrt); - tcg_temp_free_ptr(vra); - tcg_temp_free_ptr(vrb); - tcg_temp_free_ptr(vrc); - return true; } @@ -2751,8 +2526,6 
@@ static bool trans_VEXTSD2Q(DisasContext *ctx, arg_VX_tb *a) set_avr64(a->vrt, tmp, false); tcg_gen_sari_i64(tmp, tmp, 63); set_avr64(a->vrt, tmp, true); - - tcg_temp_free_i64(tmp); return true; } @@ -2799,11 +2572,6 @@ static void gen_##op(DisasContext *ctx) \ ps = tcg_const_i32((ctx->opcode & 0x200) != 0); \ \ gen_helper_##op(cpu_crf[6], rd, ra, rb, ps); \ - \ - tcg_temp_free_ptr(ra); \ - tcg_temp_free_ptr(rb); \ - tcg_temp_free_ptr(rd); \ - tcg_temp_free_i32(ps); \ } #define GEN_BCD2(op) \ @@ -2823,10 +2591,6 @@ static void gen_##op(DisasContext *ctx) \ ps = tcg_const_i32((ctx->opcode & 0x200) != 0); \ \ gen_helper_##op(cpu_crf[6], rd, rb, ps); \ - \ - tcg_temp_free_ptr(rb); \ - tcg_temp_free_ptr(rd); \ - tcg_temp_free_i32(ps); \ } GEN_BCD(bcdadd) @@ -2933,8 +2697,6 @@ static void gen_vsbox(DisasContext *ctx) ra = gen_avr_ptr(rA(ctx->opcode)); rd = gen_avr_ptr(rD(ctx->opcode)); gen_helper_vsbox(rd, ra); - tcg_temp_free_ptr(ra); - tcg_temp_free_ptr(rd); } GEN_VXFORM(vcipher, 4, 20) @@ -2960,9 +2722,6 @@ static void gen_##op(DisasContext *ctx) \ rd = gen_avr_ptr(rD(ctx->opcode)); \ st_six = tcg_const_i32(rB(ctx->opcode)); \ gen_helper_##op(rd, ra, st_six); \ - tcg_temp_free_ptr(ra); \ - tcg_temp_free_ptr(rd); \ - tcg_temp_free_i32(st_six); \ } VSHASIGMA(vshasigmaw) @@ -3077,12 +2836,6 @@ static bool trans_VMSUMUDM(DisasContext *ctx, arg_VA *a) set_avr64(a->vrt, rl, false); set_avr64(a->vrt, rh, true); - - tcg_temp_free_i64(rl); - tcg_temp_free_i64(rh); - tcg_temp_free_i64(src1); - tcg_temp_free_i64(src2); - return true; } @@ -3128,14 +2881,6 @@ static bool trans_VMSUMCUD(DisasContext *ctx, arg_VA *a) /* Discard 64 more bits to complete the CHOP128(temp >> 128) */ set_avr64(a->vrt, tmp0, false); set_avr64(a->vrt, zero, true); - - tcg_temp_free_i64(tmp0); - tcg_temp_free_i64(tmp1); - tcg_temp_free_i64(prod1h); - tcg_temp_free_i64(prod1l); - tcg_temp_free_i64(prod0h); - tcg_temp_free_i64(prod0l); - return true; } @@ -3149,10 +2894,6 @@ static bool do_vx_helper(DisasContext *ctx, arg_VX *a, rb = gen_avr_ptr(a->vrb); rd = gen_avr_ptr(a->vrt); gen_helper(rd, ra, rb); - tcg_temp_free_ptr(ra); - tcg_temp_free_ptr(rb); - tcg_temp_free_ptr(rd); - return true; } @@ -3237,12 +2978,6 @@ static bool do_vx_vmuleo(DisasContext *ctx, arg_VX *a, bool even, gen_mul(vrt0, vrt1, vra, vrb); set_avr64(a->vrt, vrt0, false); set_avr64(a->vrt, vrt1, true); - - tcg_temp_free_i64(vra); - tcg_temp_free_i64(vrb); - tcg_temp_free_i64(vrt0); - tcg_temp_free_i64(vrt1); - return true; } @@ -3302,10 +3037,6 @@ static void do_vx_vmulhw_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, bool sign) tcg_gen_shri_i64(lh, lh, 32); tcg_gen_deposit_i64(t, hh, lh, 0, 32); - - tcg_temp_free_i64(hh); - tcg_temp_free_i64(lh); - tcg_temp_free_i64(temp); } static void do_vx_vmulhd_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, bool sign) @@ -3318,8 +3049,6 @@ static void do_vx_vmulhd_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, bool sign) } else { tcg_gen_mulu2_i64(tlow, t, a, b); } - - tcg_temp_free_i64(tlow); } static bool do_vx_mulh(DisasContext *ctx, arg_VX *a, bool sign, @@ -3344,13 +3073,7 @@ static bool do_vx_mulh(DisasContext *ctx, arg_VX *a, bool sign, set_avr64(a->vrt, vrt, i); } - - tcg_temp_free_i64(vra); - tcg_temp_free_i64(vrb); - tcg_temp_free_i64(vrt); - return true; - } TRANS(VMULHSW, do_vx_mulh, true , do_vx_vmulhw_i64) @@ -3368,7 +3091,6 @@ static void do_vavg(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b, gen_shr_vec(vece, b, b, 1); tcg_gen_add_vec(vece, t, a, b); tcg_gen_add_vec(vece, t, t, tmp); - tcg_temp_free_vec(tmp); } 
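/*
 * Most hunks in this part of the patch follow one pattern, sketched here with
 * a made-up instruction (gen_frob and gen_helper_frobnicate are hypothetical;
 * only the shape matters): temporaries obtained from tcg_temp_new*() no longer
 * get a matching tcg_temp_free*(), because the common TCG code now reclaims
 * translator temporaries automatically.
 */
static void gen_frob(DisasContext *ctx)
{
    TCGv ea = tcg_temp_new();
    TCGv_i64 val = tcg_temp_new_i64();

    gen_set_access_type(ctx, ACCESS_INT);
    gen_addr_reg_index(ctx, ea);                      /* EA = (rA|0) + rB */
    tcg_gen_qemu_ld_i64(val, ea, ctx->mem_idx, DEF_MEMOP(MO_UQ));
    gen_helper_frobnicate(cpu_env, val);              /* hypothetical helper */
    /* previously: tcg_temp_free(ea); tcg_temp_free_i64(val); -- now omitted */
}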
QEMU_FLATTEN @@ -3538,8 +3260,6 @@ static void NAME(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) \ tcg_gen_movi_i32(t1, 0); \ tcg_gen_movcond_i32(TCG_COND_NE, b, t0, t1, t0, b); \ DIV(t, a, b); \ - tcg_temp_free_i32(t0); \ - tcg_temp_free_i32(t1); \ } #define DIVU64(NAME, DIV) \ @@ -3564,8 +3284,6 @@ static void NAME(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) \ tcg_gen_movi_i64(t1, 0); \ tcg_gen_movcond_i64(TCG_COND_NE, b, t0, t1, t0, b); \ DIV(t, a, b); \ - tcg_temp_free_i64(t0); \ - tcg_temp_free_i64(t1); \ } DIVS32(do_divsw, tcg_gen_div_i32) @@ -3596,9 +3314,6 @@ static void do_dives_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) /* if quotient doesn't fit in 32 bits the result is undefined */ tcg_gen_extrl_i64_i32(t, val1); - - tcg_temp_free_i64(val1); - tcg_temp_free_i64(val2); } static void do_diveu_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) @@ -3617,9 +3332,6 @@ static void do_diveu_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) /* if quotient doesn't fit in 32 bits the result is undefined */ tcg_gen_extrl_i64_i32(t, val1); - - tcg_temp_free_i64(val1); - tcg_temp_free_i64(val2); } DIVS32(do_divesw, do_dives_i32) diff --git a/target/ppc/translate/vsx-impl.c.inc b/target/ppc/translate/vsx-impl.c.inc index 4deb29ee42..6e63403727 100644 --- a/target/ppc/translate/vsx-impl.c.inc +++ b/target/ppc/translate/vsx-impl.c.inc @@ -40,8 +40,6 @@ static void gen_##name(DisasContext *ctx) \ gen_qemu_##operation(ctx, t0, EA); \ set_cpu_vsr(xT(ctx->opcode), t0, true); \ /* NOTE: cpu_vsrl is undefined */ \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(t0); \ } VSX_LOAD_SCALAR(lxsdx, ld64_i64) @@ -68,8 +66,6 @@ static void gen_lxvd2x(DisasContext *ctx) tcg_gen_addi_tl(EA, EA, 8); gen_qemu_ld64_i64(ctx, t0, EA); set_cpu_vsr(xT(ctx->opcode), t0, false); - tcg_temp_free(EA); - tcg_temp_free_i64(t0); } static void gen_lxvw4x(DisasContext *ctx) @@ -99,8 +95,6 @@ static void gen_lxvw4x(DisasContext *ctx) tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEUQ); tcg_gen_shri_i64(t1, t0, 32); tcg_gen_deposit_i64(xtl, t1, t0, 32, 32); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } else { tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEUQ); tcg_gen_addi_tl(EA, EA, 8); @@ -108,9 +102,6 @@ static void gen_lxvw4x(DisasContext *ctx) } set_cpu_vsr(xT(ctx->opcode), xth, true); set_cpu_vsr(xT(ctx->opcode), xtl, false); - tcg_temp_free(EA); - tcg_temp_free_i64(xth); - tcg_temp_free_i64(xtl); } static void gen_lxvwsx(DisasContext *ctx) @@ -138,9 +129,6 @@ static void gen_lxvwsx(DisasContext *ctx) data = tcg_temp_new_i32(); tcg_gen_qemu_ld_i32(data, EA, ctx->mem_idx, DEF_MEMOP(MO_UL)); tcg_gen_gvec_dup_i32(MO_UL, vsr_full_offset(xT(ctx->opcode)), 16, 16, data); - - tcg_temp_free(EA); - tcg_temp_free_i32(data); } static void gen_lxvdsx(DisasContext *ctx) @@ -161,9 +149,6 @@ static void gen_lxvdsx(DisasContext *ctx) data = tcg_temp_new_i64(); tcg_gen_qemu_ld_i64(data, EA, ctx->mem_idx, DEF_MEMOP(MO_UQ)); tcg_gen_gvec_dup_i64(MO_UQ, vsr_full_offset(xT(ctx->opcode)), 16, 16, data); - - tcg_temp_free(EA); - tcg_temp_free_i64(data); } static void gen_bswap16x8(TCGv_i64 outh, TCGv_i64 outl, @@ -186,10 +171,6 @@ static void gen_bswap16x8(TCGv_i64 outh, TCGv_i64 outl, tcg_gen_shri_i64(t1, inl, 8); tcg_gen_and_i64(t1, t1, mask); tcg_gen_or_i64(outl, t0, t1); - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(mask); } static void gen_bswap32x4(TCGv_i64 outh, TCGv_i64 outl, @@ -204,10 +185,8 @@ static void gen_bswap32x4(TCGv_i64 outh, TCGv_i64 outl, tcg_gen_deposit_i64(outh, outh, hi, 32, 32); tcg_gen_shri_i64(outl, lo, 32); 
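/*
 * hi and lo hold the two input doublewords after a full 64-bit byte reversal
 * performed earlier in this helper; the shri/deposit pairs around this point
 * then swap the two 32-bit halves of each doubleword back, leaving every
 * 32-bit word individually byte-reversed, which is the 4 x 32-bit swap the
 * function name promises.
 */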
tcg_gen_deposit_i64(outl, outl, lo, 32, 32); - - tcg_temp_free_i64(hi); - tcg_temp_free_i64(lo); } + static void gen_lxvh8x(DisasContext *ctx) { TCGv EA; @@ -232,9 +211,6 @@ static void gen_lxvh8x(DisasContext *ctx) } set_cpu_vsr(xT(ctx->opcode), xth, true); set_cpu_vsr(xT(ctx->opcode), xtl, false); - tcg_temp_free(EA); - tcg_temp_free_i64(xth); - tcg_temp_free_i64(xtl); } static void gen_lxvb16x(DisasContext *ctx) @@ -257,9 +233,6 @@ static void gen_lxvb16x(DisasContext *ctx) tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEUQ); set_cpu_vsr(xT(ctx->opcode), xth, true); set_cpu_vsr(xT(ctx->opcode), xtl, false); - tcg_temp_free(EA); - tcg_temp_free_i64(xth); - tcg_temp_free_i64(xtl); } #ifdef TARGET_PPC64 @@ -285,8 +258,6 @@ static void gen_##name(DisasContext *ctx) \ gen_set_access_type(ctx, ACCESS_INT); \ gen_addr_register(ctx, EA); \ gen_helper_##name(cpu_env, EA, xt, cpu_gpr[rB(ctx->opcode)]); \ - tcg_temp_free(EA); \ - tcg_temp_free_ptr(xt); \ } VSX_VECTOR_LOAD_STORE_LENGTH(lxvl) @@ -310,8 +281,6 @@ static void gen_##name(DisasContext *ctx) \ gen_addr_reg_index(ctx, EA); \ get_cpu_vsr(t0, xS(ctx->opcode), true); \ gen_qemu_##operation(ctx, t0, EA); \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(t0); \ } VSX_STORE_SCALAR(stxsdx, st64_i64) @@ -338,8 +307,6 @@ static void gen_stxvd2x(DisasContext *ctx) tcg_gen_addi_tl(EA, EA, 8); get_cpu_vsr(t0, xS(ctx->opcode), false); gen_qemu_st64_i64(ctx, t0, EA); - tcg_temp_free(EA); - tcg_temp_free_i64(t0); } static void gen_stxvw4x(DisasContext *ctx) @@ -370,16 +337,11 @@ static void gen_stxvw4x(DisasContext *ctx) tcg_gen_shri_i64(t0, xsl, 32); tcg_gen_deposit_i64(t1, t0, xsl, 32, 32); tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEUQ); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } else { tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEUQ); tcg_gen_addi_tl(EA, EA, 8); tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEUQ); } - tcg_temp_free(EA); - tcg_temp_free_i64(xsh); - tcg_temp_free_i64(xsl); } static void gen_stxvh8x(DisasContext *ctx) @@ -407,16 +369,11 @@ static void gen_stxvh8x(DisasContext *ctx) tcg_gen_qemu_st_i64(outh, EA, ctx->mem_idx, MO_BEUQ); tcg_gen_addi_tl(EA, EA, 8); tcg_gen_qemu_st_i64(outl, EA, ctx->mem_idx, MO_BEUQ); - tcg_temp_free_i64(outh); - tcg_temp_free_i64(outl); } else { tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEUQ); tcg_gen_addi_tl(EA, EA, 8); tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEUQ); } - tcg_temp_free(EA); - tcg_temp_free_i64(xsh); - tcg_temp_free_i64(xsl); } static void gen_stxvb16x(DisasContext *ctx) @@ -439,9 +396,6 @@ static void gen_stxvb16x(DisasContext *ctx) tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEUQ); tcg_gen_addi_tl(EA, EA, 8); tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEUQ); - tcg_temp_free(EA); - tcg_temp_free_i64(xsh); - tcg_temp_free_i64(xsl); } static void gen_mfvsrwz(DisasContext *ctx) @@ -462,8 +416,6 @@ static void gen_mfvsrwz(DisasContext *ctx) get_cpu_vsr(xsh, xS(ctx->opcode), true); tcg_gen_ext32u_i64(tmp, xsh); tcg_gen_trunc_i64_tl(cpu_gpr[rA(ctx->opcode)], tmp); - tcg_temp_free_i64(tmp); - tcg_temp_free_i64(xsh); } static void gen_mtvsrwa(DisasContext *ctx) @@ -484,8 +436,6 @@ static void gen_mtvsrwa(DisasContext *ctx) tcg_gen_extu_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)]); tcg_gen_ext32s_i64(xsh, tmp); set_cpu_vsr(xT(ctx->opcode), xsh, true); - tcg_temp_free_i64(tmp); - tcg_temp_free_i64(xsh); } static void gen_mtvsrwz(DisasContext *ctx) @@ -506,8 +456,6 @@ static void gen_mtvsrwz(DisasContext *ctx) tcg_gen_extu_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)]); 
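/*
 * The tcg_gen_extu_tl_i64() above widens the GPR value to 64 bits; the
 * tcg_gen_ext32u_i64() below keeps only its low word, which set_cpu_vsr()
 * then writes, zero-extended, into doubleword 0 of the target VSR.
 */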
tcg_gen_ext32u_i64(xsh, tmp); set_cpu_vsr(xT(ctx->opcode), xsh, true); - tcg_temp_free_i64(tmp); - tcg_temp_free_i64(xsh); } #if defined(TARGET_PPC64) @@ -528,7 +476,6 @@ static void gen_mfvsrd(DisasContext *ctx) t0 = tcg_temp_new_i64(); get_cpu_vsr(t0, xS(ctx->opcode), true); tcg_gen_mov_i64(cpu_gpr[rA(ctx->opcode)], t0); - tcg_temp_free_i64(t0); } static void gen_mtvsrd(DisasContext *ctx) @@ -548,7 +495,6 @@ static void gen_mtvsrd(DisasContext *ctx) t0 = tcg_temp_new_i64(); tcg_gen_mov_i64(t0, cpu_gpr[rA(ctx->opcode)]); set_cpu_vsr(xT(ctx->opcode), t0, true); - tcg_temp_free_i64(t0); } static void gen_mfvsrld(DisasContext *ctx) @@ -568,7 +514,6 @@ static void gen_mfvsrld(DisasContext *ctx) t0 = tcg_temp_new_i64(); get_cpu_vsr(t0, xS(ctx->opcode), false); tcg_gen_mov_i64(cpu_gpr[rA(ctx->opcode)], t0); - tcg_temp_free_i64(t0); } static void gen_mtvsrdd(DisasContext *ctx) @@ -596,7 +541,6 @@ static void gen_mtvsrdd(DisasContext *ctx) tcg_gen_mov_i64(t0, cpu_gpr[rB(ctx->opcode)]); set_cpu_vsr(xT(ctx->opcode), t0, false); - tcg_temp_free_i64(t0); } static void gen_mtvsrws(DisasContext *ctx) @@ -619,7 +563,6 @@ static void gen_mtvsrws(DisasContext *ctx) cpu_gpr[rA(ctx->opcode)], 32, 32); set_cpu_vsr(xT(ctx->opcode), t0, false); set_cpu_vsr(xT(ctx->opcode), t0, true); - tcg_temp_free_i64(t0); } #endif @@ -666,14 +609,11 @@ static void glue(gen_, name)(DisasContext *ctx) \ tcg_gen_and_i64(xa, xa, sgm); \ tcg_gen_andc_i64(xb, xb, sgm); \ tcg_gen_or_i64(xb, xb, xa); \ - tcg_temp_free_i64(xa); \ break; \ } \ } \ set_cpu_vsr(xT(ctx->opcode), xb, true); \ set_cpu_vsr(xT(ctx->opcode), tcg_constant_i64(0), false); \ - tcg_temp_free_i64(xb); \ - tcg_temp_free_i64(sgm); \ } VSX_SCALAR_MOVE(xsabsdp, OP_ABS, SGN_MASK_DP) @@ -717,15 +657,10 @@ static void glue(gen_, name)(DisasContext *ctx) \ tcg_gen_and_i64(xah, tmp, sgm); \ tcg_gen_andc_i64(xbh, xbh, sgm); \ tcg_gen_or_i64(xbh, xbh, xah); \ - tcg_temp_free_i64(xah); \ break; \ } \ set_cpu_vsr(xt, xbh, true); \ set_cpu_vsr(xt, xbl, false); \ - tcg_temp_free_i64(xbl); \ - tcg_temp_free_i64(xbh); \ - tcg_temp_free_i64(sgm); \ - tcg_temp_free_i64(tmp); \ } VSX_SCALAR_MOVE_QP(xsabsqp, OP_ABS, SGN_MASK_DP) @@ -870,11 +805,7 @@ static void gen_##name(DisasContext *ctx) \ } else { \ ignored = tcg_temp_new_i32(); \ gen_helper_##name(ignored, cpu_env, xt, xa, xb); \ - tcg_temp_free_i32(ignored); \ } \ - tcg_temp_free_ptr(xt); \ - tcg_temp_free_ptr(xa); \ - tcg_temp_free_ptr(xb); \ } VSX_CMP(xvcmpeqdp, 0x0C, 0x0C, 0, PPC2_VSX) @@ -899,10 +830,6 @@ static bool trans_XSCVQPDP(DisasContext *ctx, arg_X_tb_rc *a) xt = gen_avr_ptr(a->rt); xb = gen_avr_ptr(a->rb); gen_helper_XSCVQPDP(cpu_env, ro, xt, xb); - tcg_temp_free_i32(ro); - tcg_temp_free_ptr(xt); - tcg_temp_free_ptr(xb); - return true; } @@ -917,9 +844,6 @@ static bool do_helper_env_X_tb(DisasContext *ctx, arg_X_tb *a, xt = gen_avr_ptr(a->rt); xb = gen_avr_ptr(a->rb); gen_helper(cpu_env, xt, xb); - tcg_temp_free_ptr(xt); - tcg_temp_free_ptr(xb); - return true; } @@ -938,7 +862,6 @@ static void gen_##name(DisasContext *ctx) \ } \ opc = tcg_const_i32(ctx->opcode); \ gen_helper_##name(cpu_env, opc); \ - tcg_temp_free_i32(opc); \ } #define GEN_VSX_HELPER_X3(name, op1, op2, inval, type) \ @@ -953,9 +876,6 @@ static void gen_##name(DisasContext *ctx) \ xa = gen_vsr_ptr(xA(ctx->opcode)); \ xb = gen_vsr_ptr(xB(ctx->opcode)); \ gen_helper_##name(cpu_env, xt, xa, xb); \ - tcg_temp_free_ptr(xt); \ - tcg_temp_free_ptr(xa); \ - tcg_temp_free_ptr(xb); \ } #define GEN_VSX_HELPER_X2(name, op1, op2, inval, type) \ @@ -969,8 +889,6 
@@ static void gen_##name(DisasContext *ctx) \ xt = gen_vsr_ptr(xT(ctx->opcode)); \ xb = gen_vsr_ptr(xB(ctx->opcode)); \ gen_helper_##name(cpu_env, xt, xb); \ - tcg_temp_free_ptr(xt); \ - tcg_temp_free_ptr(xb); \ } #define GEN_VSX_HELPER_X2_AB(name, op1, op2, inval, type) \ @@ -986,9 +904,6 @@ static void gen_##name(DisasContext *ctx) \ xa = gen_vsr_ptr(xA(ctx->opcode)); \ xb = gen_vsr_ptr(xB(ctx->opcode)); \ gen_helper_##name(cpu_env, opc, xa, xb); \ - tcg_temp_free_i32(opc); \ - tcg_temp_free_ptr(xa); \ - tcg_temp_free_ptr(xb); \ } #define GEN_VSX_HELPER_X1(name, op1, op2, inval, type) \ @@ -1003,8 +918,6 @@ static void gen_##name(DisasContext *ctx) \ opc = tcg_const_i32(ctx->opcode); \ xb = gen_vsr_ptr(xB(ctx->opcode)); \ gen_helper_##name(cpu_env, opc, xb); \ - tcg_temp_free_i32(opc); \ - tcg_temp_free_ptr(xb); \ } #define GEN_VSX_HELPER_R3(name, op1, op2, inval, type) \ @@ -1021,10 +934,6 @@ static void gen_##name(DisasContext *ctx) \ xa = gen_vsr_ptr(rA(ctx->opcode) + 32); \ xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \ gen_helper_##name(cpu_env, opc, xt, xa, xb); \ - tcg_temp_free_i32(opc); \ - tcg_temp_free_ptr(xt); \ - tcg_temp_free_ptr(xa); \ - tcg_temp_free_ptr(xb); \ } #define GEN_VSX_HELPER_R2(name, op1, op2, inval, type) \ @@ -1040,9 +949,6 @@ static void gen_##name(DisasContext *ctx) \ xt = gen_vsr_ptr(rD(ctx->opcode) + 32); \ xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \ gen_helper_##name(cpu_env, opc, xt, xb); \ - tcg_temp_free_i32(opc); \ - tcg_temp_free_ptr(xt); \ - tcg_temp_free_ptr(xb); \ } #define GEN_VSX_HELPER_R2_AB(name, op1, op2, inval, type) \ @@ -1058,9 +964,6 @@ static void gen_##name(DisasContext *ctx) \ xa = gen_vsr_ptr(rA(ctx->opcode) + 32); \ xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \ gen_helper_##name(cpu_env, opc, xa, xb); \ - tcg_temp_free_i32(opc); \ - tcg_temp_free_ptr(xa); \ - tcg_temp_free_ptr(xb); \ } #define GEN_VSX_HELPER_XT_XB_ENV(name, op1, op2, inval, type) \ @@ -1078,8 +981,6 @@ static void gen_##name(DisasContext *ctx) \ gen_helper_##name(t1, cpu_env, t0); \ set_cpu_vsr(xT(ctx->opcode), t1, true); \ set_cpu_vsr(xT(ctx->opcode), tcg_constant_i64(0), false); \ - tcg_temp_free_i64(t0); \ - tcg_temp_free_i64(t1); \ } GEN_VSX_HELPER_X3(xsadddp, 0x00, 0x04, 0, PPC2_VSX) @@ -1291,8 +1192,6 @@ static bool do_XX2_bf_uim(DisasContext *ctx, arg_XX2_bf_uim *a, bool vsr, REQUIRE_VSX(ctx); xb = vsr ? 
gen_vsr_ptr(a->xb) : gen_avr_ptr(a->xb); gen_helper(cpu_env, tcg_constant_i32(a->bf), tcg_constant_i32(a->uim), xb); - tcg_temp_free_ptr(xb); - return true; } @@ -1314,9 +1213,6 @@ bool trans_XSCVSPDPN(DisasContext *ctx, arg_XX2 *a) set_cpu_vsr(a->xt, tmp, true); set_cpu_vsr(a->xt, tcg_constant_i64(0), false); - - tcg_temp_free_i64(tmp); - return true; } @@ -1413,11 +1309,6 @@ static bool trans_XXPERM(DisasContext *ctx, arg_XX3 *a) xb = gen_vsr_ptr(a->xb); gen_helper_VPERM(xt, xa, xt, xb); - - tcg_temp_free_ptr(xt); - tcg_temp_free_ptr(xa); - tcg_temp_free_ptr(xb); - return true; } @@ -1433,11 +1324,6 @@ static bool trans_XXPERMR(DisasContext *ctx, arg_XX3 *a) xb = gen_vsr_ptr(a->xb); gen_helper_VPERMR(xt, xa, xt, xb); - - tcg_temp_free_ptr(xt); - tcg_temp_free_ptr(xa); - tcg_temp_free_ptr(xb); - return true; } @@ -1458,8 +1344,6 @@ static bool trans_XXPERMDI(DisasContext *ctx, arg_XX3_dm *a) set_cpu_vsr(a->xt, t0, true); set_cpu_vsr(a->xt, t1, false); - - tcg_temp_free_i64(t1); } else { get_cpu_vsr(t0, a->xa, (a->dm & 2) == 0); set_cpu_vsr(a->xt, t0, true); @@ -1467,9 +1351,6 @@ static bool trans_XXPERMDI(DisasContext *ctx, arg_XX3_dm *a) get_cpu_vsr(t0, a->xb, (a->dm & 1) == 0); set_cpu_vsr(a->xt, t0, false); } - - tcg_temp_free_i64(t0); - return true; } @@ -1486,12 +1367,6 @@ static bool trans_XXPERMX(DisasContext *ctx, arg_8RR_XX4_uim3 *a) xc = gen_vsr_ptr(a->xc); gen_helper_XXPERMX(xt, xa, xb, xc, tcg_constant_tl(a->uim3)); - - tcg_temp_free_ptr(xt); - tcg_temp_free_ptr(xa); - tcg_temp_free_ptr(xb); - tcg_temp_free_ptr(xc); - return true; } @@ -1514,10 +1389,6 @@ static bool do_xxgenpcv(DisasContext *ctx, arg_X_imm5 *a, vrb = gen_avr_ptr(a->vrb); fn[a->imm](xt, vrb); - - tcg_temp_free_ptr(xt); - tcg_temp_free_ptr(vrb); - return true; } @@ -1550,12 +1421,6 @@ static bool do_xsmadd(DisasContext *ctx, int tgt, int src1, int src2, int src3, s3 = gen_vsr_ptr(src3); gen_helper(cpu_env, t, s1, s2, s3); - - tcg_temp_free_ptr(t); - tcg_temp_free_ptr(s1); - tcg_temp_free_ptr(s2); - tcg_temp_free_ptr(s3); - return true; } @@ -1636,10 +1501,6 @@ static void gen_##name(DisasContext *ctx) \ s3 = gen_vsr_ptr(xB(ctx->opcode)); \ } \ gen_helper_##name(cpu_env, xt, s1, s2, s3); \ - tcg_temp_free_ptr(xt); \ - tcg_temp_free_ptr(s1); \ - tcg_temp_free_ptr(s2); \ - tcg_temp_free_ptr(s3); \ } GEN_VSX_HELPER_VSX_MADD(xvmadddp, 0x04, 0x0C, 0x0D, 0, PPC2_VSX) @@ -1673,11 +1534,6 @@ static void gen_xxbrd(DisasContext *ctx) tcg_gen_bswap64_i64(xtl, xbl); set_cpu_vsr(xT(ctx->opcode), xth, true); set_cpu_vsr(xT(ctx->opcode), xtl, false); - - tcg_temp_free_i64(xth); - tcg_temp_free_i64(xtl); - tcg_temp_free_i64(xbh); - tcg_temp_free_i64(xbl); } static void gen_xxbrh(DisasContext *ctx) @@ -1701,11 +1557,6 @@ static void gen_xxbrh(DisasContext *ctx) gen_bswap16x8(xth, xtl, xbh, xbl); set_cpu_vsr(xT(ctx->opcode), xth, true); set_cpu_vsr(xT(ctx->opcode), xtl, false); - - tcg_temp_free_i64(xth); - tcg_temp_free_i64(xtl); - tcg_temp_free_i64(xbh); - tcg_temp_free_i64(xbl); } static void gen_xxbrq(DisasContext *ctx) @@ -1733,12 +1584,6 @@ static void gen_xxbrq(DisasContext *ctx) set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_gen_mov_i64(xth, t0); set_cpu_vsr(xT(ctx->opcode), xth, true); - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(xth); - tcg_temp_free_i64(xtl); - tcg_temp_free_i64(xbh); - tcg_temp_free_i64(xbl); } static void gen_xxbrw(DisasContext *ctx) @@ -1762,11 +1607,6 @@ static void gen_xxbrw(DisasContext *ctx) gen_bswap32x4(xth, xtl, xbh, xbl); set_cpu_vsr(xT(ctx->opcode), xth, true); set_cpu_vsr(xT(ctx->opcode), 
xtl, false); - - tcg_temp_free_i64(xth); - tcg_temp_free_i64(xtl); - tcg_temp_free_i64(xbh); - tcg_temp_free_i64(xbl); } #define VSX_LOGICAL(name, vece, tcg_op) \ @@ -1813,11 +1653,6 @@ static void glue(gen_, name)(DisasContext *ctx) \ set_cpu_vsr(xT(ctx->opcode), tmp, true); \ tcg_gen_deposit_i64(tmp, b1, a1, 32, 32); \ set_cpu_vsr(xT(ctx->opcode), tmp, false); \ - tcg_temp_free_i64(a0); \ - tcg_temp_free_i64(a1); \ - tcg_temp_free_i64(b0); \ - tcg_temp_free_i64(b1); \ - tcg_temp_free_i64(tmp); \ } VSX_XXMRG(xxmrghw, 1) @@ -1974,13 +1809,6 @@ static bool trans_XVTLSBB(DisasContext *ctx, arg_XX2_bf_xb *a) tcg_gen_or_i64(t0, all_false, all_true); tcg_gen_extrl_i64_i32(cpu_crf[a->bf], t0); - - tcg_temp_free_i64(xb); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(all_true); - tcg_temp_free_i64(all_false); - return true; } @@ -2012,7 +1840,6 @@ static void gen_xxsldwi(DisasContext *ctx) get_cpu_vsr(t0, xB(ctx->opcode), true); tcg_gen_shri_i64(t0, t0, 32); tcg_gen_or_i64(xtl, xtl, t0); - tcg_temp_free_i64(t0); break; } case 2: { @@ -2032,16 +1859,12 @@ static void gen_xxsldwi(DisasContext *ctx) get_cpu_vsr(t0, xB(ctx->opcode), false); tcg_gen_shri_i64(t0, t0, 32); tcg_gen_or_i64(xtl, xtl, t0); - tcg_temp_free_i64(t0); break; } } set_cpu_vsr(xT(ctx->opcode), xth, true); set_cpu_vsr(xT(ctx->opcode), xtl, false); - - tcg_temp_free_i64(xth); - tcg_temp_free_i64(xtl); } static bool do_vsx_extract_insert(DisasContext *ctx, arg_XX2_uim *a, @@ -2064,10 +1887,7 @@ static bool do_vsx_extract_insert(DisasContext *ctx, arg_XX2_uim *a, xt = gen_vsr_ptr(a->xt); xb = gen_vsr_ptr(a->xb); gen_helper(xt, xb, tcg_constant_i32(a->uim)); - tcg_temp_free_ptr(xb); - tcg_temp_free_ptr(xt); } - return true; } @@ -2086,7 +1906,6 @@ static void gen_xsxexpdp(DisasContext *ctx) t0 = tcg_temp_new_i64(); get_cpu_vsr(t0, xB(ctx->opcode), true); tcg_gen_extract_i64(rt, t0, 52, 11); - tcg_temp_free_i64(t0); } static void gen_xsxexpqp(DisasContext *ctx) @@ -2108,10 +1927,6 @@ static void gen_xsxexpqp(DisasContext *ctx) set_cpu_vsr(rD(ctx->opcode) + 32, xth, true); tcg_gen_movi_i64(xtl, 0); set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false); - - tcg_temp_free_i64(xbh); - tcg_temp_free_i64(xth); - tcg_temp_free_i64(xtl); } static void gen_xsiexpdp(DisasContext *ctx) @@ -2133,8 +1948,6 @@ static void gen_xsiexpdp(DisasContext *ctx) tcg_gen_or_i64(xth, xth, t0); set_cpu_vsr(xT(ctx->opcode), xth, true); set_cpu_vsr(xT(ctx->opcode), tcg_constant_i64(0), false); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(xth); } static void gen_xsiexpqp(DisasContext *ctx) @@ -2167,13 +1980,6 @@ static void gen_xsiexpqp(DisasContext *ctx) set_cpu_vsr(rD(ctx->opcode) + 32, xth, true); tcg_gen_mov_i64(xtl, xal); set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false); - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(xth); - tcg_temp_free_i64(xtl); - tcg_temp_free_i64(xah); - tcg_temp_free_i64(xal); - tcg_temp_free_i64(xbh); } static void gen_xsxsigdp(DisasContext *ctx) @@ -2198,12 +2004,6 @@ static void gen_xsxsigdp(DisasContext *ctx) tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0); get_cpu_vsr(t1, xB(ctx->opcode), true); tcg_gen_deposit_i64(rt, t0, t1, 0, 52); - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(exp); - tcg_temp_free_i64(zr); - tcg_temp_free_i64(nan); } static void gen_xsxsigqp(DisasContext *ctx) @@ -2237,15 +2037,6 @@ static void gen_xsxsigqp(DisasContext *ctx) set_cpu_vsr(rD(ctx->opcode) + 32, xth, true); tcg_gen_mov_i64(xtl, xbl); set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false); - - 
tcg_temp_free_i64(t0); - tcg_temp_free_i64(exp); - tcg_temp_free_i64(zr); - tcg_temp_free_i64(nan); - tcg_temp_free_i64(xth); - tcg_temp_free_i64(xtl); - tcg_temp_free_i64(xbh); - tcg_temp_free_i64(xbl); } #endif @@ -2285,14 +2076,6 @@ static void gen_xviexpsp(DisasContext *ctx) tcg_gen_shli_i64(t0, t0, 23); tcg_gen_or_i64(xtl, xtl, t0); set_cpu_vsr(xT(ctx->opcode), xtl, false); - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(xth); - tcg_temp_free_i64(xtl); - tcg_temp_free_i64(xah); - tcg_temp_free_i64(xal); - tcg_temp_free_i64(xbh); - tcg_temp_free_i64(xbl); } static void gen_xviexpdp(DisasContext *ctx) @@ -2324,13 +2107,6 @@ static void gen_xviexpdp(DisasContext *ctx) tcg_gen_deposit_i64(xtl, xal, xbl, 52, 11); set_cpu_vsr(xT(ctx->opcode), xtl, false); - - tcg_temp_free_i64(xth); - tcg_temp_free_i64(xtl); - tcg_temp_free_i64(xah); - tcg_temp_free_i64(xal); - tcg_temp_free_i64(xbh); - tcg_temp_free_i64(xbl); } static void gen_xvxexpsp(DisasContext *ctx) @@ -2357,11 +2133,6 @@ static void gen_xvxexpsp(DisasContext *ctx) tcg_gen_shri_i64(xtl, xbl, 23); tcg_gen_andi_i64(xtl, xtl, 0xFF000000FF); set_cpu_vsr(xT(ctx->opcode), xtl, false); - - tcg_temp_free_i64(xth); - tcg_temp_free_i64(xtl); - tcg_temp_free_i64(xbh); - tcg_temp_free_i64(xbl); } static void gen_xvxexpdp(DisasContext *ctx) @@ -2386,11 +2157,6 @@ static void gen_xvxexpdp(DisasContext *ctx) set_cpu_vsr(xT(ctx->opcode), xth, true); tcg_gen_extract_i64(xtl, xbl, 52, 11); set_cpu_vsr(xT(ctx->opcode), xtl, false); - - tcg_temp_free_i64(xth); - tcg_temp_free_i64(xtl); - tcg_temp_free_i64(xbh); - tcg_temp_free_i64(xbl); } static bool trans_XVXSIGSP(DisasContext *ctx, arg_XX2 *a) @@ -2404,10 +2170,6 @@ static bool trans_XVXSIGSP(DisasContext *ctx, arg_XX2 *a) b = gen_vsr_ptr(a->xb); gen_helper_XVXSIGSP(t, b); - - tcg_temp_free_ptr(t); - tcg_temp_free_ptr(b); - return true; } @@ -2447,15 +2209,6 @@ static void gen_xvxsigdp(DisasContext *ctx) tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0); tcg_gen_deposit_i64(xtl, t0, xbl, 0, 52); set_cpu_vsr(xT(ctx->opcode), xtl, false); - - tcg_temp_free_i64(t0); - tcg_temp_free_i64(exp); - tcg_temp_free_i64(zr); - tcg_temp_free_i64(nan); - tcg_temp_free_i64(xth); - tcg_temp_free_i64(xtl); - tcg_temp_free_i64(xbh); - tcg_temp_free_i64(xbl); } static bool do_lstxv(DisasContext *ctx, int ra, TCGv displ, @@ -2510,9 +2263,6 @@ static bool do_lstxv(DisasContext *ctx, int ra, TCGv displ, set_cpu_vsr(rt2, xt, ctx->le_mode); } } - - tcg_temp_free(ea); - tcg_temp_free_i64(xt); return true; } @@ -2577,10 +2327,6 @@ static bool do_lstxsd(DisasContext *ctx, int rt, int ra, TCGv displ, bool store) set_cpu_vsr(rt + 32, xt, true); set_cpu_vsr(rt + 32, tcg_constant_i64(0), false); } - - tcg_temp_free(ea); - tcg_temp_free_i64(xt); - return true; } @@ -2620,10 +2366,6 @@ static bool do_lstxssp(DisasContext *ctx, int rt, int ra, TCGv displ, bool store set_cpu_vsr(rt + 32, xt, true); set_cpu_vsr(rt + 32, tcg_constant_i64(0), false); } - - tcg_temp_free(ea); - tcg_temp_free_i64(xt); - return true; } @@ -2684,9 +2426,6 @@ static bool do_lstrm(DisasContext *ctx, arg_X *a, MemOp mop, bool store) set_cpu_vsr(a->rt, xt, false); set_cpu_vsr(a->rt, tcg_constant_i64(0), true); } - - tcg_temp_free(ea); - tcg_temp_free_i64(xt); return true; } @@ -2741,9 +2480,6 @@ static void gen_xxeval_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, TCGv_i64 c, } tcg_gen_mov_i64(t, disj); - - tcg_temp_free_i64(conj); - tcg_temp_free_i64(disj); } static void gen_xxeval_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b, @@ -2788,9 +2524,6 @@ static 
void gen_xxeval_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b, } tcg_gen_mov_vec(t, disj); - - tcg_temp_free_vec(disj); - tcg_temp_free_vec(conj); } static bool trans_XXEVAL(DisasContext *ctx, arg_8RR_XX4_imm *a) @@ -2925,7 +2658,6 @@ static void gen_xxblendv_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b, TCGv_vec tmp = tcg_temp_new_vec_matching(c); tcg_gen_sari_vec(vece, tmp, c, (8 << vece) - 1); tcg_gen_bitsel_vec(vece, t, tmp, b, a); - tcg_temp_free_vec(tmp); } static bool do_xxblendv(DisasContext *ctx, arg_8RR_XX4 *a, unsigned vece) @@ -2987,11 +2719,6 @@ static bool do_helper_XX3(DisasContext *ctx, arg_XX3 *a, xb = gen_vsr_ptr(a->xb); helper(cpu_env, xt, xa, xb); - - tcg_temp_free_ptr(xt); - tcg_temp_free_ptr(xa); - tcg_temp_free_ptr(xb); - return true; } @@ -3013,11 +2740,6 @@ static bool do_helper_X(arg_X *a, rb = gen_avr_ptr(a->rb); helper(cpu_env, rt, ra, rb); - - tcg_temp_free_ptr(rt); - tcg_temp_free_ptr(ra); - tcg_temp_free_ptr(rb); - return true; } @@ -3047,10 +2769,6 @@ static bool trans_XVCVSPBF16(DisasContext *ctx, arg_XX2 *a) xb = gen_vsr_ptr(a->xb); gen_helper_XVCVSPBF16(cpu_env, xt, xb); - - tcg_temp_free_ptr(xt); - tcg_temp_free_ptr(xb); - return true; } @@ -3114,9 +2832,6 @@ static bool do_ger(DisasContext *ctx, arg_MMIRR_XX3 *a, mask = ger_pack_masks(a->pmsk, a->ymsk, a->xmsk); helper(cpu_env, xa, xb, xt, tcg_constant_i32(mask)); - tcg_temp_free_ptr(xt); - tcg_temp_free_ptr(xa); - tcg_temp_free_ptr(xb); return true; } diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c index 9eb748a283..1e97473af2 100644 --- a/target/riscv/cpu.c +++ b/target/riscv/cpu.c @@ -28,6 +28,7 @@ #include "time_helper.h" #include "exec/exec-all.h" #include "qapi/error.h" +#include "qapi/visitor.h" #include "qemu/error-report.h" #include "hw/qdev-properties.h" #include "migration/vmstate.h" @@ -75,6 +76,9 @@ struct isa_ext_data { static const struct isa_ext_data isa_edata_arr[] = { ISA_EXT_DATA_ENTRY(h, false, PRIV_VERSION_1_12_0, ext_h), ISA_EXT_DATA_ENTRY(v, false, PRIV_VERSION_1_10_0, ext_v), + ISA_EXT_DATA_ENTRY(zicbom, true, PRIV_VERSION_1_12_0, ext_icbom), + ISA_EXT_DATA_ENTRY(zicboz, true, PRIV_VERSION_1_12_0, ext_icboz), + ISA_EXT_DATA_ENTRY(zicond, true, PRIV_VERSION_1_12_0, ext_zicond), ISA_EXT_DATA_ENTRY(zicsr, true, PRIV_VERSION_1_10_0, ext_icsr), ISA_EXT_DATA_ENTRY(zifencei, true, PRIV_VERSION_1_10_0, ext_ifencei), ISA_EXT_DATA_ENTRY(zihintpause, true, PRIV_VERSION_1_10_0, ext_zihintpause), @@ -102,12 +106,16 @@ static const struct isa_ext_data isa_edata_arr[] = { ISA_EXT_DATA_ENTRY(zkt, true, PRIV_VERSION_1_12_0, ext_zkt), ISA_EXT_DATA_ENTRY(zve32f, true, PRIV_VERSION_1_12_0, ext_zve32f), ISA_EXT_DATA_ENTRY(zve64f, true, PRIV_VERSION_1_12_0, ext_zve64f), + ISA_EXT_DATA_ENTRY(zve64d, true, PRIV_VERSION_1_12_0, ext_zve64d), + ISA_EXT_DATA_ENTRY(zvfh, true, PRIV_VERSION_1_12_0, ext_zvfh), + ISA_EXT_DATA_ENTRY(zvfhmin, true, PRIV_VERSION_1_12_0, ext_zvfhmin), ISA_EXT_DATA_ENTRY(zhinx, true, PRIV_VERSION_1_12_0, ext_zhinx), ISA_EXT_DATA_ENTRY(zhinxmin, true, PRIV_VERSION_1_12_0, ext_zhinxmin), ISA_EXT_DATA_ENTRY(smaia, true, PRIV_VERSION_1_12_0, ext_smaia), ISA_EXT_DATA_ENTRY(ssaia, true, PRIV_VERSION_1_12_0, ext_ssaia), ISA_EXT_DATA_ENTRY(sscofpmf, true, PRIV_VERSION_1_12_0, ext_sscofpmf), ISA_EXT_DATA_ENTRY(sstc, true, PRIV_VERSION_1_12_0, ext_sstc), + ISA_EXT_DATA_ENTRY(svadu, true, PRIV_VERSION_1_12_0, ext_svadu), ISA_EXT_DATA_ENTRY(svinval, true, PRIV_VERSION_1_12_0, ext_svinval), ISA_EXT_DATA_ENTRY(svnapot, true, PRIV_VERSION_1_12_0, ext_svnapot), 
ISA_EXT_DATA_ENTRY(svpbmt, true, PRIV_VERSION_1_12_0, ext_svpbmt), @@ -213,7 +221,7 @@ static const char * const riscv_intr_names[] = { "reserved" }; -static void register_cpu_props(DeviceState *dev); +static void register_cpu_props(Object *obj); const char *riscv_cpu_get_trap_name(target_ulong cause, bool async) { @@ -242,6 +250,89 @@ static void set_vext_version(CPURISCVState *env, int vext_ver) env->vext_ver = vext_ver; } +#ifndef CONFIG_USER_ONLY +static uint8_t satp_mode_from_str(const char *satp_mode_str) +{ + if (!strncmp(satp_mode_str, "mbare", 5)) { + return VM_1_10_MBARE; + } + + if (!strncmp(satp_mode_str, "sv32", 4)) { + return VM_1_10_SV32; + } + + if (!strncmp(satp_mode_str, "sv39", 4)) { + return VM_1_10_SV39; + } + + if (!strncmp(satp_mode_str, "sv48", 4)) { + return VM_1_10_SV48; + } + + if (!strncmp(satp_mode_str, "sv57", 4)) { + return VM_1_10_SV57; + } + + if (!strncmp(satp_mode_str, "sv64", 4)) { + return VM_1_10_SV64; + } + + g_assert_not_reached(); +} + +uint8_t satp_mode_max_from_map(uint32_t map) +{ + /* map here has at least one bit set, so no problem with clz */ + return 31 - __builtin_clz(map); +} + +const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit) +{ + if (is_32_bit) { + switch (satp_mode) { + case VM_1_10_SV32: + return "sv32"; + case VM_1_10_MBARE: + return "none"; + } + } else { + switch (satp_mode) { + case VM_1_10_SV64: + return "sv64"; + case VM_1_10_SV57: + return "sv57"; + case VM_1_10_SV48: + return "sv48"; + case VM_1_10_SV39: + return "sv39"; + case VM_1_10_MBARE: + return "none"; + } + } + + g_assert_not_reached(); +} + +static void set_satp_mode_max_supported(RISCVCPU *cpu, + uint8_t satp_mode) +{ + bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; + const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64; + + for (int i = 0; i <= satp_mode; ++i) { + if (valid_vm[i]) { + cpu->cfg.satp_mode.supported |= (1 << i); + } + } +} + +/* Set the satp mode to the max supported */ +static void set_satp_mode_default_map(RISCVCPU *cpu) +{ + cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported; +} +#endif + static void riscv_any_cpu_init(Object *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; @@ -250,8 +341,15 @@ static void riscv_any_cpu_init(Object *obj) #elif defined(TARGET_RISCV64) set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU); #endif + +#ifndef CONFIG_USER_ONLY + set_satp_mode_max_supported(RISCV_CPU(obj), + riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ? 
+ VM_1_10_SV32 : VM_1_10_SV57); +#endif + set_priv_version(env, PRIV_VERSION_1_12_0); - register_cpu_props(DEVICE(obj)); + register_cpu_props(obj); } #if defined(TARGET_RISCV64) @@ -260,17 +358,23 @@ static void rv64_base_cpu_init(Object *obj) CPURISCVState *env = &RISCV_CPU(obj)->env; /* We set this in the realise function */ set_misa(env, MXL_RV64, 0); - register_cpu_props(DEVICE(obj)); + register_cpu_props(obj); /* Set latest version of privileged specification */ set_priv_version(env, PRIV_VERSION_1_12_0); +#ifndef CONFIG_USER_ONLY + set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); +#endif } static void rv64_sifive_u_cpu_init(Object *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); - register_cpu_props(DEVICE(obj)); + register_cpu_props(obj); set_priv_version(env, PRIV_VERSION_1_10_0); +#ifndef CONFIG_USER_ONLY + set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39); +#endif } static void rv64_sifive_e_cpu_init(Object *obj) @@ -279,9 +383,12 @@ static void rv64_sifive_e_cpu_init(Object *obj) RISCVCPU *cpu = RISCV_CPU(obj); set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU); - register_cpu_props(DEVICE(obj)); + register_cpu_props(obj); set_priv_version(env, PRIV_VERSION_1_10_0); cpu->cfg.mmu = false; +#ifndef CONFIG_USER_ONLY + set_satp_mode_max_supported(cpu, VM_1_10_MBARE); +#endif } static void rv64_thead_c906_cpu_init(Object *obj) @@ -311,6 +418,9 @@ static void rv64_thead_c906_cpu_init(Object *obj) cpu->cfg.ext_xtheadsync = true; cpu->cfg.mvendorid = THEAD_VENDOR_ID; +#ifndef CONFIG_USER_ONLY + set_satp_mode_max_supported(cpu, VM_1_10_SV39); +#endif } static void rv128_base_cpu_init(Object *obj) @@ -324,9 +434,12 @@ static void rv128_base_cpu_init(Object *obj) CPURISCVState *env = &RISCV_CPU(obj)->env; /* We set this in the realise function */ set_misa(env, MXL_RV128, 0); - register_cpu_props(DEVICE(obj)); + register_cpu_props(obj); /* Set latest version of privileged specification */ set_priv_version(env, PRIV_VERSION_1_12_0); +#ifndef CONFIG_USER_ONLY + set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); +#endif } #else static void rv32_base_cpu_init(Object *obj) @@ -334,17 +447,23 @@ static void rv32_base_cpu_init(Object *obj) CPURISCVState *env = &RISCV_CPU(obj)->env; /* We set this in the realise function */ set_misa(env, MXL_RV32, 0); - register_cpu_props(DEVICE(obj)); + register_cpu_props(obj); /* Set latest version of privileged specification */ set_priv_version(env, PRIV_VERSION_1_12_0); +#ifndef CONFIG_USER_ONLY + set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); +#endif } static void rv32_sifive_u_cpu_init(Object *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); - register_cpu_props(DEVICE(obj)); + register_cpu_props(obj); set_priv_version(env, PRIV_VERSION_1_10_0); +#ifndef CONFIG_USER_ONLY + set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); +#endif } static void rv32_sifive_e_cpu_init(Object *obj) @@ -353,9 +472,12 @@ static void rv32_sifive_e_cpu_init(Object *obj) RISCVCPU *cpu = RISCV_CPU(obj); set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU); - register_cpu_props(DEVICE(obj)); + register_cpu_props(obj); set_priv_version(env, PRIV_VERSION_1_10_0); cpu->cfg.mmu = false; +#ifndef CONFIG_USER_ONLY + set_satp_mode_max_supported(cpu, VM_1_10_MBARE); +#endif } static void rv32_ibex_cpu_init(Object *obj) @@ -364,9 +486,12 @@ static void rv32_ibex_cpu_init(Object *obj) RISCVCPU *cpu = 
RISCV_CPU(obj); set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU); - register_cpu_props(DEVICE(obj)); + register_cpu_props(obj); set_priv_version(env, PRIV_VERSION_1_11_0); cpu->cfg.mmu = false; +#ifndef CONFIG_USER_ONLY + set_satp_mode_max_supported(cpu, VM_1_10_MBARE); +#endif cpu->cfg.epmp = true; } @@ -376,9 +501,12 @@ static void rv32_imafcu_nommu_cpu_init(Object *obj) RISCVCPU *cpu = RISCV_CPU(obj); set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU); - register_cpu_props(DEVICE(obj)); + register_cpu_props(obj); set_priv_version(env, PRIV_VERSION_1_10_0); cpu->cfg.mmu = false; +#ifndef CONFIG_USER_ONLY + set_satp_mode_max_supported(cpu, VM_1_10_MBARE); +#endif } #endif @@ -391,7 +519,7 @@ static void riscv_host_cpu_init(Object *obj) #elif defined(TARGET_RISCV64) set_misa(env, MXL_RV64, 0); #endif - register_cpu_props(DEVICE(obj)); + register_cpu_props(obj); } #endif @@ -616,6 +744,11 @@ static void riscv_cpu_reset_hold(Object *obj) env->bins = 0; env->two_stage_lookup = false; + env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) | + (cpu->cfg.ext_svadu ? MENVCFG_HADE : 0); + env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) | + (cpu->cfg.ext_svadu ? HENVCFG_HADE : 0); + /* Initialized default priorities of local interrupts. */ for (i = 0; i < ARRAY_SIZE(env->miprio); i++) { iprio = riscv_cpu_default_priority(i); @@ -640,7 +773,7 @@ static void riscv_cpu_reset_hold(Object *obj) set_default_nan_mode(1, &env->fp_status); #ifndef CONFIG_USER_ONLY - if (riscv_feature(env, RISCV_FEATURE_DEBUG)) { + if (cpu->cfg.debug) { riscv_trigger_init(env); } @@ -732,7 +865,11 @@ static void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) return; } - if ((cpu->cfg.ext_zfh || cpu->cfg.ext_zfhmin) && !cpu->cfg.ext_f) { + if (cpu->cfg.ext_zfh) { + cpu->cfg.ext_zfhmin = true; + } + + if (cpu->cfg.ext_zfhmin && !cpu->cfg.ext_f) { error_setg(errp, "Zfh/Zfhmin extensions require F extension"); return; } @@ -742,19 +879,51 @@ static void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) return; } - if (cpu->cfg.ext_v && !cpu->cfg.ext_d) { - error_setg(errp, "V extension requires D extension"); + /* The V vector extension depends on the Zve64d extension */ + if (cpu->cfg.ext_v) { + cpu->cfg.ext_zve64d = true; + } + + /* The Zve64d extension depends on the Zve64f extension */ + if (cpu->cfg.ext_zve64d) { + cpu->cfg.ext_zve64f = true; + } + + /* The Zve64f extension depends on the Zve32f extension */ + if (cpu->cfg.ext_zve64f) { + cpu->cfg.ext_zve32f = true; + } + + if (cpu->cfg.ext_zve64d && !cpu->cfg.ext_d) { + error_setg(errp, "Zve64d/V extensions require D extension"); return; } - if ((cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) && !cpu->cfg.ext_f) { + if (cpu->cfg.ext_zve32f && !cpu->cfg.ext_f) { error_setg(errp, "Zve32f/Zve64f extensions require F extension"); return; } + if (cpu->cfg.ext_zvfh) { + cpu->cfg.ext_zvfhmin = true; + } + + if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) { + error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension"); + return; + } + + if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) { + error_setg(errp, "Zvfh extensions requires Zfhmin extension"); + return; + } + /* Set the ISA extensions, checks should have happened above */ - if (cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinx || - cpu->cfg.ext_zhinxmin) { + if (cpu->cfg.ext_zhinx) { + cpu->cfg.ext_zhinxmin = true; + } + + if (cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) { cpu->cfg.ext_zfinx = true; } @@ -765,7 +934,7 @@ static void riscv_cpu_validate_set_extensions(RISCVCPU 
*cpu, Error **errp) } if (cpu->cfg.ext_f) { error_setg(errp, - "Zfinx cannot be supported together with F extension"); + "Zfinx cannot be supported together with F extension"); return; } } @@ -828,40 +997,40 @@ static void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) ext |= RVV; if (!is_power_of_2(cpu->cfg.vlen)) { error_setg(errp, - "Vector extension VLEN must be power of 2"); + "Vector extension VLEN must be power of 2"); return; } if (cpu->cfg.vlen > RV_VLEN_MAX || cpu->cfg.vlen < 128) { error_setg(errp, - "Vector extension implementation only supports VLEN " - "in the range [128, %d]", RV_VLEN_MAX); + "Vector extension implementation only supports VLEN " + "in the range [128, %d]", RV_VLEN_MAX); return; } if (!is_power_of_2(cpu->cfg.elen)) { error_setg(errp, - "Vector extension ELEN must be power of 2"); + "Vector extension ELEN must be power of 2"); return; } - if (cpu->cfg.elen > 64 || cpu->cfg.elen < 8) { - error_setg(errp, - "Vector extension implementation only supports ELEN " - "in the range [8, 64]"); - return; - } - if (cpu->cfg.vext_spec) { - if (!g_strcmp0(cpu->cfg.vext_spec, "v1.0")) { - vext_version = VEXT_VERSION_1_00_0; - } else { + if (cpu->cfg.elen > 64 || cpu->cfg.elen < 8) { error_setg(errp, - "Unsupported vector spec version '%s'", - cpu->cfg.vext_spec); + "Vector extension implementation only supports ELEN " + "in the range [8, 64]"); return; } - } else { - qemu_log("vector version is not specified, " - "use the default value v1.0\n"); - } - set_vext_version(env, vext_version); + if (cpu->cfg.vext_spec) { + if (!g_strcmp0(cpu->cfg.vext_spec, "v1.0")) { + vext_version = VEXT_VERSION_1_00_0; + } else { + error_setg(errp, + "Unsupported vector spec version '%s'", + cpu->cfg.vext_spec); + return; + } + } else { + qemu_log("vector version is not specified, " + "use the default value v1.0\n"); + } + set_vext_version(env, vext_version); } if (cpu->cfg.ext_j) { ext |= RVJ; @@ -870,6 +1039,88 @@ static void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) set_misa(env, env->misa_mxl, ext); } +#ifndef CONFIG_USER_ONLY +static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp) +{ + bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; + uint8_t satp_mode_map_max; + uint8_t satp_mode_supported_max = + satp_mode_max_from_map(cpu->cfg.satp_mode.supported); + + if (cpu->cfg.satp_mode.map == 0) { + if (cpu->cfg.satp_mode.init == 0) { + /* If unset by the user, we fallback to the default satp mode. */ + set_satp_mode_default_map(cpu); + } else { + /* + * Find the lowest level that was disabled and then enable the + * first valid level below which can be found in + * valid_vm_1_10_32/64. + */ + for (int i = 1; i < 16; ++i) { + if ((cpu->cfg.satp_mode.init & (1 << i)) && + (cpu->cfg.satp_mode.supported & (1 << i))) { + for (int j = i - 1; j >= 0; --j) { + if (cpu->cfg.satp_mode.supported & (1 << j)) { + cpu->cfg.satp_mode.map |= (1 << j); + break; + } + } + break; + } + } + } + } + + satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map); + + /* Make sure the user asked for a supported configuration (HW and qemu) */ + if (satp_mode_map_max > satp_mode_supported_max) { + error_setg(errp, "satp_mode %s is higher than hw max capability %s", + satp_mode_str(satp_mode_map_max, rv32), + satp_mode_str(satp_mode_supported_max, rv32)); + return; + } + + /* + * Make sure the user did not ask for an invalid configuration as per + * the specification. 
+ */ + if (!rv32) { + for (int i = satp_mode_map_max - 1; i >= 0; --i) { + if (!(cpu->cfg.satp_mode.map & (1 << i)) && + (cpu->cfg.satp_mode.init & (1 << i)) && + (cpu->cfg.satp_mode.supported & (1 << i))) { + error_setg(errp, "cannot disable %s satp mode if %s " + "is enabled", satp_mode_str(i, false), + satp_mode_str(satp_mode_map_max, false)); + return; + } + } + } + + /* Finally expand the map so that all valid modes are set */ + for (int i = satp_mode_map_max - 1; i >= 0; --i) { + if (cpu->cfg.satp_mode.supported & (1 << i)) { + cpu->cfg.satp_mode.map |= (1 << i); + } + } +} +#endif + +static void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp) +{ +#ifndef CONFIG_USER_ONLY + Error *local_err = NULL; + + riscv_cpu_satp_mode_finalize(cpu, &local_err); + if (local_err != NULL) { + error_propagate(errp, local_err); + return; + } +#endif +} + static void riscv_cpu_realize(DeviceState *dev, Error **errp) { CPUState *cs = CPU(dev); @@ -922,24 +1173,13 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp) } } - if (cpu->cfg.mmu) { - riscv_set_feature(env, RISCV_FEATURE_MMU); - } - - if (cpu->cfg.pmp) { - riscv_set_feature(env, RISCV_FEATURE_PMP); - + if (cpu->cfg.epmp && !cpu->cfg.pmp) { /* * Enhanced PMP should only be available * on harts with PMP support */ - if (cpu->cfg.epmp) { - riscv_set_feature(env, RISCV_FEATURE_EPMP); - } - } - - if (cpu->cfg.debug) { - riscv_set_feature(env, RISCV_FEATURE_DEBUG); + error_setg(errp, "Invalid configuration: EPMP requires PMP support"); + return; } @@ -980,6 +1220,12 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp) } #endif + riscv_cpu_finalize_features(cpu, &local_err); + if (local_err != NULL) { + error_propagate(errp, local_err); + return; + } + riscv_cpu_register_gdb_regs_for_features(cs); qemu_init_vcpu(cs); @@ -989,6 +1235,52 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp) } #ifndef CONFIG_USER_ONLY +static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + RISCVSATPMap *satp_map = opaque; + uint8_t satp = satp_mode_from_str(name); + bool value; + + value = satp_map->map & (1 << satp); + + visit_type_bool(v, name, &value, errp); +} + +static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + RISCVSATPMap *satp_map = opaque; + uint8_t satp = satp_mode_from_str(name); + bool value; + + if (!visit_type_bool(v, name, &value, errp)) { + return; + } + + satp_map->map = deposit32(satp_map->map, satp, 1, value); + satp_map->init |= 1 << satp; +} + +static void riscv_add_satp_mode_properties(Object *obj) +{ + RISCVCPU *cpu = RISCV_CPU(obj); + + if (cpu->env.misa_mxl == MXL_RV32) { + object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp, + cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); + } else { + object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp, + cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); + object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp, + cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); + object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp, + cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); + object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp, + cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); + } +} + static void riscv_cpu_set_irq(void *opaque, int irq, int level) { RISCVCPU *cpu = RISCV_CPU(opaque); @@ -1093,6 +1385,7 @@ static Property riscv_cpu_extensions[] = { DEFINE_PROP_BOOL("Zfhmin", RISCVCPU, cfg.ext_zfhmin, false), 
DEFINE_PROP_BOOL("Zve32f", RISCVCPU, cfg.ext_zve32f, false), DEFINE_PROP_BOOL("Zve64f", RISCVCPU, cfg.ext_zve64f, false), + DEFINE_PROP_BOOL("Zve64d", RISCVCPU, cfg.ext_zve64d, false), DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true), DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true), DEFINE_PROP_BOOL("sstc", RISCVCPU, cfg.ext_sstc, true), @@ -1102,6 +1395,8 @@ static Property riscv_cpu_extensions[] = { DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128), DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64), + DEFINE_PROP_BOOL("svadu", RISCVCPU, cfg.ext_svadu, true), + DEFINE_PROP_BOOL("svinval", RISCVCPU, cfg.ext_svinval, false), DEFINE_PROP_BOOL("svnapot", RISCVCPU, cfg.ext_svnapot, false), DEFINE_PROP_BOOL("svpbmt", RISCVCPU, cfg.ext_svpbmt, false), @@ -1129,6 +1424,11 @@ static Property riscv_cpu_extensions[] = { DEFINE_PROP_BOOL("zhinx", RISCVCPU, cfg.ext_zhinx, false), DEFINE_PROP_BOOL("zhinxmin", RISCVCPU, cfg.ext_zhinxmin, false), + DEFINE_PROP_BOOL("zicbom", RISCVCPU, cfg.ext_icbom, true), + DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64), + DEFINE_PROP_BOOL("zicboz", RISCVCPU, cfg.ext_icboz, true), + DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64), + DEFINE_PROP_BOOL("zmmul", RISCVCPU, cfg.ext_zmmul, false), /* Vendor-specific custom extensions */ @@ -1146,12 +1446,16 @@ static Property riscv_cpu_extensions[] = { DEFINE_PROP_BOOL("xventanacondops", RISCVCPU, cfg.ext_XVentanaCondOps, false), /* These are experimental so mark with 'x-' */ + DEFINE_PROP_BOOL("x-zicond", RISCVCPU, cfg.ext_zicond, false), DEFINE_PROP_BOOL("x-j", RISCVCPU, cfg.ext_j, false), /* ePMP 0.9.3 */ DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false), DEFINE_PROP_BOOL("x-smaia", RISCVCPU, cfg.ext_smaia, false), DEFINE_PROP_BOOL("x-ssaia", RISCVCPU, cfg.ext_ssaia, false), + DEFINE_PROP_BOOL("x-zvfh", RISCVCPU, cfg.ext_zvfh, false), + DEFINE_PROP_BOOL("x-zvfhmin", RISCVCPU, cfg.ext_zvfhmin, false), + DEFINE_PROP_END_OF_LIST(), }; @@ -1161,11 +1465,12 @@ static Property riscv_cpu_extensions[] = { * properties and leave. env.misa_ext = 0 means that we want * all the default properties to be registered. */ -static void register_cpu_props(DeviceState *dev) +static void register_cpu_props(Object *obj) { - RISCVCPU *cpu = RISCV_CPU(OBJECT(dev)); + RISCVCPU *cpu = RISCV_CPU(obj); uint32_t misa_ext = cpu->env.misa_ext; Property *prop; + DeviceState *dev = DEVICE(obj); /* * If misa_ext is not zero, set cfg properties now to @@ -1196,6 +1501,10 @@ static void register_cpu_props(DeviceState *dev) for (prop = riscv_cpu_extensions; prop && prop->name; prop++) { qdev_property_add_static(dev, prop); } + +#ifndef CONFIG_USER_ONLY + riscv_add_satp_mode_properties(obj); +#endif } static Property riscv_cpu_properties[] = { @@ -1213,6 +1522,12 @@ static Property riscv_cpu_properties[] = { DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false), DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false), + + /* + * write_misa() is marked as experimental for now so mark + * it with -x and default to 'false'. 
+ */ + DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false), DEFINE_PROP_END_OF_LIST(), }; @@ -1246,6 +1561,13 @@ static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname) } #ifndef CONFIG_USER_ONLY +static int64_t riscv_get_arch_id(CPUState *cs) +{ + RISCVCPU *cpu = RISCV_CPU(cs); + + return cpu->env.mhartid; +} + #include "hw/core/sysemu-cpu-ops.h" static const struct SysemuCPUOps riscv_sysemu_ops = { @@ -1300,6 +1622,7 @@ static void riscv_cpu_class_init(ObjectClass *c, void *data) cc->disas_set_info = riscv_cpu_disas_set_info; #ifndef CONFIG_USER_ONLY cc->sysemu_ops = &riscv_sysemu_ops; + cc->get_arch_id = riscv_get_arch_id; #endif cc->gdb_arch_name = riscv_gdb_arch_name; cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml; diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h index 31537fc05f..638e47c75a 100644 --- a/target/riscv/cpu.h +++ b/target/riscv/cpu.h @@ -27,6 +27,7 @@ #include "qom/object.h" #include "qemu/int128.h" #include "cpu_bits.h" +#include "qapi/qapi-types-common.h" #define TCG_GUEST_DEFAULT_MO 0 @@ -81,17 +82,6 @@ #define RVH RV('H') #define RVJ RV('J') -/* S extension denotes that Supervisor mode exists, however it is possible - to have a core that support S mode but does not have an MMU and there - is currently no bit in misa to indicate whether an MMU exists or not - so a cpu features bitfield is required, likewise for optional PMP support */ -enum { - RISCV_FEATURE_MMU, - RISCV_FEATURE_PMP, - RISCV_FEATURE_EPMP, - RISCV_FEATURE_MISA, - RISCV_FEATURE_DEBUG -}; /* Privileged specification version */ enum { @@ -186,8 +176,6 @@ struct CPUArchState { /* 128-bit helpers upper part return value */ target_ulong retxh; - uint32_t features; - #ifdef CONFIG_USER_ONLY uint32_t elf_flags; #endif @@ -414,6 +402,21 @@ struct RISCVCPUClass { ResettablePhases parent_phases; }; +/* + * map is a 16-bit bitmap: the most significant set bit in map is the maximum + * satp mode that is supported. It may be chosen by the user and must respect + * what qemu implements (valid_1_10_32/64) and what the hw is capable of + * (supported bitmap below). + * + * init is a 16-bit bitmap used to make sure the user selected a correct + * configuration as per the specification. + * + * supported is a 16-bit bitmap used to reflect the hw capabilities. 
+ */ +typedef struct { + uint16_t map, init, supported; +} RISCVSATPMap; + struct RISCVCPUConfig { bool ext_i; bool ext_e; @@ -447,9 +450,13 @@ struct RISCVCPUConfig { bool ext_zkt; bool ext_ifencei; bool ext_icsr; + bool ext_icbom; + bool ext_icboz; + bool ext_zicond; bool ext_zihintpause; bool ext_smstateen; bool ext_sstc; + bool ext_svadu; bool ext_svinval; bool ext_svnapot; bool ext_svpbmt; @@ -462,7 +469,10 @@ struct RISCVCPUConfig { bool ext_zhinxmin; bool ext_zve32f; bool ext_zve64f; + bool ext_zve64d; bool ext_zmmul; + bool ext_zvfh; + bool ext_zvfhmin; bool ext_smaia; bool ext_ssaia; bool ext_sscofpmf; @@ -494,12 +504,19 @@ struct RISCVCPUConfig { char *vext_spec; uint16_t vlen; uint16_t elen; + uint16_t cbom_blocksize; + uint16_t cboz_blocksize; bool mmu; bool pmp; bool epmp; bool debug; + bool misa_w; bool short_isa_string; + +#ifndef CONFIG_USER_ONLY + RISCVSATPMap satp_mode; +#endif }; typedef struct RISCVCPUConfig RISCVCPUConfig; @@ -535,16 +552,6 @@ static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext) return (env->misa_ext & ext) != 0; } -static inline bool riscv_feature(CPURISCVState *env, int feature) -{ - return env->features & (1ULL << feature); -} - -static inline void riscv_set_feature(CPURISCVState *env, int feature) -{ - env->features |= (1ULL << feature); -} - #include "cpu_user.h" extern const char * const riscv_int_regnames[]; @@ -654,6 +661,11 @@ static inline RISCVMXL riscv_cpu_mxl(CPURISCVState *env) #endif #define riscv_cpu_mxl_bits(env) (1UL << (4 + riscv_cpu_mxl(env))) +static inline const RISCVCPUConfig *riscv_cpu_cfg(CPURISCVState *env) +{ + return &env_archcpu(env)->cfg; +} + #if defined(TARGET_RISCV32) #define cpu_recompute_xl(env) ((void)(env), MXL_RV32) #else @@ -806,9 +818,14 @@ enum riscv_pmu_event_idx { /* CSR function table */ extern riscv_csr_operations csr_ops[CSR_TABLE_SIZE]; +extern const bool valid_vm_1_10_32[], valid_vm_1_10_64[]; + void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops); void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops); void riscv_cpu_register_gdb_regs_for_features(CPUState *cs); +uint8_t satp_mode_max_from_map(uint32_t map); +const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit); + #endif /* RISCV_CPU_H */ diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h index 8b0d7e20ea..fca7ef0cef 100644 --- a/target/riscv/cpu_bits.h +++ b/target/riscv/cpu_bits.h @@ -747,10 +747,12 @@ typedef enum RISCVException { #define MENVCFG_CBIE (3UL << 4) #define MENVCFG_CBCFE BIT(6) #define MENVCFG_CBZE BIT(7) +#define MENVCFG_HADE (1ULL << 61) #define MENVCFG_PBMTE (1ULL << 62) #define MENVCFG_STCE (1ULL << 63) /* For RV32 */ +#define MENVCFGH_HADE BIT(29) #define MENVCFGH_PBMTE BIT(30) #define MENVCFGH_STCE BIT(31) @@ -763,10 +765,12 @@ typedef enum RISCVException { #define HENVCFG_CBIE MENVCFG_CBIE #define HENVCFG_CBCFE MENVCFG_CBCFE #define HENVCFG_CBZE MENVCFG_CBZE +#define HENVCFG_HADE MENVCFG_HADE #define HENVCFG_PBMTE MENVCFG_PBMTE #define HENVCFG_STCE MENVCFG_STCE /* For RV32 */ +#define HENVCFGH_HADE MENVCFGH_HADE #define HENVCFGH_PBMTE MENVCFGH_PBMTE #define HENVCFGH_STCE MENVCFGH_STCE diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c index 3a9472a2ff..f88c503cf4 100644 --- a/target/riscv/cpu_helper.c +++ b/target/riscv/cpu_helper.c @@ -51,7 +51,7 @@ void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc, *pc = env->xl == MXL_RV32 ? 
env->pc & UINT32_MAX : env->pc; *cs_base = 0; - if (riscv_has_ext(env, RVV) || cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) { + if (cpu->cfg.ext_zve32f) { /* * If env->vl equals to VLMAX, we can use generic vector operation * expanders (GVEC) to accerlate the vector operations. @@ -105,7 +105,7 @@ void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc, flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_VS, get_field(env->mstatus_hs, MSTATUS_VS)); } - if (riscv_feature(env, RISCV_FEATURE_DEBUG) && !icount_enabled()) { + if (cpu->cfg.debug && !icount_enabled()) { flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled); } #endif @@ -706,7 +706,7 @@ static int get_physical_address_pmp(CPURISCVState *env, int *prot, pmp_priv_t pmp_priv; int pmp_index = -1; - if (!riscv_feature(env, RISCV_FEATURE_PMP)) { + if (!riscv_cpu_cfg(env)->pmp) { *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; return TRANSLATE_SUCCESS; } @@ -796,7 +796,7 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical, mode = PRV_U; } - if (mode == PRV_M || !riscv_feature(env, RISCV_FEATURE_MMU)) { + if (mode == PRV_M || !riscv_cpu_cfg(env)->mmu) { *physical = addr; *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; return TRANSLATE_SUCCESS; @@ -936,9 +936,17 @@ restart: return TRANSLATE_FAIL; } + bool pbmte = env->menvcfg & MENVCFG_PBMTE; + bool hade = env->menvcfg & MENVCFG_HADE; + + if (first_stage && two_stage && riscv_cpu_virt_enabled(env)) { + pbmte = pbmte && (env->henvcfg & HENVCFG_PBMTE); + hade = hade && (env->henvcfg & HENVCFG_HADE); + } + if (riscv_cpu_sxl(env) == MXL_RV32) { ppn = pte >> PTE_PPN_SHIFT; - } else if (cpu->cfg.ext_svpbmt || cpu->cfg.ext_svnapot) { + } else if (pbmte || cpu->cfg.ext_svnapot) { ppn = (pte & (target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT; } else { ppn = pte >> PTE_PPN_SHIFT; @@ -950,7 +958,7 @@ restart: if (!(pte & PTE_V)) { /* Invalid PTE */ return TRANSLATE_FAIL; - } else if (!cpu->cfg.ext_svpbmt && (pte & PTE_PBMT)) { + } else if (!pbmte && (pte & PTE_PBMT)) { return TRANSLATE_FAIL; } else if (!(pte & (PTE_R | PTE_W | PTE_X))) { /* Inner PTE, continue walking */ @@ -992,6 +1000,10 @@ restart: /* Page table updates need to be atomic with MTTCG enabled */ if (updated_pte != pte) { + if (!hade) { + return TRANSLATE_FAIL; + } + /* * - if accessed or dirty bits need updating, and the PTE is * in RAM, then we do so atomically with a compare and swap. 
diff --git a/target/riscv/csr.c b/target/riscv/csr.c index 1b0a0c1693..ab566639e5 100644 --- a/target/riscv/csr.c +++ b/target/riscv/csr.c @@ -46,10 +46,8 @@ static RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit) { bool virt = riscv_cpu_virt_enabled(env); - CPUState *cs = env_cpu(env); - RISCVCPU *cpu = RISCV_CPU(cs); - if (env->priv == PRV_M || !cpu->cfg.ext_smstateen) { + if (env->priv == PRV_M || !riscv_cpu_cfg(env)->ext_smstateen) { return RISCV_EXCP_NONE; } @@ -81,7 +79,7 @@ static RISCVException fs(CPURISCVState *env, int csrno) { #if !defined(CONFIG_USER_ONLY) if (!env->debugger && !riscv_cpu_fp_enabled(env) && - !RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) { + !riscv_cpu_cfg(env)->ext_zfinx) { return RISCV_EXCP_ILLEGAL_INST; } #endif @@ -90,11 +88,9 @@ static RISCVException fs(CPURISCVState *env, int csrno) static RISCVException vs(CPURISCVState *env, int csrno) { - CPUState *cs = env_cpu(env); - RISCVCPU *cpu = RISCV_CPU(cs); + RISCVCPU *cpu = env_archcpu(env); - if (env->misa_ext & RVV || - cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) { + if (cpu->cfg.ext_zve32f) { #if !defined(CONFIG_USER_ONLY) if (!env->debugger && !riscv_cpu_vector_enabled(env)) { return RISCV_EXCP_ILLEGAL_INST; @@ -108,8 +104,7 @@ static RISCVException vs(CPURISCVState *env, int csrno) static RISCVException ctr(CPURISCVState *env, int csrno) { #if !defined(CONFIG_USER_ONLY) - CPUState *cs = env_cpu(env); - RISCVCPU *cpu = RISCV_CPU(cs); + RISCVCPU *cpu = env_archcpu(env); int ctr_index; target_ulong ctr_mask; int base_csrno = CSR_CYCLE; @@ -134,6 +129,10 @@ static RISCVException ctr(CPURISCVState *env, int csrno) skip_ext_pmu_check: + if (env->debugger) { + return RISCV_EXCP_NONE; + } + if (env->priv < PRV_M && !get_field(env->mcounteren, ctr_mask)) { return RISCV_EXCP_ILLEGAL_INST; } @@ -166,8 +165,7 @@ static RISCVException ctr32(CPURISCVState *env, int csrno) #if !defined(CONFIG_USER_ONLY) static RISCVException mctr(CPURISCVState *env, int csrno) { - CPUState *cs = env_cpu(env); - RISCVCPU *cpu = RISCV_CPU(cs); + int pmu_num = riscv_cpu_cfg(env)->pmu_num; int ctr_index; int base_csrno = CSR_MHPMCOUNTER3; @@ -176,7 +174,7 @@ static RISCVException mctr(CPURISCVState *env, int csrno) base_csrno += 0x80; } ctr_index = csrno - base_csrno; - if (!cpu->cfg.pmu_num || ctr_index >= cpu->cfg.pmu_num) { + if (!pmu_num || ctr_index >= pmu_num) { /* The PMU is not enabled or counter is out of range*/ return RISCV_EXCP_ILLEGAL_INST; } @@ -195,8 +193,7 @@ static RISCVException mctr32(CPURISCVState *env, int csrno) static RISCVException sscofpmf(CPURISCVState *env, int csrno) { - CPUState *cs = env_cpu(env); - RISCVCPU *cpu = RISCV_CPU(cs); + RISCVCPU *cpu = env_archcpu(env); if (!cpu->cfg.ext_sscofpmf) { return RISCV_EXCP_ILLEGAL_INST; @@ -222,9 +219,7 @@ static RISCVException any32(CPURISCVState *env, int csrno) static int aia_any(CPURISCVState *env, int csrno) { - RISCVCPU *cpu = env_archcpu(env); - - if (!cpu->cfg.ext_smaia) { + if (!riscv_cpu_cfg(env)->ext_smaia) { return RISCV_EXCP_ILLEGAL_INST; } @@ -233,9 +228,7 @@ static int aia_any(CPURISCVState *env, int csrno) static int aia_any32(CPURISCVState *env, int csrno) { - RISCVCPU *cpu = env_archcpu(env); - - if (!cpu->cfg.ext_smaia) { + if (!riscv_cpu_cfg(env)->ext_smaia) { return RISCV_EXCP_ILLEGAL_INST; } @@ -262,9 +255,7 @@ static int smode32(CPURISCVState *env, int csrno) static int aia_smode(CPURISCVState *env, int csrno) { - RISCVCPU *cpu = env_archcpu(env); - - if (!cpu->cfg.ext_ssaia) { + if (!riscv_cpu_cfg(env)->ext_ssaia) { return 
RISCV_EXCP_ILLEGAL_INST; } @@ -273,9 +264,7 @@ static int aia_smode(CPURISCVState *env, int csrno) static int aia_smode32(CPURISCVState *env, int csrno) { - RISCVCPU *cpu = env_archcpu(env); - - if (!cpu->cfg.ext_ssaia) { + if (!riscv_cpu_cfg(env)->ext_ssaia) { return RISCV_EXCP_ILLEGAL_INST; } @@ -321,8 +310,7 @@ static RISCVException umode32(CPURISCVState *env, int csrno) static RISCVException mstateen(CPURISCVState *env, int csrno) { - CPUState *cs = env_cpu(env); - RISCVCPU *cpu = RISCV_CPU(cs); + RISCVCPU *cpu = env_archcpu(env); if (!cpu->cfg.ext_smstateen) { return RISCV_EXCP_ILLEGAL_INST; @@ -333,20 +321,28 @@ static RISCVException mstateen(CPURISCVState *env, int csrno) static RISCVException hstateen_pred(CPURISCVState *env, int csrno, int base) { - CPUState *cs = env_cpu(env); - RISCVCPU *cpu = RISCV_CPU(cs); + RISCVCPU *cpu = env_archcpu(env); if (!cpu->cfg.ext_smstateen) { return RISCV_EXCP_ILLEGAL_INST; } + RISCVException ret = hmode(env, csrno); + if (ret != RISCV_EXCP_NONE) { + return ret; + } + + if (env->debugger) { + return RISCV_EXCP_NONE; + } + if (env->priv < PRV_M) { if (!(env->mstateen[csrno - base] & SMSTATEEN_STATEEN)) { return RISCV_EXCP_ILLEGAL_INST; } } - return hmode(env, csrno); + return RISCV_EXCP_NONE; } static RISCVException hstateen(CPURISCVState *env, int csrno) @@ -363,13 +359,20 @@ static RISCVException sstateen(CPURISCVState *env, int csrno) { bool virt = riscv_cpu_virt_enabled(env); int index = csrno - CSR_SSTATEEN0; - CPUState *cs = env_cpu(env); - RISCVCPU *cpu = RISCV_CPU(cs); - if (!cpu->cfg.ext_smstateen) { + if (!riscv_cpu_cfg(env)->ext_smstateen) { return RISCV_EXCP_ILLEGAL_INST; } + RISCVException ret = smode(env, csrno); + if (ret != RISCV_EXCP_NONE) { + return ret; + } + + if (env->debugger) { + return RISCV_EXCP_NONE; + } + if (env->priv < PRV_M) { if (!(env->mstateen[index] & SMSTATEEN_STATEEN)) { return RISCV_EXCP_ILLEGAL_INST; @@ -382,7 +385,61 @@ static RISCVException sstateen(CPURISCVState *env, int csrno) } } - return smode(env, csrno); + return RISCV_EXCP_NONE; +} + +static RISCVException sstc(CPURISCVState *env, int csrno) +{ + RISCVCPU *cpu = env_archcpu(env); + bool hmode_check = false; + + if (!cpu->cfg.ext_sstc || !env->rdtime_fn) { + return RISCV_EXCP_ILLEGAL_INST; + } + + if ((csrno == CSR_VSTIMECMP) || (csrno == CSR_VSTIMECMPH)) { + hmode_check = true; + } + + RISCVException ret = hmode_check ? hmode(env, csrno) : smode(env, csrno); + if (ret != RISCV_EXCP_NONE) { + return ret; + } + + if (env->debugger) { + return RISCV_EXCP_NONE; + } + + if (env->priv == PRV_M) { + return RISCV_EXCP_NONE; + } + + /* + * No need of separate function for rv32 as menvcfg stores both menvcfg + * menvcfgh for RV32. 
+ */ + if (!(get_field(env->mcounteren, COUNTEREN_TM) && + get_field(env->menvcfg, MENVCFG_STCE))) { + return RISCV_EXCP_ILLEGAL_INST; + } + + if (riscv_cpu_virt_enabled(env)) { + if (!(get_field(env->hcounteren, COUNTEREN_TM) && + get_field(env->henvcfg, HENVCFG_STCE))) { + return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; + } + } + + return RISCV_EXCP_NONE; +} + +static RISCVException sstc_32(CPURISCVState *env, int csrno) +{ + if (riscv_cpu_mxl(env) != MXL_RV32) { + return RISCV_EXCP_ILLEGAL_INST; + } + + return sstc(env, csrno); } /* Checks if PointerMasking registers could be accessed */ @@ -397,9 +454,7 @@ static RISCVException pointer_masking(CPURISCVState *env, int csrno) static int aia_hmode(CPURISCVState *env, int csrno) { - RISCVCPU *cpu = env_archcpu(env); - - if (!cpu->cfg.ext_ssaia) { + if (!riscv_cpu_cfg(env)->ext_ssaia) { return RISCV_EXCP_ILLEGAL_INST; } @@ -408,9 +463,7 @@ static int aia_hmode(CPURISCVState *env, int csrno) static int aia_hmode32(CPURISCVState *env, int csrno) { - RISCVCPU *cpu = env_archcpu(env); - - if (!cpu->cfg.ext_ssaia) { + if (!riscv_cpu_cfg(env)->ext_ssaia) { return RISCV_EXCP_ILLEGAL_INST; } @@ -419,7 +472,16 @@ static int aia_hmode32(CPURISCVState *env, int csrno) static RISCVException pmp(CPURISCVState *env, int csrno) { - if (riscv_feature(env, RISCV_FEATURE_PMP)) { + if (riscv_cpu_cfg(env)->pmp) { + if (csrno <= CSR_PMPCFG3) { + uint32_t reg_index = csrno - CSR_PMPCFG0; + + /* TODO: RV128 restriction check */ + if ((reg_index & 1) && (riscv_cpu_mxl(env) == MXL_RV64)) { + return RISCV_EXCP_ILLEGAL_INST; + } + } + return RISCV_EXCP_NONE; } @@ -428,7 +490,7 @@ static RISCVException pmp(CPURISCVState *env, int csrno) static RISCVException epmp(CPURISCVState *env, int csrno) { - if (env->priv == PRV_M && riscv_feature(env, RISCV_FEATURE_EPMP)) { + if (riscv_cpu_cfg(env)->epmp) { return RISCV_EXCP_NONE; } @@ -437,7 +499,7 @@ static RISCVException epmp(CPURISCVState *env, int csrno) static RISCVException debug(CPURISCVState *env, int csrno) { - if (riscv_feature(env, RISCV_FEATURE_DEBUG)) { + if (riscv_cpu_cfg(env)->debug) { return RISCV_EXCP_NONE; } @@ -447,13 +509,15 @@ static RISCVException debug(CPURISCVState *env, int csrno) static RISCVException seed(CPURISCVState *env, int csrno) { - RISCVCPU *cpu = env_archcpu(env); - - if (!cpu->cfg.ext_zkr) { + if (!riscv_cpu_cfg(env)->ext_zkr) { return RISCV_EXCP_ILLEGAL_INST; } #if !defined(CONFIG_USER_ONLY) + if (env->debugger) { + return RISCV_EXCP_NONE; + } + /* * With a CSR read-write instruction: * 1) The seed CSR is always available in machine mode as normal. @@ -572,7 +636,7 @@ static RISCVException read_vl(CPURISCVState *env, int csrno, static int read_vlenb(CPURISCVState *env, int csrno, target_ulong *val) { - *val = env_archcpu(env)->cfg.vlen >> 3; + *val = riscv_cpu_cfg(env)->vlen >> 3; return RISCV_EXCP_NONE; } @@ -627,7 +691,7 @@ static RISCVException write_vstart(CPURISCVState *env, int csrno, * The vstart CSR is defined to have only enough writable bits * to hold the largest element index, i.e. lg2(VLEN) bits. 
*/ - env->vstart = val & ~(~0ULL << ctzl(env_archcpu(env)->cfg.vlen)); + env->vstart = val & ~(~0ULL << ctzl(riscv_cpu_cfg(env)->vlen)); return RISCV_EXCP_NONE; } @@ -916,54 +980,8 @@ static RISCVException read_timeh(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } -static RISCVException sstc(CPURISCVState *env, int csrno) -{ - CPUState *cs = env_cpu(env); - RISCVCPU *cpu = RISCV_CPU(cs); - bool hmode_check = false; - - if (!cpu->cfg.ext_sstc || !env->rdtime_fn) { - return RISCV_EXCP_ILLEGAL_INST; - } - - if (env->priv == PRV_M) { - return RISCV_EXCP_NONE; - } - - /* - * No need of separate function for rv32 as menvcfg stores both menvcfg - * menvcfgh for RV32. - */ - if (!(get_field(env->mcounteren, COUNTEREN_TM) && - get_field(env->menvcfg, MENVCFG_STCE))) { - return RISCV_EXCP_ILLEGAL_INST; - } - - if (riscv_cpu_virt_enabled(env)) { - if (!(get_field(env->hcounteren, COUNTEREN_TM) && - get_field(env->henvcfg, HENVCFG_STCE))) { - return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; - } - } - - if ((csrno == CSR_VSTIMECMP) || (csrno == CSR_VSTIMECMPH)) { - hmode_check = true; - } - - return hmode_check ? hmode(env, csrno) : smode(env, csrno); -} - -static RISCVException sstc_32(CPURISCVState *env, int csrno) -{ - if (riscv_cpu_mxl(env) != MXL_RV32) { - return RISCV_EXCP_ILLEGAL_INST; - } - - return sstc(env, csrno); -} - static RISCVException read_vstimecmp(CPURISCVState *env, int csrno, - target_ulong *val) + target_ulong *val) { *val = env->vstimecmp; @@ -971,7 +989,7 @@ static RISCVException read_vstimecmp(CPURISCVState *env, int csrno, } static RISCVException read_vstimecmph(CPURISCVState *env, int csrno, - target_ulong *val) + target_ulong *val) { *val = env->vstimecmp >> 32; @@ -979,7 +997,7 @@ static RISCVException read_vstimecmph(CPURISCVState *env, int csrno, } static RISCVException write_vstimecmp(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val) { RISCVCPU *cpu = env_archcpu(env); @@ -996,7 +1014,7 @@ static RISCVException write_vstimecmp(CPURISCVState *env, int csrno, } static RISCVException write_vstimecmph(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val) { RISCVCPU *cpu = env_archcpu(env); @@ -1020,7 +1038,7 @@ static RISCVException read_stimecmp(CPURISCVState *env, int csrno, } static RISCVException read_stimecmph(CPURISCVState *env, int csrno, - target_ulong *val) + target_ulong *val) { if (riscv_cpu_virt_enabled(env)) { *val = env->vstimecmp >> 32; @@ -1032,7 +1050,7 @@ static RISCVException read_stimecmph(CPURISCVState *env, int csrno, } static RISCVException write_stimecmp(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val) { RISCVCPU *cpu = env_archcpu(env); @@ -1055,7 +1073,7 @@ static RISCVException write_stimecmp(CPURISCVState *env, int csrno, } static RISCVException write_stimecmph(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val) { RISCVCPU *cpu = env_archcpu(env); @@ -1123,16 +1141,16 @@ static const target_ulong hip_writable_mask = MIP_VSSIP; static const target_ulong hvip_writable_mask = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP; static const target_ulong vsip_writable_mask = MIP_VSSIP; -static const char valid_vm_1_10_32[16] = { - [VM_1_10_MBARE] = 1, - [VM_1_10_SV32] = 1 +const bool valid_vm_1_10_32[16] = { + [VM_1_10_MBARE] = true, + [VM_1_10_SV32] = true }; -static const char valid_vm_1_10_64[16] = { - [VM_1_10_MBARE] = 1, - [VM_1_10_SV39] = 1, - [VM_1_10_SV48] = 1, - [VM_1_10_SV57] = 1 +const bool valid_vm_1_10_64[16] = { + [VM_1_10_MBARE] = true, + [VM_1_10_SV39] = true, + 
[VM_1_10_SV48] = true, + [VM_1_10_SV57] = true }; /* Machine Information Registers */ @@ -1152,8 +1170,7 @@ static RISCVException write_ignore(CPURISCVState *env, int csrno, static RISCVException read_mvendorid(CPURISCVState *env, int csrno, target_ulong *val) { - CPUState *cs = env_cpu(env); - RISCVCPU *cpu = RISCV_CPU(cs); + RISCVCPU *cpu = env_archcpu(env); *val = cpu->cfg.mvendorid; return RISCV_EXCP_NONE; @@ -1162,8 +1179,7 @@ static RISCVException read_mvendorid(CPURISCVState *env, int csrno, static RISCVException read_marchid(CPURISCVState *env, int csrno, target_ulong *val) { - CPUState *cs = env_cpu(env); - RISCVCPU *cpu = RISCV_CPU(cs); + RISCVCPU *cpu = env_archcpu(env); *val = cpu->cfg.marchid; return RISCV_EXCP_NONE; @@ -1172,8 +1188,7 @@ static RISCVException read_marchid(CPURISCVState *env, int csrno, static RISCVException read_mimpid(CPURISCVState *env, int csrno, target_ulong *val) { - CPUState *cs = env_cpu(env); - RISCVCPU *cpu = RISCV_CPU(cs); + RISCVCPU *cpu = env_archcpu(env); *val = cpu->cfg.mimpid; return RISCV_EXCP_NONE; @@ -1215,13 +1230,11 @@ static RISCVException read_mstatus(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } -static int validate_vm(CPURISCVState *env, target_ulong vm) +static bool validate_vm(CPURISCVState *env, target_ulong vm) { - if (riscv_cpu_mxl(env) == MXL_RV32) { - return valid_vm_1_10_32[vm & 0xf]; - } else { - return valid_vm_1_10_64[vm & 0xf]; - } + RISCVCPU *cpu = RISCV_CPU(env_cpu(env)); + + return (vm & 0xf) <= satp_mode_max_from_map(cpu->cfg.satp_mode.map); } static RISCVException write_mstatus(CPURISCVState *env, int csrno, @@ -1329,7 +1342,7 @@ static RISCVException read_misa(CPURISCVState *env, int csrno, static RISCVException write_misa(CPURISCVState *env, int csrno, target_ulong val) { - if (!riscv_feature(env, RISCV_FEATURE_MISA)) { + if (!riscv_cpu_cfg(env)->misa_w) { /* drop write to misa */ return RISCV_EXCP_NONE; } @@ -1342,7 +1355,8 @@ static RISCVException write_misa(CPURISCVState *env, int csrno, /* 'E' excludes all other extensions */ if (val & RVE) { - /* when we support 'E' we can do "val = RVE;" however + /* + * when we support 'E' we can do "val = RVE;" however * for now we just drop writes if 'E' is present. 
*/ return RISCV_EXCP_NONE; @@ -1356,15 +1370,13 @@ static RISCVException write_misa(CPURISCVState *env, int csrno, /* Mask extensions that are not supported by this hart */ val &= env->misa_ext_mask; - /* Mask extensions that are not supported by QEMU */ - val &= (RVI | RVE | RVM | RVA | RVF | RVD | RVC | RVS | RVU | RVV); - /* 'D' depends on 'F', so clear 'D' if 'F' is not present */ if ((val & RVD) && !(val & RVF)) { val &= ~RVD; } - /* Suppress 'C' if next instruction is not aligned + /* + * Suppress 'C' if next instruction is not aligned * TODO: this should check next_pc */ if ((val & RVC) && (GETPC() & ~3) != 0) { @@ -1833,28 +1845,28 @@ static RISCVException write_mscratch(CPURISCVState *env, int csrno, } static RISCVException read_mepc(CPURISCVState *env, int csrno, - target_ulong *val) + target_ulong *val) { *val = env->mepc; return RISCV_EXCP_NONE; } static RISCVException write_mepc(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val) { env->mepc = val; return RISCV_EXCP_NONE; } static RISCVException read_mcause(CPURISCVState *env, int csrno, - target_ulong *val) + target_ulong *val) { *val = env->mcause; return RISCV_EXCP_NONE; } static RISCVException write_mcause(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val) { env->mcause = val; return RISCV_EXCP_NONE; @@ -1876,19 +1888,22 @@ static RISCVException write_mtval(CPURISCVState *env, int csrno, /* Execution environment configuration setup */ static RISCVException read_menvcfg(CPURISCVState *env, int csrno, - target_ulong *val) + target_ulong *val) { *val = env->menvcfg; return RISCV_EXCP_NONE; } static RISCVException write_menvcfg(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val) { + RISCVCPUConfig *cfg = &env_archcpu(env)->cfg; uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE | MENVCFG_CBZE; if (riscv_cpu_mxl(env) == MXL_RV64) { - mask |= MENVCFG_PBMTE | MENVCFG_STCE; + mask |= (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) | + (cfg->ext_sstc ? MENVCFG_STCE : 0) | + (cfg->ext_svadu ? MENVCFG_HADE : 0); } env->menvcfg = (env->menvcfg & ~mask) | (val & mask); @@ -1896,16 +1911,19 @@ static RISCVException write_menvcfg(CPURISCVState *env, int csrno, } static RISCVException read_menvcfgh(CPURISCVState *env, int csrno, - target_ulong *val) + target_ulong *val) { *val = env->menvcfg >> 32; return RISCV_EXCP_NONE; } static RISCVException write_menvcfgh(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val) { - uint64_t mask = MENVCFG_PBMTE | MENVCFG_STCE; + RISCVCPUConfig *cfg = &env_archcpu(env)->cfg; + uint64_t mask = (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) | + (cfg->ext_sstc ? MENVCFG_STCE : 0) | + (cfg->ext_svadu ? 
MENVCFG_HADE : 0); uint64_t valh = (uint64_t)val << 32; env->menvcfg = (env->menvcfg & ~mask) | (valh & mask); @@ -1914,7 +1932,7 @@ static RISCVException write_menvcfgh(CPURISCVState *env, int csrno, } static RISCVException read_senvcfg(CPURISCVState *env, int csrno, - target_ulong *val) + target_ulong *val) { RISCVException ret; @@ -1928,7 +1946,7 @@ static RISCVException read_senvcfg(CPURISCVState *env, int csrno, } static RISCVException write_senvcfg(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val) { uint64_t mask = SENVCFG_FIOM | SENVCFG_CBIE | SENVCFG_CBCFE | SENVCFG_CBZE; RISCVException ret; @@ -1943,7 +1961,7 @@ static RISCVException write_senvcfg(CPURISCVState *env, int csrno, } static RISCVException read_henvcfg(CPURISCVState *env, int csrno, - target_ulong *val) + target_ulong *val) { RISCVException ret; @@ -1952,12 +1970,18 @@ static RISCVException read_henvcfg(CPURISCVState *env, int csrno, return ret; } - *val = env->henvcfg; + /* + * henvcfg.pbmte is read_only 0 when menvcfg.pbmte = 0 + * henvcfg.stce is read_only 0 when menvcfg.stce = 0 + * henvcfg.hade is read_only 0 when menvcfg.hade = 0 + */ + *val = env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_HADE) | + env->menvcfg); return RISCV_EXCP_NONE; } static RISCVException write_henvcfg(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val) { uint64_t mask = HENVCFG_FIOM | HENVCFG_CBIE | HENVCFG_CBCFE | HENVCFG_CBZE; RISCVException ret; @@ -1968,7 +1992,7 @@ static RISCVException write_henvcfg(CPURISCVState *env, int csrno, } if (riscv_cpu_mxl(env) == MXL_RV64) { - mask |= HENVCFG_PBMTE | HENVCFG_STCE; + mask |= env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_HADE); } env->henvcfg = (env->henvcfg & ~mask) | (val & mask); @@ -1977,7 +2001,7 @@ static RISCVException write_henvcfg(CPURISCVState *env, int csrno, } static RISCVException read_henvcfgh(CPURISCVState *env, int csrno, - target_ulong *val) + target_ulong *val) { RISCVException ret; @@ -1986,14 +2010,16 @@ static RISCVException read_henvcfgh(CPURISCVState *env, int csrno, return ret; } - *val = env->henvcfg >> 32; + *val = (env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_HADE) | + env->menvcfg)) >> 32; return RISCV_EXCP_NONE; } static RISCVException write_henvcfgh(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val) { - uint64_t mask = HENVCFG_PBMTE | HENVCFG_STCE; + uint64_t mask = env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | + HENVCFG_HADE); uint64_t valh = (uint64_t)val << 32; RISCVException ret; @@ -2034,13 +2060,13 @@ static RISCVException write_mstateen0(CPURISCVState *env, int csrno, } static RISCVException write_mstateen_1_3(CPURISCVState *env, int csrno, - target_ulong new_val) + target_ulong new_val) { return write_mstateen(env, csrno, SMSTATEEN_STATEEN, new_val); } static RISCVException read_mstateenh(CPURISCVState *env, int csrno, - target_ulong *val) + target_ulong *val) { *val = env->mstateen[csrno - CSR_MSTATEEN0H] >> 32; @@ -2061,7 +2087,7 @@ static RISCVException write_mstateenh(CPURISCVState *env, int csrno, } static RISCVException write_mstateen0h(CPURISCVState *env, int csrno, - target_ulong new_val) + target_ulong new_val) { uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG; @@ -2069,7 +2095,7 @@ static RISCVException write_mstateen0h(CPURISCVState *env, int csrno, } static RISCVException write_mstateenh_1_3(CPURISCVState *env, int csrno, - target_ulong new_val) + target_ulong new_val) { return write_mstateenh(env, csrno, SMSTATEEN_STATEEN, new_val); } @@ 
-2106,7 +2132,7 @@ static RISCVException write_hstateen0(CPURISCVState *env, int csrno, } static RISCVException write_hstateen_1_3(CPURISCVState *env, int csrno, - target_ulong new_val) + target_ulong new_val) { return write_hstateen(env, csrno, SMSTATEEN_STATEEN, new_val); } @@ -2145,7 +2171,7 @@ static RISCVException write_hstateen0h(CPURISCVState *env, int csrno, } static RISCVException write_hstateenh_1_3(CPURISCVState *env, int csrno, - target_ulong new_val) + target_ulong new_val) { return write_hstateenh(env, csrno, SMSTATEEN_STATEEN, new_val); } @@ -2624,7 +2650,7 @@ static RISCVException rmw_siph(CPURISCVState *env, int csrno, static RISCVException read_satp(CPURISCVState *env, int csrno, target_ulong *val) { - if (!riscv_feature(env, RISCV_FEATURE_MMU)) { + if (!riscv_cpu_cfg(env)->mmu) { *val = 0; return RISCV_EXCP_NONE; } @@ -2641,9 +2667,10 @@ static RISCVException read_satp(CPURISCVState *env, int csrno, static RISCVException write_satp(CPURISCVState *env, int csrno, target_ulong val) { - target_ulong vm, mask; + target_ulong mask; + bool vm; - if (!riscv_feature(env, RISCV_FEATURE_MMU)) { + if (!riscv_cpu_cfg(env)->mmu) { return RISCV_EXCP_NONE; } @@ -3338,30 +3365,18 @@ static RISCVException read_mseccfg(CPURISCVState *env, int csrno, } static RISCVException write_mseccfg(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val) { mseccfg_csr_write(env, val); return RISCV_EXCP_NONE; } -static bool check_pmp_reg_index(CPURISCVState *env, uint32_t reg_index) -{ - /* TODO: RV128 restriction check */ - if ((reg_index & 1) && (riscv_cpu_mxl(env) == MXL_RV64)) { - return false; - } - return true; -} - static RISCVException read_pmpcfg(CPURISCVState *env, int csrno, target_ulong *val) { uint32_t reg_index = csrno - CSR_PMPCFG0; - if (!check_pmp_reg_index(env, reg_index)) { - return RISCV_EXCP_ILLEGAL_INST; - } - *val = pmpcfg_csr_read(env, csrno - CSR_PMPCFG0); + *val = pmpcfg_csr_read(env, reg_index); return RISCV_EXCP_NONE; } @@ -3370,10 +3385,7 @@ static RISCVException write_pmpcfg(CPURISCVState *env, int csrno, { uint32_t reg_index = csrno - CSR_PMPCFG0; - if (!check_pmp_reg_index(env, reg_index)) { - return RISCV_EXCP_ILLEGAL_INST; - } - pmpcfg_csr_write(env, csrno - CSR_PMPCFG0, val); + pmpcfg_csr_write(env, reg_index, val); return RISCV_EXCP_NONE; } @@ -3776,27 +3788,32 @@ static inline RISCVException riscv_csrrw_check(CPURISCVState *env, RISCVCPU *cpu) { /* check privileges and return RISCV_EXCP_ILLEGAL_INST if check fails */ - int read_only = get_field(csrno, 0xC00) == 3; + bool read_only = get_field(csrno, 0xC00) == 3; int csr_min_priv = csr_ops[csrno].min_priv_ver; - /* ensure the CSR extension is enabled. */ + /* ensure the CSR extension is enabled */ if (!cpu->cfg.ext_icsr) { return RISCV_EXCP_ILLEGAL_INST; } + /* privileged spec version check */ if (env->priv_ver < csr_min_priv) { return RISCV_EXCP_ILLEGAL_INST; } - /* check predicate */ - if (!csr_ops[csrno].predicate) { - return RISCV_EXCP_ILLEGAL_INST; - } - + /* read / write check */ if (write_mask && read_only) { return RISCV_EXCP_ILLEGAL_INST; } + /* + * The predicate() not only does existence check but also does some + * access control check which triggers for example virtual instruction + * exception in some cases. When writing read-only CSRs in those cases + * illegal instruction exception should be triggered instead of virtual + * instruction exception. Hence this comes after the read / write check. 
+ */ + g_assert(csr_ops[csrno].predicate != NULL); RISCVException ret = csr_ops[csrno].predicate(env, csrno); if (ret != RISCV_EXCP_NONE) { return ret; diff --git a/target/riscv/gdbstub.c b/target/riscv/gdbstub.c index 6e7bbdbd5e..6048541606 100644 --- a/target/riscv/gdbstub.c +++ b/target/riscv/gdbstub.c @@ -127,40 +127,6 @@ static int riscv_gdb_set_fpu(CPURISCVState *env, uint8_t *mem_buf, int n) return 0; } -/* - * Convert register index number passed by GDB to the correspond - * vector CSR number. Vector CSRs are defined after vector registers - * in dynamic generated riscv-vector.xml, thus the starting register index - * of vector CSRs is 32. - * Return 0 if register index number is out of range. - */ -static int riscv_gdb_vector_csrno(int num_regs) -{ - /* - * The order of vector CSRs in the switch case - * should match with the order defined in csr_ops[]. - */ - switch (num_regs) { - case 32: - return CSR_VSTART; - case 33: - return CSR_VXSAT; - case 34: - return CSR_VXRM; - case 35: - return CSR_VCSR; - case 36: - return CSR_VL; - case 37: - return CSR_VTYPE; - case 38: - return CSR_VLENB; - default: - /* Unknown register. */ - return 0; - } -} - static int riscv_gdb_get_vector(CPURISCVState *env, GByteArray *buf, int n) { uint16_t vlenb = env_archcpu(env)->cfg.vlen >> 3; @@ -174,19 +140,6 @@ static int riscv_gdb_get_vector(CPURISCVState *env, GByteArray *buf, int n) return cnt; } - int csrno = riscv_gdb_vector_csrno(n); - - if (!csrno) { - return 0; - } - - target_ulong val = 0; - int result = riscv_csrrw_debug(env, csrno, &val, 0, 0); - - if (result == RISCV_EXCP_NONE) { - return gdb_get_regl(buf, val); - } - return 0; } @@ -201,19 +154,6 @@ static int riscv_gdb_set_vector(CPURISCVState *env, uint8_t *mem_buf, int n) return vlenb; } - int csrno = riscv_gdb_vector_csrno(n); - - if (!csrno) { - return 0; - } - - target_ulong val = ldtul_p(mem_buf); - int result = riscv_csrrw_debug(env, csrno, NULL, val, -1); - - if (result == RISCV_EXCP_NONE) { - return sizeof(target_ulong); - } - return 0; } @@ -280,6 +220,10 @@ static int riscv_gen_dynamic_csr_xml(CPUState *cs, int base_reg) int bitsize = 16 << env->misa_mxl_max; int i; +#if !defined(CONFIG_USER_ONLY) + env->debugger = true; +#endif + /* Until gdb knows about 128-bit registers */ if (bitsize > 64) { bitsize = 64; @@ -290,6 +234,9 @@ static int riscv_gen_dynamic_csr_xml(CPUState *cs, int base_reg) g_string_append_printf(s, "<feature name=\"org.gnu.gdb.riscv.csr\">"); for (i = 0; i < CSR_TABLE_SIZE; i++) { + if (env->priv_ver < csr_ops[i].min_priv_ver) { + continue; + } predicate = csr_ops[i].predicate; if (predicate && (predicate(env, i) == RISCV_EXCP_NONE)) { if (csr_ops[i].name) { @@ -305,6 +252,11 @@ static int riscv_gen_dynamic_csr_xml(CPUState *cs, int base_reg) g_string_append_printf(s, "</feature>"); cpu->dyn_csr_xml = g_string_free(s, false); + +#if !defined(CONFIG_USER_ONLY) + env->debugger = false; +#endif + return CSR_TABLE_SIZE; } @@ -349,21 +301,6 @@ static int ricsv_gen_dynamic_vector_xml(CPUState *cs, int base_reg) num_regs++; } - /* Define vector CSRs */ - const char *vector_csrs[7] = { - "vstart", "vxsat", "vxrm", "vcsr", - "vl", "vtype", "vlenb" - }; - - for (i = 0; i < 7; i++) { - g_string_append_printf(s, - "<reg name=\"%s\" bitsize=\"%d\"" - " regnum=\"%d\" group=\"vector\"" - " type=\"int\"/>", - vector_csrs[i], TARGET_LONG_BITS, base_reg++); - num_regs++; - } - g_string_append_printf(s, "</feature>"); cpu->dyn_vreg_xml = g_string_free(s, false); @@ -382,9 +319,9 @@ void 
riscv_cpu_register_gdb_regs_for_features(CPUState *cs) 32, "riscv-32bit-fpu.xml", 0); } if (env->misa_ext & RVV) { + int base_reg = cs->gdb_num_regs; gdb_register_coprocessor(cs, riscv_gdb_get_vector, riscv_gdb_set_vector, - ricsv_gen_dynamic_vector_xml(cs, - cs->gdb_num_regs), + ricsv_gen_dynamic_vector_xml(cs, base_reg), "riscv-vector.xml", 0); } switch (env->misa_mxl_max) { @@ -403,7 +340,10 @@ void riscv_cpu_register_gdb_regs_for_features(CPUState *cs) g_assert_not_reached(); } - gdb_register_coprocessor(cs, riscv_gdb_get_csr, riscv_gdb_set_csr, - riscv_gen_dynamic_csr_xml(cs, cs->gdb_num_regs), - "riscv-csr.xml", 0); + if (cpu->cfg.ext_icsr) { + int base_reg = cs->gdb_num_regs; + gdb_register_coprocessor(cs, riscv_gdb_get_csr, riscv_gdb_set_csr, + riscv_gen_dynamic_csr_xml(cs, base_reg), + "riscv-csr.xml", 0); + } } diff --git a/target/riscv/helper.h b/target/riscv/helper.h index 0497370afd..37b54e0991 100644 --- a/target/riscv/helper.h +++ b/target/riscv/helper.h @@ -97,6 +97,11 @@ DEF_HELPER_FLAGS_2(fcvt_h_l, TCG_CALL_NO_RWG, i64, env, tl) DEF_HELPER_FLAGS_2(fcvt_h_lu, TCG_CALL_NO_RWG, i64, env, tl) DEF_HELPER_FLAGS_2(fclass_h, TCG_CALL_NO_RWG_SE, tl, env, i64) +/* Cache-block operations */ +DEF_HELPER_2(cbo_clean_flush, void, env, tl) +DEF_HELPER_2(cbo_inval, void, env, tl) +DEF_HELPER_2(cbo_zero, void, env, tl) + /* Special functions */ DEF_HELPER_2(csrr, tl, env, int) DEF_HELPER_3(csrw, void, env, int, tl) diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode index b7e7613ea2..73d5d1b045 100644 --- a/target/riscv/insn32.decode +++ b/target/riscv/insn32.decode @@ -134,6 +134,7 @@ addi ............ ..... 000 ..... 0010011 @i slti ............ ..... 010 ..... 0010011 @i sltiu ............ ..... 011 ..... 0010011 @i xori ............ ..... 100 ..... 0010011 @i +# cbo.prefetch_{i,r,m} instructions are ori with rd=x0 and not decoded. ori ............ ..... 110 ..... 0010011 @i andi ............ ..... 111 ..... 0010011 @i slli 00000. ...... ..... 001 ..... 0010011 @sh @@ -179,7 +180,20 @@ sraw 0100000 ..... ..... 101 ..... 0111011 @r # *** RV128I Base Instruction Set (in addition to RV64I) *** ldu ............ ..... 111 ..... 0000011 @i -lq ............ ..... 010 ..... 0001111 @i +{ + [ + # *** RV32 Zicbom Standard Extension *** + cbo_clean 0000000 00001 ..... 010 00000 0001111 @sfence_vm + cbo_flush 0000000 00010 ..... 010 00000 0001111 @sfence_vm + cbo_inval 0000000 00000 ..... 010 00000 0001111 @sfence_vm + + # *** RV32 Zicboz Standard Extension *** + cbo_zero 0000000 00100 ..... 010 00000 0001111 @sfence_vm + ] + + # *** RVI128 lq *** + lq ............ ..... 010 ..... 0001111 @i +} sq ............ ..... 100 ..... 0100011 @s addid ............ ..... 000 ..... 1011011 @i sllid 000000 ...... ..... 001 ..... 1011011 @sh6 @@ -890,3 +904,7 @@ sm3p1 00 01000 01001 ..... 001 ..... 0010011 @r2 # *** RV32 Zksed Standard Extension *** sm4ed .. 11000 ..... ..... 000 ..... 0110011 @k_aes sm4ks .. 11010 ..... ..... 000 ..... 0110011 @k_aes + +# *** RV32 Zicond Standard Extension *** +czero_eqz 0000111 ..... ..... 101 ..... 0110011 @r +czero_nez 0000111 ..... ..... 111 ..... 
0110011 @r diff --git a/target/riscv/insn_trans/trans_rvb.c.inc b/target/riscv/insn_trans/trans_rvb.c.inc index 990bc94b98..e4dcc7c991 100644 --- a/target/riscv/insn_trans/trans_rvb.c.inc +++ b/target/riscv/insn_trans/trans_rvb.c.inc @@ -64,7 +64,6 @@ static void gen_clzw(TCGv ret, TCGv arg1) TCGv t = tcg_temp_new(); tcg_gen_shli_tl(t, arg1, 32); tcg_gen_clzi_tl(ret, t, 32); - tcg_temp_free(t); } static bool trans_clz(DisasContext *ctx, arg_clz *a) @@ -161,8 +160,6 @@ static void gen_bset(TCGv ret, TCGv arg1, TCGv shamt) gen_sbop_mask(t, shamt); tcg_gen_or_tl(ret, arg1, t); - - tcg_temp_free(t); } static bool trans_bset(DisasContext *ctx, arg_bset *a) @@ -183,8 +180,6 @@ static void gen_bclr(TCGv ret, TCGv arg1, TCGv shamt) gen_sbop_mask(t, shamt); tcg_gen_andc_tl(ret, arg1, t); - - tcg_temp_free(t); } static bool trans_bclr(DisasContext *ctx, arg_bclr *a) @@ -205,8 +200,6 @@ static void gen_binv(TCGv ret, TCGv arg1, TCGv shamt) gen_sbop_mask(t, shamt); tcg_gen_xor_tl(ret, arg1, t); - - tcg_temp_free(t); } static bool trans_binv(DisasContext *ctx, arg_binv *a) @@ -252,9 +245,6 @@ static void gen_rorw(TCGv ret, TCGv arg1, TCGv arg2) /* sign-extend 64-bits */ tcg_gen_ext_i32_tl(ret, t1); - - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t2); } static bool trans_ror(DisasContext *ctx, arg_ror *a) @@ -270,8 +260,6 @@ static void gen_roriw(TCGv ret, TCGv arg1, target_long shamt) tcg_gen_trunc_tl_i32(t1, arg1); tcg_gen_rotri_i32(t1, t1, shamt); tcg_gen_ext_i32_tl(ret, t1); - - tcg_temp_free_i32(t1); } static bool trans_rori(DisasContext *ctx, arg_rori *a) @@ -294,9 +282,6 @@ static void gen_rolw(TCGv ret, TCGv arg1, TCGv arg2) /* sign-extend 64-bits */ tcg_gen_ext_i32_tl(ret, t1); - - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t2); } static bool trans_rol(DisasContext *ctx, arg_rol *a) @@ -340,8 +325,6 @@ static void gen_orc_b(TCGv ret, TCGv source1) /* Replicate the lsb of each byte across the byte. 
*/ tcg_gen_muli_tl(ret, tmp, 0xff); - - tcg_temp_free(tmp); } static bool trans_orc_b(DisasContext *ctx, arg_orc_b *a) @@ -357,8 +340,6 @@ static void gen_sh##SHAMT##add(TCGv ret, TCGv arg1, TCGv arg2) \ \ tcg_gen_shli_tl(t, arg1, SHAMT); \ tcg_gen_add_tl(ret, t, arg2); \ - \ - tcg_temp_free(t); \ } GEN_SHADD(1) @@ -446,8 +427,6 @@ static void gen_sh##SHAMT##add_uw(TCGv ret, TCGv arg1, TCGv arg2) \ \ tcg_gen_shli_tl(t, t, SHAMT); \ tcg_gen_add_tl(ret, t, arg2); \ - \ - tcg_temp_free(t); \ } GEN_SHADD_UW(1) @@ -472,7 +451,6 @@ static void gen_add_uw(TCGv ret, TCGv arg1, TCGv arg2) TCGv t = tcg_temp_new(); tcg_gen_ext32u_tl(t, arg1); tcg_gen_add_tl(ret, t, arg2); - tcg_temp_free(t); } static bool trans_add_uw(DisasContext *ctx, arg_add_uw *a) @@ -531,7 +509,6 @@ static void gen_packh(TCGv ret, TCGv src1, TCGv src2) tcg_gen_ext8u_tl(t, src2); tcg_gen_deposit_tl(ret, src1, t, 8, TARGET_LONG_BITS - 8); - tcg_temp_free(t); } static void gen_packw(TCGv ret, TCGv src1, TCGv src2) @@ -540,7 +517,6 @@ static void gen_packw(TCGv ret, TCGv src1, TCGv src2) tcg_gen_ext16s_tl(t, src2); tcg_gen_deposit_tl(ret, src1, t, 16, TARGET_LONG_BITS - 16); - tcg_temp_free(t); } static bool trans_brev8(DisasContext *ctx, arg_brev8 *a) diff --git a/target/riscv/insn_trans/trans_rvd.c.inc b/target/riscv/insn_trans/trans_rvd.c.inc index 6e3159b797..1597bf31d8 100644 --- a/target/riscv/insn_trans/trans_rvd.c.inc +++ b/target/riscv/insn_trans/trans_rvd.c.inc @@ -250,7 +250,6 @@ static bool trans_fsgnjn_d(DisasContext *ctx, arg_fsgnjn_d *a) TCGv_i64 t0 = tcg_temp_new_i64(); tcg_gen_not_i64(t0, src2); tcg_gen_deposit_i64(dest, t0, src1, 0, 63); - tcg_temp_free_i64(t0); } gen_set_fpr_d(ctx, a->rd, dest); mark_fs_dirty(ctx); @@ -273,7 +272,6 @@ static bool trans_fsgnjx_d(DisasContext *ctx, arg_fsgnjx_d *a) TCGv_i64 t0 = tcg_temp_new_i64(); tcg_gen_andi_i64(t0, src2, INT64_MIN); tcg_gen_xor_i64(dest, src1, t0); - tcg_temp_free_i64(t0); } gen_set_fpr_d(ctx, a->rd, dest); mark_fs_dirty(ctx); diff --git a/target/riscv/insn_trans/trans_rvf.c.inc b/target/riscv/insn_trans/trans_rvf.c.inc index 965e1f8d11..052408f45c 100644 --- a/target/riscv/insn_trans/trans_rvf.c.inc +++ b/target/riscv/insn_trans/trans_rvf.c.inc @@ -233,9 +233,6 @@ static bool trans_fsgnj_s(DisasContext *ctx, arg_fsgnj_s *a) /* This formulation retains the nanboxing of rs2 in normal 'F'. 
*/ tcg_gen_deposit_i64(dest, rs2, rs1, 0, 31); - - tcg_temp_free_i64(rs1); - tcg_temp_free_i64(rs2); } else { tcg_gen_deposit_i64(dest, src2, src1, 0, 31); tcg_gen_ext32s_i64(dest, dest); @@ -281,15 +278,12 @@ static bool trans_fsgnjn_s(DisasContext *ctx, arg_fsgnjn_s *a) tcg_gen_nor_i64(rs2, rs2, mask); tcg_gen_and_i64(dest, mask, rs1); tcg_gen_or_i64(dest, dest, rs2); - - tcg_temp_free_i64(rs2); } /* signed-extended intead of nanboxing for result if enable zfinx */ if (ctx->cfg_ptr->ext_zfinx) { tcg_gen_ext32s_i64(dest, dest); } gen_set_fpr_hs(ctx, a->rd, dest); - tcg_temp_free_i64(rs1); mark_fs_dirty(ctx); return true; } @@ -329,14 +323,11 @@ static bool trans_fsgnjx_s(DisasContext *ctx, arg_fsgnjx_s *a) */ tcg_gen_andi_i64(dest, rs2, MAKE_64BIT_MASK(31, 1)); tcg_gen_xor_i64(dest, rs1, dest); - - tcg_temp_free_i64(rs2); } /* signed-extended intead of nanboxing for result if enable zfinx */ if (ctx->cfg_ptr->ext_zfinx) { tcg_gen_ext32s_i64(dest, dest); } - tcg_temp_free_i64(rs1); gen_set_fpr_hs(ctx, a->rd, dest); mark_fs_dirty(ctx); return true; diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc index 4496f21266..4ad54e8a49 100644 --- a/target/riscv/insn_trans/trans_rvi.c.inc +++ b/target/riscv/insn_trans/trans_rvi.c.inc @@ -62,7 +62,6 @@ static bool trans_jalr(DisasContext *ctx, arg_jalr *a) misaligned = gen_new_label(); tcg_gen_andi_tl(t0, cpu_pc, 0x2); tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0x0, misaligned); - tcg_temp_free(t0); } gen_set_gpri(ctx, a->rd, ctx->pc_succ_insn); @@ -108,8 +107,6 @@ static TCGCond gen_compare_i128(bool bz, TCGv rl, tcg_gen_xor_tl(tmp, ah, bh); tcg_gen_and_tl(rl, rl, tmp); tcg_gen_xor_tl(rl, rh, rl); - - tcg_temp_free(tmp); } break; @@ -128,8 +125,6 @@ static TCGCond gen_compare_i128(bool bz, TCGv rl, /* seed third word with 1, which will be result */ tcg_gen_sub2_tl(tmp, rh, ah, one, tmp, zero); tcg_gen_sub2_tl(tmp, rl, tmp, rh, bh, zero); - - tcg_temp_free(tmp); } break; @@ -140,8 +135,6 @@ static TCGCond gen_compare_i128(bool bz, TCGv rl, if (invert) { cond = tcg_invert_cond(cond); } - - tcg_temp_free(rh); return cond; } @@ -169,8 +162,6 @@ static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond) cond = gen_compare_i128(a->rs2 == 0, tmp, src1, src1h, src2, src2h, cond); tcg_gen_brcondi_tl(cond, tmp, 0, l); - - tcg_temp_free(tmp); } else { tcg_gen_brcond_tl(cond, src1, src2, l); } @@ -254,8 +245,6 @@ static bool gen_load_i128(DisasContext *ctx, arg_lb *a, MemOp memop) } gen_set_gpr128(ctx, a->rd, destl, desth); - - tcg_temp_free(addrl); return true; } @@ -344,8 +333,6 @@ static bool gen_store_i128(DisasContext *ctx, arg_sb *a, MemOp memop) tcg_gen_addi_tl(addrl, addrl, 8); tcg_gen_qemu_st_tl(src2h, addrl, ctx->mem_idx, MO_TEUQ); } - - tcg_temp_free(addrl); return true; } @@ -568,14 +555,6 @@ static void gen_sll_i128(TCGv destl, TCGv desth, tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, zero, ll); tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, ll, h1); - - tcg_temp_free(ls); - tcg_temp_free(rs); - tcg_temp_free(hs); - tcg_temp_free(ll); - tcg_temp_free(lr); - tcg_temp_free(h0); - tcg_temp_free(h1); } static bool trans_sll(DisasContext *ctx, arg_sll *a) @@ -618,14 +597,6 @@ static void gen_srl_i128(TCGv destl, TCGv desth, tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0); tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, zero, h1); - - tcg_temp_free(ls); - tcg_temp_free(rs); - tcg_temp_free(hs); - tcg_temp_free(ll); - tcg_temp_free(lr); - tcg_temp_free(h0); - tcg_temp_free(h1); } static bool 
trans_srl(DisasContext *ctx, arg_srl *a) @@ -659,14 +630,6 @@ static void gen_sra_i128(TCGv destl, TCGv desth, tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0); tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, lr, h1); - - tcg_temp_free(ls); - tcg_temp_free(rs); - tcg_temp_free(hs); - tcg_temp_free(ll); - tcg_temp_free(lr); - tcg_temp_free(h0); - tcg_temp_free(h1); } static bool trans_sra(DisasContext *ctx, arg_sra *a) diff --git a/target/riscv/insn_trans/trans_rvk.c.inc b/target/riscv/insn_trans/trans_rvk.c.inc index 90f4eeff60..6600c710a7 100644 --- a/target/riscv/insn_trans/trans_rvk.c.inc +++ b/target/riscv/insn_trans/trans_rvk.c.inc @@ -161,9 +161,6 @@ static bool gen_sha256(DisasContext *ctx, arg_r2 *a, DisasExtend ext, tcg_gen_ext_i32_tl(dest, t1); gen_set_gpr(ctx, a->rd, dest); - tcg_temp_free_i32(t0); - tcg_temp_free_i32(t1); - tcg_temp_free_i32(t2); return true; } @@ -212,9 +209,6 @@ static bool gen_sha512_rv32(DisasContext *ctx, arg_r *a, DisasExtend ext, tcg_gen_trunc_i64_tl(dest, t1); gen_set_gpr(ctx, a->rd, dest); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); return true; } @@ -271,9 +265,6 @@ static bool gen_sha512h_rv32(DisasContext *ctx, arg_r *a, DisasExtend ext, tcg_gen_trunc_i64_tl(dest, t1); gen_set_gpr(ctx, a->rd, dest); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); return true; } @@ -310,9 +301,6 @@ static bool gen_sha512_rv64(DisasContext *ctx, arg_r2 *a, DisasExtend ext, tcg_gen_trunc_i64_tl(dest, t1); gen_set_gpr(ctx, a->rd, dest); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); return true; } @@ -359,9 +347,6 @@ static bool gen_sm3(DisasContext *ctx, arg_r2 *a, int32_t b, int32_t c) tcg_gen_xor_i32(t1, t1, t0); tcg_gen_ext_i32_tl(dest, t1); gen_set_gpr(ctx, a->rd, dest); - - tcg_temp_free_i32(t0); - tcg_temp_free_i32(t1); return true; } diff --git a/target/riscv/insn_trans/trans_rvm.c.inc b/target/riscv/insn_trans/trans_rvm.c.inc index ec7f705aab..2f0fd1f700 100644 --- a/target/riscv/insn_trans/trans_rvm.c.inc +++ b/target/riscv/insn_trans/trans_rvm.c.inc @@ -45,9 +45,6 @@ static void gen_mulhu_i128(TCGv r2, TCGv r3, TCGv al, TCGv ah, TCGv bl, TCGv bh) tcg_gen_mulu2_tl(tmpl, tmph, ah, bh); tcg_gen_add2_tl(r2, r3, r2, r3, tmpl, tmph); - - tcg_temp_free(tmpl); - tcg_temp_free(tmph); } static void gen_mul_i128(TCGv rl, TCGv rh, @@ -63,10 +60,6 @@ static void gen_mul_i128(TCGv rl, TCGv rh, tcg_gen_add2_tl(rh, tmpx, rh, zero, tmpl, tmph); tcg_gen_mulu2_tl(tmpl, tmph, rs1h, rs2l); tcg_gen_add2_tl(rh, tmph, rh, tmpx, tmpl, tmph); - - tcg_temp_free(tmpl); - tcg_temp_free(tmph); - tcg_temp_free(tmpx); } static bool trans_mul(DisasContext *ctx, arg_mul *a) @@ -92,11 +85,6 @@ static void gen_mulh_i128(TCGv rl, TCGv rh, tcg_gen_and_tl(t1h, t1h, rs1h); tcg_gen_sub2_tl(t0l, t0h, rl, rh, t0l, t0h); tcg_gen_sub2_tl(rl, rh, t0l, t0h, t1l, t1h); - - tcg_temp_free(t0l); - tcg_temp_free(t0h); - tcg_temp_free(t1l); - tcg_temp_free(t1h); } static void gen_mulh(TCGv ret, TCGv s1, TCGv s2) @@ -104,7 +92,6 @@ static void gen_mulh(TCGv ret, TCGv s1, TCGv s2) TCGv discard = tcg_temp_new(); tcg_gen_muls2_tl(discard, ret, s1, s2); - tcg_temp_free(discard); } static void gen_mulh_w(TCGv ret, TCGv s1, TCGv s2) @@ -132,9 +119,6 @@ static void gen_mulhsu_i128(TCGv rl, TCGv rh, tcg_gen_and_tl(t0l, t0h, rs2l); tcg_gen_and_tl(t0h, t0h, rs2h); tcg_gen_sub2_tl(rl, rh, rl, rh, t0l, t0h); - - tcg_temp_free(t0l); - tcg_temp_free(t0h); } static void gen_mulhsu(TCGv ret, TCGv arg1, TCGv arg2) @@ -147,9 +131,6 @@ 
static void gen_mulhsu(TCGv ret, TCGv arg1, TCGv arg2) tcg_gen_sari_tl(rl, arg1, TARGET_LONG_BITS - 1); tcg_gen_and_tl(rl, rl, arg2); tcg_gen_sub_tl(ret, rh, rl); - - tcg_temp_free(rl); - tcg_temp_free(rh); } static void gen_mulhsu_w(TCGv ret, TCGv arg1, TCGv arg2) @@ -160,8 +141,6 @@ static void gen_mulhsu_w(TCGv ret, TCGv arg1, TCGv arg2) tcg_gen_ext32s_tl(t1, arg1); tcg_gen_ext32u_tl(t2, arg2); tcg_gen_mul_tl(ret, t1, t2); - tcg_temp_free(t1); - tcg_temp_free(t2); tcg_gen_sari_tl(ret, ret, 32); } @@ -177,7 +156,6 @@ static void gen_mulhu(TCGv ret, TCGv s1, TCGv s2) TCGv discard = tcg_temp_new(); tcg_gen_mulu2_tl(discard, ret, s1, s2); - tcg_temp_free(discard); } static bool trans_mulhu(DisasContext *ctx, arg_mulhu *a) @@ -223,9 +201,6 @@ static void gen_div(TCGv ret, TCGv source1, TCGv source2) tcg_gen_movcond_tl(TCG_COND_EQ, temp2, source2, zero, one, temp2); tcg_gen_div_tl(ret, temp1, temp2); - - tcg_temp_free(temp1); - tcg_temp_free(temp2); } static bool trans_div(DisasContext *ctx, arg_div *a) @@ -258,9 +233,6 @@ static void gen_divu(TCGv ret, TCGv source1, TCGv source2) tcg_gen_movcond_tl(TCG_COND_EQ, temp1, source2, zero, max, source1); tcg_gen_movcond_tl(TCG_COND_EQ, temp2, source2, zero, one, source2); tcg_gen_divu_tl(ret, temp1, temp2); - - tcg_temp_free(temp1); - tcg_temp_free(temp2); } static bool trans_divu(DisasContext *ctx, arg_divu *a) @@ -306,9 +278,6 @@ static void gen_rem(TCGv ret, TCGv source1, TCGv source2) /* If div by zero, the required result is the original dividend. */ tcg_gen_movcond_tl(TCG_COND_EQ, ret, source2, zero, source1, temp1); - - tcg_temp_free(temp1); - tcg_temp_free(temp2); } static bool trans_rem(DisasContext *ctx, arg_rem *a) @@ -342,8 +311,6 @@ static void gen_remu(TCGv ret, TCGv source1, TCGv source2) /* If div by zero, the required result is the original dividend. */ tcg_gen_movcond_tl(TCG_COND_EQ, ret, source2, zero, source1, temp); - - tcg_temp_free(temp); } static bool trans_remu(DisasContext *ctx, arg_remu *a) diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc index bbb5c3a7b5..f2e3d38515 100644 --- a/target/riscv/insn_trans/trans_rvv.c.inc +++ b/target/riscv/insn_trans/trans_rvv.c.inc @@ -40,10 +40,11 @@ static bool require_rvf(DisasContext *s) switch (s->sew) { case MO_16: + return s->cfg_ptr->ext_zvfh; case MO_32: - return has_ext(s, RVF); + return s->cfg_ptr->ext_zve32f; case MO_64: - return has_ext(s, RVD); + return s->cfg_ptr->ext_zve64d; default: return false; } @@ -57,57 +58,32 @@ static bool require_scale_rvf(DisasContext *s) switch (s->sew) { case MO_8: + return s->cfg_ptr->ext_zvfh; case MO_16: - return has_ext(s, RVF); + return s->cfg_ptr->ext_zve32f; case MO_32: - return has_ext(s, RVD); + return s->cfg_ptr->ext_zve64d; default: return false; } } -static bool require_zve32f(DisasContext *s) +static bool require_scale_rvfmin(DisasContext *s) { - /* RVV + Zve32f = RVV. */ - if (has_ext(s, RVV)) { - return true; - } - - /* Zve32f doesn't support FP64. (Section 18.2) */ - return s->cfg_ptr->ext_zve32f ? s->sew <= MO_32 : true; -} - -static bool require_scale_zve32f(DisasContext *s) -{ - /* RVV + Zve32f = RVV. */ - if (has_ext(s, RVV)) { - return true; - } - - /* Zve32f doesn't support FP64. (Section 18.2) */ - return s->cfg_ptr->ext_zve64f ? s->sew <= MO_16 : true; -} - -static bool require_zve64f(DisasContext *s) -{ - /* RVV + Zve64f = RVV. */ - if (has_ext(s, RVV)) { - return true; + if (s->mstatus_fs == 0) { + return false; } - /* Zve64f doesn't support FP64. 
(Section 18.2) */ - return s->cfg_ptr->ext_zve64f ? s->sew <= MO_32 : true; -} - -static bool require_scale_zve64f(DisasContext *s) -{ - /* RVV + Zve64f = RVV. */ - if (has_ext(s, RVV)) { - return true; + switch (s->sew) { + case MO_8: + return s->cfg_ptr->ext_zvfhmin; + case MO_16: + return s->cfg_ptr->ext_zve32f; + case MO_32: + return s->cfg_ptr->ext_zve64d; + default: + return false; } - - /* Zve64f doesn't support FP64. (Section 18.2) */ - return s->cfg_ptr->ext_zve64f ? s->sew <= MO_16 : true; } /* Destination vector register group cannot overlap source mask register. */ @@ -173,9 +149,7 @@ static bool do_vsetvl(DisasContext *s, int rd, int rs1, TCGv s2) { TCGv s1, dst; - if (!require_rvv(s) || - !(has_ext(s, RVV) || s->cfg_ptr->ext_zve32f || - s->cfg_ptr->ext_zve64f)) { + if (!require_rvv(s) || !s->cfg_ptr->ext_zve32f) { return false; } @@ -198,11 +172,6 @@ static bool do_vsetvl(DisasContext *s, int rd, int rs1, TCGv s2) gen_set_pc_imm(s, s->pc_succ_insn); lookup_and_goto_ptr(s); s->base.is_jmp = DISAS_NORETURN; - - if (rd == 0 && rs1 == 0) { - tcg_temp_free(s1); - } - return true; } @@ -210,9 +179,7 @@ static bool do_vsetivli(DisasContext *s, int rd, TCGv s1, TCGv s2) { TCGv dst; - if (!require_rvv(s) || - !(has_ext(s, RVV) || s->cfg_ptr->ext_zve32f || - s->cfg_ptr->ext_zve64f)) { + if (!require_rvv(s) || !s->cfg_ptr->ext_zve32f) { return false; } @@ -242,8 +209,8 @@ static bool trans_vsetvli(DisasContext *s, arg_vsetvli *a) static bool trans_vsetivli(DisasContext *s, arg_vsetivli *a) { - TCGv s1 = tcg_const_tl(a->rs1); - TCGv s2 = tcg_const_tl(a->zimm); + TCGv s1 = tcg_constant_tl(a->rs1); + TCGv s2 = tcg_constant_tl(a->zimm); return do_vsetivli(s, a->rd, s1, s2); } @@ -315,13 +282,12 @@ static bool vext_check_st_index(DisasContext *s, int vd, int vs2, int nf, require_nf(vd, nf, s->lmul); /* - * All Zve* extensions support all vector load and store instructions, - * except Zve64* extensions do not support EEW=64 for index values - * when XLEN=32. (Section 18.2) + * V extension supports all vector load and store instructions, + * except V extension does not support EEW=64 for index values + * when XLEN=32. (Section 18.3) */ if (get_xl(s) == MXL_RV32) { - ret &= (!has_ext(s, RVV) && - s->cfg_ptr->ext_zve64f ? 
eew != MO_64 : true); + ret &= (eew != MO_64); } return ret; @@ -673,9 +639,6 @@ static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data, fn(dest, mask, base, cpu_env, desc); - tcg_temp_free_ptr(dest); - tcg_temp_free_ptr(mask); - if (!is_store) { mark_vs_dirty(s); } @@ -838,9 +801,6 @@ static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2, fn(dest, mask, base, stride, cpu_env, desc); - tcg_temp_free_ptr(dest); - tcg_temp_free_ptr(mask); - if (!is_store) { mark_vs_dirty(s); } @@ -949,10 +909,6 @@ static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, fn(dest, mask, base, index, cpu_env, desc); - tcg_temp_free_ptr(dest); - tcg_temp_free_ptr(mask); - tcg_temp_free_ptr(index); - if (!is_store) { mark_vs_dirty(s); } @@ -1092,8 +1048,6 @@ static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data, fn(dest, mask, base, cpu_env, desc); - tcg_temp_free_ptr(dest); - tcg_temp_free_ptr(mask); mark_vs_dirty(s); gen_set_label(over); return true; @@ -1154,8 +1108,6 @@ static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf, fn(dest, base, cpu_env, desc); - tcg_temp_free_ptr(dest); - if (!is_store) { mark_vs_dirty(s); } @@ -1311,9 +1263,6 @@ static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm, fn(dest, mask, src1, src2, cpu_env, desc); - tcg_temp_free_ptr(dest); - tcg_temp_free_ptr(mask); - tcg_temp_free_ptr(src2); mark_vs_dirty(s); gen_set_label(over); return true; @@ -1344,7 +1293,6 @@ do_opivx_gvec(DisasContext *s, arg_rmrr *a, GVecGen2sFn *gvec_fn, gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2), src1, MAXSZ(s), MAXSZ(s)); - tcg_temp_free_i64(src1); mark_vs_dirty(s); return true; } @@ -1479,9 +1427,6 @@ static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm, fn(dest, mask, src1, src2, cpu_env, desc); - tcg_temp_free_ptr(dest); - tcg_temp_free_ptr(mask); - tcg_temp_free_ptr(src2); mark_vs_dirty(s); gen_set_label(over); return true; @@ -1850,7 +1795,6 @@ do_opivx_gvec_shift(DisasContext *s, arg_rmrr *a, GVecGen2sFn32 *gvec_fn, gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2), src1, MAXSZ(s), MAXSZ(s)); - tcg_temp_free_i32(src1); mark_vs_dirty(s); return true; } @@ -2027,8 +1971,7 @@ static bool vmulh_vv_check(DisasContext *s, arg_rmrr *a) * are not included for EEW=64 in Zve64*. (Section 18.2) */ return opivv_check(s, a) && - (!has_ext(s, RVV) && - s->cfg_ptr->ext_zve64f ? s->sew != MO_64 : true); + (!has_ext(s, RVV) ? s->sew != MO_64 : true); } static bool vmulh_vx_check(DisasContext *s, arg_rmrr *a) @@ -2041,8 +1984,7 @@ static bool vmulh_vx_check(DisasContext *s, arg_rmrr *a) * are not included for EEW=64 in Zve64*. (Section 18.2) */ return opivx_check(s, a) && - (!has_ext(s, RVV) && - s->cfg_ptr->ext_zve64f ? s->sew != MO_64 : true); + (!has_ext(s, RVV) ? 
s->sew != MO_64 : true); } GEN_OPIVV_GVEC_TRANS(vmul_vv, mul) @@ -2145,7 +2087,6 @@ static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a) tcg_gen_ext_tl_i64(s1_i64, s1); tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd), MAXSZ(s), MAXSZ(s), s1_i64); - tcg_temp_free_i64(s1_i64); } else { tcg_gen_gvec_dup_tl(s->sew, vreg_ofs(s, a->rd), MAXSZ(s), MAXSZ(s), s1); @@ -2166,9 +2107,6 @@ static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a) s->cfg_ptr->vlen / 8, data)); tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd)); fns[s->sew](dest, s1_i64, cpu_env, desc); - - tcg_temp_free_ptr(dest); - tcg_temp_free_i64(s1_i64); } mark_vs_dirty(s); @@ -2210,7 +2148,6 @@ static bool trans_vmv_v_i(DisasContext *s, arg_vmv_v_i *a) tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd)); fns[s->sew](dest, s1, cpu_env, desc); - tcg_temp_free_ptr(dest); mark_vs_dirty(s); gen_set_label(over); } @@ -2259,8 +2196,7 @@ static bool vsmul_vv_check(DisasContext *s, arg_rmrr *a) * for EEW=64 in Zve64*. (Section 18.2) */ return opivv_check(s, a) && - (!has_ext(s, RVV) && - s->cfg_ptr->ext_zve64f ? s->sew != MO_64 : true); + (!has_ext(s, RVV) ? s->sew != MO_64 : true); } static bool vsmul_vx_check(DisasContext *s, arg_rmrr *a) @@ -2271,8 +2207,7 @@ static bool vsmul_vx_check(DisasContext *s, arg_rmrr *a) * for EEW=64 in Zve64*. (Section 18.2) */ return opivx_check(s, a) && - (!has_ext(s, RVV) && - s->cfg_ptr->ext_zve64f ? s->sew != MO_64 : true); + (!has_ext(s, RVV) ? s->sew != MO_64 : true); } GEN_OPIVV_TRANS(vsmul_vv, vsmul_vv_check) @@ -2335,9 +2270,7 @@ static bool opfvv_check(DisasContext *s, arg_rmrr *a) return require_rvv(s) && require_rvf(s) && vext_check_isa_ill(s) && - vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm) && - require_zve32f(s) && - require_zve64f(s); + vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm); } /* OPFVV without GVEC IR */ @@ -2407,10 +2340,6 @@ static bool opfvf_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, fn(dest, mask, t1, src2, cpu_env, desc); - tcg_temp_free_ptr(dest); - tcg_temp_free_ptr(mask); - tcg_temp_free_ptr(src2); - tcg_temp_free_i64(t1); mark_vs_dirty(s); gen_set_label(over); return true; @@ -2425,9 +2354,7 @@ static bool opfvf_check(DisasContext *s, arg_rmrr *a) return require_rvv(s) && require_rvf(s) && vext_check_isa_ill(s) && - vext_check_ss(s, a->rd, a->rs2, a->vm) && - require_zve32f(s) && - require_zve64f(s); + vext_check_ss(s, a->rd, a->rs2, a->vm); } /* OPFVF without GVEC IR */ @@ -2465,9 +2392,7 @@ static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a) require_scale_rvf(s) && (s->sew != MO_8) && vext_check_isa_ill(s) && - vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm) && - require_scale_zve32f(s) && - require_scale_zve64f(s); + vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm); } /* OPFVV with WIDEN */ @@ -2510,9 +2435,7 @@ static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a) require_scale_rvf(s) && (s->sew != MO_8) && vext_check_isa_ill(s) && - vext_check_ds(s, a->rd, a->rs2, a->vm) && - require_scale_zve32f(s) && - require_scale_zve64f(s); + vext_check_ds(s, a->rd, a->rs2, a->vm); } /* OPFVF with WIDEN */ @@ -2544,9 +2467,7 @@ static bool opfwv_widen_check(DisasContext *s, arg_rmrr *a) require_scale_rvf(s) && (s->sew != MO_8) && vext_check_isa_ill(s) && - vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm) && - require_scale_zve32f(s) && - require_scale_zve64f(s); + vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm); } /* WIDEN OPFVV with WIDEN */ @@ -2589,9 +2510,7 @@ static bool opfwf_widen_check(DisasContext *s, arg_rmrr *a) require_scale_rvf(s) && 
(s->sew != MO_8) && vext_check_isa_ill(s) && - vext_check_dd(s, a->rd, a->rs2, a->vm) && - require_scale_zve32f(s) && - require_scale_zve64f(s); + vext_check_dd(s, a->rd, a->rs2, a->vm); } /* WIDEN OPFVF with WIDEN */ @@ -2668,9 +2587,7 @@ static bool opfv_check(DisasContext *s, arg_rmr *a) require_rvf(s) && vext_check_isa_ill(s) && /* OPFV instructions ignore vs1 check */ - vext_check_ss(s, a->rd, a->rs2, a->vm) && - require_zve32f(s) && - require_zve64f(s); + vext_check_ss(s, a->rd, a->rs2, a->vm); } static bool do_opfv(DisasContext *s, arg_rmr *a, @@ -2735,9 +2652,7 @@ static bool opfvv_cmp_check(DisasContext *s, arg_rmrr *a) return require_rvv(s) && require_rvf(s) && vext_check_isa_ill(s) && - vext_check_mss(s, a->rd, a->rs1, a->rs2) && - require_zve32f(s) && - require_zve64f(s); + vext_check_mss(s, a->rd, a->rs1, a->rs2); } GEN_OPFVV_TRANS(vmfeq_vv, opfvv_cmp_check) @@ -2750,9 +2665,7 @@ static bool opfvf_cmp_check(DisasContext *s, arg_rmrr *a) return require_rvv(s) && require_rvf(s) && vext_check_isa_ill(s) && - vext_check_ms(s, a->rd, a->rs2) && - require_zve32f(s) && - require_zve64f(s); + vext_check_ms(s, a->rd, a->rs2); } GEN_OPFVF_TRANS(vmfeq_vf, opfvf_cmp_check) @@ -2773,9 +2686,7 @@ static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a) if (require_rvv(s) && require_rvf(s) && vext_check_isa_ill(s) && - require_align(a->rd, s->lmul) && - require_zve32f(s) && - require_zve64f(s)) { + require_align(a->rd, s->lmul)) { gen_set_rm(s, RISCV_FRM_DYN); TCGv_i64 t1; @@ -2814,11 +2725,9 @@ static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a) fns[s->sew - 1](dest, t1, cpu_env, desc); - tcg_temp_free_ptr(dest); mark_vs_dirty(s); gen_set_label(over); } - tcg_temp_free_i64(t1); return true; } return false; @@ -2860,18 +2769,14 @@ static bool opfv_widen_check(DisasContext *s, arg_rmr *a) static bool opxfv_widen_check(DisasContext *s, arg_rmr *a) { return opfv_widen_check(s, a) && - require_rvf(s) && - require_zve32f(s) && - require_zve64f(s); + require_rvf(s); } static bool opffv_widen_check(DisasContext *s, arg_rmr *a) { return opfv_widen_check(s, a) && - require_scale_rvf(s) && - (s->sew != MO_8) && - require_scale_zve32f(s) && - require_scale_zve64f(s); + require_scale_rvfmin(s) && + (s->sew != MO_8); } #define GEN_OPFV_WIDEN_TRANS(NAME, CHECK, HELPER, FRM) \ @@ -2922,9 +2827,7 @@ static bool opfxv_widen_check(DisasContext *s, arg_rmr *a) require_scale_rvf(s) && vext_check_isa_ill(s) && /* OPFV widening instructions ignore vs1 check */ - vext_check_ds(s, a->rd, a->rs2, a->vm) && - require_scale_zve32f(s) && - require_scale_zve64f(s); + vext_check_ds(s, a->rd, a->rs2, a->vm); } #define GEN_OPFXV_WIDEN_TRANS(NAME) \ @@ -2979,18 +2882,21 @@ static bool opfxv_narrow_check(DisasContext *s, arg_rmr *a) { return opfv_narrow_check(s, a) && require_rvf(s) && - (s->sew != MO_64) && - require_zve32f(s) && - require_zve64f(s); + (s->sew != MO_64); } static bool opffv_narrow_check(DisasContext *s, arg_rmr *a) { return opfv_narrow_check(s, a) && + require_scale_rvfmin(s) && + (s->sew != MO_8); +} + +static bool opffv_rod_narrow_check(DisasContext *s, arg_rmr *a) +{ + return opfv_narrow_check(s, a) && require_scale_rvf(s) && - (s->sew != MO_8) && - require_scale_zve32f(s) && - require_scale_zve64f(s); + (s->sew != MO_8); } #define GEN_OPFV_NARROW_TRANS(NAME, CHECK, HELPER, FRM) \ @@ -3030,7 +2936,7 @@ GEN_OPFV_NARROW_TRANS(vfncvt_f_x_w, opfxv_narrow_check, vfncvt_f_x_w, GEN_OPFV_NARROW_TRANS(vfncvt_f_f_w, opffv_narrow_check, vfncvt_f_f_w, RISCV_FRM_DYN) /* Reuse the helper function from 
vfncvt.f.f.w */ -GEN_OPFV_NARROW_TRANS(vfncvt_rod_f_f_w, opffv_narrow_check, vfncvt_f_f_w, +GEN_OPFV_NARROW_TRANS(vfncvt_rod_f_f_w, opffv_rod_narrow_check, vfncvt_f_f_w, RISCV_FRM_ROD) static bool opxfv_narrow_check(DisasContext *s, arg_rmr *a) @@ -3039,9 +2945,7 @@ static bool opxfv_narrow_check(DisasContext *s, arg_rmr *a) require_scale_rvf(s) && vext_check_isa_ill(s) && /* OPFV narrowing instructions ignore vs1 check */ - vext_check_sd(s, a->rd, a->rs2, a->vm) && - require_scale_zve32f(s) && - require_scale_zve64f(s); + vext_check_sd(s, a->rd, a->rs2, a->vm); } #define GEN_OPXFV_NARROW_TRANS(NAME, HELPER, FRM) \ @@ -3115,9 +3019,7 @@ GEN_OPIVV_WIDEN_TRANS(vwredsumu_vs, reduction_widen_check) static bool freduction_check(DisasContext *s, arg_rmrr *a) { return reduction_check(s, a) && - require_rvf(s) && - require_zve32f(s) && - require_zve64f(s); + require_rvf(s); } GEN_OPFVV_TRANS(vfredusum_vs, freduction_check) @@ -3200,10 +3102,6 @@ static bool trans_vcpop_m(DisasContext *s, arg_rmr *a) gen_helper_vcpop_m(dst, mask, src2, cpu_env, desc); gen_set_gpr(s, a->rd, dst); - - tcg_temp_free_ptr(mask); - tcg_temp_free_ptr(src2); - return true; } return false; @@ -3233,9 +3131,6 @@ static bool trans_vfirst_m(DisasContext *s, arg_rmr *a) gen_helper_vfirst_m(dst, mask, src2, cpu_env, desc); gen_set_gpr(s, a->rd, dst); - - tcg_temp_free_ptr(mask); - tcg_temp_free_ptr(src2); return true; } return false; @@ -3430,8 +3325,6 @@ static void vec_element_loadx(DisasContext *s, TCGv_i64 dest, /* Perform the load. */ load_element(dest, base, vreg_ofs(s, vreg), s->sew, false); - tcg_temp_free_ptr(base); - tcg_temp_free_i32(ofs); /* Flush out-of-range indexing to zero. */ t_vlmax = tcg_constant_i64(vlmax); @@ -3440,8 +3333,6 @@ static void vec_element_loadx(DisasContext *s, TCGv_i64 dest, tcg_gen_movcond_i64(TCG_COND_LTU, dest, t_idx, t_vlmax, dest, t_zero); - - tcg_temp_free_i64(t_idx); } static void vec_element_loadi(DisasContext *s, TCGv_i64 dest, @@ -3501,9 +3392,6 @@ static bool trans_vmv_x_s(DisasContext *s, arg_vmv_x_s *a) vec_element_loadi(s, t1, a->rs2, 0, true); tcg_gen_trunc_i64_tl(dest, t1); gen_set_gpr(s, a->rd, dest); - tcg_temp_free_i64(t1); - tcg_temp_free(dest); - return true; } return false; @@ -3531,7 +3419,6 @@ static bool trans_vmv_s_x(DisasContext *s, arg_vmv_s_x *a) s1 = get_gpr(s, a->rs1, EXT_NONE); tcg_gen_ext_tl_i64(t1, s1); vec_element_storei(s, a->rd, 0, t1); - tcg_temp_free_i64(t1); mark_vs_dirty(s); gen_set_label(over); return true; @@ -3544,9 +3431,7 @@ static bool trans_vfmv_f_s(DisasContext *s, arg_vfmv_f_s *a) { if (require_rvv(s) && require_rvf(s) && - vext_check_isa_ill(s) && - require_zve32f(s) && - require_zve64f(s)) { + vext_check_isa_ill(s)) { gen_set_rm(s, RISCV_FRM_DYN); unsigned int ofs = (8 << s->sew); @@ -3572,9 +3457,7 @@ static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a) { if (require_rvv(s) && require_rvf(s) && - vext_check_isa_ill(s) && - require_zve32f(s) && - require_zve64f(s)) { + vext_check_isa_ill(s)) { gen_set_rm(s, RISCV_FRM_DYN); /* The instructions ignore LMUL and vector register group. 
*/ @@ -3590,7 +3473,6 @@ static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a) do_nanbox(s, t1, cpu_fpr[a->rs1]); vec_element_storei(s, a->rd, 0, t1); - tcg_temp_free_i64(t1); mark_vs_dirty(s); gen_set_label(over); return true; @@ -3625,17 +3507,13 @@ GEN_OPIVI_TRANS(vslidedown_vi, IMM_ZX, vslidedown_vx, slidedown_check) static bool fslideup_check(DisasContext *s, arg_rmrr *a) { return slideup_check(s, a) && - require_rvf(s) && - require_zve32f(s) && - require_zve64f(s); + require_rvf(s); } static bool fslidedown_check(DisasContext *s, arg_rmrr *a) { return slidedown_check(s, a) && - require_rvf(s) && - require_zve32f(s) && - require_zve64f(s); + require_rvf(s); } GEN_OPFVF_TRANS(vfslide1up_vf, fslideup_check) @@ -3703,7 +3581,6 @@ static bool trans_vrgather_vx(DisasContext *s, arg_rmrr *a) tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd), MAXSZ(s), MAXSZ(s), dest); - tcg_temp_free_i64(dest); mark_vs_dirty(s); } else { static gen_helper_opivx * const fns[4] = { diff --git a/target/riscv/insn_trans/trans_rvzfh.c.inc b/target/riscv/insn_trans/trans_rvzfh.c.inc index 2ad5716312..74dde37ff7 100644 --- a/target/riscv/insn_trans/trans_rvzfh.c.inc +++ b/target/riscv/insn_trans/trans_rvzfh.c.inc @@ -28,15 +28,14 @@ } \ } while (0) -#define REQUIRE_ZFH_OR_ZFHMIN(ctx) do { \ - if (!(ctx->cfg_ptr->ext_zfh || ctx->cfg_ptr->ext_zfhmin)) { \ +#define REQUIRE_ZFHMIN(ctx) do { \ + if (!ctx->cfg_ptr->ext_zfhmin) { \ return false; \ } \ } while (0) -#define REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx) do { \ - if (!(ctx->cfg_ptr->ext_zfh || ctx->cfg_ptr->ext_zfhmin || \ - ctx->cfg_ptr->ext_zhinx || ctx->cfg_ptr->ext_zhinxmin)) { \ +#define REQUIRE_ZFHMIN_OR_ZHINXMIN(ctx) do { \ + if (!(ctx->cfg_ptr->ext_zfhmin || ctx->cfg_ptr->ext_zhinxmin)) { \ return false; \ } \ } while (0) @@ -47,12 +46,12 @@ static bool trans_flh(DisasContext *ctx, arg_flh *a) TCGv t0; REQUIRE_FPU; - REQUIRE_ZFH_OR_ZFHMIN(ctx); + REQUIRE_ZFHMIN(ctx); decode_save_opc(ctx); t0 = get_gpr(ctx, a->rs1, EXT_NONE); if (a->imm) { - TCGv temp = temp_new(ctx); + TCGv temp = tcg_temp_new(); tcg_gen_addi_tl(temp, t0, a->imm); t0 = temp; } @@ -70,7 +69,7 @@ static bool trans_fsh(DisasContext *ctx, arg_fsh *a) TCGv t0; REQUIRE_FPU; - REQUIRE_ZFH_OR_ZFHMIN(ctx); + REQUIRE_ZFHMIN(ctx); decode_save_opc(ctx); t0 = get_gpr(ctx, a->rs1, EXT_NONE); @@ -257,9 +256,6 @@ static bool trans_fsgnj_h(DisasContext *ctx, arg_fsgnj_h *a) /* This formulation retains the nanboxing of rs2 in normal 'Zfh'. */ tcg_gen_deposit_i64(dest, rs2, rs1, 0, 15); - - tcg_temp_free_i64(rs1); - tcg_temp_free_i64(rs2); } else { tcg_gen_deposit_i64(dest, src2, src1, 0, 15); tcg_gen_ext16s_i64(dest, dest); @@ -303,20 +299,16 @@ static bool trans_fsgnjn_h(DisasContext *ctx, arg_fsgnjn_h *a) * Replace bit 15 in rs1 with inverse in rs2. * This formulation retains the nanboxing of rs1. 
*/ - mask = tcg_const_i64(~MAKE_64BIT_MASK(15, 1)); + mask = tcg_constant_i64(~MAKE_64BIT_MASK(15, 1)); tcg_gen_not_i64(rs2, rs2); tcg_gen_andc_i64(rs2, rs2, mask); tcg_gen_and_i64(dest, mask, rs1); tcg_gen_or_i64(dest, dest, rs2); - - tcg_temp_free_i64(mask); - tcg_temp_free_i64(rs2); } /* signed-extended intead of nanboxing for result if enable zfinx */ if (ctx->cfg_ptr->ext_zfinx) { tcg_gen_ext16s_i64(dest, dest); } - tcg_temp_free_i64(rs1); mark_fs_dirty(ctx); return true; } @@ -356,14 +348,11 @@ static bool trans_fsgnjx_h(DisasContext *ctx, arg_fsgnjx_h *a) */ tcg_gen_andi_i64(dest, rs2, MAKE_64BIT_MASK(15, 1)); tcg_gen_xor_i64(dest, rs1, dest); - - tcg_temp_free_i64(rs2); } /* signed-extended intead of nanboxing for result if enable zfinx */ if (ctx->cfg_ptr->ext_zfinx) { tcg_gen_ext16s_i64(dest, dest); } - tcg_temp_free_i64(rs1); mark_fs_dirty(ctx); return true; } @@ -401,7 +390,7 @@ static bool trans_fmax_h(DisasContext *ctx, arg_fmax_h *a) static bool trans_fcvt_s_h(DisasContext *ctx, arg_fcvt_s_h *a) { REQUIRE_FPU; - REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx); + REQUIRE_ZFHMIN_OR_ZHINXMIN(ctx); TCGv_i64 dest = dest_fpr(ctx, a->rd); TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); @@ -418,7 +407,7 @@ static bool trans_fcvt_s_h(DisasContext *ctx, arg_fcvt_s_h *a) static bool trans_fcvt_d_h(DisasContext *ctx, arg_fcvt_d_h *a) { REQUIRE_FPU; - REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx); + REQUIRE_ZFHMIN_OR_ZHINXMIN(ctx); REQUIRE_ZDINX_OR_D(ctx); TCGv_i64 dest = dest_fpr(ctx, a->rd); @@ -436,7 +425,7 @@ static bool trans_fcvt_d_h(DisasContext *ctx, arg_fcvt_d_h *a) static bool trans_fcvt_h_s(DisasContext *ctx, arg_fcvt_h_s *a) { REQUIRE_FPU; - REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx); + REQUIRE_ZFHMIN_OR_ZHINXMIN(ctx); TCGv_i64 dest = dest_fpr(ctx, a->rd); TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1); @@ -452,7 +441,7 @@ static bool trans_fcvt_h_s(DisasContext *ctx, arg_fcvt_h_s *a) static bool trans_fcvt_h_d(DisasContext *ctx, arg_fcvt_h_d *a) { REQUIRE_FPU; - REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx); + REQUIRE_ZFHMIN_OR_ZHINXMIN(ctx); REQUIRE_ZDINX_OR_D(ctx); TCGv_i64 dest = dest_fpr(ctx, a->rd); @@ -585,7 +574,7 @@ static bool trans_fcvt_h_wu(DisasContext *ctx, arg_fcvt_h_wu *a) static bool trans_fmv_x_h(DisasContext *ctx, arg_fmv_x_h *a) { REQUIRE_FPU; - REQUIRE_ZFH_OR_ZFHMIN(ctx); + REQUIRE_ZFHMIN(ctx); TCGv dest = dest_gpr(ctx, a->rd); @@ -605,7 +594,7 @@ static bool trans_fmv_x_h(DisasContext *ctx, arg_fmv_x_h *a) static bool trans_fmv_h_x(DisasContext *ctx, arg_fmv_h_x *a) { REQUIRE_FPU; - REQUIRE_ZFH_OR_ZFHMIN(ctx); + REQUIRE_ZFHMIN(ctx); TCGv t0 = get_gpr(ctx, a->rs1, EXT_ZERO); diff --git a/target/riscv/insn_trans/trans_rvzicbo.c.inc b/target/riscv/insn_trans/trans_rvzicbo.c.inc new file mode 100644 index 0000000000..7df9c30b58 --- /dev/null +++ b/target/riscv/insn_trans/trans_rvzicbo.c.inc @@ -0,0 +1,57 @@ +/* + * RISC-V translation routines for the RISC-V CBO Extension. + * + * Copyright (c) 2021 Philipp Tomsich, philipp.tomsich@vrull.eu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#define REQUIRE_ZICBOM(ctx) do { \ + if (!ctx->cfg_ptr->ext_icbom) { \ + return false; \ + } \ +} while (0) + +#define REQUIRE_ZICBOZ(ctx) do { \ + if (!ctx->cfg_ptr->ext_icboz) { \ + return false; \ + } \ +} while (0) + +static bool trans_cbo_clean(DisasContext *ctx, arg_cbo_clean *a) +{ + REQUIRE_ZICBOM(ctx); + gen_helper_cbo_clean_flush(cpu_env, cpu_gpr[a->rs1]); + return true; +} + +static bool trans_cbo_flush(DisasContext *ctx, arg_cbo_flush *a) +{ + REQUIRE_ZICBOM(ctx); + gen_helper_cbo_clean_flush(cpu_env, cpu_gpr[a->rs1]); + return true; +} + +static bool trans_cbo_inval(DisasContext *ctx, arg_cbo_inval *a) +{ + REQUIRE_ZICBOM(ctx); + gen_helper_cbo_inval(cpu_env, cpu_gpr[a->rs1]); + return true; +} + +static bool trans_cbo_zero(DisasContext *ctx, arg_cbo_zero *a) +{ + REQUIRE_ZICBOZ(ctx); + gen_helper_cbo_zero(cpu_env, cpu_gpr[a->rs1]); + return true; +} diff --git a/target/riscv/insn_trans/trans_rvzicond.c.inc b/target/riscv/insn_trans/trans_rvzicond.c.inc new file mode 100644 index 0000000000..645260164e --- /dev/null +++ b/target/riscv/insn_trans/trans_rvzicond.c.inc @@ -0,0 +1,49 @@ +/* + * RISC-V translation routines for the Zicond Standard Extension. + * + * Copyright (c) 2020-2023 PLCT Lab + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#define REQUIRE_ZICOND(ctx) do { \ + if (!ctx->cfg_ptr->ext_zicond) { \ + return false; \ + } \ +} while (0) + +static bool trans_czero_eqz(DisasContext *ctx, arg_czero_eqz *a) +{ + REQUIRE_ZICOND(ctx); + + TCGv dest = dest_gpr(ctx, a->rd); + TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE); + TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE); + + tcg_gen_movcond_tl(TCG_COND_EQ, dest, src2, ctx->zero, ctx->zero, src1); + gen_set_gpr(ctx, a->rd, dest); + return true; +} + +static bool trans_czero_nez(DisasContext *ctx, arg_czero_nez *a) +{ + REQUIRE_ZICOND(ctx); + + TCGv dest = dest_gpr(ctx, a->rd); + TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE); + TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE); + + tcg_gen_movcond_tl(TCG_COND_NE, dest, src2, ctx->zero, ctx->zero, src1); + gen_set_gpr(ctx, a->rd, dest); + return true; +} diff --git a/target/riscv/insn_trans/trans_xthead.c.inc b/target/riscv/insn_trans/trans_xthead.c.inc index be87c34f56..df504c3f2c 100644 --- a/target/riscv/insn_trans/trans_xthead.c.inc +++ b/target/riscv/insn_trans/trans_xthead.c.inc @@ -100,10 +100,7 @@ static TCGv get_th_address_indexed(DisasContext *ctx, int rs1, int rs2, tcg_gen_shli_tl(offs, src2, imm2); } - TCGv addr = get_address_indexed(ctx, rs1, offs); - - tcg_temp_free(offs); - return addr; + return get_address_indexed(ctx, rs1, offs); } /* XTheadBa */ @@ -120,7 +117,6 @@ static void gen_th_addsl##SHAMT(TCGv ret, TCGv arg1, TCGv arg2) \ TCGv t = tcg_temp_new(); \ tcg_gen_shli_tl(t, arg2, SHAMT); \ tcg_gen_add_tl(ret, t, arg1); \ - tcg_temp_free(t); \ } GEN_TH_ADDSL(1) @@ -204,7 +200,6 @@ static bool gen_th_ff0(DisasContext *ctx, arg_th_ff0 *a, DisasExtend ext) gen_clz(dest, t); } - tcg_temp_free(t); gen_set_gpr(ctx, a->rd, dest); return true; @@ -469,7 +464,6 @@ static bool trans_th_fmv_hw_x(DisasContext *ctx, arg_th_fmv_hw_x *a) tcg_gen_extu_tl_i64(t1, src1); tcg_gen_deposit_i64(cpu_fpr[a->rd], cpu_fpr[a->rd], t1, 32, 32); - tcg_temp_free_i64(t1); mark_fs_dirty(ctx); return true; } @@ -489,7 +483,6 @@ static bool trans_th_fmv_x_hw(DisasContext *ctx, arg_th_fmv_x_hw *a) tcg_gen_extract_i64(t1, cpu_fpr[a->rs1], 32, 32); tcg_gen_trunc_i64_tl(dst, t1); gen_set_gpr(ctx, a->rd, dst); - tcg_temp_free_i64(t1); mark_fs_dirty(ctx); return true; } @@ -511,15 +504,12 @@ static bool gen_th_mac(DisasContext *ctx, arg_r *a, extend_operand_func(tmp, src1); extend_operand_func(tmp2, src2); tcg_gen_mul_tl(tmp, tmp, tmp2); - tcg_temp_free(tmp2); } else { tcg_gen_mul_tl(tmp, src1, src2); } accumulate_func(dest, src0, tmp); gen_set_gpr(ctx, a->rd, dest); - tcg_temp_free(tmp); - return true; } @@ -594,8 +584,6 @@ static bool gen_load_inc(DisasContext *ctx, arg_th_meminc *a, MemOp memop, tcg_gen_addi_tl(rs1, rs1, imm); gen_set_gpr(ctx, a->rd, rd); gen_set_gpr(ctx, a->rs1, rs1); - - tcg_temp_free(addr); return true; } @@ -615,8 +603,6 @@ static bool gen_store_inc(DisasContext *ctx, arg_th_meminc *a, MemOp memop, tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop); tcg_gen_addi_tl(rs1, rs1, imm); gen_set_gpr(ctx, a->rs1, rs1); - - tcg_temp_free(addr); return true; } @@ -950,11 +936,6 @@ static bool gen_loadpair_tl(DisasContext *ctx, arg_th_pair *a, MemOp memop, tcg_gen_qemu_ld_tl(t2, addr2, ctx->mem_idx, memop); gen_set_gpr(ctx, a->rd1, t1); gen_set_gpr(ctx, a->rd2, t2); - - tcg_temp_free(t1); - tcg_temp_free(t2); - tcg_temp_free(addr1); - tcg_temp_free(addr2); return true; } @@ -980,10 +961,6 @@ static bool trans_th_lwud(DisasContext *ctx, arg_th_pair *a) static bool gen_storepair_tl(DisasContext *ctx, arg_th_pair *a, MemOp memop, int shamt) { - 
if (a->rs == a->rd1 || a->rs == a->rd2 || a->rd1 == a->rd2) { - return false; - } - TCGv data1 = get_gpr(ctx, a->rd1, EXT_NONE); TCGv data2 = get_gpr(ctx, a->rd2, EXT_NONE); TCGv addr1 = tcg_temp_new(); @@ -995,9 +972,6 @@ static bool gen_storepair_tl(DisasContext *ctx, arg_th_pair *a, MemOp memop, tcg_gen_qemu_st_tl(data1, addr1, ctx->mem_idx, memop); tcg_gen_qemu_st_tl(data2, addr2, ctx->mem_idx, memop); - - tcg_temp_free(addr1); - tcg_temp_free(addr2); return true; } diff --git a/target/riscv/machine.c b/target/riscv/machine.c index c6ce318cce..9c455931d8 100644 --- a/target/riscv/machine.c +++ b/target/riscv/machine.c @@ -27,9 +27,8 @@ static bool pmp_needed(void *opaque) { RISCVCPU *cpu = opaque; - CPURISCVState *env = &cpu->env; - return riscv_feature(env, RISCV_FEATURE_PMP); + return cpu->cfg.pmp; } static int pmp_post_load(void *opaque, int version_id) @@ -226,9 +225,8 @@ static const VMStateDescription vmstate_kvmtimer = { static bool debug_needed(void *opaque) { RISCVCPU *cpu = opaque; - CPURISCVState *env = &cpu->env; - return riscv_feature(env, RISCV_FEATURE_DEBUG); + return cpu->cfg.debug; } static int debug_post_load(void *opaque, int version_id) @@ -333,8 +331,8 @@ static const VMStateDescription vmstate_pmu_ctr_state = { const VMStateDescription vmstate_riscv_cpu = { .name = "cpu", - .version_id = 6, - .minimum_version_id = 6, + .version_id = 7, + .minimum_version_id = 7, .post_load = riscv_cpu_post_load, .fields = (VMStateField[]) { VMSTATE_UINTTL_ARRAY(env.gpr, RISCVCPU, 32), @@ -353,7 +351,6 @@ const VMStateDescription vmstate_riscv_cpu = { VMSTATE_UINT32(env.misa_ext, RISCVCPU), VMSTATE_UINT32(env.misa_mxl_max, RISCVCPU), VMSTATE_UINT32(env.misa_ext_mask, RISCVCPU), - VMSTATE_UINT32(env.features, RISCVCPU), VMSTATE_UINTTL(env.priv, RISCVCPU), VMSTATE_UINTTL(env.virt, RISCVCPU), VMSTATE_UINT64(env.resetvec, RISCVCPU), diff --git a/target/riscv/monitor.c b/target/riscv/monitor.c index 236f93b9f5..f36ddfa967 100644 --- a/target/riscv/monitor.c +++ b/target/riscv/monitor.c @@ -218,7 +218,7 @@ void hmp_info_mem(Monitor *mon, const QDict *qdict) return; } - if (!riscv_feature(env, RISCV_FEATURE_MMU)) { + if (!riscv_cpu_cfg(env)->mmu) { monitor_printf(mon, "S-mode MMU unavailable\n"); return; } diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c index 48f918b71b..84ee018f7d 100644 --- a/target/riscv/op_helper.c +++ b/target/riscv/op_helper.c @@ -3,6 +3,7 @@ * * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu * Copyright (c) 2017-2018 SiFive, Inc. + * Copyright (c) 2022 VRULL GmbH * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -123,6 +124,140 @@ target_ulong helper_csrrw_i128(CPURISCVState *env, int csr, return int128_getlo(rv); } + +/* + * check_zicbo_envcfg + * + * Raise virtual exceptions and illegal instruction exceptions for + * Zicbo[mz] instructions based on the settings of [mhs]envcfg as + * specified in section 2.5.1 of the CMO specification. 
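+ *
+ * Roughly: a clear menvcfg/senvcfg bit below the corresponding privilege
+ * level yields an illegal instruction exception, while a clear henvcfg or
+ * senvcfg bit under virtualization yields a virtual instruction fault.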
+ */ +static void check_zicbo_envcfg(CPURISCVState *env, target_ulong envbits, + uintptr_t ra) +{ +#ifndef CONFIG_USER_ONLY + if ((env->priv < PRV_M) && !get_field(env->menvcfg, envbits)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, ra); + } + + if (riscv_cpu_virt_enabled(env) && + (((env->priv < PRV_H) && !get_field(env->henvcfg, envbits)) || + ((env->priv < PRV_S) && !get_field(env->senvcfg, envbits)))) { + riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, ra); + } + + if ((env->priv < PRV_S) && !get_field(env->senvcfg, envbits)) { + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, ra); + } +#endif +} + +void helper_cbo_zero(CPURISCVState *env, target_ulong address) +{ + RISCVCPU *cpu = env_archcpu(env); + uint16_t cbozlen = cpu->cfg.cboz_blocksize; + int mmu_idx = cpu_mmu_index(env, false); + uintptr_t ra = GETPC(); + void *mem; + + check_zicbo_envcfg(env, MENVCFG_CBZE, ra); + + /* Mask off low-bits to align-down to the cache-block. */ + address &= ~(cbozlen - 1); + + /* + * cbo.zero requires MMU_DATA_STORE access. Do a probe_write() + * to raise any exceptions, including PMP. + */ + mem = probe_write(env, address, cbozlen, mmu_idx, ra); + + if (likely(mem)) { + memset(mem, 0, cbozlen); + } else { + /* + * This means that we're dealing with an I/O page. Section 4.2 + * of cmobase v1.0.1 says: + * + * "Cache-block zero instructions store zeros independently + * of whether data from the underlying memory locations are + * cacheable." + * + * Write zeros in address + cbozlen regardless of not being + * a RAM page. + */ + for (int i = 0; i < cbozlen; i++) { + cpu_stb_mmuidx_ra(env, address + i, 0, mmu_idx, ra); + } + } +} + +/* + * check_zicbom_access + * + * Check access permissions (LOAD, STORE or FETCH as specified in + * section 2.5.2 of the CMO specification) for Zicbom, raising + * either store page-fault (non-virtualized) or store guest-page + * fault (virtualized). + */ +static void check_zicbom_access(CPURISCVState *env, + target_ulong address, + uintptr_t ra) +{ + RISCVCPU *cpu = env_archcpu(env); + int mmu_idx = cpu_mmu_index(env, false); + uint16_t cbomlen = cpu->cfg.cbom_blocksize; + void *phost; + int ret; + + /* Mask off low-bits to align-down to the cache-block. */ + address &= ~(cbomlen - 1); + + /* + * Section 2.5.2 of cmobase v1.0.1: + * + * "A cache-block management instruction is permitted to + * access the specified cache block whenever a load instruction + * or store instruction is permitted to access the corresponding + * physical addresses. If neither a load instruction nor store + * instruction is permitted to access the physical addresses, + * but an instruction fetch is permitted to access the physical + * addresses, whether a cache-block management instruction is + * permitted to access the cache block is UNSPECIFIED." + */ + ret = probe_access_flags(env, address, cbomlen, MMU_DATA_LOAD, + mmu_idx, true, &phost, ra); + if (ret != TLB_INVALID_MASK) { + /* Success: readable */ + return; + } + + /* + * Since not readable, must be writable. On failure, store + * fault/store guest amo fault will be raised by + * riscv_cpu_tlb_fill(). PMP exceptions will be caught + * there as well. + */ + probe_write(env, address, cbomlen, mmu_idx, ra); +} + +void helper_cbo_clean_flush(CPURISCVState *env, target_ulong address) +{ + uintptr_t ra = GETPC(); + check_zicbo_envcfg(env, MENVCFG_CBCFE, ra); + check_zicbom_access(env, address, ra); + + /* We don't emulate the cache-hierarchy, so we're done. 
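+     * The envcfg and access checks above are still performed, so the
+     * expected exceptions are raised even though the flush itself is a no-op.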
*/ +} + +void helper_cbo_inval(CPURISCVState *env, target_ulong address) +{ + uintptr_t ra = GETPC(); + check_zicbo_envcfg(env, MENVCFG_CBIE, ra); + check_zicbom_access(env, address, ra); + + /* We don't emulate the cache-hierarchy, so we're done. */ +} + #ifndef CONFIG_USER_ONLY target_ulong helper_sret(CPURISCVState *env) @@ -195,7 +330,7 @@ target_ulong helper_mret(CPURISCVState *env) uint64_t mstatus = env->mstatus; target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP); - if (riscv_feature(env, RISCV_FEATURE_PMP) && + if (riscv_cpu_cfg(env)->pmp && !pmp_get_num_rules(env) && (prev_priv != PRV_M)) { riscv_raise_exception(env, RISCV_EXCP_INST_ACCESS_FAULT, GETPC()); } diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c index 4bc4113531..a08cd95658 100644 --- a/target/riscv/pmp.c +++ b/target/riscv/pmp.c @@ -88,7 +88,7 @@ static void pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val) if (pmp_index < MAX_RISCV_PMPS) { bool locked = true; - if (riscv_feature(env, RISCV_FEATURE_EPMP)) { + if (riscv_cpu_cfg(env)->epmp) { /* mseccfg.RLB is set */ if (MSECCFG_RLB_ISSET(env)) { locked = false; @@ -239,7 +239,7 @@ static bool pmp_hart_has_privs_default(CPURISCVState *env, target_ulong addr, { bool ret; - if (riscv_feature(env, RISCV_FEATURE_EPMP)) { + if (riscv_cpu_cfg(env)->epmp) { if (MSECCFG_MMWP_ISSET(env)) { /* * The Machine Mode Whitelist Policy (mseccfg.MMWP) is set @@ -265,7 +265,7 @@ static bool pmp_hart_has_privs_default(CPURISCVState *env, target_ulong addr, } } - if ((!riscv_feature(env, RISCV_FEATURE_PMP)) || (mode == PRV_M)) { + if (!riscv_cpu_cfg(env)->pmp || (mode == PRV_M)) { /* * Privileged spec v1.10 states if HW doesn't implement any PMP entry * or no PMP entry matches an M-Mode access, the access succeeds. @@ -315,7 +315,7 @@ int pmp_hart_has_privs(CPURISCVState *env, target_ulong addr, } if (size == 0) { - if (riscv_feature(env, RISCV_FEATURE_MMU)) { + if (riscv_cpu_cfg(env)->mmu) { /* * If size is unknown (0), assume that all bytes * from addr to the end of the page will be accessed. diff --git a/target/riscv/translate.c b/target/riscv/translate.c index f9d5d1097e..0ee8ee147d 100644 --- a/target/riscv/translate.c +++ b/target/riscv/translate.c @@ -101,14 +101,8 @@ typedef struct DisasContext { bool cfg_vta_all_1s; target_ulong vstart; bool vl_eq_vlmax; - uint8_t ntemp; CPUState *cs; TCGv zero; - /* Space for 3 operands plus 1 extra for address computation. */ - TCGv temp[4]; - /* Space for 4 operands(1 dest and <=3 src) for float point computation */ - TCGv_i64 ftemp[4]; - uint8_t nftemp; /* PointerMasking extension */ bool pm_mask_enabled; bool pm_base_enabled; @@ -207,12 +201,10 @@ static void gen_nanbox_h(TCGv_i64 out, TCGv_i64 in) */ static void gen_check_nanbox_h(TCGv_i64 out, TCGv_i64 in) { - TCGv_i64 t_max = tcg_const_i64(0xffffffffffff0000ull); - TCGv_i64 t_nan = tcg_const_i64(0xffffffffffff7e00ull); + TCGv_i64 t_max = tcg_constant_i64(0xffffffffffff0000ull); + TCGv_i64 t_nan = tcg_constant_i64(0xffffffffffff7e00ull); tcg_gen_movcond_i64(TCG_COND_GEU, out, in, t_max, in, t_nan); - tcg_temp_free_i64(t_max); - tcg_temp_free_i64(t_nan); } static void gen_check_nanbox_s(TCGv_i64 out, TCGv_i64 in) @@ -315,12 +307,6 @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) * * Further, we may provide an extension for word operations. 
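 * (Roughly: when the operand length is narrower than the register, EXT_SIGN
 * and EXT_ZERO return a sign- or zero-extended copy in a fresh temporary;
 * otherwise the register itself is returned.)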
*/ -static TCGv temp_new(DisasContext *ctx) -{ - assert(ctx->ntemp < ARRAY_SIZE(ctx->temp)); - return ctx->temp[ctx->ntemp++] = tcg_temp_new(); -} - static TCGv get_gpr(DisasContext *ctx, int reg_num, DisasExtend ext) { TCGv t; @@ -335,11 +321,11 @@ static TCGv get_gpr(DisasContext *ctx, int reg_num, DisasExtend ext) case EXT_NONE: break; case EXT_SIGN: - t = temp_new(ctx); + t = tcg_temp_new(); tcg_gen_ext32s_tl(t, cpu_gpr[reg_num]); return t; case EXT_ZERO: - t = temp_new(ctx); + t = tcg_temp_new(); tcg_gen_ext32u_tl(t, cpu_gpr[reg_num]); return t; default: @@ -367,7 +353,7 @@ static TCGv get_gprh(DisasContext *ctx, int reg_num) static TCGv dest_gpr(DisasContext *ctx, int reg_num) { if (reg_num == 0 || get_olen(ctx) < TARGET_LONG_BITS) { - return temp_new(ctx); + return tcg_temp_new(); } return cpu_gpr[reg_num]; } @@ -375,7 +361,7 @@ static TCGv dest_gpr(DisasContext *ctx, int reg_num) static TCGv dest_gprh(DisasContext *ctx, int reg_num) { if (reg_num == 0) { - return temp_new(ctx); + return tcg_temp_new(); } return cpu_gprh[reg_num]; } @@ -431,12 +417,6 @@ static void gen_set_gpr128(DisasContext *ctx, int reg_num, TCGv rl, TCGv rh) } } -static TCGv_i64 ftemp_new(DisasContext *ctx) -{ - assert(ctx->nftemp < ARRAY_SIZE(ctx->ftemp)); - return ctx->ftemp[ctx->nftemp++] = tcg_temp_new_i64(); -} - static TCGv_i64 get_fpr_hs(DisasContext *ctx, int reg_num) { if (!ctx->cfg_ptr->ext_zfinx) { @@ -450,7 +430,7 @@ static TCGv_i64 get_fpr_hs(DisasContext *ctx, int reg_num) case MXL_RV32: #ifdef TARGET_RISCV32 { - TCGv_i64 t = ftemp_new(ctx); + TCGv_i64 t = tcg_temp_new_i64(); tcg_gen_ext_i32_i64(t, cpu_gpr[reg_num]); return t; } @@ -476,7 +456,7 @@ static TCGv_i64 get_fpr_d(DisasContext *ctx, int reg_num) switch (get_xl(ctx)) { case MXL_RV32: { - TCGv_i64 t = ftemp_new(ctx); + TCGv_i64 t = tcg_temp_new_i64(); tcg_gen_concat_tl_i64(t, cpu_gpr[reg_num], cpu_gpr[reg_num + 1]); return t; } @@ -496,12 +476,12 @@ static TCGv_i64 dest_fpr(DisasContext *ctx, int reg_num) } if (reg_num == 0) { - return ftemp_new(ctx); + return tcg_temp_new_i64(); } switch (get_xl(ctx)) { case MXL_RV32: - return ftemp_new(ctx); + return tcg_temp_new_i64(); #ifdef TARGET_RISCV64 case MXL_RV64: return cpu_gpr[reg_num]; @@ -584,7 +564,7 @@ static void gen_jal(DisasContext *ctx, int rd, target_ulong imm) /* Compute a canonical address from a register plus offset. */ static TCGv get_address(DisasContext *ctx, int rs1, int imm) { - TCGv addr = temp_new(ctx); + TCGv addr = tcg_temp_new(); TCGv src1 = get_gpr(ctx, rs1, EXT_NONE); tcg_gen_addi_tl(addr, src1, imm); @@ -602,7 +582,7 @@ static TCGv get_address(DisasContext *ctx, int rs1, int imm) /* Compute a canonical address from a register plus reg offset. 
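 * (As get_address above, but the offset comes from a register rather than an
 * immediate.)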
*/ static TCGv get_address_indexed(DisasContext *ctx, int rs1, TCGv offs) { - TCGv addr = temp_new(ctx); + TCGv addr = tcg_temp_new(); TCGv src1 = get_gpr(ctx, rs1, EXT_NONE); tcg_gen_add_tl(addr, src1, offs); @@ -639,7 +619,6 @@ static void mark_fs_dirty(DisasContext *ctx) tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus)); tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS); tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus)); - tcg_temp_free(tmp); } if (ctx->virt_enabled && ctx->mstatus_hs_fs != MSTATUS_FS) { @@ -650,7 +629,6 @@ static void mark_fs_dirty(DisasContext *ctx) tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs)); tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS); tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs)); - tcg_temp_free(tmp); } } #else @@ -675,7 +653,6 @@ static void mark_vs_dirty(DisasContext *ctx) tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus)); tcg_gen_ori_tl(tmp, tmp, MSTATUS_VS); tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus)); - tcg_temp_free(tmp); } if (ctx->virt_enabled && ctx->mstatus_hs_vs != MSTATUS_VS) { @@ -686,7 +663,6 @@ static void mark_vs_dirty(DisasContext *ctx) tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs)); tcg_gen_ori_tl(tmp, tmp, MSTATUS_VS); tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs)); - tcg_temp_free(tmp); } } #else @@ -1037,7 +1013,6 @@ static bool gen_shift(DisasContext *ctx, arg_r *a, DisasExtend ext, f128(dest, desth, src1, src1h, ext2); gen_set_gpr128(ctx, a->rd, dest, desth); } - tcg_temp_free(ext2); return true; } @@ -1103,7 +1078,9 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc) #include "insn_trans/trans_rvh.c.inc" #include "insn_trans/trans_rvv.c.inc" #include "insn_trans/trans_rvb.c.inc" +#include "insn_trans/trans_rvzicond.c.inc" #include "insn_trans/trans_rvzawrs.c.inc" +#include "insn_trans/trans_rvzicbo.c.inc" #include "insn_trans/trans_rvzfh.c.inc" #include "insn_trans/trans_rvk.c.inc" #include "insn_trans/trans_privileged.c.inc" @@ -1205,10 +1182,6 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) ctx->misa_mxl_max = env->misa_mxl_max; ctx->xl = FIELD_EX32(tb_flags, TB_FLAGS, XL); ctx->cs = cs; - ctx->ntemp = 0; - memset(ctx->temp, 0, sizeof(ctx->temp)); - ctx->nftemp = 0; - memset(ctx->ftemp, 0, sizeof(ctx->ftemp)); ctx->pm_mask_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_MASK_ENABLED); ctx->pm_base_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_BASE_ENABLED); ctx->itrigger = FIELD_EX32(tb_flags, TB_FLAGS, ITRIGGER); @@ -1233,23 +1206,11 @@ static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) DisasContext *ctx = container_of(dcbase, DisasContext, base); CPURISCVState *env = cpu->env_ptr; uint16_t opcode16 = translator_lduw(env, &ctx->base, ctx->base.pc_next); - int i; ctx->ol = ctx->xl; decode_opc(env, ctx, opcode16); ctx->base.pc_next = ctx->pc_succ_insn; - for (i = ctx->ntemp - 1; i >= 0; --i) { - tcg_temp_free(ctx->temp[i]); - ctx->temp[i] = NULL; - } - ctx->ntemp = 0; - for (i = ctx->nftemp - 1; i >= 0; --i) { - tcg_temp_free_i64(ctx->ftemp[i]); - ctx->ftemp[i] = NULL; - } - ctx->nftemp = 0; - /* Only the first insn within a TB is allowed to cross a page boundary. 
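 * (The length check further down uses pc_next + len - 1, the address of the
 * last byte of the next insn, so an insn that ends exactly at the page
 * boundary still counts as being on the same page.)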
*/ if (ctx->base.is_jmp == DISAS_NEXT) { if (ctx->itrigger || !is_same_page(&ctx->base, ctx->base.pc_next)) { @@ -1261,7 +1222,7 @@ static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) uint16_t next_insn = cpu_lduw_code(env, ctx->base.pc_next); int len = insn_len(next_insn); - if (!is_same_page(&ctx->base, ctx->base.pc_next + len)) { + if (!is_same_page(&ctx->base, ctx->base.pc_next + len - 1)) { ctx->base.is_jmp = DISAS_TOO_MANY; } } diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c index 3073c54871..2423affe37 100644 --- a/target/riscv/vector_helper.c +++ b/target/riscv/vector_helper.c @@ -267,6 +267,28 @@ GEN_VEXT_ST_ELEM(ste_h, int16_t, H2, stw) GEN_VEXT_ST_ELEM(ste_w, int32_t, H4, stl) GEN_VEXT_ST_ELEM(ste_d, int64_t, H8, stq) +static void vext_set_tail_elems_1s(CPURISCVState *env, target_ulong vl, + void *vd, uint32_t desc, uint32_t nf, + uint32_t esz, uint32_t max_elems) +{ + uint32_t total_elems = vext_get_total_elems(env, desc, esz); + uint32_t vlenb = riscv_cpu_cfg(env)->vlen >> 3; + uint32_t vta = vext_vta(desc); + uint32_t registers_used; + int k; + + for (k = 0; k < nf; ++k) { + vext_set_elems_1s(vd, vta, (k * max_elems + vl) * esz, + (k * max_elems + max_elems) * esz); + } + + if (nf * max_elems % total_elems != 0) { + registers_used = ((nf * max_elems) * esz + (vlenb - 1)) / vlenb; + vext_set_elems_1s(vd, vta, (nf * max_elems) * esz, + registers_used * vlenb); + } +} + /* *** stride: access vector element from strided memory */ @@ -281,8 +303,6 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base, uint32_t nf = vext_nf(desc); uint32_t max_elems = vext_max_elems(desc, log2_esz); uint32_t esz = 1 << log2_esz; - uint32_t total_elems = vext_get_total_elems(env, desc, esz); - uint32_t vta = vext_vta(desc); uint32_t vma = vext_vma(desc); for (i = env->vstart; i < env->vl; i++, env->vstart++) { @@ -301,18 +321,8 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base, } } env->vstart = 0; - /* set tail elements to 1s */ - for (k = 0; k < nf; ++k) { - vext_set_elems_1s(vd, vta, (k * max_elems + env->vl) * esz, - (k * max_elems + max_elems) * esz); - } - if (nf * max_elems % total_elems != 0) { - uint32_t vlenb = env_archcpu(env)->cfg.vlen >> 3; - uint32_t registers_used = - ((nf * max_elems) * esz + (vlenb - 1)) / vlenb; - vext_set_elems_1s(vd, vta, (nf * max_elems) * esz, - registers_used * vlenb); - } + + vext_set_tail_elems_1s(env, env->vl, vd, desc, nf, esz, max_elems); } #define GEN_VEXT_LD_STRIDE(NAME, ETYPE, LOAD_FN) \ @@ -359,8 +369,6 @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc, uint32_t nf = vext_nf(desc); uint32_t max_elems = vext_max_elems(desc, log2_esz); uint32_t esz = 1 << log2_esz; - uint32_t total_elems = vext_get_total_elems(env, desc, esz); - uint32_t vta = vext_vta(desc); /* load bytes from guest memory */ for (i = env->vstart; i < evl; i++, env->vstart++) { @@ -372,18 +380,8 @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc, } } env->vstart = 0; - /* set tail elements to 1s */ - for (k = 0; k < nf; ++k) { - vext_set_elems_1s(vd, vta, (k * max_elems + evl) * esz, - (k * max_elems + max_elems) * esz); - } - if (nf * max_elems % total_elems != 0) { - uint32_t vlenb = env_archcpu(env)->cfg.vlen >> 3; - uint32_t registers_used = - ((nf * max_elems) * esz + (vlenb - 1)) / vlenb; - vext_set_elems_1s(vd, vta, (nf * max_elems) * esz, - registers_used * vlenb); - } + + vext_set_tail_elems_1s(env, evl, vd, desc, nf, esz, max_elems); } /* @@ -484,8 +482,6 @@ 
vext_ldst_index(void *vd, void *v0, target_ulong base, uint32_t vm = vext_vm(desc); uint32_t max_elems = vext_max_elems(desc, log2_esz); uint32_t esz = 1 << log2_esz; - uint32_t total_elems = vext_get_total_elems(env, desc, esz); - uint32_t vta = vext_vta(desc); uint32_t vma = vext_vma(desc); /* load bytes from guest memory */ @@ -505,18 +501,8 @@ vext_ldst_index(void *vd, void *v0, target_ulong base, } } env->vstart = 0; - /* set tail elements to 1s */ - for (k = 0; k < nf; ++k) { - vext_set_elems_1s(vd, vta, (k * max_elems + env->vl) * esz, - (k * max_elems + max_elems) * esz); - } - if (nf * max_elems % total_elems != 0) { - uint32_t vlenb = env_archcpu(env)->cfg.vlen >> 3; - uint32_t registers_used = - ((nf * max_elems) * esz + (vlenb - 1)) / vlenb; - vext_set_elems_1s(vd, vta, (nf * max_elems) * esz, - registers_used * vlenb); - } + + vext_set_tail_elems_1s(env, env->vl, vd, desc, nf, esz, max_elems); } #define GEN_VEXT_LD_INDEX(NAME, ETYPE, INDEX_FN, LOAD_FN) \ @@ -585,8 +571,6 @@ vext_ldff(void *vd, void *v0, target_ulong base, uint32_t vm = vext_vm(desc); uint32_t max_elems = vext_max_elems(desc, log2_esz); uint32_t esz = 1 << log2_esz; - uint32_t total_elems = vext_get_total_elems(env, desc, esz); - uint32_t vta = vext_vta(desc); uint32_t vma = vext_vma(desc); target_ulong addr, offset, remain; @@ -647,18 +631,8 @@ ProbeSuccess: } } env->vstart = 0; - /* set tail elements to 1s */ - for (k = 0; k < nf; ++k) { - vext_set_elems_1s(vd, vta, (k * max_elems + env->vl) * esz, - (k * max_elems + max_elems) * esz); - } - if (nf * max_elems % total_elems != 0) { - uint32_t vlenb = env_archcpu(env)->cfg.vlen >> 3; - uint32_t registers_used = - ((nf * max_elems) * esz + (vlenb - 1)) / vlenb; - vext_set_elems_1s(vd, vta, (nf * max_elems) * esz, - registers_used * vlenb); - } + + vext_set_tail_elems_1s(env, env->vl, vd, desc, nf, esz, max_elems); } #define GEN_VEXT_LDFF(NAME, ETYPE, LOAD_FN) \ @@ -697,7 +671,7 @@ vext_ldst_whole(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc, { uint32_t i, k, off, pos; uint32_t nf = vext_nf(desc); - uint32_t vlenb = env_archcpu(env)->cfg.vlen >> 3; + uint32_t vlenb = riscv_cpu_cfg(env)->vlen >> 3; uint32_t max_elems = vlenb >> log2_esz; k = env->vstart / max_elems; @@ -1167,7 +1141,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ { \ uint32_t vl = env->vl; \ uint32_t vm = vext_vm(desc); \ - uint32_t total_elems = env_archcpu(env)->cfg.vlen; \ + uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \ uint32_t vta_all_1s = vext_vta_all_1s(desc); \ uint32_t i; \ \ @@ -1203,7 +1177,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \ { \ uint32_t vl = env->vl; \ uint32_t vm = vext_vm(desc); \ - uint32_t total_elems = env_archcpu(env)->cfg.vlen; \ + uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \ uint32_t vta_all_1s = vext_vta_all_1s(desc); \ uint32_t i; \ \ @@ -1402,7 +1376,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ { \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ - uint32_t total_elems = env_archcpu(env)->cfg.vlen; \ + uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \ uint32_t vta_all_1s = vext_vta_all_1s(desc); \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ @@ -1465,7 +1439,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ { \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ - uint32_t total_elems = env_archcpu(env)->cfg.vlen; \ + uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \ uint32_t vta_all_1s = vext_vta_all_1s(desc); \ uint32_t vma = 
vext_vma(desc); \ uint32_t i; \ @@ -4178,7 +4152,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ { \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ - uint32_t total_elems = env_archcpu(env)->cfg.vlen; \ + uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \ uint32_t vta_all_1s = vext_vta_all_1s(desc); \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ @@ -4216,7 +4190,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \ { \ uint32_t vm = vext_vm(desc); \ uint32_t vl = env->vl; \ - uint32_t total_elems = env_archcpu(env)->cfg.vlen; \ + uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \ uint32_t vta_all_1s = vext_vta_all_1s(desc); \ uint32_t vma = vext_vma(desc); \ uint32_t i; \ @@ -4747,7 +4721,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \ uint32_t desc) \ { \ uint32_t vl = env->vl; \ - uint32_t total_elems = env_archcpu(env)->cfg.vlen; \ + uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \ uint32_t vta_all_1s = vext_vta_all_1s(desc); \ uint32_t i; \ int a, b; \ @@ -4834,7 +4808,7 @@ static void vmsetm(void *vd, void *v0, void *vs2, CPURISCVState *env, { uint32_t vm = vext_vm(desc); uint32_t vl = env->vl; - uint32_t total_elems = env_archcpu(env)->cfg.vlen; + uint32_t total_elems = riscv_cpu_cfg(env)->vlen; uint32_t vta_all_1s = vext_vta_all_1s(desc); uint32_t vma = vext_vma(desc); int i; diff --git a/target/rx/translate.c b/target/rx/translate.c index af23876cb3..6624414739 100644 --- a/target/rx/translate.c +++ b/target/rx/translate.c @@ -429,7 +429,6 @@ static bool trans_MOV_rm(DisasContext *ctx, arg_MOV_rm *a) mem = tcg_temp_new(); tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz); rx_gen_st(a->sz, cpu_regs[a->rs], mem); - tcg_temp_free(mem); return true; } @@ -440,7 +439,6 @@ static bool trans_MOV_mr(DisasContext *ctx, arg_MOV_mr *a) mem = tcg_temp_new(); tcg_gen_addi_i32(mem, cpu_regs[a->rs], a->dsp << a->sz); rx_gen_ld(a->sz, cpu_regs[a->rd], mem); - tcg_temp_free(mem); return true; } @@ -462,8 +460,6 @@ static bool trans_MOV_im(DisasContext *ctx, arg_MOV_im *a) mem = tcg_temp_new(); tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz); rx_gen_st(a->sz, imm, mem); - tcg_temp_free(imm); - tcg_temp_free(mem); return true; } @@ -474,7 +470,6 @@ static bool trans_MOV_ar(DisasContext *ctx, arg_MOV_ar *a) mem = tcg_temp_new(); rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb); rx_gen_ld(a->sz, cpu_regs[a->rd], mem); - tcg_temp_free(mem); return true; } @@ -485,7 +480,6 @@ static bool trans_MOV_ra(DisasContext *ctx, arg_MOV_ra *a) mem = tcg_temp_new(); rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb); rx_gen_st(a->sz, cpu_regs[a->rs], mem); - tcg_temp_free(mem); return true; } @@ -521,9 +515,7 @@ static bool trans_MOV_mm(DisasContext *ctx, arg_MOV_mm *a) rx_gen_ld(a->sz, tmp, addr); addr = rx_index_addr(ctx, mem, a->ldd, a->sz, a->rd); rx_gen_st(a->sz, tmp, addr); - tcg_temp_free(tmp); } - tcg_temp_free(mem); return true; } @@ -541,7 +533,6 @@ static bool trans_MOV_rp(DisasContext *ctx, arg_MOV_rp *a) if (a->ad == 0) { tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz); } - tcg_temp_free(val); return true; } @@ -559,7 +550,6 @@ static bool trans_MOV_pr(DisasContext *ctx, arg_MOV_pr *a) tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz); } tcg_gen_mov_i32(cpu_regs[a->rs], val); - tcg_temp_free(val); return true; } @@ -571,7 +561,6 @@ static bool trans_MOVU_mr(DisasContext *ctx, arg_MOVU_mr *a) mem = tcg_temp_new(); tcg_gen_addi_i32(mem, cpu_regs[a->rs], a->dsp << a->sz); rx_gen_ldu(a->sz, cpu_regs[a->rd], mem); - 
tcg_temp_free(mem); return true; } @@ -592,7 +581,6 @@ static bool trans_MOVU_ar(DisasContext *ctx, arg_MOVU_ar *a) mem = tcg_temp_new(); rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb); rx_gen_ldu(a->sz, cpu_regs[a->rd], mem); - tcg_temp_free(mem); return true; } @@ -610,7 +598,6 @@ static bool trans_MOVU_pr(DisasContext *ctx, arg_MOVU_pr *a) tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz); } tcg_gen_mov_i32(cpu_regs[a->rs], val); - tcg_temp_free(val); return true; } @@ -635,7 +622,6 @@ static bool trans_POPC(DisasContext *ctx, arg_POPC *a) val = tcg_temp_new(); pop(val); move_to_cr(ctx, val, a->cr); - tcg_temp_free(val); return true; } @@ -663,7 +649,6 @@ static bool trans_PUSH_r(DisasContext *ctx, arg_PUSH_r *a) tcg_gen_mov_i32(val, cpu_regs[a->rs]); tcg_gen_subi_i32(cpu_sp, cpu_sp, 4); rx_gen_st(a->sz, val, cpu_sp); - tcg_temp_free(val); return true; } @@ -677,8 +662,6 @@ static bool trans_PUSH_m(DisasContext *ctx, arg_PUSH_m *a) rx_gen_ld(a->sz, val, addr); tcg_gen_subi_i32(cpu_sp, cpu_sp, 4); rx_gen_st(a->sz, val, cpu_sp); - tcg_temp_free(mem); - tcg_temp_free(val); return true; } @@ -689,7 +672,6 @@ static bool trans_PUSHC(DisasContext *ctx, arg_PUSHC *a) val = tcg_temp_new(); move_from_cr(ctx, val, a->cr, ctx->pc); push(val); - tcg_temp_free(val); return true; } @@ -717,7 +699,6 @@ static bool trans_XCHG_rr(DisasContext *ctx, arg_XCHG_rr *a) tcg_gen_mov_i32(tmp, cpu_regs[a->rs]); tcg_gen_mov_i32(cpu_regs[a->rs], cpu_regs[a->rd]); tcg_gen_mov_i32(cpu_regs[a->rd], tmp); - tcg_temp_free(tmp); return true; } @@ -741,7 +722,6 @@ static bool trans_XCHG_mr(DisasContext *ctx, arg_XCHG_mr *a) } tcg_gen_atomic_xchg_i32(cpu_regs[a->rd], addr, cpu_regs[a->rd], 0, mi_to_mop(a->mi)); - tcg_temp_free(mem); return true; } @@ -753,8 +733,6 @@ static inline void stcond(TCGCond cond, int rd, int imm) _imm = tcg_const_i32(imm); tcg_gen_movcond_i32(cond, cpu_regs[rd], cpu_psw_z, z, _imm, cpu_regs[rd]); - tcg_temp_free(z); - tcg_temp_free(_imm); } /* stz #imm,rd */ @@ -785,12 +763,9 @@ static bool trans_SCCnd(DisasContext *ctx, arg_SCCnd *a) tcg_gen_setcondi_i32(dc.cond, val, dc.value, 0); addr = rx_index_addr(ctx, mem, a->sz, a->ld, a->rd); rx_gen_st(a->sz, val, addr); - tcg_temp_free(val); - tcg_temp_free(mem); } else { tcg_gen_setcondi_i32(dc.cond, cpu_regs[a->rd], dc.value, 0); } - tcg_temp_free(dc.temp); return true; } @@ -842,7 +817,6 @@ static inline void rx_gen_op_irr(op3fn opr, int dst, int src, uint32_t src2) { TCGv imm = tcg_const_i32(src2); opr(cpu_regs[dst], cpu_regs[src], imm); - tcg_temp_free(imm); } static inline void rx_gen_op_mr(op3fn opr, DisasContext *ctx, @@ -852,7 +826,6 @@ static inline void rx_gen_op_mr(op3fn opr, DisasContext *ctx, mem = tcg_temp_new(); val = rx_load_source(ctx, mem, ld, mi, src); opr(cpu_regs[dst], cpu_regs[dst], val); - tcg_temp_free(mem); } static void rx_and(TCGv ret, TCGv arg1, TCGv arg2) @@ -1003,7 +976,6 @@ static void rx_adc(TCGv ret, TCGv arg1, TCGv arg2) tcg_gen_xor_i32(z, arg1, arg2); tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, z); tcg_gen_mov_i32(ret, cpu_psw_s); - tcg_temp_free(z); } /* adc #imm, rd */ @@ -1042,7 +1014,6 @@ static void rx_add(TCGv ret, TCGv arg1, TCGv arg2) tcg_gen_xor_i32(z, arg1, arg2); tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, z); tcg_gen_mov_i32(ret, cpu_psw_s); - tcg_temp_free(z); } /* add #uimm4, rd */ @@ -1079,7 +1050,6 @@ static void rx_sub(TCGv ret, TCGv arg1, TCGv arg2) temp = tcg_temp_new_i32(); tcg_gen_xor_i32(temp, arg1, arg2); tcg_gen_and_i32(cpu_psw_o, cpu_psw_o, temp); - tcg_temp_free_i32(temp); /* CMP not 
required return */ if (ret) { tcg_gen_mov_i32(ret, cpu_psw_s); @@ -1097,7 +1067,6 @@ static void rx_sbb(TCGv ret, TCGv arg1, TCGv arg2) temp = tcg_temp_new(); tcg_gen_not_i32(temp, arg2); rx_adc(ret, arg1, temp); - tcg_temp_free(temp); } /* cmp #imm4, rs2 */ @@ -1165,8 +1134,6 @@ static void rx_abs(TCGv ret, TCGv arg1) zero = tcg_const_i32(0); tcg_gen_neg_i32(neg, arg1); tcg_gen_movcond_i32(TCG_COND_LT, ret, arg1, zero, neg, arg1); - tcg_temp_free(neg); - tcg_temp_free(zero); } /* abs rd */ @@ -1239,7 +1206,6 @@ static bool trans_EMUL_ir(DisasContext *ctx, arg_EMUL_ir *a) } tcg_gen_muls2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15], cpu_regs[a->rd], imm); - tcg_temp_free(imm); return true; } @@ -1255,7 +1221,6 @@ static bool trans_EMUL_mr(DisasContext *ctx, arg_EMUL_mr *a) val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs); tcg_gen_muls2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15], cpu_regs[a->rd], val); - tcg_temp_free(mem); return true; } @@ -1268,7 +1233,6 @@ static bool trans_EMULU_ir(DisasContext *ctx, arg_EMULU_ir *a) } tcg_gen_mulu2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15], cpu_regs[a->rd], imm); - tcg_temp_free(imm); return true; } @@ -1284,7 +1248,6 @@ static bool trans_EMULU_mr(DisasContext *ctx, arg_EMULU_mr *a) val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs); tcg_gen_mulu2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15], cpu_regs[a->rd], val); - tcg_temp_free(mem); return true; } @@ -1381,8 +1344,6 @@ static bool trans_SHLL_rr(DisasContext *ctx, arg_SHLL_rr *a) gen_set_label(done); tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]); tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]); - tcg_temp_free(count); - tcg_temp_free(tmp); return true; } @@ -1436,7 +1397,6 @@ static inline void shiftr_reg(uint32_t rd, uint32_t rs, unsigned int alith) tcg_gen_movi_i32(cpu_psw_o, 0); tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]); tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]); - tcg_temp_free(count); } /* shar #imm:5, rd */ @@ -1480,7 +1440,6 @@ static bool trans_ROLC(DisasContext *ctx, arg_ROLC *a) tcg_gen_mov_i32(cpu_psw_c, tmp); tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]); tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]); - tcg_temp_free(tmp); return true; } @@ -1570,7 +1529,6 @@ static bool trans_REVW(DisasContext *ctx, arg_REVW *a) tcg_gen_shri_i32(cpu_regs[a->rd], cpu_regs[a->rs], 8); tcg_gen_andi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 0x00ff00ff); tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], tmp); - tcg_temp_free(tmp); return true; } @@ -1592,7 +1550,6 @@ static void rx_bcnd_main(DisasContext *ctx, int cd, int dst) gen_set_label(t); gen_goto_tb(ctx, 1, ctx->pc + dst); gen_set_label(done); - tcg_temp_free(dc.temp); break; case 14: /* always true case */ @@ -1642,7 +1599,6 @@ static inline void rx_save_pc(DisasContext *ctx) { TCGv pc = tcg_const_i32(ctx->base.pc_next); push(pc); - tcg_temp_free(pc); } /* jmp rs */ @@ -1726,7 +1682,6 @@ static bool trans_SMOVB(DisasContext *ctx, arg_SMOVB *a) do { \ TCGv size = tcg_const_i32(a->sz); \ gen_helper_##op(cpu_env, size); \ - tcg_temp_free(size); \ } while (0) /* suntile.<bwl> */ @@ -1767,8 +1722,6 @@ static void rx_mul64hi(TCGv_i64 ret, int rs, int rs2) tcg_gen_sari_i64(tmp1, tmp1, 16); tcg_gen_mul_i64(ret, tmp0, tmp1); tcg_gen_shli_i64(ret, ret, 16); - tcg_temp_free_i64(tmp0); - tcg_temp_free_i64(tmp1); } static void rx_mul64lo(TCGv_i64 ret, int rs, int rs2) @@ -1782,8 +1735,6 @@ static void rx_mul64lo(TCGv_i64 ret, int rs, int rs2) tcg_gen_ext16s_i64(tmp1, tmp1); tcg_gen_mul_i64(ret, tmp0, tmp1); tcg_gen_shli_i64(ret, ret, 16); - 
tcg_temp_free_i64(tmp0); - tcg_temp_free_i64(tmp1); } /* mulhi rs,rs2 */ @@ -1807,7 +1758,6 @@ static bool trans_MACHI(DisasContext *ctx, arg_MACHI *a) tmp = tcg_temp_new_i64(); rx_mul64hi(tmp, a->rs, a->rs2); tcg_gen_add_i64(cpu_acc, cpu_acc, tmp); - tcg_temp_free_i64(tmp); return true; } @@ -1818,7 +1768,6 @@ static bool trans_MACLO(DisasContext *ctx, arg_MACLO *a) tmp = tcg_temp_new_i64(); rx_mul64lo(tmp, a->rs, a->rs2); tcg_gen_add_i64(cpu_acc, cpu_acc, tmp); - tcg_temp_free_i64(tmp); return true; } @@ -1836,7 +1785,6 @@ static bool trans_MVFACMI(DisasContext *ctx, arg_MVFACMI *a) rd64 = tcg_temp_new_i64(); tcg_gen_extract_i64(rd64, cpu_acc, 16, 32); tcg_gen_extrl_i64_i32(cpu_regs[a->rd], rd64); - tcg_temp_free_i64(rd64); return true; } @@ -1847,7 +1795,6 @@ static bool trans_MVTACHI(DisasContext *ctx, arg_MVTACHI *a) rs64 = tcg_temp_new_i64(); tcg_gen_extu_i32_i64(rs64, cpu_regs[a->rs]); tcg_gen_deposit_i64(cpu_acc, cpu_acc, rs64, 32, 32); - tcg_temp_free_i64(rs64); return true; } @@ -1858,7 +1805,6 @@ static bool trans_MVTACLO(DisasContext *ctx, arg_MVTACLO *a) rs64 = tcg_temp_new_i64(); tcg_gen_extu_i32_i64(rs64, cpu_regs[a->rs]); tcg_gen_deposit_i64(cpu_acc, cpu_acc, rs64, 0, 32); - tcg_temp_free_i64(rs64); return true; } @@ -1867,7 +1813,6 @@ static bool trans_RACW(DisasContext *ctx, arg_RACW *a) { TCGv imm = tcg_const_i32(a->imm + 1); gen_helper_racw(cpu_env, imm); - tcg_temp_free(imm); return true; } @@ -1883,8 +1828,6 @@ static bool trans_SAT(DisasContext *ctx, arg_SAT *a) tcg_gen_xori_i32(tmp, tmp, 0x80000000); tcg_gen_movcond_i32(TCG_COND_LT, cpu_regs[a->rd], cpu_psw_o, z, tmp, cpu_regs[a->rd]); - tcg_temp_free(tmp); - tcg_temp_free(z); return true; } @@ -1903,7 +1846,6 @@ static bool trans_SATR(DisasContext *ctx, arg_SATR *a) TCGv imm = tcg_const_i32(li(ctx, 0)); \ gen_helper_##op(cpu_regs[a->rd], cpu_env, \ cpu_regs[a->rd], imm); \ - tcg_temp_free(imm); \ return true; \ } \ static bool cat3(trans_, name, _mr)(DisasContext *ctx, \ @@ -1914,7 +1856,6 @@ static bool trans_SATR(DisasContext *ctx, arg_SATR *a) val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs); \ gen_helper_##op(cpu_regs[a->rd], cpu_env, \ cpu_regs[a->rd], val); \ - tcg_temp_free(mem); \ return true; \ } @@ -1925,7 +1866,6 @@ static bool trans_SATR(DisasContext *ctx, arg_SATR *a) mem = tcg_temp_new(); \ val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs); \ gen_helper_##op(cpu_regs[a->rd], cpu_env, val); \ - tcg_temp_free(mem); \ return true; \ } @@ -1939,7 +1879,6 @@ static bool trans_FCMP_ir(DisasContext *ctx, arg_FCMP_ir * a) { TCGv imm = tcg_const_i32(li(ctx, 0)); gen_helper_fcmp(cpu_env, cpu_regs[a->rd], imm); - tcg_temp_free(imm); return true; } @@ -1951,7 +1890,6 @@ static bool trans_FCMP_mr(DisasContext *ctx, arg_FCMP_mr *a) mem = tcg_temp_new(); val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs); gen_helper_fcmp(cpu_env, cpu_regs[a->rd], val); - tcg_temp_free(mem); return true; } @@ -1966,7 +1904,6 @@ static bool trans_ITOF(DisasContext *ctx, arg_ITOF * a) mem = tcg_temp_new(); val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs); gen_helper_itof(cpu_regs[a->rd], cpu_env, val); - tcg_temp_free(mem); return true; } @@ -1977,7 +1914,6 @@ static void rx_bsetm(TCGv mem, TCGv mask) rx_gen_ld(MO_8, val, mem); tcg_gen_or_i32(val, val, mask); rx_gen_st(MO_8, val, mem); - tcg_temp_free(val); } static void rx_bclrm(TCGv mem, TCGv mask) @@ -1987,7 +1923,6 @@ static void rx_bclrm(TCGv mem, TCGv mask) rx_gen_ld(MO_8, val, mem); tcg_gen_andc_i32(val, val, mask); rx_gen_st(MO_8, val, mem); - tcg_temp_free(val); } 
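 /* Bit test on a memory operand: PSW C and Z are derived from (byte & mask). */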
static void rx_btstm(TCGv mem, TCGv mask) @@ -1998,7 +1933,6 @@ static void rx_btstm(TCGv mem, TCGv mask) tcg_gen_and_i32(val, val, mask); tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, val, 0); tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c); - tcg_temp_free(val); } static void rx_bnotm(TCGv mem, TCGv mask) @@ -2008,7 +1942,6 @@ static void rx_bnotm(TCGv mem, TCGv mask) rx_gen_ld(MO_8, val, mem); tcg_gen_xor_i32(val, val, mask); rx_gen_st(MO_8, val, mem); - tcg_temp_free(val); } static void rx_bsetr(TCGv reg, TCGv mask) @@ -2028,7 +1961,6 @@ static inline void rx_btstr(TCGv reg, TCGv mask) tcg_gen_and_i32(t0, reg, mask); tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, t0, 0); tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c); - tcg_temp_free(t0); } static inline void rx_bnotr(TCGv reg, TCGv mask) @@ -2045,8 +1977,6 @@ static inline void rx_bnotr(TCGv reg, TCGv mask) mask = tcg_const_i32(1 << a->imm); \ addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs); \ cat3(rx_, op, m)(addr, mask); \ - tcg_temp_free(mask); \ - tcg_temp_free(mem); \ return true; \ } \ static bool cat3(trans_, name, _ir)(DisasContext *ctx, \ @@ -2055,7 +1985,6 @@ static inline void rx_bnotr(TCGv reg, TCGv mask) TCGv mask; \ mask = tcg_const_i32(1 << a->imm); \ cat3(rx_, op, r)(cpu_regs[a->rd], mask); \ - tcg_temp_free(mask); \ return true; \ } \ static bool cat3(trans_, name, _rr)(DisasContext *ctx, \ @@ -2067,8 +1996,6 @@ static inline void rx_bnotr(TCGv reg, TCGv mask) tcg_gen_andi_i32(b, cpu_regs[a->rs], 31); \ tcg_gen_shl_i32(mask, mask, b); \ cat3(rx_, op, r)(cpu_regs[a->rd], mask); \ - tcg_temp_free(mask); \ - tcg_temp_free(b); \ return true; \ } \ static bool cat3(trans_, name, _rm)(DisasContext *ctx, \ @@ -2082,9 +2009,6 @@ static inline void rx_bnotr(TCGv reg, TCGv mask) mem = tcg_temp_new(); \ addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs); \ cat3(rx_, op, m)(addr, mask); \ - tcg_temp_free(mem); \ - tcg_temp_free(mask); \ - tcg_temp_free(b); \ return true; \ } @@ -2103,8 +2027,6 @@ static inline void bmcnd_op(TCGv val, TCGCond cond, int pos) tcg_gen_andi_i32(val, val, ~(1 << pos)); tcg_gen_setcondi_i32(dc.cond, bit, dc.value, 0); tcg_gen_deposit_i32(val, val, bit, pos, 1); - tcg_temp_free(bit); - tcg_temp_free(dc.temp); } /* bmcnd #imm, dsp[rd] */ @@ -2117,8 +2039,6 @@ static bool trans_BMCnd_im(DisasContext *ctx, arg_BMCnd_im *a) rx_gen_ld(MO_8, val, addr); bmcnd_op(val, a->cd, a->imm); rx_gen_st(MO_8, val, addr); - tcg_temp_free(val); - tcg_temp_free(mem); return true; } @@ -2210,7 +2130,6 @@ static bool trans_MVTC_i(DisasContext *ctx, arg_MVTC_i *a) imm = tcg_const_i32(a->imm); move_to_cr(ctx, imm, a->cr); - tcg_temp_free(imm); return true; } @@ -2238,7 +2157,6 @@ static bool trans_RTFI(DisasContext *ctx, arg_RTFI *a) tcg_gen_mov_i32(psw, cpu_bpsw); gen_helper_set_psw_rte(cpu_env, psw); ctx->base.is_jmp = DISAS_EXIT; - tcg_temp_free(psw); } return true; } @@ -2253,7 +2171,6 @@ static bool trans_RTE(DisasContext *ctx, arg_RTE *a) pop(psw); gen_helper_set_psw_rte(cpu_env, psw); ctx->base.is_jmp = DISAS_EXIT; - tcg_temp_free(psw); } return true; } @@ -2276,7 +2193,6 @@ static bool trans_INT(DisasContext *ctx, arg_INT *a) vec = tcg_const_i32(a->imm); tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next); gen_helper_rxint(cpu_env, vec); - tcg_temp_free(vec); ctx->base.is_jmp = DISAS_NORETURN; return true; } diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c index 811049ea28..21a57d5eb2 100644 --- a/target/s390x/tcg/translate.c +++ b/target/s390x/tcg/translate.c @@ -5886,9 +5886,14 @@ static void in2_a2(DisasContext *s, 
DisasOps *o) } #define SPEC_in2_a2 0 +static TCGv gen_ri2(DisasContext *s) +{ + return tcg_constant_i64(s->base.pc_next + (int64_t)get_field(s, i2) * 2); +} + static void in2_ri2(DisasContext *s, DisasOps *o) { - o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(s, i2) * 2); + o->in2 = gen_ri2(s); } #define SPEC_in2_ri2 0 @@ -5976,29 +5981,29 @@ static void in2_m2_64a(DisasContext *s, DisasOps *o) static void in2_mri2_16u(DisasContext *s, DisasOps *o) { - in2_ri2(s, o); - tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s)); + o->in2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld16u(o->in2, gen_ri2(s), get_mem_index(s)); } #define SPEC_in2_mri2_16u 0 static void in2_mri2_32s(DisasContext *s, DisasOps *o) { - in2_ri2(s, o); - tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s)); + o->in2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32s(o->in2, gen_ri2(s), get_mem_index(s)); } #define SPEC_in2_mri2_32s 0 static void in2_mri2_32u(DisasContext *s, DisasOps *o) { - in2_ri2(s, o); - tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s)); + o->in2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld32u(o->in2, gen_ri2(s), get_mem_index(s)); } #define SPEC_in2_mri2_32u 0 static void in2_mri2_64(DisasContext *s, DisasOps *o) { - in2_ri2(s, o); - tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s)); + o->in2 = tcg_temp_new_i64(); + tcg_gen_qemu_ld64(o->in2, gen_ri2(s), get_mem_index(s)); } #define SPEC_in2_mri2_64 0 diff --git a/target/sh4/translate.c b/target/sh4/translate.c index 23563024e0..ad6de41712 100644 --- a/target/sh4/translate.c +++ b/target/sh4/translate.c @@ -196,7 +196,6 @@ static void gen_read_sr(TCGv dst) tcg_gen_or_i32(dst, dst, t0); tcg_gen_shli_i32(t0, cpu_sr_t, SR_T); tcg_gen_or_i32(dst, cpu_sr, t0); - tcg_temp_free_i32(t0); } static void gen_write_sr(TCGv src) @@ -499,7 +498,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4); tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL | UNALIGN(ctx)); - tcg_temp_free(addr); } return; case 0x5000: /* mov.l @(disp,Rm),Rn */ @@ -508,7 +506,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4); tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL | UNALIGN(ctx)); - tcg_temp_free(addr); } return; case 0xe000: /* mov #imm,Rn */ @@ -531,14 +528,12 @@ static void _decode_opc(DisasContext * ctx) { TCGv addr = tcg_const_i32(ctx->base.pc_next + 4 + B7_0 * 2); tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW); - tcg_temp_free(addr); } return; case 0xd000: /* mov.l @(disp,PC),Rn */ { TCGv addr = tcg_const_i32((ctx->base.pc_next + 4 + B7_0 * 4) & ~3); tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL); - tcg_temp_free(addr); } return; case 0x7000: /* add #imm,Rn */ @@ -590,7 +585,6 @@ static void _decode_opc(DisasContext * ctx) /* might cause re-execution */ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB); tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */ - tcg_temp_free(addr); } return; case 0x2005: /* mov.w Rm,@-Rn */ @@ -600,7 +594,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW | UNALIGN(ctx)); tcg_gen_mov_i32(REG(B11_8), addr); - tcg_temp_free(addr); } return; case 0x2006: /* mov.l Rm,@-Rn */ @@ -610,7 +603,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL | UNALIGN(ctx)); tcg_gen_mov_i32(REG(B11_8), addr); - tcg_temp_free(addr); } return; case 0x6004: /* mov.b @Rm+,Rn */ @@ -635,7 +627,6 @@ static void 
_decode_opc(DisasContext * ctx) TCGv addr = tcg_temp_new(); tcg_gen_add_i32(addr, REG(B11_8), REG(0)); tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB); - tcg_temp_free(addr); } return; case 0x0005: /* mov.w Rm,@(R0,Rn) */ @@ -644,7 +635,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_add_i32(addr, REG(B11_8), REG(0)); tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW | UNALIGN(ctx)); - tcg_temp_free(addr); } return; case 0x0006: /* mov.l Rm,@(R0,Rn) */ @@ -653,7 +643,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_add_i32(addr, REG(B11_8), REG(0)); tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL | UNALIGN(ctx)); - tcg_temp_free(addr); } return; case 0x000c: /* mov.b @(R0,Rm),Rn */ @@ -661,7 +650,6 @@ static void _decode_opc(DisasContext * ctx) TCGv addr = tcg_temp_new(); tcg_gen_add_i32(addr, REG(B7_4), REG(0)); tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB); - tcg_temp_free(addr); } return; case 0x000d: /* mov.w @(R0,Rm),Rn */ @@ -670,7 +658,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_add_i32(addr, REG(B7_4), REG(0)); tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW | UNALIGN(ctx)); - tcg_temp_free(addr); } return; case 0x000e: /* mov.l @(R0,Rm),Rn */ @@ -679,7 +666,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_add_i32(addr, REG(B7_4), REG(0)); tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL | UNALIGN(ctx)); - tcg_temp_free(addr); } return; case 0x6008: /* swap.b Rm,Rn */ @@ -687,7 +673,6 @@ static void _decode_opc(DisasContext * ctx) TCGv low = tcg_temp_new(); tcg_gen_bswap16_i32(low, REG(B7_4), 0); tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16); - tcg_temp_free(low); } return; case 0x6009: /* swap.w Rm,Rn */ @@ -701,8 +686,6 @@ static void _decode_opc(DisasContext * ctx) low = tcg_temp_new(); tcg_gen_shri_i32(low, REG(B11_8), 16); tcg_gen_or_i32(REG(B11_8), high, low); - tcg_temp_free(low); - tcg_temp_free(high); } return; case 0x300c: /* add Rm,Rn */ @@ -716,8 +699,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0); tcg_gen_add2_i32(REG(B11_8), cpu_sr_t, REG(B11_8), t0, t1, cpu_sr_t); - tcg_temp_free(t0); - tcg_temp_free(t1); } return; case 0x300f: /* addv Rm,Rn */ @@ -730,11 +711,8 @@ static void _decode_opc(DisasContext * ctx) t2 = tcg_temp_new(); tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8)); tcg_gen_andc_i32(cpu_sr_t, t1, t2); - tcg_temp_free(t2); tcg_gen_shri_i32(cpu_sr_t, cpu_sr_t, 31); - tcg_temp_free(t1); tcg_gen_mov_i32(REG(B7_4), t0); - tcg_temp_free(t0); } return; case 0x2009: /* and Rm,Rn */ @@ -764,8 +742,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_andc_i32(cmp1, cmp1, cmp2); tcg_gen_andi_i32(cmp1, cmp1, 0x80808080); tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_t, cmp1, 0); - tcg_temp_free(cmp2); - tcg_temp_free(cmp1); } return; case 0x2007: /* div0s Rm,Rn */ @@ -801,11 +777,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_xor_i32(t1, t1, t0); tcg_gen_xori_i32(cpu_sr_t, t1, 1); tcg_gen_xor_i32(cpu_sr_q, cpu_sr_m, t1); - - tcg_temp_free(zero); - tcg_temp_free(t2); - tcg_temp_free(t1); - tcg_temp_free(t0); } return; case 0x300d: /* dmuls.l Rm,Rn */ @@ -834,8 +805,6 @@ static void _decode_opc(DisasContext * ctx) arg1 = tcg_temp_new(); tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL); gen_helper_macl(cpu_env, arg0, arg1); - tcg_temp_free(arg1); - tcg_temp_free(arg0); tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4); tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); } @@ -848,8 +817,6 @@ static void 
_decode_opc(DisasContext * ctx) arg1 = tcg_temp_new(); tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL); gen_helper_macw(cpu_env, arg0, arg1); - tcg_temp_free(arg1); - tcg_temp_free(arg0); tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2); tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2); } @@ -865,8 +832,6 @@ static void _decode_opc(DisasContext * ctx) arg1 = tcg_temp_new(); tcg_gen_ext16s_i32(arg1, REG(B11_8)); tcg_gen_mul_i32(cpu_macl, arg0, arg1); - tcg_temp_free(arg1); - tcg_temp_free(arg0); } return; case 0x200e: /* mulu.w Rm,Rn */ @@ -877,8 +842,6 @@ static void _decode_opc(DisasContext * ctx) arg1 = tcg_temp_new(); tcg_gen_ext16u_i32(arg1, REG(B11_8)); tcg_gen_mul_i32(cpu_macl, arg0, arg1); - tcg_temp_free(arg1); - tcg_temp_free(arg0); } return; case 0x600b: /* neg Rm,Rn */ @@ -892,7 +855,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t, t0, t0, REG(B11_8), cpu_sr_t); tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1); - tcg_temp_free(t0); } return; case 0x6007: /* not Rm,Rn */ @@ -921,10 +883,6 @@ static void _decode_opc(DisasContext * ctx) /* select between the two cases */ tcg_gen_movi_i32(t0, 0); tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2); - - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(t2); } return; case 0x400d: /* shld Rm,Rn */ @@ -947,10 +905,6 @@ static void _decode_opc(DisasContext * ctx) /* select between the two cases */ tcg_gen_movi_i32(t0, 0); tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2); - - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_temp_free(t2); } return; case 0x3008: /* sub Rm,Rn */ @@ -965,8 +919,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t, REG(B11_8), t0, t1, cpu_sr_t); tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1); - tcg_temp_free(t0); - tcg_temp_free(t1); } return; case 0x300b: /* subv Rm,Rn */ @@ -979,11 +931,8 @@ static void _decode_opc(DisasContext * ctx) t2 = tcg_temp_new(); tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4)); tcg_gen_and_i32(t1, t1, t2); - tcg_temp_free(t2); tcg_gen_shri_i32(cpu_sr_t, t1, 31); - tcg_temp_free(t1); tcg_gen_mov_i32(REG(B11_8), t0); - tcg_temp_free(t0); } return; case 0x2008: /* tst Rm,Rn */ @@ -991,7 +940,6 @@ static void _decode_opc(DisasContext * ctx) TCGv val = tcg_temp_new(); tcg_gen_and_i32(val, REG(B7_4), REG(B11_8)); tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0); - tcg_temp_free(val); } return; case 0x200a: /* xor Rm,Rn */ @@ -1014,7 +962,6 @@ static void _decode_opc(DisasContext * ctx) TCGv_i64 fp = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp, XHACK(B7_4)); tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx, MO_TEUQ); - tcg_temp_free_i64(fp); } else { tcg_gen_qemu_st_i32(FREG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL); } @@ -1025,7 +972,6 @@ static void _decode_opc(DisasContext * ctx) TCGv_i64 fp = tcg_temp_new_i64(); tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEUQ); gen_store_fpr64(ctx, fp, XHACK(B11_8)); - tcg_temp_free_i64(fp); } else { tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL); } @@ -1036,7 +982,6 @@ static void _decode_opc(DisasContext * ctx) TCGv_i64 fp = tcg_temp_new_i64(); tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEUQ); gen_store_fpr64(ctx, fp, XHACK(B11_8)); - tcg_temp_free_i64(fp); tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8); } else { tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL); @@ -1052,13 +997,11 @@ static void _decode_opc(DisasContext * ctx) gen_load_fpr64(ctx, fp, XHACK(B7_4)); tcg_gen_subi_i32(addr, REG(B11_8), 8); 
tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEUQ); - tcg_temp_free_i64(fp); } else { tcg_gen_subi_i32(addr, REG(B11_8), 4); tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL); } tcg_gen_mov_i32(REG(B11_8), addr); - tcg_temp_free(addr); } return; case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */ @@ -1070,11 +1013,9 @@ static void _decode_opc(DisasContext * ctx) TCGv_i64 fp = tcg_temp_new_i64(); tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx, MO_TEUQ); gen_store_fpr64(ctx, fp, XHACK(B11_8)); - tcg_temp_free_i64(fp); } else { tcg_gen_qemu_ld_i32(FREG(B11_8), addr, ctx->memidx, MO_TEUL); } - tcg_temp_free(addr); } return; case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */ @@ -1086,11 +1027,9 @@ static void _decode_opc(DisasContext * ctx) TCGv_i64 fp = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp, XHACK(B7_4)); tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEUQ); - tcg_temp_free_i64(fp); } else { tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL); } - tcg_temp_free(addr); } return; case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */ @@ -1132,8 +1071,6 @@ static void _decode_opc(DisasContext * ctx) return; } gen_store_fpr64(ctx, fp0, B11_8); - tcg_temp_free_i64(fp0); - tcg_temp_free_i64(fp1); } else { switch (ctx->opcode & 0xf00f) { case 0xf000: /* fadd Rm,Rn */ @@ -1185,8 +1122,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB); tcg_gen_andi_i32(val, val, B7_0); tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB); - tcg_temp_free(val); - tcg_temp_free(addr); } return; case 0x8b00: /* bf label */ @@ -1217,7 +1152,6 @@ static void _decode_opc(DisasContext * ctx) TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, cpu_gbr, B7_0); tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB); - tcg_temp_free(addr); } return; case 0xc500: /* mov.w @(disp,GBR),R0 */ @@ -1225,7 +1159,6 @@ static void _decode_opc(DisasContext * ctx) TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2); tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW); - tcg_temp_free(addr); } return; case 0xc600: /* mov.l @(disp,GBR),R0 */ @@ -1233,7 +1166,6 @@ static void _decode_opc(DisasContext * ctx) TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4); tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL); - tcg_temp_free(addr); } return; case 0xc000: /* mov.b R0,@(disp,GBR) */ @@ -1241,7 +1173,6 @@ static void _decode_opc(DisasContext * ctx) TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, cpu_gbr, B7_0); tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB); - tcg_temp_free(addr); } return; case 0xc100: /* mov.w R0,@(disp,GBR) */ @@ -1249,7 +1180,6 @@ static void _decode_opc(DisasContext * ctx) TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2); tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW); - tcg_temp_free(addr); } return; case 0xc200: /* mov.l R0,@(disp,GBR) */ @@ -1257,7 +1187,6 @@ static void _decode_opc(DisasContext * ctx) TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4); tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL); - tcg_temp_free(addr); } return; case 0x8000: /* mov.b R0,@(disp,Rn) */ @@ -1265,7 +1194,6 @@ static void _decode_opc(DisasContext * ctx) TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, REG(B7_4), B3_0); tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB); - tcg_temp_free(addr); } return; case 0x8100: /* mov.w R0,@(disp,Rn) */ @@ -1274,7 +1202,6 @@ static void _decode_opc(DisasContext * ctx) 
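/*
 * The @(disp,GBR) and @(disp,Rn) move hunks above and below share one shape:
 * the displacement is scaled by the operand size (x1/x2/x4 in the .b/.w/.l
 * GBR forms), added to the base register in a scratch temp, and the access
 * is issued with a MemOp that encodes width and signedness (MO_SB/MO_TESW/
 * MO_TESL for sign-extending loads, MO_UB/MO_TEUW/MO_TEUL for stores).
 * A condensed sketch of the load side, again using only names from these
 * hunks and meant as illustration rather than compilable code:
 */
{
    /* mov.w @(disp,GBR),R0: addr = GBR + disp * 2, sign-extended 16-bit load */
    TCGv addr = tcg_temp_new();
    tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
    tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
}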
tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2); tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW | UNALIGN(ctx)); - tcg_temp_free(addr); } return; case 0x8400: /* mov.b @(disp,Rn),R0 */ @@ -1282,7 +1209,6 @@ static void _decode_opc(DisasContext * ctx) TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, REG(B7_4), B3_0); tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB); - tcg_temp_free(addr); } return; case 0x8500: /* mov.w @(disp,Rn),R0 */ @@ -1291,7 +1217,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2); tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW | UNALIGN(ctx)); - tcg_temp_free(addr); } return; case 0xc700: /* mova @(disp,PC),R0 */ @@ -1310,8 +1235,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB); tcg_gen_ori_i32(val, val, B7_0); tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB); - tcg_temp_free(val); - tcg_temp_free(addr); } return; case 0xc300: /* trapa #imm */ @@ -1321,7 +1244,6 @@ static void _decode_opc(DisasContext * ctx) gen_save_cpu_state(ctx, true); imm = tcg_const_i32(B7_0); gen_helper_trapa(cpu_env, imm); - tcg_temp_free(imm); ctx->base.is_jmp = DISAS_NORETURN; } return; @@ -1330,7 +1252,6 @@ static void _decode_opc(DisasContext * ctx) TCGv val = tcg_temp_new(); tcg_gen_andi_i32(val, REG(0), B7_0); tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0); - tcg_temp_free(val); } return; case 0xcc00: /* tst.b #imm,@(R0,GBR) */ @@ -1340,7 +1261,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB); tcg_gen_andi_i32(val, val, B7_0); tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0); - tcg_temp_free(val); } return; case 0xca00: /* xor #imm,R0 */ @@ -1355,8 +1275,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB); tcg_gen_xori_i32(val, val, B7_0); tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB); - tcg_temp_free(val); - tcg_temp_free(addr); } return; } @@ -1382,7 +1300,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_subi_i32(addr, REG(B11_8), 4); tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx, MO_TEUL); tcg_gen_mov_i32(REG(B11_8), addr); - tcg_temp_free(addr); } return; } @@ -1430,7 +1347,6 @@ static void _decode_opc(DisasContext * ctx) TCGv val = tcg_temp_new(); tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3); gen_write_sr(val); - tcg_temp_free(val); ctx->base.is_jmp = DISAS_STOP; } return; @@ -1441,7 +1357,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TESL); tcg_gen_andi_i32(val, val, 0x700083f3); gen_write_sr(val); - tcg_temp_free(val); tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); ctx->base.is_jmp = DISAS_STOP; } @@ -1459,8 +1374,6 @@ static void _decode_opc(DisasContext * ctx) gen_read_sr(val); tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL); tcg_gen_mov_i32(REG(B11_8), addr); - tcg_temp_free(val); - tcg_temp_free(addr); } return; #define LD(reg,ldnum,ldpnum,prechk) \ @@ -1485,7 +1398,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_subi_i32(addr, REG(B11_8), 4); \ tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \ tcg_gen_mov_i32(REG(B11_8), addr); \ - tcg_temp_free(addr); \ } \ return; #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \ @@ -1514,7 +1426,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx, MO_TESL); tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); gen_helper_ld_fpscr(cpu_env, addr); - tcg_temp_free(addr); ctx->base.is_jmp 
= DISAS_STOP; } return; @@ -1532,8 +1443,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_subi_i32(addr, REG(B11_8), 4); tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL); tcg_gen_mov_i32(REG(B11_8), addr); - tcg_temp_free(addr); - tcg_temp_free(val); } return; case 0x00c3: /* movca.l R0,@Rm */ @@ -1542,7 +1451,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TEUL); gen_helper_movcal(cpu_env, REG(B11_8), val); tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL); - tcg_temp_free(val); } ctx->has_movcal = 1; return; @@ -1586,7 +1494,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_atomic_cmpxchg_i32(tmp, REG(B11_8), cpu_lock_value, REG(0), ctx->memidx, MO_TEUL); tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, tmp, cpu_lock_value); - tcg_temp_free(tmp); } else { tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_lock_addr, -1, fail); tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL); @@ -1617,7 +1524,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL); tcg_gen_mov_i32(cpu_lock_value, REG(0)); tcg_gen_mov_i32(cpu_lock_addr, tmp); - tcg_temp_free(tmp); } else { tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL); tcg_gen_movi_i32(cpu_lock_addr, 0); @@ -1653,7 +1559,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31); tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1); tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp); - tcg_temp_free(tmp); } return; case 0x4025: /* rotcr Rn */ @@ -1663,7 +1568,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1); tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1); tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp); - tcg_temp_free(tmp); } return; case 0x4004: /* rotl Rn */ @@ -1711,7 +1615,6 @@ static void _decode_opc(DisasContext * ctx) tcg_gen_atomic_fetch_or_i32(val, REG(B11_8), val, ctx->memidx, MO_UB); tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0); - tcg_temp_free(val); } return; case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */ @@ -1732,7 +1635,6 @@ static void _decode_opc(DisasContext * ctx) fp = tcg_temp_new_i64(); gen_helper_float_DT(fp, cpu_env, cpu_fpul); gen_store_fpr64(ctx, fp, B11_8); - tcg_temp_free_i64(fp); } else { gen_helper_float_FT(FREG(B11_8), cpu_env, cpu_fpul); @@ -1748,7 +1650,6 @@ static void _decode_opc(DisasContext * ctx) fp = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp, B11_8); gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp); - tcg_temp_free_i64(fp); } else { gen_helper_ftrc_FT(cpu_fpul, cpu_env, FREG(B11_8)); @@ -1772,7 +1673,6 @@ static void _decode_opc(DisasContext * ctx) gen_load_fpr64(ctx, fp, B11_8); gen_helper_fsqrt_DT(fp, cpu_env, fp); gen_store_fpr64(ctx, fp, B11_8); - tcg_temp_free_i64(fp); } else { gen_helper_fsqrt_FT(FREG(B11_8), cpu_env, FREG(B11_8)); } @@ -1798,7 +1698,6 @@ static void _decode_opc(DisasContext * ctx) TCGv_i64 fp = tcg_temp_new_i64(); gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul); gen_store_fpr64(ctx, fp, B11_8); - tcg_temp_free_i64(fp); } return; case 0xf0bd: /* fcnvds DRn,FPUL */ @@ -1807,7 +1706,6 @@ static void _decode_opc(DisasContext * ctx) TCGv_i64 fp = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp, B11_8); gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp); - tcg_temp_free_i64(fp); } return; case 0xf0ed: /* fipr FVm,FVn */ @@ -1817,8 +1715,6 @@ static void _decode_opc(DisasContext * ctx) TCGv m = tcg_const_i32((ctx->opcode >> 8) & 3); TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3); gen_helper_fipr(cpu_env, m, 
n); - tcg_temp_free(m); - tcg_temp_free(n); return; } break; @@ -1831,7 +1727,6 @@ static void _decode_opc(DisasContext * ctx) } TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3); gen_helper_ftrv(cpu_env, n); - tcg_temp_free(n); return; } break; @@ -2220,11 +2115,6 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env) g_assert_not_reached(); } - /* If op_src is not a valid register, then op_arg was a constant. */ - if (op_src < 0 && op_arg) { - tcg_temp_free_i32(op_arg); - } - /* The entire region has been translated. */ ctx->envflags &= ~TB_FLAG_GUSA_MASK; ctx->base.pc_next = pc_end; diff --git a/target/sparc/mmu_helper.c b/target/sparc/mmu_helper.c index 6e7f46f847..453498c670 100644 --- a/target/sparc/mmu_helper.c +++ b/target/sparc/mmu_helper.c @@ -64,10 +64,9 @@ static const int perm_table[2][8] = { } }; -static int get_physical_address(CPUSPARCState *env, hwaddr *physical, - int *prot, int *access_index, MemTxAttrs *attrs, - target_ulong address, int rw, int mmu_idx, - target_ulong *page_size) +static int get_physical_address(CPUSPARCState *env, CPUTLBEntryFull *full, + int *access_index, target_ulong address, + int rw, int mmu_idx) { int access_perms = 0; hwaddr pde_ptr; @@ -80,20 +79,20 @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical, is_user = mmu_idx == MMU_USER_IDX; if (mmu_idx == MMU_PHYS_IDX) { - *page_size = TARGET_PAGE_SIZE; + full->lg_page_size = TARGET_PAGE_BITS; /* Boot mode: instruction fetches are taken from PROM */ if (rw == 2 && (env->mmuregs[0] & env->def.mmu_bm)) { - *physical = env->prom_addr | (address & 0x7ffffULL); - *prot = PAGE_READ | PAGE_EXEC; + full->phys_addr = env->prom_addr | (address & 0x7ffffULL); + full->prot = PAGE_READ | PAGE_EXEC; return 0; } - *physical = address; - *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + full->phys_addr = address; + full->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; return 0; } *access_index = ((rw & 1) << 2) | (rw & 2) | (is_user ? 0 : 1); - *physical = 0xffffffffffff0000ULL; + full->phys_addr = 0xffffffffffff0000ULL; /* SPARC reference MMU table walk: Context table->L1->L2->PTE */ /* Context base + context number */ @@ -157,16 +156,17 @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical, case 2: /* L3 PTE */ page_offset = 0; } - *page_size = TARGET_PAGE_SIZE; + full->lg_page_size = TARGET_PAGE_BITS; break; case 2: /* L2 PTE */ page_offset = address & 0x3f000; - *page_size = 0x40000; + full->lg_page_size = 18; } break; case 2: /* L1 PTE */ page_offset = address & 0xfff000; - *page_size = 0x1000000; + full->lg_page_size = 24; + break; } } @@ -188,16 +188,16 @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical, } /* the page can be put in the TLB */ - *prot = perm_table[is_user][access_perms]; + full->prot = perm_table[is_user][access_perms]; if (!(pde & PG_MODIFIED_MASK)) { /* only set write access if already dirty... 
otherwise wait for dirty access */ - *prot &= ~PAGE_WRITE; + full->prot &= ~PAGE_WRITE; } /* Even if large ptes, we map only one 4KB page in the cache to avoid filling it too fast */ - *physical = ((hwaddr)(pde & PTE_ADDR_MASK) << 4) + page_offset; + full->phys_addr = ((hwaddr)(pde & PTE_ADDR_MASK) << 4) + page_offset; return error_code; } @@ -208,11 +208,9 @@ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, { SPARCCPU *cpu = SPARC_CPU(cs); CPUSPARCState *env = &cpu->env; - hwaddr paddr; + CPUTLBEntryFull full = {}; target_ulong vaddr; - target_ulong page_size; - int error_code = 0, prot, access_index; - MemTxAttrs attrs = {}; + int error_code = 0, access_index; /* * TODO: If we ever need tlb_vaddr_to_host for this target, @@ -223,16 +221,15 @@ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, assert(!probe); address &= TARGET_PAGE_MASK; - error_code = get_physical_address(env, &paddr, &prot, &access_index, &attrs, - address, access_type, - mmu_idx, &page_size); + error_code = get_physical_address(env, &full, &access_index, + address, access_type, mmu_idx); vaddr = address; if (likely(error_code == 0)) { qemu_log_mask(CPU_LOG_MMU, "Translate at %" VADDR_PRIx " -> " HWADDR_FMT_plx ", vaddr " TARGET_FMT_lx "\n", - address, paddr, vaddr); - tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size); + address, full.phys_addr, vaddr); + tlb_set_page_full(cs, mmu_idx, vaddr, &full); return true; } @@ -247,8 +244,8 @@ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, permissions. If no mapping is available, redirect accesses to neverland. Fake/overridden mappings will be flushed when switching to normal mode. */ - prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; - tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, TARGET_PAGE_SIZE); + full.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + tlb_set_page_full(cs, mmu_idx, vaddr, &full); return true; } else { if (access_type == MMU_INST_FETCH) { @@ -545,8 +542,7 @@ static uint64_t build_sfsr(CPUSPARCState *env, int mmu_idx, int rw) return sfsr; } -static int get_physical_address_data(CPUSPARCState *env, hwaddr *physical, - int *prot, MemTxAttrs *attrs, +static int get_physical_address_data(CPUSPARCState *env, CPUTLBEntryFull *full, target_ulong address, int rw, int mmu_idx) { CPUState *cs = env_cpu(env); @@ -579,11 +575,12 @@ static int get_physical_address_data(CPUSPARCState *env, hwaddr *physical, for (i = 0; i < 64; i++) { /* ctx match, vaddr match, valid? */ - if (ultrasparc_tag_match(&env->dtlb[i], address, context, physical)) { + if (ultrasparc_tag_match(&env->dtlb[i], address, context, + &full->phys_addr)) { int do_fault = 0; if (TTE_IS_IE(env->dtlb[i].tte)) { - attrs->byte_swap = true; + full->attrs.byte_swap = true; } /* access ok? 
*/ @@ -616,9 +613,9 @@ static int get_physical_address_data(CPUSPARCState *env, hwaddr *physical, } if (!do_fault) { - *prot = PAGE_READ; + full->prot = PAGE_READ; if (TTE_IS_W_OK(env->dtlb[i].tte)) { - *prot |= PAGE_WRITE; + full->prot |= PAGE_WRITE; } TTE_SET_USED(env->dtlb[i].tte); @@ -645,8 +642,7 @@ static int get_physical_address_data(CPUSPARCState *env, hwaddr *physical, return 1; } -static int get_physical_address_code(CPUSPARCState *env, hwaddr *physical, - int *prot, MemTxAttrs *attrs, +static int get_physical_address_code(CPUSPARCState *env, CPUTLBEntryFull *full, target_ulong address, int mmu_idx) { CPUState *cs = env_cpu(env); @@ -681,7 +677,7 @@ static int get_physical_address_code(CPUSPARCState *env, hwaddr *physical, for (i = 0; i < 64; i++) { /* ctx match, vaddr match, valid? */ if (ultrasparc_tag_match(&env->itlb[i], - address, context, physical)) { + address, context, &full->phys_addr)) { /* access ok? */ if (TTE_IS_PRIV(env->itlb[i].tte) && is_user) { /* Fault status register */ @@ -708,7 +704,7 @@ static int get_physical_address_code(CPUSPARCState *env, hwaddr *physical, return 1; } - *prot = PAGE_EXEC; + full->prot = PAGE_EXEC; TTE_SET_USED(env->itlb[i].tte); return 0; } @@ -722,14 +718,13 @@ static int get_physical_address_code(CPUSPARCState *env, hwaddr *physical, return 1; } -static int get_physical_address(CPUSPARCState *env, hwaddr *physical, - int *prot, int *access_index, MemTxAttrs *attrs, - target_ulong address, int rw, int mmu_idx, - target_ulong *page_size) +static int get_physical_address(CPUSPARCState *env, CPUTLBEntryFull *full, + int *access_index, target_ulong address, + int rw, int mmu_idx) { /* ??? We treat everything as a small page, then explicitly flush everything when an entry is evicted. */ - *page_size = TARGET_PAGE_SIZE; + full->lg_page_size = TARGET_PAGE_BITS; /* safety net to catch wrong softmmu index use from dynamic code */ if (env->tl > 0 && mmu_idx != MMU_NUCLEUS_IDX) { @@ -747,17 +742,15 @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical, } if (mmu_idx == MMU_PHYS_IDX) { - *physical = ultrasparc_truncate_physical(address); - *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + full->phys_addr = ultrasparc_truncate_physical(address); + full->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; return 0; } if (rw == 2) { - return get_physical_address_code(env, physical, prot, attrs, address, - mmu_idx); + return get_physical_address_code(env, full, address, mmu_idx); } else { - return get_physical_address_data(env, physical, prot, attrs, address, - rw, mmu_idx); + return get_physical_address_data(env, full, address, rw, mmu_idx); } } @@ -768,25 +761,17 @@ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, { SPARCCPU *cpu = SPARC_CPU(cs); CPUSPARCState *env = &cpu->env; - target_ulong vaddr; - hwaddr paddr; - target_ulong page_size; - MemTxAttrs attrs = {}; - int error_code = 0, prot, access_index; + CPUTLBEntryFull full = {}; + int error_code = 0, access_index; address &= TARGET_PAGE_MASK; - error_code = get_physical_address(env, &paddr, &prot, &access_index, &attrs, - address, access_type, - mmu_idx, &page_size); + error_code = get_physical_address(env, &full, &access_index, + address, access_type, mmu_idx); if (likely(error_code == 0)) { - vaddr = address; - - trace_mmu_helper_mmu_fault(address, paddr, mmu_idx, env->tl, + trace_mmu_helper_mmu_fault(address, full.phys_addr, mmu_idx, env->tl, env->dmmu.mmu_primary_context, env->dmmu.mmu_secondary_context); - - tlb_set_page_with_attrs(cs, vaddr, paddr, attrs, prot, mmu_idx, 
- page_size); + tlb_set_page_full(cs, mmu_idx, address, &full); return true; } if (probe) { @@ -888,12 +873,14 @@ void dump_mmu(CPUSPARCState *env) static int cpu_sparc_get_phys_page(CPUSPARCState *env, hwaddr *phys, target_ulong addr, int rw, int mmu_idx) { - target_ulong page_size; - int prot, access_index; - MemTxAttrs attrs = {}; + CPUTLBEntryFull full = {}; + int access_index, ret; - return get_physical_address(env, phys, &prot, &access_index, &attrs, addr, - rw, mmu_idx, &page_size); + ret = get_physical_address(env, &full, &access_index, addr, rw, mmu_idx); + if (ret == 0) { + *phys = full.phys_addr; + } + return ret; } #if defined(TARGET_SPARC64) diff --git a/target/sparc/translate.c b/target/sparc/translate.c index 3b0044aa66..5ee293326c 100644 --- a/target/sparc/translate.c +++ b/target/sparc/translate.c @@ -84,10 +84,6 @@ typedef struct DisasContext { uint32_t cc_op; /* current CC operation */ sparc_def_t *def; - TCGv_i32 t32[3]; - TCGv ttl[5]; - int n_t32; - int n_ttl; #ifdef TARGET_SPARC64 int fprs_dirty; int asi; @@ -97,7 +93,6 @@ typedef struct DisasContext { typedef struct { TCGCond cond; bool is_bool; - bool g1, g2; TCGv c1, c2; } DisasCompare; @@ -131,22 +126,6 @@ static int sign_extend(int x, int len) #define IS_IMM (insn & (1<<13)) -static inline TCGv_i32 get_temp_i32(DisasContext *dc) -{ - TCGv_i32 t; - assert(dc->n_t32 < ARRAY_SIZE(dc->t32)); - dc->t32[dc->n_t32++] = t = tcg_temp_new_i32(); - return t; -} - -static inline TCGv get_temp_tl(DisasContext *dc) -{ - TCGv t; - assert(dc->n_ttl < ARRAY_SIZE(dc->ttl)); - dc->ttl[dc->n_ttl++] = t = tcg_temp_new(); - return t; -} - static inline void gen_update_fprs_dirty(DisasContext *dc, int rd) { #if defined(TARGET_SPARC64) @@ -163,7 +142,7 @@ static inline void gen_update_fprs_dirty(DisasContext *dc, int rd) /* floating point registers moves */ static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src) { - TCGv_i32 ret = get_temp_i32(dc); + TCGv_i32 ret = tcg_temp_new_i32(); if (src & 1) { tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]); } else { @@ -179,13 +158,12 @@ static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v) tcg_gen_extu_i32_i64(t, v); tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t, (dst & 1 ? 
0 : 32), 32); - tcg_temp_free_i64(t); gen_update_fprs_dirty(dc, dst); } static TCGv_i32 gen_dest_fpr_F(DisasContext *dc) { - return get_temp_i32(dc); + return tcg_temp_new_i32(); } static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src) @@ -301,7 +279,7 @@ static inline TCGv gen_load_gpr(DisasContext *dc, int reg) assert(reg < 32); return cpu_regs[reg]; } else { - TCGv t = get_temp_tl(dc); + TCGv t = tcg_temp_new(); tcg_gen_movi_tl(t, 0); return t; } @@ -321,7 +299,7 @@ static inline TCGv gen_dest_gpr(DisasContext *dc, int reg) assert(reg < 32); return cpu_regs[reg]; } else { - return get_temp_tl(dc); + return tcg_temp_new(); } } @@ -399,11 +377,6 @@ static TCGv_i32 gen_add32_carry32(void) carry_32 = tcg_temp_new_i32(); tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32); -#if TARGET_LONG_BITS == 64 - tcg_temp_free_i32(cc_src1_32); - tcg_temp_free_i32(cc_src2_32); -#endif - return carry_32; } @@ -425,11 +398,6 @@ static TCGv_i32 gen_sub32_carry32(void) carry_32 = tcg_temp_new_i32(); tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32); -#if TARGET_LONG_BITS == 64 - tcg_temp_free_i32(cc_src1_32); - tcg_temp_free_i32(cc_src2_32); -#endif - return carry_32; } @@ -460,7 +428,6 @@ static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1, generated the carry in the first place. */ carry = tcg_temp_new(); tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2); - tcg_temp_free(carry); goto add_done; } carry_32 = gen_add32_carry32(); @@ -489,11 +456,6 @@ static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1, tcg_gen_add_tl(dst, src1, src2); tcg_gen_add_tl(dst, dst, carry); - tcg_temp_free_i32(carry_32); -#if TARGET_LONG_BITS == 64 - tcg_temp_free(carry); -#endif - add_done: if (update_cc) { tcg_gen_mov_tl(cpu_cc_src, src1); @@ -545,7 +507,6 @@ static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1, generated the carry in the first place. 
*/ carry = tcg_temp_new(); tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2); - tcg_temp_free(carry); goto sub_done; } carry_32 = gen_sub32_carry32(); @@ -568,11 +529,6 @@ static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1, tcg_gen_sub_tl(dst, src1, src2); tcg_gen_sub_tl(dst, dst, carry); - tcg_temp_free_i32(carry_32); -#if TARGET_LONG_BITS == 64 - tcg_temp_free(carry); -#endif - sub_done: if (update_cc) { tcg_gen_mov_tl(cpu_cc_src, src1); @@ -594,13 +550,12 @@ static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2) if (!(env->y & 1)) T1 = 0; */ - zero = tcg_const_tl(0); + zero = tcg_constant_tl(0); tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff); tcg_gen_andi_tl(r_temp, cpu_y, 0x1); tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff); tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero, zero, cpu_cc_src2); - tcg_temp_free(zero); // b2 = T0 & 1; // env->y = (b2 << 31) | (env->y >> 1); @@ -611,14 +566,12 @@ static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2) gen_mov_reg_N(t0, cpu_psr); gen_mov_reg_V(r_temp, cpu_psr); tcg_gen_xor_tl(t0, t0, r_temp); - tcg_temp_free(r_temp); // T0 = (b1 << 31) | (T0 >> 1); // src1 = T0; tcg_gen_shli_tl(t0, t0, 31); tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1); tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0); - tcg_temp_free(t0); tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2); @@ -646,9 +599,6 @@ static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext) } tcg_gen_mul_i64(dst, t0, t1); - tcg_temp_free(t0); - tcg_temp_free(t1); - tcg_gen_shri_i64(cpu_y, dst, 32); #endif } @@ -686,7 +636,6 @@ static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src) tcg_gen_xor_tl(dst, dst, t0); gen_mov_reg_Z(t0, src); tcg_gen_or_tl(dst, dst, t0); - tcg_temp_free(t0); } // N ^ V @@ -696,7 +645,6 @@ static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src) gen_mov_reg_V(t0, src); gen_mov_reg_N(dst, src); tcg_gen_xor_tl(dst, dst, t0); - tcg_temp_free(t0); } // C | Z @@ -706,7 +654,6 @@ static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src) gen_mov_reg_Z(t0, src); gen_mov_reg_C(dst, src); tcg_gen_or_tl(dst, dst, t0); - tcg_temp_free(t0); } // C @@ -811,7 +758,6 @@ static inline void gen_op_eval_fbne(TCGv dst, TCGv src, gen_mov_reg_FCC0(dst, src, fcc_offset); gen_mov_reg_FCC1(t0, src, fcc_offset); tcg_gen_or_tl(dst, dst, t0); - tcg_temp_free(t0); } // 1 or 2: FCC0 ^ FCC1 @@ -822,7 +768,6 @@ static inline void gen_op_eval_fblg(TCGv dst, TCGv src, gen_mov_reg_FCC0(dst, src, fcc_offset); gen_mov_reg_FCC1(t0, src, fcc_offset); tcg_gen_xor_tl(dst, dst, t0); - tcg_temp_free(t0); } // 1 or 3: FCC0 @@ -840,7 +785,6 @@ static inline void gen_op_eval_fbl(TCGv dst, TCGv src, gen_mov_reg_FCC0(dst, src, fcc_offset); gen_mov_reg_FCC1(t0, src, fcc_offset); tcg_gen_andc_tl(dst, dst, t0); - tcg_temp_free(t0); } // 2 or 3: FCC1 @@ -858,7 +802,6 @@ static inline void gen_op_eval_fbg(TCGv dst, TCGv src, gen_mov_reg_FCC0(dst, src, fcc_offset); gen_mov_reg_FCC1(t0, src, fcc_offset); tcg_gen_andc_tl(dst, t0, dst); - tcg_temp_free(t0); } // 3: FCC0 & FCC1 @@ -869,7 +812,6 @@ static inline void gen_op_eval_fbu(TCGv dst, TCGv src, gen_mov_reg_FCC0(dst, src, fcc_offset); gen_mov_reg_FCC1(t0, src, fcc_offset); tcg_gen_and_tl(dst, dst, t0); - tcg_temp_free(t0); } // 0: !(FCC0 | FCC1) @@ -881,7 +823,6 @@ static inline void gen_op_eval_fbe(TCGv dst, TCGv src, gen_mov_reg_FCC1(t0, src, fcc_offset); tcg_gen_or_tl(dst, dst, t0); tcg_gen_xori_tl(dst, dst, 0x1); - tcg_temp_free(t0); } // 0 or 3: !(FCC0 ^ FCC1) @@ -893,7 +834,6 @@ static inline 
void gen_op_eval_fbue(TCGv dst, TCGv src, gen_mov_reg_FCC1(t0, src, fcc_offset); tcg_gen_xor_tl(dst, dst, t0); tcg_gen_xori_tl(dst, dst, 0x1); - tcg_temp_free(t0); } // 0 or 2: !FCC0 @@ -913,7 +853,6 @@ static inline void gen_op_eval_fbuge(TCGv dst, TCGv src, gen_mov_reg_FCC1(t0, src, fcc_offset); tcg_gen_andc_tl(dst, dst, t0); tcg_gen_xori_tl(dst, dst, 0x1); - tcg_temp_free(t0); } // 0 or 1: !FCC1 @@ -933,7 +872,6 @@ static inline void gen_op_eval_fbule(TCGv dst, TCGv src, gen_mov_reg_FCC1(t0, src, fcc_offset); tcg_gen_andc_tl(dst, t0, dst); tcg_gen_xori_tl(dst, dst, 0x1); - tcg_temp_free(t0); } // !3: !(FCC0 & FCC1) @@ -945,7 +883,6 @@ static inline void gen_op_eval_fbo(TCGv dst, TCGv src, gen_mov_reg_FCC1(t0, src, fcc_offset); tcg_gen_and_tl(dst, dst, t0); tcg_gen_xori_tl(dst, dst, 0x1); - tcg_temp_free(t0); } static inline void gen_branch2(DisasContext *dc, target_ulong pc1, @@ -991,11 +928,9 @@ static void gen_branch_n(DisasContext *dc, target_ulong pc1) tcg_gen_mov_tl(cpu_pc, cpu_npc); tcg_gen_addi_tl(cpu_npc, cpu_npc, 4); - t = tcg_const_tl(pc1); - z = tcg_const_tl(0); + t = tcg_constant_tl(pc1); + z = tcg_constant_tl(0); tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, z, t, cpu_npc); - tcg_temp_free(t); - tcg_temp_free(z); dc->pc = DYNAMIC_PC; } @@ -1003,15 +938,11 @@ static void gen_branch_n(DisasContext *dc, target_ulong pc1) static inline void gen_generic_branch(DisasContext *dc) { - TCGv npc0 = tcg_const_tl(dc->jump_pc[0]); - TCGv npc1 = tcg_const_tl(dc->jump_pc[1]); - TCGv zero = tcg_const_tl(0); + TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]); + TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]); + TCGv zero = tcg_constant_tl(0); tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1); - - tcg_temp_free(npc0); - tcg_temp_free(npc1); - tcg_temp_free(zero); } /* call this function before using the condition register as it may @@ -1050,20 +981,14 @@ static inline void save_state(DisasContext *dc) static void gen_exception(DisasContext *dc, int which) { - TCGv_i32 t; - save_state(dc); - t = tcg_const_i32(which); - gen_helper_raise_exception(cpu_env, t); - tcg_temp_free_i32(t); + gen_helper_raise_exception(cpu_env, tcg_constant_i32(which)); dc->base.is_jmp = DISAS_NORETURN; } static void gen_check_align(TCGv addr, int mask) { - TCGv_i32 r_mask = tcg_const_i32(mask); - gen_helper_check_align(cpu_env, addr, r_mask); - tcg_temp_free_i32(r_mask); + gen_helper_check_align(cpu_env, addr, tcg_constant_i32(mask)); } static inline void gen_mov_pc_npc(DisasContext *dc) @@ -1086,16 +1011,6 @@ static inline void gen_op_next_insn(void) tcg_gen_addi_tl(cpu_npc, cpu_npc, 4); } -static void free_compare(DisasCompare *cmp) -{ - if (!cmp->g1) { - tcg_temp_free(cmp->c1); - } - if (!cmp->g2) { - tcg_temp_free(cmp->c2); - } -} - static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond, DisasContext *dc) { @@ -1155,17 +1070,14 @@ static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond, cmp->cond = logic_cond[cond]; do_compare_dst_0: cmp->is_bool = false; - cmp->g2 = false; - cmp->c2 = tcg_const_tl(0); + cmp->c2 = tcg_constant_tl(0); #ifdef TARGET_SPARC64 if (!xcc) { - cmp->g1 = false; cmp->c1 = tcg_temp_new(); tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst); break; } #endif - cmp->g1 = true; cmp->c1 = cpu_cc_dst; break; @@ -1187,7 +1099,6 @@ static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond, if (!xcc) { /* Note that sign-extension works for unsigned compares as long as both operands are sign-extended. 
*/ - cmp->g1 = cmp->g2 = false; cmp->c1 = tcg_temp_new(); cmp->c2 = tcg_temp_new(); tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src); @@ -1195,7 +1106,6 @@ static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond, break; } #endif - cmp->g1 = cmp->g2 = true; cmp->c1 = cpu_cc_src; cmp->c2 = cpu_cc_src2; break; @@ -1212,9 +1122,8 @@ static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond, /* We're going to generate a boolean result. */ cmp->cond = TCG_COND_NE; cmp->is_bool = true; - cmp->g1 = cmp->g2 = false; cmp->c1 = r_dst = tcg_temp_new(); - cmp->c2 = tcg_const_tl(0); + cmp->c2 = tcg_constant_tl(0); switch (cond) { case 0x0: @@ -1278,9 +1187,8 @@ static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond) /* For now we still generate a straight boolean result. */ cmp->cond = TCG_COND_NE; cmp->is_bool = true; - cmp->g1 = cmp->g2 = false; cmp->c1 = r_dst = tcg_temp_new(); - cmp->c2 = tcg_const_tl(0); + cmp->c2 = tcg_constant_tl(0); switch (cc) { default: @@ -1362,8 +1270,6 @@ static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond, } else { tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2); } - - free_compare(&cmp); } static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond) @@ -1377,8 +1283,6 @@ static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond) } else { tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2); } - - free_compare(&cmp); } #ifdef TARGET_SPARC64 @@ -1398,10 +1302,8 @@ static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src) { cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]); cmp->is_bool = false; - cmp->g1 = true; - cmp->g2 = false; cmp->c1 = r_src; - cmp->c2 = tcg_const_tl(0); + cmp->c2 = tcg_constant_tl(0); } static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src) @@ -1411,8 +1313,6 @@ static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src) /* The interface is to return a boolean in r_dst. 
*/ tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2); - - free_compare(&cmp); } #endif @@ -2004,10 +1904,9 @@ static void gen_swap(DisasContext *dc, TCGv dst, TCGv src, static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx) { - TCGv m1 = tcg_const_tl(0xff); + TCGv m1 = tcg_constant_tl(0xff); gen_address_mask(dc, addr); tcg_gen_atomic_xchg_tl(dst, addr, m1, mmu_idx, MO_UB); - tcg_temp_free(m1); } /* asi moves */ @@ -2260,8 +2159,8 @@ static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr, break; default: { - TCGv_i32 r_asi = tcg_const_i32(da.asi); - TCGv_i32 r_mop = tcg_const_i32(memop); + TCGv_i32 r_asi = tcg_constant_i32(da.asi); + TCGv_i32 r_mop = tcg_constant_i32(memop); save_state(dc); #ifdef TARGET_SPARC64 @@ -2271,11 +2170,8 @@ static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr, TCGv_i64 t64 = tcg_temp_new_i64(); gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop); tcg_gen_trunc_i64_tl(dst, t64); - tcg_temp_free_i64(t64); } #endif - tcg_temp_free_i32(r_mop); - tcg_temp_free_i32(r_asi); } break; } @@ -2317,7 +2213,7 @@ static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr, { TCGv saddr = tcg_temp_new(); TCGv daddr = tcg_temp_new(); - TCGv four = tcg_const_tl(4); + TCGv four = tcg_constant_tl(4); TCGv_i32 tmp = tcg_temp_new_i32(); int i; @@ -2331,18 +2227,13 @@ static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr, tcg_gen_add_tl(saddr, saddr, four); tcg_gen_add_tl(daddr, daddr, four); } - - tcg_temp_free(saddr); - tcg_temp_free(daddr); - tcg_temp_free(four); - tcg_temp_free_i32(tmp); } break; #endif default: { - TCGv_i32 r_asi = tcg_const_i32(da.asi); - TCGv_i32 r_mop = tcg_const_i32(memop & MO_SIZE); + TCGv_i32 r_asi = tcg_constant_i32(da.asi); + TCGv_i32 r_mop = tcg_constant_i32(memop & MO_SIZE); save_state(dc); #ifdef TARGET_SPARC64 @@ -2352,11 +2243,8 @@ static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr, TCGv_i64 t64 = tcg_temp_new_i64(); tcg_gen_extu_tl_i64(t64, src); gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop); - tcg_temp_free_i64(t64); } #endif - tcg_temp_free_i32(r_mop); - tcg_temp_free_i32(r_asi); /* A write to a TLB register may alter page maps. End the TB. */ dc->npc = DYNAMIC_PC; @@ -2397,7 +2285,6 @@ static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv, tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd), da.mem_idx, da.memop); gen_store_gpr(dc, rd, oldv); - tcg_temp_free(oldv); break; default: /* ??? Should be DAE_invalid_asi. */ @@ -2422,22 +2309,18 @@ static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn) if (tb_cflags(dc->base.tb) & CF_PARALLEL) { gen_helper_exit_atomic(cpu_env); } else { - TCGv_i32 r_asi = tcg_const_i32(da.asi); - TCGv_i32 r_mop = tcg_const_i32(MO_UB); + TCGv_i32 r_asi = tcg_constant_i32(da.asi); + TCGv_i32 r_mop = tcg_constant_i32(MO_UB); TCGv_i64 s64, t64; save_state(dc); t64 = tcg_temp_new_i64(); gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop); - s64 = tcg_const_i64(0xff); + s64 = tcg_constant_i64(0xff); gen_helper_st_asi(cpu_env, addr, s64, r_asi, r_mop); - tcg_temp_free_i64(s64); - tcg_temp_free_i32(r_mop); - tcg_temp_free_i32(r_asi); tcg_gen_trunc_i64_tl(dst, t64); - tcg_temp_free_i64(t64); /* End the TB. 
*/ dc->npc = DYNAMIC_PC; @@ -2478,7 +2361,6 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr, tcg_gen_qemu_ld_i64(cpu_fpr[rd/2+1], addr, da.mem_idx, da.memop | MO_ALIGN_4); tcg_gen_mov_i64(cpu_fpr[rd / 2], d64); - tcg_temp_free_i64(d64); break; default: g_assert_not_reached(); @@ -2496,7 +2378,7 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr, /* The first operation checks required alignment. */ memop = da.memop | MO_ALIGN_64; - eight = tcg_const_tl(8); + eight = tcg_constant_tl(8); for (i = 0; ; ++i) { tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr, da.mem_idx, memop); @@ -2506,7 +2388,6 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr, tcg_gen_add_tl(addr, addr, eight); memop = da.memop; } - tcg_temp_free(eight); } else { gen_exception(dc, TT_ILL_INSN); } @@ -2524,8 +2405,8 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr, default: { - TCGv_i32 r_asi = tcg_const_i32(da.asi); - TCGv_i32 r_mop = tcg_const_i32(da.memop); + TCGv_i32 r_asi = tcg_constant_i32(da.asi); + TCGv_i32 r_mop = tcg_constant_i32(da.memop); save_state(dc); /* According to the table in the UA2011 manual, the only @@ -2538,7 +2419,6 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr, gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop); d32 = gen_dest_fpr_F(dc); tcg_gen_extrl_i64_i32(d32, d64); - tcg_temp_free_i64(d64); gen_store_fpr_F(dc, rd, d32); break; case 8: @@ -2550,13 +2430,10 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr, tcg_gen_addi_tl(addr, addr, 8); gen_helper_ld_asi(cpu_fpr[rd/2+1], cpu_env, addr, r_asi, r_mop); tcg_gen_mov_i64(cpu_fpr[rd / 2], d64); - tcg_temp_free_i64(d64); break; default: g_assert_not_reached(); } - tcg_temp_free_i32(r_mop); - tcg_temp_free_i32(r_asi); } break; } @@ -2610,7 +2487,7 @@ static void gen_stf_asi(DisasContext *dc, TCGv addr, /* The first operation checks required alignment. */ memop = da.memop | MO_ALIGN_64; - eight = tcg_const_tl(8); + eight = tcg_constant_tl(8); for (i = 0; ; ++i) { tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr, da.mem_idx, memop); @@ -2620,7 +2497,6 @@ static void gen_stf_asi(DisasContext *dc, TCGv addr, tcg_gen_add_tl(addr, addr, eight); memop = da.memop; } - tcg_temp_free(eight); } else { gen_exception(dc, TT_ILL_INSN); } @@ -2677,7 +2553,6 @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd) } else { tcg_gen_extr32_i64(hi, lo, tmp); } - tcg_temp_free_i64(tmp); } break; @@ -2687,14 +2562,12 @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd) real hardware allows others. This can be seen with e.g. FreeBSD 10.3 wrt ASI_IC_TAG. */ { - TCGv_i32 r_asi = tcg_const_i32(da.asi); - TCGv_i32 r_mop = tcg_const_i32(da.memop); + TCGv_i32 r_asi = tcg_constant_i32(da.asi); + TCGv_i32 r_mop = tcg_constant_i32(da.memop); TCGv_i64 tmp = tcg_temp_new_i64(); save_state(dc); gen_helper_ld_asi(tmp, cpu_env, addr, r_asi, r_mop); - tcg_temp_free_i32(r_asi); - tcg_temp_free_i32(r_mop); /* See above. */ if ((da.memop & MO_BSWAP) == MO_TE) { @@ -2702,7 +2575,6 @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd) } else { tcg_gen_extr32_i64(hi, lo, tmp); } - tcg_temp_free_i64(tmp); } break; } @@ -2742,7 +2614,6 @@ static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr, } gen_address_mask(dc, addr); tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop); - tcg_temp_free_i64(t64); } break; @@ -2750,8 +2621,8 @@ static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr, /* ??? 
In theory we've handled all of the ASIs that are valid for stda, and this should raise DAE_invalid_asi. */ { - TCGv_i32 r_asi = tcg_const_i32(da.asi); - TCGv_i32 r_mop = tcg_const_i32(da.memop); + TCGv_i32 r_asi = tcg_constant_i32(da.asi); + TCGv_i32 r_mop = tcg_constant_i32(da.memop); TCGv_i64 t64 = tcg_temp_new_i64(); /* See above. */ @@ -2763,9 +2634,6 @@ static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr, save_state(dc); gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop); - tcg_temp_free_i32(r_mop); - tcg_temp_free_i32(r_asi); - tcg_temp_free_i64(t64); } break; } @@ -2785,7 +2653,6 @@ static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv, tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd), da.mem_idx, da.memop); gen_store_gpr(dc, rd, oldv); - tcg_temp_free(oldv); break; default: /* ??? Should be DAE_invalid_asi. */ @@ -2808,7 +2675,6 @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd) switch (da.type) { case GET_ASI_EXCP: - tcg_temp_free_i64(t64); return; case GET_ASI_DIRECT: gen_address_mask(dc, addr); @@ -2816,19 +2682,16 @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd) break; default: { - TCGv_i32 r_asi = tcg_const_i32(da.asi); - TCGv_i32 r_mop = tcg_const_i32(MO_UQ); + TCGv_i32 r_asi = tcg_constant_i32(da.asi); + TCGv_i32 r_mop = tcg_constant_i32(MO_UQ); save_state(dc); gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop); - tcg_temp_free_i32(r_mop); - tcg_temp_free_i32(r_asi); } break; } tcg_gen_extr_i64_i32(lo, hi, t64); - tcg_temp_free_i64(t64); gen_store_gpr(dc, rd | 1, lo); gen_store_gpr(dc, rd, hi); } @@ -2857,7 +2720,7 @@ static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr, as a cacheline-style operation. */ { TCGv d_addr = tcg_temp_new(); - TCGv eight = tcg_const_tl(8); + TCGv eight = tcg_constant_tl(8); int i; tcg_gen_andi_tl(d_addr, addr, -8); @@ -2865,25 +2728,18 @@ static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr, tcg_gen_qemu_st_i64(t64, d_addr, da.mem_idx, da.memop); tcg_gen_add_tl(d_addr, d_addr, eight); } - - tcg_temp_free(d_addr); - tcg_temp_free(eight); } break; default: { - TCGv_i32 r_asi = tcg_const_i32(da.asi); - TCGv_i32 r_mop = tcg_const_i32(MO_UQ); + TCGv_i32 r_asi = tcg_constant_i32(da.asi); + TCGv_i32 r_mop = tcg_constant_i32(MO_UQ); save_state(dc); gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop); - tcg_temp_free_i32(r_mop); - tcg_temp_free_i32(r_asi); } break; } - - tcg_temp_free_i64(t64); } #endif @@ -2897,7 +2753,7 @@ static TCGv get_src2(DisasContext *dc, unsigned int insn) { if (IS_IMM) { /* immediate */ target_long simm = GET_FIELDs(insn, 19, 31); - TCGv t = get_temp_tl(dc); + TCGv t = tcg_temp_new(); tcg_gen_movi_tl(t, simm); return t; } else { /* register */ @@ -2921,18 +2777,15 @@ static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs) TCGv_i64 c64 = tcg_temp_new_i64(); tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2); tcg_gen_extrl_i64_i32(c32, c64); - tcg_temp_free_i64(c64); } s1 = gen_load_fpr_F(dc, rs); s2 = gen_load_fpr_F(dc, rd); dst = gen_dest_fpr_F(dc); - zero = tcg_const_i32(0); + zero = tcg_constant_i32(0); tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2); - tcg_temp_free_i32(c32); - tcg_temp_free_i32(zero); gen_store_fpr_F(dc, rd, dst); } @@ -2978,10 +2831,7 @@ static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env cpu_env) TCGv_ptr r_tl_tmp = tcg_temp_new_ptr(); tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl); tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp); - tcg_temp_free_ptr(r_tl_tmp); } - - 
tcg_temp_free_i32(r_tl); } #endif @@ -3081,11 +2931,6 @@ static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2, tcg_gen_neg_tl(t1, t1); tcg_gen_or_tl(lo2, lo2, t1); tcg_gen_and_tl(dst, dst, lo2); - - tcg_temp_free(lo1); - tcg_temp_free(lo2); - tcg_temp_free(t1); - tcg_temp_free(t2); } static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left) @@ -3098,8 +2943,6 @@ static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left) tcg_gen_neg_tl(tmp, tmp); } tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3); - - tcg_temp_free(tmp); } static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2) @@ -3121,10 +2964,6 @@ static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2) tcg_gen_shri_tl(t2, t2, 1); tcg_gen_or_tl(dst, t1, t2); - - tcg_temp_free(t1); - tcg_temp_free(t2); - tcg_temp_free(shift); } #endif @@ -3253,7 +3092,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) case 2: /* FPU & Logical Operations */ { unsigned int xop = GET_FIELD(insn, 7, 12); - TCGv cpu_dst = get_temp_tl(dc); + TCGv cpu_dst = tcg_temp_new(); TCGv cpu_tmp0; if (xop == 0x3a) { /* generate trap */ @@ -3288,7 +3127,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) l1 = gen_new_label(); tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond), cmp.c1, cmp.c2, l1); - free_compare(&cmp); } mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc) @@ -3325,7 +3163,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) } gen_helper_raise_exception(cpu_env, trap); - tcg_temp_free_i32(trap); if (cond == 8) { /* An unconditional trap ends the TB. */ @@ -3376,7 +3213,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) TCGv_i32 r_const; r_tickptr = tcg_temp_new_ptr(); - r_const = tcg_const_i32(dc->mem_idx); + r_const = tcg_constant_i32(dc->mem_idx); tcg_gen_ld_ptr(r_tickptr, cpu_env, offsetof(CPUSPARCState, tick)); if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { @@ -3384,8 +3221,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) } gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr, r_const); - tcg_temp_free_ptr(r_tickptr); - tcg_temp_free_i32(r_const); gen_store_gpr(dc, rd, cpu_dst); if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { /* I/O operations in icount mode must end the TB */ @@ -3430,7 +3265,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) TCGv_i32 r_const; r_tickptr = tcg_temp_new_ptr(); - r_const = tcg_const_i32(dc->mem_idx); + r_const = tcg_constant_i32(dc->mem_idx); tcg_gen_ld_ptr(r_tickptr, cpu_env, offsetof(CPUSPARCState, stick)); if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { @@ -3438,8 +3273,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) } gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr, r_const); - tcg_temp_free_ptr(r_tickptr); - tcg_temp_free_i32(r_const); gen_store_gpr(dc, rd, cpu_dst); if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { /* I/O operations in icount mode must end the TB */ @@ -3513,7 +3346,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) if (!supervisor(dc)) { goto priv_insn; } - cpu_tmp0 = get_temp_tl(dc); + cpu_tmp0 = tcg_temp_new(); #ifdef TARGET_SPARC64 rs1 = GET_FIELD(insn, 13, 17); switch (rs1) { @@ -3525,7 +3358,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) gen_load_trap_state_at_tl(r_tsptr, cpu_env); tcg_gen_ld_tl(cpu_tmp0, r_tsptr, offsetof(trap_state, tpc)); - tcg_temp_free_ptr(r_tsptr); } break; case 1: // tnpc @@ -3536,7 +3368,6 @@ static void 
disas_sparc_insn(DisasContext * dc, unsigned int insn) gen_load_trap_state_at_tl(r_tsptr, cpu_env); tcg_gen_ld_tl(cpu_tmp0, r_tsptr, offsetof(trap_state, tnpc)); - tcg_temp_free_ptr(r_tsptr); } break; case 2: // tstate @@ -3547,7 +3378,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) gen_load_trap_state_at_tl(r_tsptr, cpu_env); tcg_gen_ld_tl(cpu_tmp0, r_tsptr, offsetof(trap_state, tstate)); - tcg_temp_free_ptr(r_tsptr); } break; case 3: // tt @@ -3557,7 +3387,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) gen_load_trap_state_at_tl(r_tsptr, cpu_env); tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr, offsetof(trap_state, tt)); - tcg_temp_free_ptr(r_tsptr); } break; case 4: // tick @@ -3566,7 +3395,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) TCGv_i32 r_const; r_tickptr = tcg_temp_new_ptr(); - r_const = tcg_const_i32(dc->mem_idx); + r_const = tcg_constant_i32(dc->mem_idx); tcg_gen_ld_ptr(r_tickptr, cpu_env, offsetof(CPUSPARCState, tick)); if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { @@ -3574,8 +3403,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) } gen_helper_tick_get_count(cpu_tmp0, cpu_env, r_tickptr, r_const); - tcg_temp_free_ptr(r_tickptr); - tcg_temp_free_i32(r_const); if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) { /* I/O operations in icount mode must end the TB */ dc->base.is_jmp = DISAS_EXIT; @@ -3847,7 +3674,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) cpu_src1 = get_src1(dc, insn); \ gen_compare_reg(&cmp, cond, cpu_src1); \ gen_fmov##sz(dc, &cmp, rd, rs2); \ - free_compare(&cmp); \ } while (0) if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */ @@ -3871,7 +3697,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) cond = GET_FIELD_SP(insn, 14, 17); \ gen_fcompare(&cmp, fcc, cond); \ gen_fmov##sz(dc, &cmp, rd, rs2); \ - free_compare(&cmp); \ } while (0) case 0x001: /* V9 fmovscc %fcc0 */ @@ -3921,7 +3746,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) cond = GET_FIELD_SP(insn, 14, 17); \ gen_compare(&cmp, xcc, cond, dc); \ gen_fmov##sz(dc, &cmp, rd, rs2); \ - free_compare(&cmp); \ } while (0) case 0x101: /* V9 fmovscc %icc */ @@ -4031,7 +3855,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) } else { /* register */ rs2 = GET_FIELD(insn, 27, 31); cpu_src2 = gen_load_gpr(dc, rs2); - cpu_tmp0 = get_temp_tl(dc); + cpu_tmp0 = tcg_temp_new(); if (insn & (1 << 12)) { tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f); } else { @@ -4053,7 +3877,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) } else { /* register */ rs2 = GET_FIELD(insn, 27, 31); cpu_src2 = gen_load_gpr(dc, rs2); - cpu_tmp0 = get_temp_tl(dc); + cpu_tmp0 = tcg_temp_new(); if (insn & (1 << 12)) { tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f); tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0); @@ -4077,7 +3901,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) } else { /* register */ rs2 = GET_FIELD(insn, 27, 31); cpu_src2 = gen_load_gpr(dc, rs2); - cpu_tmp0 = get_temp_tl(dc); + cpu_tmp0 = tcg_temp_new(); if (insn & (1 << 12)) { tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f); tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0); @@ -4263,7 +4087,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) simm = GET_FIELDs(insn, 20, 31); tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f); } else { /* register */ - cpu_tmp0 = get_temp_tl(dc); + cpu_tmp0 = tcg_temp_new(); tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f); tcg_gen_shl_tl(cpu_dst, 
cpu_src1, cpu_tmp0); } @@ -4274,7 +4098,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) simm = GET_FIELDs(insn, 20, 31); tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f); } else { /* register */ - cpu_tmp0 = get_temp_tl(dc); + cpu_tmp0 = tcg_temp_new(); tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f); tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0); } @@ -4285,7 +4109,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) simm = GET_FIELDs(insn, 20, 31); tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f); } else { /* register */ - cpu_tmp0 = get_temp_tl(dc); + cpu_tmp0 = tcg_temp_new(); tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f); tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0); } @@ -4294,7 +4118,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) #endif case 0x30: { - cpu_tmp0 = get_temp_tl(dc); + cpu_tmp0 = tcg_temp_new(); switch(rd) { case 0: /* wry */ tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); @@ -4393,7 +4217,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) } gen_helper_tick_set_limit(r_tickptr, cpu_tick_cmpr); - tcg_temp_free_ptr(r_tickptr); /* End TB to handle timer interrupt */ dc->base.is_jmp = DISAS_EXIT; } @@ -4417,7 +4240,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) } gen_helper_tick_set_count(r_tickptr, cpu_tmp0); - tcg_temp_free_ptr(r_tickptr); /* End TB to handle timer interrupt */ dc->base.is_jmp = DISAS_EXIT; } @@ -4441,7 +4263,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) } gen_helper_tick_set_limit(r_tickptr, cpu_stick_cmpr); - tcg_temp_free_ptr(r_tickptr); /* End TB to handle timer interrupt */ dc->base.is_jmp = DISAS_EXIT; } @@ -4479,7 +4300,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) goto illegal_insn; } #else - cpu_tmp0 = get_temp_tl(dc); + cpu_tmp0 = tcg_temp_new(); tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); gen_helper_wrpsr(cpu_env, cpu_tmp0); tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); @@ -4495,7 +4316,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) { if (!supervisor(dc)) goto priv_insn; - cpu_tmp0 = get_temp_tl(dc); + cpu_tmp0 = tcg_temp_new(); tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); #ifdef TARGET_SPARC64 switch (rd) { @@ -4507,7 +4328,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) gen_load_trap_state_at_tl(r_tsptr, cpu_env); tcg_gen_st_tl(cpu_tmp0, r_tsptr, offsetof(trap_state, tpc)); - tcg_temp_free_ptr(r_tsptr); } break; case 1: // tnpc @@ -4518,7 +4338,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) gen_load_trap_state_at_tl(r_tsptr, cpu_env); tcg_gen_st_tl(cpu_tmp0, r_tsptr, offsetof(trap_state, tnpc)); - tcg_temp_free_ptr(r_tsptr); } break; case 2: // tstate @@ -4530,7 +4349,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) tcg_gen_st_tl(cpu_tmp0, r_tsptr, offsetof(trap_state, tstate)); - tcg_temp_free_ptr(r_tsptr); } break; case 3: // tt @@ -4541,7 +4359,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) gen_load_trap_state_at_tl(r_tsptr, cpu_env); tcg_gen_st32_tl(cpu_tmp0, r_tsptr, offsetof(trap_state, tt)); - tcg_temp_free_ptr(r_tsptr); } break; case 4: // tick @@ -4557,7 +4374,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) } gen_helper_tick_set_count(r_tickptr, cpu_tmp0); - tcg_temp_free_ptr(r_tickptr); /* End TB to handle timer interrupt */ dc->base.is_jmp = DISAS_EXIT; } @@ -4653,7 +4469,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) 
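/*
 * Two related conversions run through this sparc translator diff: the
 * per-instruction scratch pools (get_temp_tl()/get_temp_i32() with the
 * t32[]/ttl[] arrays freed at the old "egress" label) become plain
 * tcg_temp_new()/tcg_temp_new_i32() calls, relying on the same automatic
 * reclaim as above, and tcg_const_*() temporaries that only ever carried an
 * immediate become tcg_constant_*() values, which are read-only and must
 * never be freed, so the paired tcg_temp_free_*() calls go away with them.
 * A sketch of the constant case, modelled on gen_exception() earlier in this
 * file (names taken from these hunks; illustrative only):
 */
{
    /*
     * formerly: t = tcg_const_i32(which);
     *           gen_helper_raise_exception(cpu_env, t);
     *           tcg_temp_free_i32(t);
     * now the interned constant is passed directly and is never freed:
     */
    gen_helper_raise_exception(cpu_env, tcg_constant_i32(which));
}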
CHECK_IU_FEATURE(dc, HYPV); if (!hypervisor(dc)) goto priv_insn; - cpu_tmp0 = get_temp_tl(dc); + cpu_tmp0 = tcg_temp_new(); tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); switch (rd) { case 0: // hpstate @@ -4688,7 +4504,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) } gen_helper_tick_set_limit(r_tickptr, cpu_hstick_cmpr); - tcg_temp_free_ptr(r_tickptr); /* End TB to handle timer interrupt */ dc->base.is_jmp = DISAS_EXIT; } @@ -4733,7 +4548,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) tcg_gen_movcond_tl(cmp.cond, dst, cmp.c1, cmp.c2, cpu_src2, dst); - free_compare(&cmp); gen_store_gpr(dc, rd, dst); break; } @@ -4765,7 +4579,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) tcg_gen_movcond_tl(cmp.cond, dst, cmp.c1, cmp.c2, cpu_src2, dst); - free_compare(&cmp); gen_store_gpr(dc, rd, dst); break; } @@ -5227,7 +5040,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) } else if (xop == 0x39) { /* V9 return */ save_state(dc); cpu_src1 = get_src1(dc, insn); - cpu_tmp0 = get_temp_tl(dc); + cpu_tmp0 = tcg_temp_new(); if (IS_IMM) { /* immediate */ simm = GET_FIELDs(insn, 19, 31); tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm); @@ -5249,7 +5062,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) #endif } else { cpu_src1 = get_src1(dc, insn); - cpu_tmp0 = get_temp_tl(dc); + cpu_tmp0 = tcg_temp_new(); if (IS_IMM) { /* immediate */ simm = GET_FIELDs(insn, 19, 31); tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm); @@ -5344,7 +5157,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) unsigned int xop = GET_FIELD(insn, 7, 12); /* ??? gen_address_mask prevents us from using a source register directly. Always generate a temporary. */ - TCGv cpu_addr = get_temp_tl(dc); + TCGv cpu_addr = tcg_temp_new(); tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn)); if (xop == 0x3c || xop == 0x3e) { @@ -5392,7 +5205,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) gen_store_gpr(dc, rd + 1, cpu_val); tcg_gen_shri_i64(t64, t64, 32); tcg_gen_trunc_i64_tl(cpu_val, t64); - tcg_temp_free_i64(t64); tcg_gen_ext32u_tl(cpu_val, cpu_val); } break; @@ -5522,11 +5334,10 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) tcg_gen_qemu_ld_i64(t64, cpu_addr, dc->mem_idx, MO_TEUQ); gen_helper_ldxfsr(cpu_fsr, cpu_env, cpu_fsr, t64); - tcg_temp_free_i64(t64); break; } #endif - cpu_dst_32 = get_temp_i32(dc); + cpu_dst_32 = tcg_temp_new_i32(); tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr, dc->mem_idx, MO_TEUL); gen_helper_ldfsr(cpu_fsr, cpu_env, cpu_fsr, cpu_dst_32); @@ -5542,8 +5353,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) tcg_gen_qemu_ld_i64(cpu_src2_64, cpu_addr, dc->mem_idx, MO_TEUQ | MO_ALIGN_4); gen_store_fpr_Q(dc, rd, cpu_src1_64, cpu_src2_64); - tcg_temp_free_i64(cpu_src1_64); - tcg_temp_free_i64(cpu_src2_64); break; case 0x23: /* lddf, load double fpreg */ gen_address_mask(dc, cpu_addr); @@ -5584,7 +5393,6 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) t64 = tcg_temp_new_i64(); tcg_gen_concat_tl_i64(t64, lo, cpu_val); tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx); - tcg_temp_free_i64(t64); } break; #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) @@ -5747,46 +5555,31 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn) dc->npc = dc->npc + 4; } jmp_insn: - goto egress; + return; illegal_insn: gen_exception(dc, TT_ILL_INSN); - goto egress; + return; unimp_flush: gen_exception(dc, TT_UNIMP_FLUSH); - goto 
egress; + return; #if !defined(CONFIG_USER_ONLY) priv_insn: gen_exception(dc, TT_PRIV_INSN); - goto egress; + return; #endif nfpu_insn: gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP); - goto egress; + return; #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64) nfq_insn: gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR); - goto egress; + return; #endif #ifndef TARGET_SPARC64 ncp_insn: gen_exception(dc, TT_NCP_INSN); - goto egress; + return; #endif - egress: - if (dc->n_t32 != 0) { - int i; - for (i = dc->n_t32 - 1; i >= 0; --i) { - tcg_temp_free_i32(dc->t32[i]); - } - dc->n_t32 = 0; - } - if (dc->n_ttl != 0) { - int i; - for (i = dc->n_ttl - 1; i >= 0; --i) { - tcg_temp_free(dc->ttl[i]); - } - dc->n_ttl = 0; - } } static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) diff --git a/target/tricore/translate.c b/target/tricore/translate.c index 176ea96b2b..127f9a989a 100644 --- a/target/tricore/translate.c +++ b/target/tricore/translate.c @@ -126,7 +126,6 @@ void tricore_cpu_dump_state(CPUState *cs, FILE *f, int flags) #define gen_helper_1arg(name, arg) do { \ TCGv_i32 helper_tmp = tcg_const_i32(arg); \ gen_helper_##name(cpu_env, helper_tmp); \ - tcg_temp_free_i32(helper_tmp); \ } while (0) #define GEN_HELPER_LL(name, ret, arg0, arg1, n) do { \ @@ -137,9 +136,6 @@ void tricore_cpu_dump_state(CPUState *cs, FILE *f, int flags) tcg_gen_ext16s_tl(arg01, arg0); \ tcg_gen_ext16s_tl(arg11, arg1); \ gen_helper_##name(ret, arg00, arg01, arg11, arg11, n); \ - tcg_temp_free(arg00); \ - tcg_temp_free(arg01); \ - tcg_temp_free(arg11); \ } while (0) #define GEN_HELPER_LU(name, ret, arg0, arg1, n) do { \ @@ -152,10 +148,6 @@ void tricore_cpu_dump_state(CPUState *cs, FILE *f, int flags) tcg_gen_sari_tl(arg11, arg1, 16); \ tcg_gen_ext16s_tl(arg10, arg1); \ gen_helper_##name(ret, arg00, arg01, arg10, arg11, n); \ - tcg_temp_free(arg00); \ - tcg_temp_free(arg01); \ - tcg_temp_free(arg10); \ - tcg_temp_free(arg11); \ } while (0) #define GEN_HELPER_UL(name, ret, arg0, arg1, n) do { \ @@ -168,10 +160,6 @@ void tricore_cpu_dump_state(CPUState *cs, FILE *f, int flags) tcg_gen_sari_tl(arg10, arg1, 16); \ tcg_gen_ext16s_tl(arg11, arg1); \ gen_helper_##name(ret, arg00, arg01, arg10, arg11, n); \ - tcg_temp_free(arg00); \ - tcg_temp_free(arg01); \ - tcg_temp_free(arg10); \ - tcg_temp_free(arg11); \ } while (0) #define GEN_HELPER_UU(name, ret, arg0, arg1, n) do { \ @@ -182,9 +170,6 @@ void tricore_cpu_dump_state(CPUState *cs, FILE *f, int flags) tcg_gen_ext16s_tl(arg00, arg0); \ tcg_gen_sari_tl(arg11, arg1, 16); \ gen_helper_##name(ret, arg00, arg01, arg11, arg11, n); \ - tcg_temp_free(arg00); \ - tcg_temp_free(arg01); \ - tcg_temp_free(arg11); \ } while (0) #define GEN_HELPER_RRR(name, rl, rh, al1, ah1, arg2) do { \ @@ -194,9 +179,6 @@ void tricore_cpu_dump_state(CPUState *cs, FILE *f, int flags) tcg_gen_concat_i32_i64(arg1, al1, ah1); \ gen_helper_##name(ret, arg1, arg2); \ tcg_gen_extr_i64_i32(rl, rh, ret); \ - \ - tcg_temp_free_i64(ret); \ - tcg_temp_free_i64(arg1); \ } while (0) #define GEN_HELPER_RR(name, rl, rh, arg1, arg2) do { \ @@ -204,8 +186,6 @@ void tricore_cpu_dump_state(CPUState *cs, FILE *f, int flags) \ gen_helper_##name(ret, cpu_env, arg1, arg2); \ tcg_gen_extr_i64_i32(rl, rh, ret); \ - \ - tcg_temp_free_i64(ret); \ } while (0) #define EA_ABS_FORMAT(con) (((con & 0x3C000) << 14) + (con & 0x3FFF)) @@ -229,7 +209,6 @@ static inline void gen_offset_ld(DisasContext *ctx, TCGv r1, TCGv r2, TCGv temp = tcg_temp_new(); tcg_gen_addi_tl(temp, r2, con); tcg_gen_qemu_ld_tl(r1, temp, 
ctx->mem_idx, mop); - tcg_temp_free(temp); } static inline void gen_offset_st(DisasContext *ctx, TCGv r1, TCGv r2, @@ -238,7 +217,6 @@ static inline void gen_offset_st(DisasContext *ctx, TCGv r1, TCGv r2, TCGv temp = tcg_temp_new(); tcg_gen_addi_tl(temp, r2, con); tcg_gen_qemu_st_tl(r1, temp, ctx->mem_idx, mop); - tcg_temp_free(temp); } static void gen_st_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx) @@ -247,8 +225,6 @@ static void gen_st_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx) tcg_gen_concat_i32_i64(temp, rl, rh); tcg_gen_qemu_st_i64(temp, address, ctx->mem_idx, MO_LEUQ); - - tcg_temp_free_i64(temp); } static void gen_offset_st_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con, @@ -257,7 +233,6 @@ static void gen_offset_st_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con, TCGv temp = tcg_temp_new(); tcg_gen_addi_tl(temp, base, con); gen_st_2regs_64(rh, rl, temp, ctx); - tcg_temp_free(temp); } static void gen_ld_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx) @@ -267,8 +242,6 @@ static void gen_ld_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx) tcg_gen_qemu_ld_i64(temp, address, ctx->mem_idx, MO_LEUQ); /* write back to two 32 bit regs */ tcg_gen_extr_i64_i32(rl, rh, temp); - - tcg_temp_free_i64(temp); } static void gen_offset_ld_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con, @@ -277,7 +250,6 @@ static void gen_offset_ld_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con, TCGv temp = tcg_temp_new(); tcg_gen_addi_tl(temp, base, con); gen_ld_2regs_64(rh, rl, temp, ctx); - tcg_temp_free(temp); } static void gen_st_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off, @@ -287,7 +259,6 @@ static void gen_st_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off, tcg_gen_addi_tl(temp, r2, off); tcg_gen_qemu_st_tl(r1, temp, ctx->mem_idx, mop); tcg_gen_mov_tl(r2, temp); - tcg_temp_free(temp); } static void gen_ld_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off, @@ -297,7 +268,6 @@ static void gen_ld_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off, tcg_gen_addi_tl(temp, r2, off); tcg_gen_qemu_ld_tl(r1, temp, ctx->mem_idx, mop); tcg_gen_mov_tl(r2, temp); - tcg_temp_free(temp); } /* M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]); */ @@ -317,9 +287,6 @@ static void gen_ldmst(DisasContext *ctx, int ereg, TCGv ea) tcg_gen_or_tl(temp, temp, temp2); /* M(EA, word) = temp; */ tcg_gen_qemu_st_tl(temp, ea, ctx->mem_idx, MO_LEUL); - - tcg_temp_free(temp); - tcg_temp_free(temp2); } /* tmp = M(EA, word); @@ -332,8 +299,6 @@ static void gen_swap(DisasContext *ctx, int reg, TCGv ea) tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL); tcg_gen_qemu_st_tl(cpu_gpr_d[reg], ea, ctx->mem_idx, MO_LEUL); tcg_gen_mov_tl(cpu_gpr_d[reg], temp); - - tcg_temp_free(temp); } static void gen_cmpswap(DisasContext *ctx, int reg, TCGv ea) @@ -345,9 +310,6 @@ static void gen_cmpswap(DisasContext *ctx, int reg, TCGv ea) cpu_gpr_d[reg], temp); tcg_gen_qemu_st_tl(temp2, ea, ctx->mem_idx, MO_LEUL); tcg_gen_mov_tl(cpu_gpr_d[reg], temp); - - tcg_temp_free(temp); - tcg_temp_free(temp2); } static void gen_swapmsk(DisasContext *ctx, int reg, TCGv ea) @@ -362,10 +324,6 @@ static void gen_swapmsk(DisasContext *ctx, int reg, TCGv ea) tcg_gen_or_tl(temp2, temp2, temp3); tcg_gen_qemu_st_tl(temp2, ea, ctx->mem_idx, MO_LEUL); tcg_gen_mov_tl(cpu_gpr_d[reg], temp); - - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free(temp3); } @@ -447,9 +405,6 @@ static inline void gen_add_d(TCGv ret, TCGv r1, TCGv r2) tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, 
cpu_PSW_AV); /* write back result */ tcg_gen_mov_tl(ret, result); - - tcg_temp_free(result); - tcg_temp_free(t0); } static inline void @@ -476,11 +431,6 @@ gen_add64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2) tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* write back result */ tcg_gen_mov_i64(ret, result); - - tcg_temp_free(temp); - tcg_temp_free_i64(result); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } static inline void @@ -527,11 +477,6 @@ gen_addsub64_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp); /* calc SAV bit */ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); - - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free(temp3); - tcg_temp_free(temp4); } /* ret = r2 + (r1 * r3); */ @@ -564,17 +509,12 @@ static inline void gen_madd32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3) tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV); /* calc SAV */ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); - - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); - tcg_temp_free_i64(t3); } static inline void gen_maddi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con) { TCGv temp = tcg_const_i32(con); gen_madd32_d(ret, r1, r2, temp); - tcg_temp_free(temp); } static inline void @@ -603,11 +543,6 @@ gen_madd64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, /* write back the result */ tcg_gen_mov_tl(ret_low, t3); tcg_gen_mov_tl(ret_high, t4); - - tcg_temp_free(t1); - tcg_temp_free(t2); - tcg_temp_free(t3); - tcg_temp_free(t4); } static inline void @@ -638,10 +573,6 @@ gen_maddu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV); /* calc SAV */ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); - - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); - tcg_temp_free_i64(t3); } static inline void @@ -650,7 +581,6 @@ gen_maddi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, { TCGv temp = tcg_const_i32(con); gen_madd64_d(ret_low, ret_high, r1, r2_low, r2_high, temp); - tcg_temp_free(temp); } static inline void @@ -659,7 +589,6 @@ gen_maddui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, { TCGv temp = tcg_const_i32(con); gen_maddu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp); - tcg_temp_free(temp); } static inline void @@ -686,9 +615,6 @@ gen_madd_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, tcg_gen_extr_i64_i32(temp, temp2, temp64); gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2, tcg_gen_add_tl, tcg_gen_add_tl); - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free_i64(temp64); } static inline void @@ -715,9 +641,6 @@ gen_maddsu_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, tcg_gen_extr_i64_i32(temp, temp2, temp64); gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2, tcg_gen_sub_tl, tcg_gen_add_tl); - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free_i64(temp64); } static inline void @@ -751,11 +674,6 @@ gen_maddsum_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, gen_add64_d(temp64_2, temp64_3, temp64); /* write back result */ tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_2); - - tcg_temp_free(temp); - tcg_temp_free_i64(temp64); - tcg_temp_free_i64(temp64_2); - tcg_temp_free_i64(temp64_3); } static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2); @@ -792,12 +710,6 @@ gen_madds_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp); /* combine av 
bits */ tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3); - - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free(temp3); - tcg_temp_free_i64(temp64); - } static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2); @@ -834,12 +746,6 @@ gen_maddsus_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp); /* combine av bits */ tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3); - - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free(temp3); - tcg_temp_free_i64(temp64); - } static inline void @@ -872,10 +778,6 @@ gen_maddsums_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, gen_helper_add64_ssov(temp64, cpu_env, temp64_2, temp64); tcg_gen_extr_i64_i32(ret_low, ret_high, temp64); - - tcg_temp_free(temp); - tcg_temp_free_i64(temp64); - tcg_temp_free_i64(temp64_2); } @@ -905,11 +807,6 @@ gen_maddm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, gen_add64_d(temp64_3, temp64_2, temp64); /* write back result */ tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_3); - - tcg_temp_free(temp); - tcg_temp_free_i64(temp64); - tcg_temp_free_i64(temp64_2); - tcg_temp_free_i64(temp64_3); } static inline void @@ -936,10 +833,6 @@ gen_maddms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high); gen_helper_add64_ssov(temp64, cpu_env, temp64_2, temp64); tcg_gen_extr_i64_i32(ret_low, ret_high, temp64); - - tcg_temp_free(temp); - tcg_temp_free_i64(temp64); - tcg_temp_free_i64(temp64_2); } static inline void @@ -963,9 +856,6 @@ gen_maddr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, break; } gen_helper_addr_h(ret, cpu_env, temp64, r1_low, r1_high); - - tcg_temp_free(temp); - tcg_temp_free_i64(temp64); } static inline void @@ -977,9 +867,6 @@ gen_maddr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) tcg_gen_andi_tl(temp2, r1, 0xffff0000); tcg_gen_shli_tl(temp, r1, 16); gen_maddr64_h(ret, temp, temp2, r2, r3, n, mode); - - tcg_temp_free(temp); - tcg_temp_free(temp2); } static inline void @@ -1005,10 +892,6 @@ gen_maddsur32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) tcg_gen_andi_tl(temp2, r1, 0xffff0000); tcg_gen_shli_tl(temp, r1, 16); gen_helper_addsur_h(ret, cpu_env, temp64, temp, temp2); - - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free_i64(temp64); } @@ -1033,9 +916,6 @@ gen_maddr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, break; } gen_helper_addr_h_ssov(ret, cpu_env, temp64, r1_low, r1_high); - - tcg_temp_free(temp); - tcg_temp_free_i64(temp64); } static inline void @@ -1047,9 +927,6 @@ gen_maddr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) tcg_gen_andi_tl(temp2, r1, 0xffff0000); tcg_gen_shli_tl(temp, r1, 16); gen_maddr64s_h(ret, temp, temp2, r2, r3, n, mode); - - tcg_temp_free(temp); - tcg_temp_free(temp2); } static inline void @@ -1075,10 +952,6 @@ gen_maddsur32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) tcg_gen_andi_tl(temp2, r1, 0xffff0000); tcg_gen_shli_tl(temp, r1, 16); gen_helper_addsur_h_ssov(ret, cpu_env, temp64, temp, temp2); - - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free_i64(temp64); } static inline void @@ -1086,7 +959,6 @@ gen_maddr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n) { TCGv temp = tcg_const_i32(n); gen_helper_maddr_q(ret, cpu_env, r1, r2, r3, temp); - tcg_temp_free(temp); } static inline void @@ -1094,7 +966,6 @@ gen_maddrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, 
uint32_t n) { TCGv temp = tcg_const_i32(n); gen_helper_maddr_q_ssov(ret, cpu_env, r1, r2, r3, temp); - tcg_temp_free(temp); } static inline void @@ -1145,13 +1016,6 @@ gen_madd32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n, tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* write back result */ tcg_gen_mov_tl(ret, temp3); - - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free(temp3); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); - tcg_temp_free_i64(t3); } static inline void @@ -1169,9 +1033,6 @@ gen_m16add32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n) tcg_gen_sub_tl(temp, temp, temp2); } gen_add_d(ret, arg1, temp); - - tcg_temp_free(temp); - tcg_temp_free(temp2); } static inline void @@ -1189,9 +1050,6 @@ gen_m16adds32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n) tcg_gen_sub_tl(temp, temp, temp2); } gen_adds(ret, arg1, temp); - - tcg_temp_free(temp); - tcg_temp_free(temp2); } static inline void @@ -1219,12 +1077,6 @@ gen_m16add64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, gen_add64_d(t3, t1, t2); /* write back result */ tcg_gen_extr_i64_i32(rl, rh, t3); - - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); - tcg_temp_free_i64(t3); - tcg_temp_free(temp); - tcg_temp_free(temp2); } static inline void @@ -1251,11 +1103,6 @@ gen_m16adds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, gen_helper_add64_ssov(t1, cpu_env, t1, t2); tcg_gen_extr_i64_i32(rl, rh, t1); - - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); } static inline void @@ -1294,9 +1141,6 @@ gen_madd64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, tcg_gen_shli_tl(temp, temp, 31); /* negate v bit, if special condition */ tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp); - - tcg_temp_free(temp); - tcg_temp_free(temp2); } /* write back result */ tcg_gen_extr_i64_i32(rl, rh, t4); @@ -1307,11 +1151,6 @@ gen_madd64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV); /* calc SAV */ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); - - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); - tcg_temp_free_i64(t3); - tcg_temp_free_i64(t4); } static inline void @@ -1330,10 +1169,6 @@ gen_madds32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n, tcg_gen_sari_i64(t2, t2, up_shift - n); gen_helper_madd32_q_add_ssov(ret, cpu_env, t1, t2); - - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); - tcg_temp_free_i64(t3); } static inline void @@ -1346,10 +1181,8 @@ gen_madds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, tcg_gen_concat_i32_i64(r1, arg1_low, arg1_high); gen_helper_madd64_q_ssov(r1, cpu_env, r1, arg2, arg3, temp); tcg_gen_extr_i64_i32(rl, rh, r1); - - tcg_temp_free_i64(r1); - tcg_temp_free(temp); } + /* ret = r2 - (r1 * r3); */ static inline void gen_msub32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3) { @@ -1381,17 +1214,12 @@ static inline void gen_msub32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3) tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV); /* calc SAV */ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); - - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); - tcg_temp_free_i64(t3); } static inline void gen_msubi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con) { TCGv temp = tcg_const_i32(con); gen_msub32_d(ret, r1, r2, temp); - tcg_temp_free(temp); } static inline void @@ -1420,11 +1248,6 @@ gen_msub64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, /* write back the result */ tcg_gen_mov_tl(ret_low, t3); 
tcg_gen_mov_tl(ret_high, t4); - - tcg_temp_free(t1); - tcg_temp_free(t2); - tcg_temp_free(t3); - tcg_temp_free(t4); } static inline void @@ -1433,7 +1256,6 @@ gen_msubi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, { TCGv temp = tcg_const_i32(con); gen_msub64_d(ret_low, ret_high, r1, r2_low, r2_high, temp); - tcg_temp_free(temp); } static inline void @@ -1462,10 +1284,6 @@ gen_msubu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV); /* calc SAV */ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); - - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); - tcg_temp_free_i64(t3); } static inline void @@ -1474,15 +1292,14 @@ gen_msubui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, { TCGv temp = tcg_const_i32(con); gen_msubu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp); - tcg_temp_free(temp); } static inline void gen_addi_d(TCGv ret, TCGv r1, target_ulong r2) { TCGv temp = tcg_const_i32(r2); gen_add_d(ret, r1, temp); - tcg_temp_free(temp); } + /* calculate the carry bit too */ static inline void gen_add_CC(TCGv ret, TCGv r1, TCGv r2) { @@ -1505,16 +1322,12 @@ static inline void gen_add_CC(TCGv ret, TCGv r1, TCGv r2) tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* write back result */ tcg_gen_mov_tl(ret, result); - - tcg_temp_free(result); - tcg_temp_free(t0); } static inline void gen_addi_CC(TCGv ret, TCGv r1, int32_t con) { TCGv temp = tcg_const_i32(con); gen_add_CC(ret, r1, temp); - tcg_temp_free(temp); } static inline void gen_addc_CC(TCGv ret, TCGv r1, TCGv r2) @@ -1541,17 +1354,12 @@ static inline void gen_addc_CC(TCGv ret, TCGv r1, TCGv r2) tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* write back result */ tcg_gen_mov_tl(ret, result); - - tcg_temp_free(result); - tcg_temp_free(t0); - tcg_temp_free(carry); } static inline void gen_addci_CC(TCGv ret, TCGv r1, int32_t con) { TCGv temp = tcg_const_i32(con); gen_addc_CC(ret, r1, temp); - tcg_temp_free(temp); } static inline void gen_cond_add(TCGCond cond, TCGv r1, TCGv r2, TCGv r3, @@ -1585,12 +1393,6 @@ static inline void gen_cond_add(TCGCond cond, TCGv r1, TCGv r2, TCGv r3, tcg_gen_or_tl(cpu_PSW_SAV, temp, cpu_PSW_SAV); /* write back result */ tcg_gen_movcond_tl(cond, r3, r4, t0, result, r1); - - tcg_temp_free(t0); - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free(result); - tcg_temp_free(mask); } static inline void gen_condi_add(TCGCond cond, TCGv r1, int32_t r2, @@ -1598,7 +1400,6 @@ static inline void gen_condi_add(TCGCond cond, TCGv r1, int32_t r2, { TCGv temp = tcg_const_i32(r2); gen_cond_add(cond, r1, temp, r3, r4); - tcg_temp_free(temp); } static inline void gen_sub_d(TCGv ret, TCGv r1, TCGv r2) @@ -1620,9 +1421,6 @@ static inline void gen_sub_d(TCGv ret, TCGv r1, TCGv r2) tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* write back result */ tcg_gen_mov_tl(ret, result); - - tcg_temp_free(temp); - tcg_temp_free(result); } static inline void @@ -1649,11 +1447,6 @@ gen_sub64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2) tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* write back result */ tcg_gen_mov_i64(ret, result); - - tcg_temp_free(temp); - tcg_temp_free_i64(result); - tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } static inline void gen_sub_CC(TCGv ret, TCGv r1, TCGv r2) @@ -1677,9 +1470,6 @@ static inline void gen_sub_CC(TCGv ret, TCGv r1, TCGv r2) tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* write back result */ tcg_gen_mov_tl(ret, result); - - 
tcg_temp_free(result); - tcg_temp_free(temp); } static inline void gen_subc_CC(TCGv ret, TCGv r1, TCGv r2) @@ -1687,7 +1477,6 @@ static inline void gen_subc_CC(TCGv ret, TCGv r1, TCGv r2) TCGv temp = tcg_temp_new(); tcg_gen_not_tl(temp, r2); gen_addc_CC(ret, r1, temp); - tcg_temp_free(temp); } static inline void gen_cond_sub(TCGCond cond, TCGv r1, TCGv r2, TCGv r3, @@ -1721,12 +1510,6 @@ static inline void gen_cond_sub(TCGCond cond, TCGv r1, TCGv r2, TCGv r3, tcg_gen_or_tl(cpu_PSW_SAV, temp, cpu_PSW_SAV); /* write back result */ tcg_gen_movcond_tl(cond, r3, r4, t0, result, r1); - - tcg_temp_free(t0); - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free(result); - tcg_temp_free(mask); } static inline void @@ -1753,9 +1536,6 @@ gen_msub_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, tcg_gen_extr_i64_i32(temp, temp2, temp64); gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2, tcg_gen_sub_tl, tcg_gen_sub_tl); - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free_i64(temp64); } static inline void @@ -1790,11 +1570,6 @@ gen_msubs_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp); /* combine av bits */ tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3); - - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free(temp3); - tcg_temp_free_i64(temp64); } static inline void @@ -1823,11 +1598,6 @@ gen_msubm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, gen_sub64_d(temp64_3, temp64_2, temp64); /* write back result */ tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_3); - - tcg_temp_free(temp); - tcg_temp_free_i64(temp64); - tcg_temp_free_i64(temp64_2); - tcg_temp_free_i64(temp64_3); } static inline void @@ -1854,10 +1624,6 @@ gen_msubms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high); gen_helper_sub64_ssov(temp64, cpu_env, temp64_2, temp64); tcg_gen_extr_i64_i32(ret_low, ret_high, temp64); - - tcg_temp_free(temp); - tcg_temp_free_i64(temp64); - tcg_temp_free_i64(temp64_2); } static inline void @@ -1881,9 +1647,6 @@ gen_msubr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, break; } gen_helper_subr_h(ret, cpu_env, temp64, r1_low, r1_high); - - tcg_temp_free(temp); - tcg_temp_free_i64(temp64); } static inline void @@ -1895,9 +1658,6 @@ gen_msubr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) tcg_gen_andi_tl(temp2, r1, 0xffff0000); tcg_gen_shli_tl(temp, r1, 16); gen_msubr64_h(ret, temp, temp2, r2, r3, n, mode); - - tcg_temp_free(temp); - tcg_temp_free(temp2); } static inline void @@ -1921,9 +1681,6 @@ gen_msubr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, break; } gen_helper_subr_h_ssov(ret, cpu_env, temp64, r1_low, r1_high); - - tcg_temp_free(temp); - tcg_temp_free_i64(temp64); } static inline void @@ -1935,9 +1692,6 @@ gen_msubr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) tcg_gen_andi_tl(temp2, r1, 0xffff0000); tcg_gen_shli_tl(temp, r1, 16); gen_msubr64s_h(ret, temp, temp2, r2, r3, n, mode); - - tcg_temp_free(temp); - tcg_temp_free(temp2); } static inline void @@ -1945,7 +1699,6 @@ gen_msubr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n) { TCGv temp = tcg_const_i32(n); gen_helper_msubr_q(ret, cpu_env, r1, r2, r3, temp); - tcg_temp_free(temp); } static inline void @@ -1953,15 +1706,12 @@ gen_msubrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n) { TCGv temp = tcg_const_i32(n); gen_helper_msubr_q_ssov(ret, cpu_env, r1, r2, 
r3, temp); - tcg_temp_free(temp); } static inline void gen_msub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n, uint32_t up_shift) { - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); TCGv temp3 = tcg_temp_new(); TCGv_i64 t1 = tcg_temp_new_i64(); TCGv_i64 t2 = tcg_temp_new_i64(); @@ -1997,14 +1747,6 @@ gen_msub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n, tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* write back result */ tcg_gen_mov_tl(ret, temp3); - - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free(temp3); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); - tcg_temp_free_i64(t3); - tcg_temp_free_i64(t4); } static inline void @@ -2022,9 +1764,6 @@ gen_m16sub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n) tcg_gen_sub_tl(temp, temp, temp2); } gen_sub_d(ret, arg1, temp); - - tcg_temp_free(temp); - tcg_temp_free(temp2); } static inline void @@ -2042,9 +1781,6 @@ gen_m16subs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n) tcg_gen_sub_tl(temp, temp, temp2); } gen_subs(ret, arg1, temp); - - tcg_temp_free(temp); - tcg_temp_free(temp2); } static inline void @@ -2072,12 +1808,6 @@ gen_m16sub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, gen_sub64_d(t3, t1, t2); /* write back result */ tcg_gen_extr_i64_i32(rl, rh, t3); - - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); - tcg_temp_free_i64(t3); - tcg_temp_free(temp); - tcg_temp_free(temp2); } static inline void @@ -2104,11 +1834,6 @@ gen_m16subs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, gen_helper_sub64_ssov(t1, cpu_env, t1, t2); tcg_gen_extr_i64_i32(rl, rh, t1); - - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); } static inline void @@ -2147,9 +1872,6 @@ gen_msub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, tcg_gen_shli_tl(temp, temp, 31); /* negate v bit, if special condition */ tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp); - - tcg_temp_free(temp); - tcg_temp_free(temp2); } /* write back result */ tcg_gen_extr_i64_i32(rl, rh, t4); @@ -2160,11 +1882,6 @@ gen_msub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV); /* calc SAV */ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); - - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); - tcg_temp_free_i64(t3); - tcg_temp_free_i64(t4); } static inline void @@ -2188,11 +1905,6 @@ gen_msubs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n, tcg_gen_add_i64(t3, t3, t4); gen_helper_msub32_q_sub_ssov(ret, cpu_env, t1, t3); - - tcg_temp_free_i64(t1); - tcg_temp_free_i64(t2); - tcg_temp_free_i64(t3); - tcg_temp_free_i64(t4); } static inline void @@ -2205,9 +1917,6 @@ gen_msubs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, tcg_gen_concat_i32_i64(r1, arg1_low, arg1_high); gen_helper_msub64_q_ssov(r1, cpu_env, r1, arg2, arg3, temp); tcg_gen_extr_i64_i32(rl, rh, r1); - - tcg_temp_free_i64(r1); - tcg_temp_free(temp); } static inline void @@ -2234,9 +1943,6 @@ gen_msubad_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, tcg_gen_extr_i64_i32(temp, temp2, temp64); gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2, tcg_gen_add_tl, tcg_gen_sub_tl); - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free_i64(temp64); } static inline void @@ -2270,11 +1976,6 @@ gen_msubadm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, gen_sub64_d(temp64_2, temp64_3, temp64); /* write back result */ 
tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_2); - - tcg_temp_free(temp); - tcg_temp_free_i64(temp64); - tcg_temp_free_i64(temp64_2); - tcg_temp_free_i64(temp64_3); } static inline void @@ -2300,10 +2001,6 @@ gen_msubadr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) tcg_gen_andi_tl(temp2, r1, 0xffff0000); tcg_gen_shli_tl(temp, r1, 16); gen_helper_subadr_h(ret, cpu_env, temp64, temp, temp2); - - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free_i64(temp64); } static inline void @@ -2338,11 +2035,6 @@ gen_msubads_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp); /* combine av bits */ tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3); - - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free(temp3); - tcg_temp_free_i64(temp64); } static inline void @@ -2375,10 +2067,6 @@ gen_msubadms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, gen_helper_sub64_ssov(temp64, cpu_env, temp64_2, temp64); tcg_gen_extr_i64_i32(ret_low, ret_high, temp64); - - tcg_temp_free(temp); - tcg_temp_free_i64(temp64); - tcg_temp_free_i64(temp64_2); } static inline void @@ -2404,10 +2092,6 @@ gen_msubadr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) tcg_gen_andi_tl(temp2, r1, 0xffff0000); tcg_gen_shli_tl(temp, r1, 16); gen_helper_subadr_h_ssov(ret, cpu_env, temp64, temp, temp2); - - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free_i64(temp64); } static inline void gen_abs(TCGv ret, TCGv r1) @@ -2449,23 +2133,18 @@ static inline void gen_absdif(TCGv ret, TCGv r1, TCGv r2) tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* write back result */ tcg_gen_mov_tl(ret, result); - - tcg_temp_free(temp); - tcg_temp_free(result); } static inline void gen_absdifi(TCGv ret, TCGv r1, int32_t con) { TCGv temp = tcg_const_i32(con); gen_absdif(ret, r1, temp); - tcg_temp_free(temp); } static inline void gen_absdifsi(TCGv ret, TCGv r1, int32_t con) { TCGv temp = tcg_const_i32(con); gen_helper_absdif_ssov(ret, cpu_env, r1, temp); - tcg_temp_free(temp); } static inline void gen_mul_i32s(TCGv ret, TCGv r1, TCGv r2) @@ -2486,16 +2165,12 @@ static inline void gen_mul_i32s(TCGv ret, TCGv r1, TCGv r2) tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV); /* calc SAV bit */ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); - - tcg_temp_free(high); - tcg_temp_free(low); } static inline void gen_muli_i32s(TCGv ret, TCGv r1, int32_t con) { TCGv temp = tcg_const_i32(con); gen_mul_i32s(ret, r1, temp); - tcg_temp_free(temp); } static inline void gen_mul_i64s(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2) @@ -2517,7 +2192,6 @@ static inline void gen_muli_i64s(TCGv ret_low, TCGv ret_high, TCGv r1, { TCGv temp = tcg_const_i32(con); gen_mul_i64s(ret_low, ret_high, r1, temp); - tcg_temp_free(temp); } static inline void gen_mul_i64u(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2) @@ -2539,41 +2213,35 @@ static inline void gen_muli_i64u(TCGv ret_low, TCGv ret_high, TCGv r1, { TCGv temp = tcg_const_i32(con); gen_mul_i64u(ret_low, ret_high, r1, temp); - tcg_temp_free(temp); } static inline void gen_mulsi_i32(TCGv ret, TCGv r1, int32_t con) { TCGv temp = tcg_const_i32(con); gen_helper_mul_ssov(ret, cpu_env, r1, temp); - tcg_temp_free(temp); } static inline void gen_mulsui_i32(TCGv ret, TCGv r1, int32_t con) { TCGv temp = tcg_const_i32(con); gen_helper_mul_suov(ret, cpu_env, r1, temp); - tcg_temp_free(temp); } /* gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); */ static inline void gen_maddsi_32(TCGv 
ret, TCGv r1, TCGv r2, int32_t con) { TCGv temp = tcg_const_i32(con); gen_helper_madd32_ssov(ret, cpu_env, r1, r2, temp); - tcg_temp_free(temp); } static inline void gen_maddsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con) { TCGv temp = tcg_const_i32(con); gen_helper_madd32_suov(ret, cpu_env, r1, r2, temp); - tcg_temp_free(temp); } static void gen_mul_q(TCGv rl, TCGv rh, TCGv arg1, TCGv arg2, uint32_t n, uint32_t up_shift) { - TCGv temp = tcg_temp_new(); TCGv_i64 temp_64 = tcg_temp_new_i64(); TCGv_i64 temp2_64 = tcg_temp_new_i64(); @@ -2626,9 +2294,6 @@ gen_mul_q(TCGv rl, TCGv rh, TCGv arg1, TCGv arg2, uint32_t n, uint32_t up_shift) } /* calc sav overflow bit */ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); - tcg_temp_free(temp); - tcg_temp_free_i64(temp_64); - tcg_temp_free_i64(temp2_64); } static void @@ -2651,8 +2316,6 @@ gen_mul_q_16(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n) tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV); /* calc sav overflow bit */ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); - - tcg_temp_free(temp); } static void gen_mulr_q(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n) @@ -2679,8 +2342,6 @@ static void gen_mulr_q(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n) tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* cut halfword off */ tcg_gen_andi_tl(ret, ret, 0xffff0000); - - tcg_temp_free(temp); } static inline void @@ -2691,7 +2352,6 @@ gen_madds_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, tcg_gen_concat_i32_i64(temp64, r2_low, r2_high); gen_helper_madd64_ssov(temp64, cpu_env, r1, temp64, r3); tcg_gen_extr_i64_i32(ret_low, ret_high, temp64); - tcg_temp_free_i64(temp64); } static inline void @@ -2700,7 +2360,6 @@ gen_maddsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, { TCGv temp = tcg_const_i32(con); gen_madds_64(ret_low, ret_high, r1, r2_low, r2_high, temp); - tcg_temp_free(temp); } static inline void @@ -2711,7 +2370,6 @@ gen_maddsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, tcg_gen_concat_i32_i64(temp64, r2_low, r2_high); gen_helper_madd64_suov(temp64, cpu_env, r1, temp64, r3); tcg_gen_extr_i64_i32(ret_low, ret_high, temp64); - tcg_temp_free_i64(temp64); } static inline void @@ -2720,21 +2378,18 @@ gen_maddsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, { TCGv temp = tcg_const_i32(con); gen_maddsu_64(ret_low, ret_high, r1, r2_low, r2_high, temp); - tcg_temp_free(temp); } static inline void gen_msubsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con) { TCGv temp = tcg_const_i32(con); gen_helper_msub32_ssov(ret, cpu_env, r1, r2, temp); - tcg_temp_free(temp); } static inline void gen_msubsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con) { TCGv temp = tcg_const_i32(con); gen_helper_msub32_suov(ret, cpu_env, r1, r2, temp); - tcg_temp_free(temp); } static inline void @@ -2745,7 +2400,6 @@ gen_msubs_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, tcg_gen_concat_i32_i64(temp64, r2_low, r2_high); gen_helper_msub64_ssov(temp64, cpu_env, r1, temp64, r3); tcg_gen_extr_i64_i32(ret_low, ret_high, temp64); - tcg_temp_free_i64(temp64); } static inline void @@ -2754,7 +2408,6 @@ gen_msubsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, { TCGv temp = tcg_const_i32(con); gen_msubs_64(ret_low, ret_high, r1, r2_low, r2_high, temp); - tcg_temp_free(temp); } static inline void @@ -2765,7 +2418,6 @@ gen_msubsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, tcg_gen_concat_i32_i64(temp64, r2_low, r2_high); 
gen_helper_msub64_suov(temp64, cpu_env, r1, temp64, r3); tcg_gen_extr_i64_i32(ret_low, ret_high, temp64); - tcg_temp_free_i64(temp64); } static inline void @@ -2774,7 +2426,6 @@ gen_msubsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, { TCGv temp = tcg_const_i32(con); gen_msubsu_64(ret_low, ret_high, r1, r2_low, r2_high, temp); - tcg_temp_free(temp); } static void gen_saturate(TCGv ret, TCGv arg, int32_t up, int32_t low) @@ -2787,9 +2438,6 @@ static void gen_saturate(TCGv ret, TCGv arg, int32_t up, int32_t low) /* ret = (sat_neg > up ) ? up : sat_neg; */ tcg_gen_movcond_tl(TCG_COND_GT, ret, sat_neg, temp, temp, sat_neg); - - tcg_temp_free(sat_neg); - tcg_temp_free(temp); } static void gen_saturate_u(TCGv ret, TCGv arg, int32_t up) @@ -2797,7 +2445,6 @@ static void gen_saturate_u(TCGv ret, TCGv arg, int32_t up) TCGv temp = tcg_const_i32(up); /* sat_neg = (arg > up ) ? up : arg; */ tcg_gen_movcond_tl(TCG_COND_GTU, ret, arg, temp, temp, arg); - tcg_temp_free(temp); } static void gen_shi(TCGv ret, TCGv r1, int32_t shift_count) @@ -2826,9 +2473,6 @@ static void gen_sh_hi(TCGv ret, TCGv r1, int32_t shiftcount) gen_shi(temp_low, temp_low, shiftcount); gen_shi(ret, temp_high, shiftcount); tcg_gen_deposit_tl(ret, ret, temp_low, 0, 16); - - tcg_temp_free(temp_low); - tcg_temp_free(temp_high); } } @@ -2837,7 +2481,6 @@ static void gen_shaci(TCGv ret, TCGv r1, int32_t shift_count) uint32_t msk, msk_start; TCGv temp = tcg_temp_new(); TCGv temp2 = tcg_temp_new(); - TCGv t_0 = tcg_const_i32(0); if (shift_count == 0) { /* Clear PSW.C and PSW.V */ @@ -2868,9 +2511,6 @@ static void gen_shaci(TCGv ret, TCGv r1, int32_t shift_count) tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_V, cpu_PSW_SV); /* do shift */ tcg_gen_shli_tl(ret, r1, shift_count); - - tcg_temp_free(t_max); - tcg_temp_free(t_min); } else { /* clear PSW.V */ tcg_gen_movi_tl(cpu_PSW_V, 0); @@ -2885,10 +2525,6 @@ static void gen_shaci(TCGv ret, TCGv r1, int32_t shift_count) tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV); /* calc sav overflow bit */ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); - - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free(t_0); } static void gen_shas(TCGv ret, TCGv r1, TCGv r2) @@ -2900,7 +2536,6 @@ static void gen_shasi(TCGv ret, TCGv r1, int32_t con) { TCGv temp = tcg_const_i32(con); gen_shas(ret, r1, temp); - tcg_temp_free(temp); } static void gen_sha_hi(TCGv ret, TCGv r1, int32_t shift_count) @@ -2917,9 +2552,6 @@ static void gen_sha_hi(TCGv ret, TCGv r1, int32_t shift_count) tcg_gen_shli_tl(low, r1, shift_count); tcg_gen_shli_tl(ret, high, shift_count); tcg_gen_deposit_tl(ret, ret, low, 0, 16); - - tcg_temp_free(low); - tcg_temp_free(high); } else { low = tcg_temp_new(); high = tcg_temp_new(); @@ -2928,11 +2560,7 @@ static void gen_sha_hi(TCGv ret, TCGv r1, int32_t shift_count) tcg_gen_sari_tl(low, low, -shift_count); tcg_gen_sari_tl(ret, r1, -shift_count); tcg_gen_deposit_tl(ret, ret, low, 0, 16); - - tcg_temp_free(low); - tcg_temp_free(high); } - } /* ret = {ret[30:0], (r1 cond r2)}; */ @@ -2944,16 +2572,12 @@ static void gen_sh_cond(int cond, TCGv ret, TCGv r1, TCGv r2) tcg_gen_shli_tl(temp, ret, 1); tcg_gen_setcond_tl(cond, temp2, r1, r2); tcg_gen_or_tl(ret, temp, temp2); - - tcg_temp_free(temp); - tcg_temp_free(temp2); } static void gen_sh_condi(int cond, TCGv ret, TCGv r1, int32_t con) { TCGv temp = tcg_const_i32(con); gen_sh_cond(cond, ret, r1, temp); - tcg_temp_free(temp); } static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2) @@ -2965,14 +2589,12 @@ static inline void 
gen_addsi(TCGv ret, TCGv r1, int32_t con) { TCGv temp = tcg_const_i32(con); gen_helper_add_ssov(ret, cpu_env, r1, temp); - tcg_temp_free(temp); } static inline void gen_addsui(TCGv ret, TCGv r1, int32_t con) { TCGv temp = tcg_const_i32(con); gen_helper_add_suov(ret, cpu_env, r1, temp); - tcg_temp_free(temp); } static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2) @@ -3002,9 +2624,6 @@ static inline void gen_bit_2op(TCGv ret, TCGv r1, TCGv r2, (*op2)(temp1 , ret, temp1); tcg_gen_deposit_tl(ret, ret, temp1, 0, 1); - - tcg_temp_free(temp1); - tcg_temp_free(temp2); } /* ret = r1[pos1] op1 r2[pos2]; */ @@ -3023,9 +2642,6 @@ static inline void gen_bit_1op(TCGv ret, TCGv r1, TCGv r2, (*op1)(ret, temp1, temp2); tcg_gen_andi_tl(ret, ret, 0x1); - - tcg_temp_free(temp1); - tcg_temp_free(temp2); } static inline void gen_accumulating_cond(int cond, TCGv ret, TCGv r1, TCGv r2, @@ -3041,9 +2657,6 @@ static inline void gen_accumulating_cond(int cond, TCGv ret, TCGv r1, TCGv r2, (*op)(temp, temp, temp2); /* ret = {ret[31:1], temp} */ tcg_gen_deposit_tl(ret, ret, temp, 0, 1); - - tcg_temp_free(temp); - tcg_temp_free(temp2); } static inline void @@ -3052,7 +2665,6 @@ gen_accumulating_condi(int cond, TCGv ret, TCGv r1, int32_t con, { TCGv temp = tcg_const_i32(con); gen_accumulating_cond(cond, ret, r1, temp, op); - tcg_temp_free(temp); } /* ret = (r1 cond r2) ? 0xFFFFFFFF ? 0x00000000;*/ @@ -3089,11 +2701,6 @@ static inline void gen_eqany_bi(TCGv ret, TCGv r1, int32_t con) tcg_gen_or_tl(ret, b0, b1); tcg_gen_or_tl(ret, ret, b2); tcg_gen_or_tl(ret, ret, b3); - - tcg_temp_free(b0); - tcg_temp_free(b1); - tcg_temp_free(b2); - tcg_temp_free(b3); } static inline void gen_eqany_hi(TCGv ret, TCGv r1, int32_t con) @@ -3111,10 +2718,8 @@ static inline void gen_eqany_hi(TCGv ret, TCGv r1, int32_t con) /* combine them */ tcg_gen_or_tl(ret, h0, h1); - - tcg_temp_free(h0); - tcg_temp_free(h1); } + /* mask = ((1 << width) -1) << pos; ret = (r1 & ~mask) | (r2 << pos) & mask); */ static inline void gen_insert(TCGv ret, TCGv r1, TCGv r2, TCGv width, TCGv pos) @@ -3132,10 +2737,6 @@ static inline void gen_insert(TCGv ret, TCGv r1, TCGv r2, TCGv width, TCGv pos) tcg_gen_and_tl(temp, temp, mask); tcg_gen_andc_tl(temp2, r1, mask); tcg_gen_or_tl(ret, temp, temp2); - - tcg_temp_free(mask); - tcg_temp_free(temp); - tcg_temp_free(temp2); } static inline void gen_bsplit(TCGv rl, TCGv rh, TCGv r1) @@ -3144,8 +2745,6 @@ static inline void gen_bsplit(TCGv rl, TCGv rh, TCGv r1) gen_helper_bsplit(temp, r1); tcg_gen_extr_i64_i32(rl, rh, temp); - - tcg_temp_free_i64(temp); } static inline void gen_unpack(TCGv rl, TCGv rh, TCGv r1) @@ -3154,8 +2753,6 @@ static inline void gen_unpack(TCGv rl, TCGv rh, TCGv r1) gen_helper_unpack(temp, r1); tcg_gen_extr_i64_i32(rl, rh, temp); - - tcg_temp_free_i64(temp); } static inline void @@ -3169,8 +2766,6 @@ gen_dvinit_b(DisasContext *ctx, TCGv rl, TCGv rh, TCGv r1, TCGv r2) gen_helper_dvinit_b_131(ret, cpu_env, r1, r2); } tcg_gen_extr_i64_i32(rl, rh, ret); - - tcg_temp_free_i64(ret); } static inline void @@ -3184,8 +2779,6 @@ gen_dvinit_h(DisasContext *ctx, TCGv rl, TCGv rh, TCGv r1, TCGv r2) gen_helper_dvinit_h_131(ret, cpu_env, r1, r2); } tcg_gen_extr_i64_i32(rl, rh, ret); - - tcg_temp_free_i64(ret); } static void gen_calc_usb_mul_h(TCGv arg_low, TCGv arg_high) @@ -3200,7 +2793,6 @@ static void gen_calc_usb_mul_h(TCGv arg_low, TCGv arg_high) /* calc SAV bit */ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); tcg_gen_movi_tl(cpu_PSW_V, 0); - tcg_temp_free(temp); } static void gen_calc_usb_mulr_h(TCGv 
arg) @@ -3215,7 +2807,6 @@ static void gen_calc_usb_mulr_h(TCGv arg) tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* clear V bit */ tcg_gen_movi_tl(cpu_PSW_V, 0); - tcg_temp_free(temp); } /* helpers for generating program flow micro-ops */ @@ -3245,9 +2836,6 @@ static void generate_trap(DisasContext *ctx, int class, int tin) gen_save_pc(ctx->base.pc_next); gen_helper_raise_exception_sync(cpu_env, classtemp, tintemp); ctx->base.is_jmp = DISAS_NORETURN; - - tcg_temp_free(classtemp); - tcg_temp_free(tintemp); } static inline void gen_branch_cond(DisasContext *ctx, TCGCond cond, TCGv r1, @@ -3267,7 +2855,6 @@ static inline void gen_branch_condi(DisasContext *ctx, TCGCond cond, TCGv r1, { TCGv temp = tcg_const_i32(r2); gen_branch_cond(ctx, cond, r1, temp, address); - tcg_temp_free(temp); } static void gen_loop(DisasContext *ctx, int r1, int32_t offset) @@ -3289,8 +2876,6 @@ static void gen_fcall_save_ctx(DisasContext *ctx) tcg_gen_qemu_st_tl(cpu_gpr_a[11], temp, ctx->mem_idx, MO_LESL); tcg_gen_movi_tl(cpu_gpr_a[11], ctx->pc_succ_insn); tcg_gen_mov_tl(cpu_gpr_a[10], temp); - - tcg_temp_free(temp); } static void gen_fret(DisasContext *ctx) @@ -3303,8 +2888,6 @@ static void gen_fret(DisasContext *ctx) tcg_gen_mov_tl(cpu_PC, temp); tcg_gen_exit_tb(NULL, 0); ctx->base.is_jmp = DISAS_NORETURN; - - tcg_temp_free(temp); } static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1, @@ -3350,13 +2933,11 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1, temp = tcg_temp_new(); tcg_gen_andi_tl(temp, cpu_gpr_d[15], 0x1u << constant); gen_branch_condi(ctx, TCG_COND_EQ, temp, 0, offset); - tcg_temp_free(temp); break; case OPC1_16_SBRN_JNZ_T: temp = tcg_temp_new(); tcg_gen_andi_tl(temp, cpu_gpr_d[15], 0x1u << constant); gen_branch_condi(ctx, TCG_COND_NE, temp, 0, offset); - tcg_temp_free(temp); break; /* SBR-format jumps */ case OPC1_16_SBR_JEQ: @@ -3474,7 +3055,6 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1, tcg_gen_addi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1); gen_branch_condi(ctx, TCG_COND_NE, temp, constant, offset); } - tcg_temp_free(temp); break; /* BRN format */ case OPCM_32_BRN_JTT: @@ -3488,7 +3068,6 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1, } else { gen_branch_condi(ctx, TCG_COND_EQ, temp, 0, offset); } - tcg_temp_free(temp); break; /* BRR Format */ case OPCM_32_BRR_EQ_NEQ: @@ -3553,8 +3132,6 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1, tcg_gen_addi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1); gen_branch_cond(ctx, TCG_COND_NE, temp, temp2, offset); } - tcg_temp_free(temp); - tcg_temp_free(temp2); break; case OPCM_32_BRR_JNZ: if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JNZ_A) { @@ -3609,16 +3186,12 @@ static void decode_src_opc(DisasContext *ctx, int op1) temp2 = tcg_const_tl(const4); tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp, temp2, cpu_gpr_d[r1]); - tcg_temp_free(temp); - tcg_temp_free(temp2); break; case OPC1_16_SRC_CMOVN: temp = tcg_const_tl(0); temp2 = tcg_const_tl(const4); tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp, temp2, cpu_gpr_d[r1]); - tcg_temp_free(temp); - tcg_temp_free(temp2); break; case OPC1_16_SRC_EQ: tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr_d[15], cpu_gpr_d[r1], @@ -3685,13 +3258,11 @@ static void decode_srr_opc(DisasContext *ctx, int op1) temp = tcg_const_tl(0); tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp, cpu_gpr_d[r2], cpu_gpr_d[r1]); - tcg_temp_free(temp); break; case 
OPC1_16_SRR_CMOVN: temp = tcg_const_tl(0); tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp, cpu_gpr_d[r2], cpu_gpr_d[r1]); - tcg_temp_free(temp); break; case OPC1_16_SRR_EQ: tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr_d[15], cpu_gpr_d[r1], @@ -3952,7 +3523,6 @@ static void decode_sr_accu(DisasContext *ctx) tcg_gen_xor_tl(cpu_PSW_AV, cpu_gpr_d[r1], cpu_PSW_AV); /* calc sav */ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); - tcg_temp_free(temp); break; case OPC2_16_SR_SAT_B: gen_saturate(cpu_gpr_d[r1], cpu_gpr_d[r1], 0x7f, -0x80); @@ -4047,7 +3617,6 @@ static void decode_16Bit_opc(DisasContext *ctx) temp = tcg_temp_new(); tcg_gen_shli_tl(temp, cpu_gpr_d[15], const16); tcg_gen_add_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], temp); - tcg_temp_free(temp); break; /* SLRO-format */ case OPC1_16_SLRO_LD_A: @@ -4239,8 +3808,6 @@ static void decode_abs_ldw(DisasContext *ctx) default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - - tcg_temp_free(temp); } static void decode_abs_ldb(DisasContext *ctx) @@ -4272,8 +3839,6 @@ static void decode_abs_ldb(DisasContext *ctx) default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - - tcg_temp_free(temp); } static void decode_abs_ldst_swap(DisasContext *ctx) @@ -4299,8 +3864,6 @@ static void decode_abs_ldst_swap(DisasContext *ctx) default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - - tcg_temp_free(temp); } static void decode_abs_ldst_context(DisasContext *ctx) @@ -4360,7 +3923,6 @@ static void decode_abs_store(DisasContext *ctx) default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - tcg_temp_free(temp); } static void decode_abs_storeb_h(DisasContext *ctx) @@ -4386,7 +3948,6 @@ static void decode_abs_storeb_h(DisasContext *ctx) default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - tcg_temp_free(temp); } /* Bit-format */ @@ -4486,7 +4047,6 @@ static void decode_bit_insert(DisasContext *ctx) tcg_gen_not_tl(temp, temp); } tcg_gen_deposit_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], temp, pos1, 1); - tcg_temp_free(temp); } static void decode_bit_logical_t2(DisasContext *ctx) @@ -4604,7 +4164,6 @@ static void decode_bit_sh_logic1(DisasContext *ctx) } tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 1); tcg_gen_add_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp); - tcg_temp_free(temp); } static void decode_bit_sh_logic2(DisasContext *ctx) @@ -4645,7 +4204,6 @@ static void decode_bit_sh_logic2(DisasContext *ctx) } tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 1); tcg_gen_add_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp); - tcg_temp_free(temp); } /* BO-format */ @@ -4743,7 +4301,6 @@ static void decode_bo_addrmode_post_pre_base(DisasContext *ctx) tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10); gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx); tcg_gen_mov_tl(cpu_gpr_a[r2], temp); - tcg_temp_free(temp); break; case OPC2_32_BO_ST_DA_SHORTOFF: CHECK_REG_PAIR(r1); @@ -4761,7 +4318,6 @@ static void decode_bo_addrmode_post_pre_base(DisasContext *ctx) tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10); gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx); tcg_gen_mov_tl(cpu_gpr_a[r2], temp); - tcg_temp_free(temp); break; case OPC2_32_BO_ST_H_SHORTOFF: gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW); @@ -4778,7 +4334,6 @@ static void decode_bo_addrmode_post_pre_base(DisasContext *ctx) temp = tcg_temp_new(); tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16); gen_offset_st(ctx, temp, cpu_gpr_a[r2], off10, MO_LEUW); - tcg_temp_free(temp); break; case OPC2_32_BO_ST_Q_POSTINC: temp = tcg_temp_new(); @@ -4786,13 +4341,11 @@ static void 
decode_bo_addrmode_post_pre_base(DisasContext *ctx) tcg_gen_qemu_st_tl(temp, cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW); tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); - tcg_temp_free(temp); break; case OPC2_32_BO_ST_Q_PREINC: temp = tcg_temp_new(); tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16); gen_st_preincr(ctx, temp, cpu_gpr_a[r2], off10, MO_LEUW); - tcg_temp_free(temp); break; case OPC2_32_BO_ST_W_SHORTOFF: gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL); @@ -4915,9 +4468,6 @@ static void decode_bo_addrmode_bitreverse_circular(DisasContext *ctx) default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free(temp3); } static void decode_bo_addrmode_ld_post_pre_base(DisasContext *ctx) @@ -4982,7 +4532,6 @@ static void decode_bo_addrmode_ld_post_pre_base(DisasContext *ctx) tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10); gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx); tcg_gen_mov_tl(cpu_gpr_a[r2], temp); - tcg_temp_free(temp); break; case OPC2_32_BO_LD_DA_SHORTOFF: CHECK_REG_PAIR(r1); @@ -5000,7 +4549,6 @@ static void decode_bo_addrmode_ld_post_pre_base(DisasContext *ctx) tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10); gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx); tcg_gen_mov_tl(cpu_gpr_a[r2], temp); - tcg_temp_free(temp); break; case OPC2_32_BO_LD_H_SHORTOFF: gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LESW); @@ -5167,9 +4715,6 @@ static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext *ctx) default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free(temp3); } static void decode_bo_addrmode_stctx_post_pre_base(DisasContext *ctx) @@ -5178,7 +4723,7 @@ static void decode_bo_addrmode_stctx_post_pre_base(DisasContext *ctx) uint32_t off10; int r1, r2; - TCGv temp, temp2; + TCGv temp; r1 = MASK_OP_BO_S1D(ctx->opcode); r2 = MASK_OP_BO_S2(ctx->opcode); @@ -5187,7 +4732,6 @@ static void decode_bo_addrmode_stctx_post_pre_base(DisasContext *ctx) temp = tcg_temp_new(); - temp2 = tcg_temp_new(); switch (op2) { case OPC2_32_BO_LDLCX_SHORTOFF: @@ -5260,8 +4804,6 @@ static void decode_bo_addrmode_stctx_post_pre_base(DisasContext *ctx) default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - tcg_temp_free(temp); - tcg_temp_free(temp2); } static void decode_bo_addrmode_ldmst_bitreverse_circular(DisasContext *ctx) @@ -5320,10 +4862,6 @@ static void decode_bo_addrmode_ldmst_bitreverse_circular(DisasContext *ctx) default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free(temp3); } static void decode_bol_opc(DisasContext *ctx, int32_t op1) @@ -5341,13 +4879,11 @@ static void decode_bol_opc(DisasContext *ctx, int32_t op1) temp = tcg_temp_new(); tcg_gen_addi_tl(temp, cpu_gpr_a[r2], address); tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LEUL); - tcg_temp_free(temp); break; case OPC1_32_BOL_LD_W_LONGOFF: temp = tcg_temp_new(); tcg_gen_addi_tl(temp, cpu_gpr_a[r2], address); tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUL); - tcg_temp_free(temp); break; case OPC1_32_BOL_LEA_LONGOFF: tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], address); @@ -5474,7 +5010,6 @@ static void decode_rc_logical_shift(DisasContext *ctx) default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - tcg_temp_free(temp); } static void decode_rc_accumulator(DisasContext *ctx) @@ -5674,7 +5209,6 @@ static void decode_rc_accumulator(DisasContext *ctx) default: generate_trap(ctx, 
TRAPC_INSN_ERR, TIN2_IOPC); } - tcg_temp_free(temp); } static void decode_rc_serviceroutine(DisasContext *ctx) @@ -5764,7 +5298,6 @@ static void decode_rcpw_insert(DisasContext *ctx) if (pos + width <= 32) { temp = tcg_const_i32(const4); tcg_gen_deposit_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, pos, width); - tcg_temp_free(temp); } break; default: @@ -5807,14 +5340,10 @@ static void decode_rcrw_insert(DisasContext *ctx) tcg_gen_movi_tl(temp2, const4); tcg_gen_andi_tl(temp3, cpu_gpr_d[r3], 0x1f); gen_insert(cpu_gpr_d[r4], cpu_gpr_d[r1], temp2, temp, temp3); - - tcg_temp_free(temp3); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - tcg_temp_free(temp); - tcg_temp_free(temp2); } /* RCR format */ @@ -5847,16 +5376,12 @@ static void decode_rcr_cond_select(DisasContext *ctx) temp2 = tcg_const_i32(const9); tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp, cpu_gpr_d[r1], temp2); - tcg_temp_free(temp); - tcg_temp_free(temp2); break; case OPC2_32_RCR_SELN: temp = tcg_const_i32(0); temp2 = tcg_const_i32(const9); tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp, cpu_gpr_d[r1], temp2); - tcg_temp_free(temp); - tcg_temp_free(temp2); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); @@ -6236,8 +5761,6 @@ static void decode_rr_accumulator(DisasContext *ctx) tcg_gen_mov_tl(temp, cpu_gpr_d[r1]); tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r2]); tcg_gen_mov_tl(cpu_gpr_d[r3 + 1], temp); - - tcg_temp_free(temp); } else { generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } @@ -6377,13 +5900,10 @@ static void decode_rr_logical_shift(DisasContext *ctx) { uint32_t op2; int r3, r2, r1; - TCGv temp; r3 = MASK_OP_RR_D(ctx->opcode); r2 = MASK_OP_RR_S2(ctx->opcode); r1 = MASK_OP_RR_S1(ctx->opcode); - - temp = tcg_temp_new(); op2 = MASK_OP_RR_OP2(ctx->opcode); switch (op2) { @@ -6448,7 +5968,6 @@ static void decode_rr_logical_shift(DisasContext *ctx) default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - tcg_temp_free(temp); } static void decode_rr_address(DisasContext *ctx) @@ -6471,14 +5990,12 @@ static void decode_rr_address(DisasContext *ctx) temp = tcg_temp_new(); tcg_gen_shli_tl(temp, cpu_gpr_d[r1], n); tcg_gen_add_tl(cpu_gpr_a[r3], cpu_gpr_a[r2], temp); - tcg_temp_free(temp); break; case OPC2_32_RR_ADDSC_AT: temp = tcg_temp_new(); tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 3); tcg_gen_add_tl(temp, cpu_gpr_a[r2], temp); tcg_gen_andi_tl(cpu_gpr_a[r3], temp, 0xFFFFFFFC); - tcg_temp_free(temp); break; case OPC2_32_RR_EQ_A: tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_a[r1], @@ -6598,10 +6115,6 @@ static void decode_rr_divide(DisasContext *ctx) /* write result */ tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], 24); tcg_gen_mov_tl(cpu_gpr_d[r3+1], temp3); - - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free(temp3); break; case OPC2_32_RR_DVINIT_H: CHECK_REG_PAIR(r3); @@ -6631,9 +6144,6 @@ static void decode_rr_divide(DisasContext *ctx) /* write result */ tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], 16); tcg_gen_mov_tl(cpu_gpr_d[r3+1], temp3); - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free(temp3); break; case OPC2_32_RR_DVINIT: temp = tcg_temp_new(); @@ -6655,8 +6165,6 @@ static void decode_rr_divide(DisasContext *ctx) tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]); /* sign extend to high reg */ tcg_gen_sari_tl(cpu_gpr_d[r3+1], cpu_gpr_d[r1], 31); - tcg_temp_free(temp); - tcg_temp_free(temp2); break; case OPC2_32_RR_DVINIT_U: /* overflow = (D[b] == 0) */ @@ -6758,7 +6266,6 @@ static void decode_rr1_mul(DisasContext *ctx) 
GEN_HELPER_LL(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n); tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64); gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]); - tcg_temp_free_i64(temp64); break; case OPC2_32_RR1_MUL_H_32_LU: temp64 = tcg_temp_new_i64(); @@ -6766,7 +6273,6 @@ static void decode_rr1_mul(DisasContext *ctx) GEN_HELPER_LU(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n); tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64); gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]); - tcg_temp_free_i64(temp64); break; case OPC2_32_RR1_MUL_H_32_UL: temp64 = tcg_temp_new_i64(); @@ -6774,7 +6280,6 @@ static void decode_rr1_mul(DisasContext *ctx) GEN_HELPER_UL(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n); tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64); gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]); - tcg_temp_free_i64(temp64); break; case OPC2_32_RR1_MUL_H_32_UU: temp64 = tcg_temp_new_i64(); @@ -6782,7 +6287,6 @@ static void decode_rr1_mul(DisasContext *ctx) GEN_HELPER_UU(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n); tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64); gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]); - tcg_temp_free_i64(temp64); break; case OPC2_32_RR1_MULM_H_64_LL: temp64 = tcg_temp_new_i64(); @@ -6793,7 +6297,6 @@ static void decode_rr1_mul(DisasContext *ctx) tcg_gen_movi_tl(cpu_PSW_V, 0); /* reset AV bit */ tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V); - tcg_temp_free_i64(temp64); break; case OPC2_32_RR1_MULM_H_64_LU: temp64 = tcg_temp_new_i64(); @@ -6804,7 +6307,6 @@ static void decode_rr1_mul(DisasContext *ctx) tcg_gen_movi_tl(cpu_PSW_V, 0); /* reset AV bit */ tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V); - tcg_temp_free_i64(temp64); break; case OPC2_32_RR1_MULM_H_64_UL: temp64 = tcg_temp_new_i64(); @@ -6815,7 +6317,6 @@ static void decode_rr1_mul(DisasContext *ctx) tcg_gen_movi_tl(cpu_PSW_V, 0); /* reset AV bit */ tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V); - tcg_temp_free_i64(temp64); break; case OPC2_32_RR1_MULM_H_64_UU: temp64 = tcg_temp_new_i64(); @@ -6826,8 +6327,6 @@ static void decode_rr1_mul(DisasContext *ctx) tcg_gen_movi_tl(cpu_PSW_V, 0); /* reset AV bit */ tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V); - tcg_temp_free_i64(temp64); - break; case OPC2_32_RR1_MULR_H_16_LL: GEN_HELPER_LL(mulr_h, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], n); @@ -6848,7 +6347,6 @@ static void decode_rr1_mul(DisasContext *ctx) default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - tcg_temp_free(n); } static void decode_rr1_mulq(DisasContext *ctx) @@ -6918,8 +6416,6 @@ static void decode_rr1_mulq(DisasContext *ctx) default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - tcg_temp_free(temp); - tcg_temp_free(temp2); } /* RR2 format */ @@ -7009,7 +6505,6 @@ static void decode_rrpw_extract_insert(DisasContext *ctx) tcg_gen_movi_tl(temp, ((1u << width) - 1) << pos); tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r2], pos); tcg_gen_mov_tl(cpu_gpr_d[r3 + 1], temp); - tcg_temp_free(temp); } break; @@ -7058,13 +6553,11 @@ static void decode_rrr_cond_select(DisasContext *ctx) temp = tcg_const_i32(0); tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp, cpu_gpr_d[r1], cpu_gpr_d[r2]); - tcg_temp_free(temp); break; case OPC2_32_RRR_SELN: temp = tcg_const_i32(0); tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp, cpu_gpr_d[r1], cpu_gpr_d[r2]); - tcg_temp_free(temp); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); @@ -7577,8 +7070,6 @@ static void decode_rrr1_maddq_h(DisasContext *ctx) default: 
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - tcg_temp_free(temp); - tcg_temp_free(temp2); } static void decode_rrr1_maddsu_h(DisasContext *ctx) @@ -8061,8 +7552,6 @@ static void decode_rrr1_msubq_h(DisasContext *ctx) default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - tcg_temp_free(temp); - tcg_temp_free(temp2); } static void decode_rrr1_msubad_h(DisasContext *ctx) @@ -8257,7 +7746,6 @@ static void decode_rrrr_extract_insert(DisasContext *ctx) */ tcg_gen_movcond_tl(TCG_COND_EQ, msw, tmp_pos, zero, zero, msw); tcg_gen_or_tl(cpu_gpr_d[r4], tmp_width, msw); - tcg_temp_free(msw); } break; case OPC2_32_RRRR_EXTR: @@ -8285,8 +7773,6 @@ static void decode_rrrr_extract_insert(DisasContext *ctx) default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - tcg_temp_free(tmp_pos); - tcg_temp_free(tmp_width); } /* RRRW format */ @@ -8332,8 +7818,6 @@ static void decode_rrrw_extract_insert(DisasContext *ctx) tcg_gen_shl_tl(temp2, temp2, temp); tcg_gen_shl_tl(cpu_gpr_d[r4], cpu_gpr_d[r2], temp); tcg_gen_mov_tl(cpu_gpr_d[r4+1], temp2); - - tcg_temp_free(temp2); break; case OPC2_32_RRRW_INSERT: temp2 = tcg_temp_new(); @@ -8341,13 +7825,10 @@ static void decode_rrrw_extract_insert(DisasContext *ctx) tcg_gen_movi_tl(temp, width); tcg_gen_andi_tl(temp2, cpu_gpr_d[r3], 0x1f); gen_insert(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r2], temp, temp2); - - tcg_temp_free(temp2); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - tcg_temp_free(temp); } /* SYS Format*/ @@ -8400,7 +7881,6 @@ static void decode_sys_interrupts(DisasContext *ctx) gen_set_label(l1); tcg_gen_exit_tb(NULL, 0); ctx->base.is_jmp = DISAS_NORETURN; - tcg_temp_free(tmp); } else { /* generate privilege trap */ } @@ -8482,9 +7962,6 @@ static void decode_32Bit_opc(DisasContext *ctx) tcg_gen_shri_tl(temp2, cpu_gpr_d[r1], 16); tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_LEUW); - - tcg_temp_free(temp2); - tcg_temp_free(temp); break; case OPC1_32_ABS_LD_Q: address = MASK_OP_ABS_OFF18(ctx->opcode); @@ -8493,8 +7970,6 @@ static void decode_32Bit_opc(DisasContext *ctx) tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW); tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16); - - tcg_temp_free(temp); break; case OPC1_32_ABS_LEA: address = MASK_OP_ABS_OFF18(ctx->opcode); @@ -8514,9 +7989,6 @@ static void decode_32Bit_opc(DisasContext *ctx) tcg_gen_andi_tl(temp2, temp2, ~(0x1u << bpos)); tcg_gen_ori_tl(temp2, temp2, (b << bpos)); tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_UB); - - tcg_temp_free(temp); - tcg_temp_free(temp2); break; /* B-format */ case OPC1_32_B_CALL: @@ -8647,10 +8119,6 @@ static void decode_32Bit_opc(DisasContext *ctx) tcg_gen_andi_tl(temp3, cpu_gpr_d[r3], 0x1f); gen_insert(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, temp2, temp3); - - tcg_temp_free(temp); - tcg_temp_free(temp2); - tcg_temp_free(temp3); break; /* RCRW Format */ case OPCM_32_RCRW_MASK_INSERT: diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c index 4af0650deb..0cf3075649 100644 --- a/target/xtensa/translate.c +++ b/target/xtensa/translate.c @@ -57,7 +57,6 @@ struct DisasContext { bool sar_5bit; bool sar_m32_5bit; - bool sar_m32_allocated; TCGv_i32 sar_m32; unsigned window; @@ -284,14 +283,7 @@ static void init_sar_tracker(DisasContext *dc) { dc->sar_5bit = false; dc->sar_m32_5bit = false; - dc->sar_m32_allocated = false; -} - -static void reset_sar_tracker(DisasContext *dc) -{ - if (dc->sar_m32_allocated) { - tcg_temp_free(dc->sar_m32); - } + dc->sar_m32 = NULL; } static void gen_right_shift_sar(DisasContext *dc, TCGv_i32 
sa) @@ -306,9 +298,8 @@ static void gen_right_shift_sar(DisasContext *dc, TCGv_i32 sa) static void gen_left_shift_sar(DisasContext *dc, TCGv_i32 sa) { - if (!dc->sar_m32_allocated) { + if (!dc->sar_m32) { dc->sar_m32 = tcg_temp_new_i32(); - dc->sar_m32_allocated = true; } tcg_gen_andi_i32(dc->sar_m32, sa, 0x1f); tcg_gen_sub_i32(cpu_SR[SAR], tcg_constant_i32(32), dc->sar_m32); @@ -1111,16 +1102,6 @@ static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc) ops->translate(dc, pslot->arg, ops->par); } - for (i = 0; i < n_arg_copy; ++i) { - if (arg_copy[i].arg->num_bits <= 32) { - tcg_temp_free_i32(arg_copy[i].temp); - } else if (arg_copy[i].arg->num_bits <= 64) { - tcg_temp_free_i64(arg_copy[i].temp); - } else { - g_assert_not_reached(); - } - } - if (dc->base.is_jmp == DISAS_NEXT) { gen_postprocess(dc, 0); dc->op_flags = 0; @@ -1247,11 +1228,6 @@ static void xtensa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *dc = container_of(dcbase, DisasContext, base); - reset_sar_tracker(dc); - if (dc->icount) { - tcg_temp_free(dc->next_icount); - } - switch (dc->base.is_jmp) { case DISAS_NORETURN: break; @@ -1379,14 +1355,13 @@ static void translate_addx(DisasContext *dc, const OpcodeArg arg[], TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_shli_i32(tmp, arg[1].in, par[0]); tcg_gen_add_i32(arg[0].out, tmp, arg[2].in); - tcg_temp_free(tmp); } static void translate_all(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { uint32_t shift = par[1]; - TCGv_i32 mask = tcg_const_i32(((1 << shift) - 1) << arg[1].imm); + TCGv_i32 mask = tcg_constant_i32(((1 << shift) - 1) << arg[1].imm); TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_and_i32(tmp, arg[1].in, mask); @@ -1398,8 +1373,6 @@ static void translate_all(DisasContext *dc, const OpcodeArg arg[], tcg_gen_shri_i32(tmp, tmp, arg[1].imm + shift); tcg_gen_deposit_i32(arg[0].out, arg[0].out, tmp, arg[0].imm, 1); - tcg_temp_free(mask); - tcg_temp_free(tmp); } static void translate_and(DisasContext *dc, const OpcodeArg arg[], @@ -1414,7 +1387,6 @@ static void translate_ball(DisasContext *dc, const OpcodeArg arg[], TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_and_i32(tmp, arg[0].in, arg[1].in); gen_brcond(dc, par[0], tmp, arg[1].in, arg[2].imm); - tcg_temp_free(tmp); } static void translate_bany(DisasContext *dc, const OpcodeArg arg[], @@ -1423,7 +1395,6 @@ static void translate_bany(DisasContext *dc, const OpcodeArg arg[], TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_and_i32(tmp, arg[0].in, arg[1].in); gen_brcondi(dc, par[0], tmp, 0, arg[2].imm); - tcg_temp_free(tmp); } static void translate_b(DisasContext *dc, const OpcodeArg arg[], @@ -1435,22 +1406,16 @@ static void translate_b(DisasContext *dc, const OpcodeArg arg[], static void translate_bb(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { -#if TARGET_BIG_ENDIAN - TCGv_i32 bit = tcg_const_i32(0x80000000u); -#else - TCGv_i32 bit = tcg_const_i32(0x00000001u); -#endif TCGv_i32 tmp = tcg_temp_new_i32(); + tcg_gen_andi_i32(tmp, arg[1].in, 0x1f); -#if TARGET_BIG_ENDIAN - tcg_gen_shr_i32(bit, bit, tmp); -#else - tcg_gen_shl_i32(bit, bit, tmp); -#endif - tcg_gen_and_i32(tmp, arg[0].in, bit); + if (TARGET_BIG_ENDIAN) { + tcg_gen_shr_i32(tmp, tcg_constant_i32(0x80000000u), tmp); + } else { + tcg_gen_shl_i32(tmp, tcg_constant_i32(0x00000001u), tmp); + } + tcg_gen_and_i32(tmp, arg[0].in, tmp); gen_brcondi(dc, par[0], tmp, 0, arg[2].imm); - tcg_temp_free(tmp); - tcg_temp_free(bit); } static void translate_bbi(DisasContext *dc, const OpcodeArg arg[], @@ -1463,7 +1428,6 @@ 
static void translate_bbi(DisasContext *dc, const OpcodeArg arg[], tcg_gen_andi_i32(tmp, arg[0].in, 0x00000001u << arg[1].imm); #endif gen_brcondi(dc, par[0], tmp, 0, arg[2].imm); - tcg_temp_free(tmp); } static void translate_bi(DisasContext *dc, const OpcodeArg arg[], @@ -1504,8 +1468,6 @@ static void translate_boolean(DisasContext *dc, const OpcodeArg arg[], tcg_gen_shri_i32(tmp2, arg[2].in, arg[2].imm); op[par[0]](tmp1, tmp1, tmp2); tcg_gen_deposit_i32(arg[0].out, arg[0].out, tmp1, arg[0].imm, 1); - tcg_temp_free(tmp1); - tcg_temp_free(tmp2); } static void translate_bp(DisasContext *dc, const OpcodeArg arg[], @@ -1515,7 +1477,6 @@ static void translate_bp(DisasContext *dc, const OpcodeArg arg[], tcg_gen_andi_i32(tmp, arg[0].in, 1 << arg[0].imm); gen_brcondi(dc, par[0], tmp, 0, arg[1].imm); - tcg_temp_free(tmp); } static void translate_call0(DisasContext *dc, const OpcodeArg arg[], @@ -1528,9 +1489,8 @@ static void translate_call0(DisasContext *dc, const OpcodeArg arg[], static void translate_callw(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { - TCGv_i32 tmp = tcg_const_i32(arg[0].imm); + TCGv_i32 tmp = tcg_constant_i32(arg[0].imm); gen_callw_slot(dc, par[0], tmp, adjust_jump_slot(dc, arg[0].imm, 0)); - tcg_temp_free(tmp); } static void translate_callx0(DisasContext *dc, const OpcodeArg arg[], @@ -1540,7 +1500,6 @@ static void translate_callx0(DisasContext *dc, const OpcodeArg arg[], tcg_gen_mov_i32(tmp, arg[0].in); tcg_gen_movi_i32(cpu_R[0], dc->base.pc_next); gen_jump(dc, tmp); - tcg_temp_free(tmp); } static void translate_callxw(DisasContext *dc, const OpcodeArg arg[], @@ -1550,19 +1509,16 @@ static void translate_callxw(DisasContext *dc, const OpcodeArg arg[], tcg_gen_mov_i32(tmp, arg[0].in); gen_callw_slot(dc, par[0], tmp, -1); - tcg_temp_free(tmp); } static void translate_clamps(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { - TCGv_i32 tmp1 = tcg_const_i32(-1u << arg[2].imm); - TCGv_i32 tmp2 = tcg_const_i32((1 << arg[2].imm) - 1); + TCGv_i32 tmp1 = tcg_constant_i32(-1u << arg[2].imm); + TCGv_i32 tmp2 = tcg_constant_i32((1 << arg[2].imm) - 1); - tcg_gen_smax_i32(tmp1, tmp1, arg[1].in); - tcg_gen_smin_i32(arg[0].out, tmp1, tmp2); - tcg_temp_free(tmp1); - tcg_temp_free(tmp2); + tcg_gen_smax_i32(arg[0].out, tmp1, arg[1].in); + tcg_gen_smin_i32(arg[0].out, arg[0].out, tmp2); } static void translate_clrb_expstate(DisasContext *dc, const OpcodeArg arg[], @@ -1581,10 +1537,9 @@ static void translate_clrex(DisasContext *dc, const OpcodeArg arg[], static void translate_const16(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { - TCGv_i32 c = tcg_const_i32(arg[1].imm); + TCGv_i32 c = tcg_constant_i32(arg[1].imm); tcg_gen_deposit_i32(arg[0].out, c, arg[0].in, 16, 16); - tcg_temp_free(c); } static void translate_dcache(DisasContext *dc, const OpcodeArg arg[], @@ -1595,8 +1550,6 @@ static void translate_dcache(DisasContext *dc, const OpcodeArg arg[], tcg_gen_addi_i32(addr, arg[0].in, arg[1].imm); tcg_gen_qemu_ld8u(res, addr, dc->cring); - tcg_temp_free(addr); - tcg_temp_free(res); } static void translate_depbits(DisasContext *dc, const OpcodeArg arg[], @@ -1647,7 +1600,6 @@ static void translate_extui(DisasContext *dc, const OpcodeArg arg[], TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_shri_i32(tmp, arg[1].in, arg[2].imm); tcg_gen_andi_i32(arg[0].out, tmp, maskimm); - tcg_temp_free(tmp); } static void translate_getex(DisasContext *dc, const OpcodeArg arg[], @@ -1658,7 +1610,6 @@ static void translate_getex(DisasContext *dc, const OpcodeArg arg[], 
tcg_gen_extract_i32(tmp, cpu_SR[ATOMCTL], 8, 1); tcg_gen_deposit_i32(cpu_SR[ATOMCTL], cpu_SR[ATOMCTL], arg[0].in, 8, 1); tcg_gen_mov_i32(arg[0].out, tmp); - tcg_temp_free(tmp); } static void translate_icache(DisasContext *dc, const OpcodeArg arg[], @@ -1670,7 +1621,6 @@ static void translate_icache(DisasContext *dc, const OpcodeArg arg[], tcg_gen_movi_i32(cpu_pc, dc->pc); tcg_gen_addi_i32(addr, arg[0].in, arg[1].imm); gen_helper_itlb_hit_test(cpu_env, addr); - tcg_temp_free(addr); #endif } @@ -1705,7 +1655,6 @@ static void translate_l32e(DisasContext *dc, const OpcodeArg arg[], tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm); mop = gen_load_store_alignment(dc, MO_TEUL, addr); tcg_gen_qemu_ld_tl(arg[0].out, addr, dc->ring, mop); - tcg_temp_free(addr); } #ifdef CONFIG_USER_ONLY @@ -1736,7 +1685,6 @@ static void translate_l32ex(DisasContext *dc, const OpcodeArg arg[], tcg_gen_qemu_ld_i32(arg[0].out, addr, dc->cring, mop); tcg_gen_mov_i32(cpu_exclusive_addr, addr); tcg_gen_mov_i32(cpu_exclusive_val, arg[0].out); - tcg_temp_free(addr); } static void translate_ldst(DisasContext *dc, const OpcodeArg arg[], @@ -1759,7 +1707,6 @@ static void translate_ldst(DisasContext *dc, const OpcodeArg arg[], tcg_gen_mb(TCG_BAR_LDAQ | TCG_MO_ALL); } } - tcg_temp_free(addr); } static void translate_lct(DisasContext *dc, const OpcodeArg arg[], @@ -1774,13 +1721,12 @@ static void translate_l32r(DisasContext *dc, const OpcodeArg arg[], TCGv_i32 tmp; if (dc->base.tb->flags & XTENSA_TBFLAG_LITBASE) { - tmp = tcg_const_i32(arg[1].raw_imm - 1); - tcg_gen_add_i32(tmp, cpu_SR[LITBASE], tmp); + tmp = tcg_temp_new(); + tcg_gen_addi_i32(tmp, cpu_SR[LITBASE], arg[1].raw_imm - 1); } else { - tmp = tcg_const_i32(arg[1].imm); + tmp = tcg_constant_i32(arg[1].imm); } tcg_gen_qemu_ld32u(arg[0].out, tmp, dc->cring); - tcg_temp_free(tmp); } static void translate_loop(DisasContext *dc, const OpcodeArg arg[], @@ -1866,19 +1812,12 @@ static void translate_mac16(DisasContext *dc, const OpcodeArg arg[], lo, hi); } tcg_gen_ext8s_i32(cpu_SR[ACCHI], cpu_SR[ACCHI]); - - tcg_temp_free_i32(lo); - tcg_temp_free_i32(hi); } - tcg_temp_free(m1); - tcg_temp_free(m2); } if (ld_offset) { tcg_gen_mov_i32(arg[1].out, vaddr); tcg_gen_mov_i32(cpu_SR[MR + arg[0].imm], mem32); } - tcg_temp_free(vaddr); - tcg_temp_free(mem32); } static void translate_memw(DisasContext *dc, const OpcodeArg arg[], @@ -1942,7 +1881,6 @@ static void translate_movp(DisasContext *dc, const OpcodeArg arg[], tcg_gen_movcond_i32(par[0], arg[0].out, tmp, zero, arg[1].in, arg[0].in); - tcg_temp_free(tmp); } static void translate_movsp(DisasContext *dc, const OpcodeArg arg[], @@ -1965,8 +1903,6 @@ static void translate_mul16(DisasContext *dc, const OpcodeArg arg[], tcg_gen_ext16u_i32(v2, arg[2].in); } tcg_gen_mul_i32(arg[0].out, v1, v2); - tcg_temp_free(v2); - tcg_temp_free(v1); } static void translate_mull(DisasContext *dc, const OpcodeArg arg[], @@ -1985,7 +1921,6 @@ static void translate_mulh(DisasContext *dc, const OpcodeArg arg[], } else { tcg_gen_mulu2_i32(lo, arg[0].out, arg[1].in, arg[2].in); } - tcg_temp_free(lo); } static void translate_neg(DisasContext *dc, const OpcodeArg arg[], @@ -2112,15 +2047,14 @@ static uint32_t test_exceptions_retw(DisasContext *dc, const OpcodeArg arg[], static void translate_retw(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { - TCGv_i32 tmp = tcg_const_i32(1); - tcg_gen_shl_i32(tmp, tmp, cpu_SR[WINDOW_BASE]); + TCGv_i32 tmp = tcg_temp_new(); + tcg_gen_shl_i32(tmp, tcg_constant_i32(1), cpu_SR[WINDOW_BASE]); 
tcg_gen_andc_i32(cpu_SR[WINDOW_START], cpu_SR[WINDOW_START], tmp); tcg_gen_movi_i32(tmp, dc->pc); tcg_gen_deposit_i32(tmp, tmp, cpu_R[0], 0, 30); gen_helper_retw(cpu_env, cpu_R[0]); gen_jump(dc, tmp); - tcg_temp_free(tmp); } static void translate_rfde(DisasContext *dc, const OpcodeArg arg[], @@ -2146,10 +2080,10 @@ static void translate_rfi(DisasContext *dc, const OpcodeArg arg[], static void translate_rfw(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { - TCGv_i32 tmp = tcg_const_i32(1); + TCGv_i32 tmp = tcg_temp_new(); tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_EXCM); - tcg_gen_shl_i32(tmp, tmp, cpu_SR[WINDOW_BASE]); + tcg_gen_shl_i32(tmp, tcg_constant_i32(1), cpu_SR[WINDOW_BASE]); if (par[0]) { tcg_gen_andc_i32(cpu_SR[WINDOW_START], @@ -2159,7 +2093,6 @@ static void translate_rfw(DisasContext *dc, const OpcodeArg arg[], cpu_SR[WINDOW_START], tmp); } - tcg_temp_free(tmp); gen_helper_restore_owb(cpu_env); gen_jump(dc, cpu_SR[EPC1]); } @@ -2209,7 +2142,6 @@ static void translate_rsr_ptevaddr(DisasContext *dc, const OpcodeArg arg[], tcg_gen_shri_i32(tmp, cpu_SR[EXCVADDR], 10); tcg_gen_or_i32(tmp, tmp, cpu_SR[PTEVADDR]); tcg_gen_andi_i32(arg[0].out, tmp, 0xfffffffc); - tcg_temp_free(tmp); #endif } @@ -2283,8 +2215,6 @@ static void translate_s32c1i(DisasContext *dc, const OpcodeArg arg[], gen_check_atomctl(dc, addr); tcg_gen_atomic_cmpxchg_i32(arg[0].out, addr, cpu_SR[SCOMPARE1], tmp, dc->cring, mop); - tcg_temp_free(addr); - tcg_temp_free(tmp); } static void translate_s32e(DisasContext *dc, const OpcodeArg arg[], @@ -2296,7 +2226,6 @@ static void translate_s32e(DisasContext *dc, const OpcodeArg arg[], tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm); mop = gen_load_store_alignment(dc, MO_TEUL, addr); tcg_gen_qemu_st_tl(arg[0].in, addr, dc->ring, mop); - tcg_temp_free(addr); } static void translate_s32ex(DisasContext *dc, const OpcodeArg arg[], @@ -2322,9 +2251,6 @@ static void translate_s32ex(DisasContext *dc, const OpcodeArg arg[], gen_set_label(label); tcg_gen_extract_i32(arg[0].out, cpu_SR[ATOMCTL], 8, 1); tcg_gen_deposit_i32(cpu_SR[ATOMCTL], cpu_SR[ATOMCTL], res, 8, 1); - tcg_temp_free(prev); - tcg_temp_free(addr); - tcg_temp_free(res); } static void translate_salt(DisasContext *dc, const OpcodeArg arg[], @@ -2348,7 +2274,6 @@ static void translate_sext(DisasContext *dc, const OpcodeArg arg[], TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_shli_i32(tmp, arg[1].in, shift); tcg_gen_sari_i32(arg[0].out, tmp, shift); - tcg_temp_free(tmp); } } @@ -2388,8 +2313,6 @@ static void translate_simcall(DisasContext *dc, const OpcodeArg arg[], tcg_gen_extu_i32_i64(tmp, reg); \ tcg_gen_##cmd##_i64(v, v, tmp); \ tcg_gen_extrl_i64_i32(arg[0].out, v); \ - tcg_temp_free_i64(v); \ - tcg_temp_free_i64(tmp); \ } while (0) #define gen_shift(cmd) gen_shift_reg(cmd, cpu_SR[SAR]) @@ -2401,12 +2324,11 @@ static void translate_sll(DisasContext *dc, const OpcodeArg arg[], tcg_gen_shl_i32(arg[0].out, arg[1].in, dc->sar_m32); } else { TCGv_i64 v = tcg_temp_new_i64(); - TCGv_i32 s = tcg_const_i32(32); - tcg_gen_sub_i32(s, s, cpu_SR[SAR]); + TCGv_i32 s = tcg_temp_new(); + tcg_gen_subfi_i32(s, 32, cpu_SR[SAR]); tcg_gen_andi_i32(s, s, 0x3f); tcg_gen_extu_i32_i64(v, arg[1].in); gen_shift_reg(shl, s); - tcg_temp_free(s); } } @@ -2473,7 +2395,6 @@ static void translate_ssa8b(DisasContext *dc, const OpcodeArg arg[], TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_shli_i32(tmp, arg[0].in, 3); gen_left_shift_sar(dc, tmp); - tcg_temp_free(tmp); } static void translate_ssa8l(DisasContext *dc, const OpcodeArg arg[], @@ 
-2482,7 +2403,6 @@ static void translate_ssa8l(DisasContext *dc, const OpcodeArg arg[], TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_shli_i32(tmp, arg[0].in, 3); gen_right_shift_sar(dc, tmp); - tcg_temp_free(tmp); } static void translate_ssai(DisasContext *dc, const OpcodeArg arg[], @@ -2515,7 +2435,6 @@ static void translate_subx(DisasContext *dc, const OpcodeArg arg[], TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_shli_i32(tmp, arg[1].in, par[0]); tcg_gen_sub_i32(arg[0].out, tmp, arg[2].in); - tcg_temp_free(tmp); } static void translate_waiti(DisasContext *dc, const OpcodeArg arg[], @@ -2767,7 +2686,6 @@ static void translate_xsr(DisasContext *dc, const OpcodeArg arg[], tcg_gen_mov_i32(tmp, arg[0].in); tcg_gen_mov_i32(arg[0].out, cpu_SR[par[0]]); tcg_gen_mov_i32(cpu_SR[par[0]], tmp); - tcg_temp_free(tmp); } else { tcg_gen_movi_i32(arg[0].out, 0); } @@ -2782,7 +2700,6 @@ static void translate_xsr_mask(DisasContext *dc, const OpcodeArg arg[], tcg_gen_mov_i32(tmp, arg[0].in); tcg_gen_mov_i32(arg[0].out, cpu_SR[par[0]]); tcg_gen_andi_i32(cpu_SR[par[0]], tmp, par[2]); - tcg_temp_free(tmp); } else { tcg_gen_movi_i32(arg[0].out, 0); } @@ -2802,7 +2719,6 @@ static void translate_xsr_ccount(DisasContext *dc, const OpcodeArg arg[], tcg_gen_mov_i32(tmp, cpu_SR[par[0]]); gen_helper_wsr_ccount(cpu_env, arg[0].in); tcg_gen_mov_i32(arg[0].out, tmp); - tcg_temp_free(tmp); #endif } @@ -2820,7 +2736,6 @@ static void translate_xsr_ccount(DisasContext *dc, const OpcodeArg arg[], } \ translate_wsr_##name(dc, arg, par); \ tcg_gen_mov_i32(arg[0].out, tmp); \ - tcg_temp_free(tmp); \ } gen_translate_xsr(acchi) @@ -6307,16 +6222,6 @@ static inline void put_f32_o1_i3(const OpcodeArg *arg, const OpcodeArg *arg32, (o0 >= 0 && arg[o0].num_bits == 64)) { if (o0 >= 0) { tcg_gen_extu_i32_i64(arg[o0].out, arg32[o0].out); - tcg_temp_free_i32(arg32[o0].out); - } - if (i0 >= 0) { - tcg_temp_free_i32(arg32[i0].in); - } - if (i1 >= 0) { - tcg_temp_free_i32(arg32[i1].in); - } - if (i2 >= 0) { - tcg_temp_free_i32(arg32[i2].in); } } } @@ -6440,9 +6345,6 @@ static void translate_compare_d(DisasContext *dc, const OpcodeArg arg[], tcg_gen_movcond_i32(TCG_COND_NE, arg[0].out, res, zero, set_br, clr_br); - tcg_temp_free(res); - tcg_temp_free(set_br); - tcg_temp_free(clr_br); } static void translate_compare_s(DisasContext *dc, const OpcodeArg arg[], @@ -6473,9 +6375,6 @@ static void translate_compare_s(DisasContext *dc, const OpcodeArg arg[], arg[0].out, res, zero, set_br, clr_br); put_f32_i2(arg, arg32, 1, 2); - tcg_temp_free(res); - tcg_temp_free(set_br); - tcg_temp_free(clr_br); } static void translate_const_d(DisasContext *dc, const OpcodeArg arg[], @@ -6594,7 +6493,6 @@ static void translate_ldsti(DisasContext *dc, const OpcodeArg arg[], if (par[1]) { tcg_gen_mov_i32(arg[1].out, addr); } - tcg_temp_free(addr); } static void translate_ldstx(DisasContext *dc, const OpcodeArg arg[], @@ -6613,7 +6511,6 @@ static void translate_ldstx(DisasContext *dc, const OpcodeArg arg[], if (par[1]) { tcg_gen_mov_i32(arg[1].out, addr); } - tcg_temp_free(addr); } static void translate_fpu2k_madd_s(DisasContext *dc, const OpcodeArg arg[], @@ -6649,7 +6546,6 @@ static void translate_movcond_d(DisasContext *dc, const OpcodeArg arg[], tcg_gen_movcond_i64(par[0], arg[0].out, arg2, zero, arg[1].in, arg[0].in); - tcg_temp_free_i64(arg2); } static void translate_movcond_s(DisasContext *dc, const OpcodeArg arg[], @@ -6678,8 +6574,6 @@ static void translate_movp_d(DisasContext *dc, const OpcodeArg arg[], tcg_gen_movcond_i64(par[0], arg[0].out, tmp2, zero, 
arg[1].in, arg[0].in); - tcg_temp_free_i32(tmp1); - tcg_temp_free_i64(tmp2); } static void translate_movp_s(DisasContext *dc, const OpcodeArg arg[], @@ -6693,7 +6587,6 @@ static void translate_movp_s(DisasContext *dc, const OpcodeArg arg[], tcg_gen_movcond_i32(par[0], arg[0].out, tmp, zero, arg[1].in, arg[0].in); - tcg_temp_free(tmp); } else { translate_movp_d(dc, arg, par); } @@ -7025,7 +6918,6 @@ static void translate_cvtd_s(DisasContext *dc, const OpcodeArg arg[], tcg_gen_extrl_i64_i32(v, arg[1].in); gen_helper_cvtd_s(arg[0].out, cpu_env, v); - tcg_temp_free_i32(v); } static void translate_cvts_d(DisasContext *dc, const OpcodeArg arg[], @@ -7035,7 +6927,6 @@ static void translate_cvts_d(DisasContext *dc, const OpcodeArg arg[], gen_helper_cvts_d(v, cpu_env, arg[1].in); tcg_gen_extu_i32_i64(arg[0].out, v); - tcg_temp_free_i32(v); } static void translate_ldsti_d(DisasContext *dc, const OpcodeArg arg[], @@ -7063,9 +6954,6 @@ static void translate_ldsti_d(DisasContext *dc, const OpcodeArg arg[], tcg_gen_addi_i32(arg[1].out, arg[1].in, arg[2].imm); } } - if (par[1]) { - tcg_temp_free(addr); - } } static void translate_ldsti_s(DisasContext *dc, const OpcodeArg arg[], @@ -7098,9 +6986,6 @@ static void translate_ldsti_s(DisasContext *dc, const OpcodeArg arg[], tcg_gen_addi_i32(arg[1].out, arg[1].in, arg[2].imm); } } - if (par[1]) { - tcg_temp_free(addr); - } } static void translate_ldstx_d(DisasContext *dc, const OpcodeArg arg[], @@ -7128,9 +7013,6 @@ static void translate_ldstx_d(DisasContext *dc, const OpcodeArg arg[], tcg_gen_add_i32(arg[1].out, arg[1].in, arg[2].in); } } - if (par[1]) { - tcg_temp_free(addr); - } } static void translate_ldstx_s(DisasContext *dc, const OpcodeArg arg[], @@ -7163,9 +7045,6 @@ static void translate_ldstx_s(DisasContext *dc, const OpcodeArg arg[], tcg_gen_add_i32(arg[1].out, arg[1].in, arg[2].in); } } - if (par[1]) { - tcg_temp_free(addr); - } } static void translate_madd_d(DisasContext *dc, const OpcodeArg arg[], diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc index 883ced8168..4060a35cf6 100644 --- a/tcg/i386/tcg-target.c.inc +++ b/tcg/i386/tcg-target.c.inc @@ -4156,12 +4156,9 @@ static void tcg_target_init(TCGContext *s) /* There are a number of things we must check before we can be sure of not hitting invalid opcode. */ if (c & bit_OSXSAVE) { - unsigned xcrl, xcrh; - /* The xgetbv instruction is not available to older versions of - * the assembler, so we encode the instruction manually. - */ - asm(".byte 0x0f, 0x01, 0xd0" : "=a" (xcrl), "=d" (xcrh) : "c" (0)); - if ((xcrl & 6) == 6) { + unsigned bv = xgetbv_low(0); + + if ((bv & 6) == 6) { have_avx1 = (c & bit_AVX) != 0; have_avx2 = (b7 & bit_AVX2) != 0; @@ -4172,7 +4169,7 @@ static void tcg_target_init(TCGContext *s) * check that OPMASK and all extended ZMM state are enabled * even if we're not using them -- the insns will fault. */ - if ((xcrl & 0xe0) == 0xe0 + if ((bv & 0xe0) == 0xe0 && (b7 & bit_AVX512F) && (b7 & bit_AVX512VL)) { have_avx512vl = true; @@ -4221,6 +4218,19 @@ static void tcg_target_init(TCGContext *s) s->reserved_regs = 0; tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); +#ifdef _WIN64 + /* These are call saved, and we don't save them, so don't use them. 
*/ + tcg_regset_set_reg(s->reserved_regs, TCG_REG_XMM6); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_XMM7); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_XMM8); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_XMM9); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_XMM10); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_XMM11); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_XMM12); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_XMM13); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_XMM14); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_XMM15); +#endif } typedef struct { diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c index f2269a1b91..77658a88f0 100644 --- a/tcg/tcg-op.c +++ b/tcg/tcg-op.c @@ -84,6 +84,22 @@ void tcg_gen_op6(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3, op->args[5] = a6; } +/* Generic ops. */ + +static void add_last_as_label_use(TCGLabel *l) +{ + TCGLabelUse *u = tcg_malloc(sizeof(TCGLabelUse)); + + u->op = tcg_last_op(); + QSIMPLEQ_INSERT_TAIL(&l->branches, u, next); +} + +void tcg_gen_br(TCGLabel *l) +{ + tcg_gen_op1(INDEX_op_br, label_arg(l)); + add_last_as_label_use(l); +} + void tcg_gen_mb(TCGBar mb_type) { if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) { @@ -216,8 +232,8 @@ void tcg_gen_brcond_i32(TCGCond cond, TCGv_i32 arg1, TCGv_i32 arg2, TCGLabel *l) if (cond == TCG_COND_ALWAYS) { tcg_gen_br(l); } else if (cond != TCG_COND_NEVER) { - l->refs++; tcg_gen_op4ii_i32(INDEX_op_brcond_i32, arg1, arg2, cond, label_arg(l)); + add_last_as_label_use(l); } } @@ -1474,7 +1490,6 @@ void tcg_gen_brcond_i64(TCGCond cond, TCGv_i64 arg1, TCGv_i64 arg2, TCGLabel *l) if (cond == TCG_COND_ALWAYS) { tcg_gen_br(l); } else if (cond != TCG_COND_NEVER) { - l->refs++; if (TCG_TARGET_REG_BITS == 32) { tcg_gen_op6ii_i32(INDEX_op_brcond2_i32, TCGV_LOW(arg1), TCGV_HIGH(arg1), TCGV_LOW(arg2), @@ -1483,6 +1498,7 @@ void tcg_gen_brcond_i64(TCGCond cond, TCGv_i64 arg1, TCGv_i64 arg2, TCGLabel *l) tcg_gen_op4ii_i64(INDEX_op_brcond_i64, arg1, arg2, cond, label_arg(l)); } + add_last_as_label_use(l); } } @@ -1493,12 +1509,12 @@ void tcg_gen_brcondi_i64(TCGCond cond, TCGv_i64 arg1, int64_t arg2, TCGLabel *l) } else if (cond == TCG_COND_ALWAYS) { tcg_gen_br(l); } else if (cond != TCG_COND_NEVER) { - l->refs++; tcg_gen_op6ii_i32(INDEX_op_brcond2_i32, TCGV_LOW(arg1), TCGV_HIGH(arg1), tcg_constant_i32(arg2), tcg_constant_i32(arg2 >> 32), cond, label_arg(l)); + add_last_as_label_use(l); } } diff --git a/tcg/tcg.c b/tcg/tcg.c index 5cccc06ae3..e4fccbd0d8 100644 --- a/tcg/tcg.c +++ b/tcg/tcg.c @@ -36,6 +36,7 @@ #include "qemu/qemu-print.h" #include "qemu/cacheflush.h" #include "qemu/cacheinfo.h" +#include "qemu/timer.h" /* Note: the long term plan is to reduce the dependencies on the QEMU CPU definitions. 
Currently they are used for qemu_ld/st @@ -282,6 +283,7 @@ TCGLabel *gen_new_label(void) memset(l, 0, sizeof(TCGLabel)); l->id = s->nb_labels++; + QSIMPLEQ_INIT(&l->branches); QSIMPLEQ_INIT(&l->relocs); QSIMPLEQ_INSERT_TAIL(&s->labels, l, next); @@ -1271,7 +1273,7 @@ TCGTemp *tcg_temp_new_internal(TCGType type, TCGTempKind kind) ts->temp_allocated = 1; tcg_debug_assert(ts->base_type == type); tcg_debug_assert(ts->kind == kind); - goto done; + return ts; } } else { tcg_debug_assert(kind == TEMP_TB); @@ -1315,11 +1317,6 @@ TCGTemp *tcg_temp_new_internal(TCGType type, TCGTempKind kind) ts2->kind = kind; } } - - done: -#if defined(CONFIG_DEBUG_TCG) - s->temps_in_use++; -#endif return ts; } @@ -1364,30 +1361,18 @@ void tcg_temp_free_internal(TCGTemp *ts) switch (ts->kind) { case TEMP_CONST: - /* - * In order to simplify users of tcg_constant_*, - * silently ignore free. - */ - return; - case TEMP_EBB: case TEMP_TB: + /* Silently ignore free. */ + break; + case TEMP_EBB: + tcg_debug_assert(ts->temp_allocated != 0); + ts->temp_allocated = 0; + set_bit(temp_idx(ts), s->free_temps[ts->base_type].l); break; default: + /* It never made sense to free TEMP_FIXED or TEMP_GLOBAL. */ g_assert_not_reached(); } - - tcg_debug_assert(ts->temp_allocated != 0); - ts->temp_allocated = 0; - -#if defined(CONFIG_DEBUG_TCG) - assert(s->temps_in_use > 0); - s->temps_in_use--; -#endif - - if (ts->kind == TEMP_EBB) { - int idx = temp_idx(ts); - set_bit(idx, s->free_temps[ts->base_type].l); - } } TCGTemp *tcg_constant_internal(TCGType type, int64_t val) @@ -1475,27 +1460,6 @@ TCGv_i64 tcg_const_i64(int64_t val) return t0; } -#if defined(CONFIG_DEBUG_TCG) -void tcg_clear_temp_count(void) -{ - TCGContext *s = tcg_ctx; - s->temps_in_use = 0; -} - -int tcg_check_temp_count(void) -{ - TCGContext *s = tcg_ctx; - if (s->temps_in_use) { - /* Clear the count so that we don't give another - * warning immediately next time around. - */ - s->temps_in_use = 0; - return 1; - } - return 0; -} -#endif - /* Return true if OP may appear in the opcode stream. Test the runtime variable that controls each opcode. 
*/ bool tcg_op_supported(TCGOpcode op) @@ -2190,6 +2154,85 @@ static void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs) arg_label(op->args[k])->id); i++, k++; break; + case INDEX_op_mb: + { + TCGBar membar = op->args[k]; + const char *b_op, *m_op; + + switch (membar & TCG_BAR_SC) { + case 0: + b_op = "none"; + break; + case TCG_BAR_LDAQ: + b_op = "acq"; + break; + case TCG_BAR_STRL: + b_op = "rel"; + break; + case TCG_BAR_SC: + b_op = "seq"; + break; + default: + g_assert_not_reached(); + } + + switch (membar & TCG_MO_ALL) { + case 0: + m_op = "none"; + break; + case TCG_MO_LD_LD: + m_op = "rr"; + break; + case TCG_MO_LD_ST: + m_op = "rw"; + break; + case TCG_MO_ST_LD: + m_op = "wr"; + break; + case TCG_MO_ST_ST: + m_op = "ww"; + break; + case TCG_MO_LD_LD | TCG_MO_LD_ST: + m_op = "rr+rw"; + break; + case TCG_MO_LD_LD | TCG_MO_ST_LD: + m_op = "rr+wr"; + break; + case TCG_MO_LD_LD | TCG_MO_ST_ST: + m_op = "rr+ww"; + break; + case TCG_MO_LD_ST | TCG_MO_ST_LD: + m_op = "rw+wr"; + break; + case TCG_MO_LD_ST | TCG_MO_ST_ST: + m_op = "rw+ww"; + break; + case TCG_MO_ST_LD | TCG_MO_ST_ST: + m_op = "wr+ww"; + break; + case TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_LD: + m_op = "rr+rw+wr"; + break; + case TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_ST: + m_op = "rr+rw+ww"; + break; + case TCG_MO_LD_LD | TCG_MO_ST_LD | TCG_MO_ST_ST: + m_op = "rr+wr+ww"; + break; + case TCG_MO_LD_ST | TCG_MO_ST_LD | TCG_MO_ST_ST: + m_op = "rw+wr+ww"; + break; + case TCG_MO_ALL: + m_op = "all"; + break; + default: + g_assert_not_reached(); + } + + col += ne_fprintf(f, "%s%s:%s", (k ? "," : ""), b_op, m_op); + i++, k++; + } + break; default: break; } @@ -2519,23 +2562,32 @@ static void process_op_defs(TCGContext *s) } } -void tcg_op_remove(TCGContext *s, TCGOp *op) +static void remove_label_use(TCGOp *op, int idx) { - TCGLabel *label; + TCGLabel *label = arg_label(op->args[idx]); + TCGLabelUse *use; + QSIMPLEQ_FOREACH(use, &label->branches, next) { + if (use->op == op) { + QSIMPLEQ_REMOVE(&label->branches, use, TCGLabelUse, next); + return; + } + } + g_assert_not_reached(); +} + +void tcg_op_remove(TCGContext *s, TCGOp *op) +{ switch (op->opc) { case INDEX_op_br: - label = arg_label(op->args[0]); - label->refs--; + remove_label_use(op, 0); break; case INDEX_op_brcond_i32: case INDEX_op_brcond_i64: - label = arg_label(op->args[3]); - label->refs--; + remove_label_use(op, 3); break; case INDEX_op_brcond2_i32: - label = arg_label(op->args[5]); - label->refs--; + remove_label_use(op, 5); break; default: break; @@ -2617,6 +2669,31 @@ TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op, return new_op; } +static void move_label_uses(TCGLabel *to, TCGLabel *from) +{ + TCGLabelUse *u; + + QSIMPLEQ_FOREACH(u, &from->branches, next) { + TCGOp *op = u->op; + switch (op->opc) { + case INDEX_op_br: + op->args[0] = label_arg(to); + break; + case INDEX_op_brcond_i32: + case INDEX_op_brcond_i64: + op->args[3] = label_arg(to); + break; + case INDEX_op_brcond2_i32: + op->args[5] = label_arg(to); + break; + default: + g_assert_not_reached(); + } + } + + QSIMPLEQ_CONCAT(&to->branches, &from->branches); +} + /* Reachable analysis : remove unreachable code. */ static void __attribute__((noinline)) reachable_code_pass(TCGContext *s) @@ -2633,13 +2710,30 @@ reachable_code_pass(TCGContext *s) label = arg_label(op->args[0]); /* + * Note that the first op in the TB is always a load, + * so there is always something before a label. 
+ */ + op_prev = QTAILQ_PREV(op, link); + + /* + * If we find two sequential labels, move all branches to + * reference the second label and remove the first label. + * Do this before branch to next optimization, so that the + * middle label is out of the way. + */ + if (op_prev->opc == INDEX_op_set_label) { + move_label_uses(label, arg_label(op_prev->args[0])); + tcg_op_remove(s, op_prev); + op_prev = QTAILQ_PREV(op, link); + } + + /* * Optimization can fold conditional branches to unconditional. * If we find a label which is preceded by an unconditional * branch to next, remove the branch. We couldn't do this when * processing the branch because any dead code between the branch * and label had not yet been removed. */ - op_prev = QTAILQ_PREV(op, link); if (op_prev->opc == INDEX_op_br && label == arg_label(op_prev->args[0])) { tcg_op_remove(s, op_prev); @@ -2647,7 +2741,7 @@ reachable_code_pass(TCGContext *s) dead = false; } - if (label->refs == 0) { + if (QSIMPLEQ_EMPTY(&label->branches)) { /* * While there is an occasional backward branch, virtually * all branches generated by the translators are forward. @@ -4891,7 +4985,7 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start) bool error = false; QSIMPLEQ_FOREACH(l, &s->labels, next) { - if (unlikely(!l->present) && l->refs) { + if (unlikely(!l->present) && !QSIMPLEQ_EMPTY(&l->branches)) { qemu_log_mask(CPU_LOG_TB_OP, "$L%d referenced but not present.\n", l->id); error = true; diff --git a/tests/avocado/machine_aspeed.py b/tests/avocado/machine_aspeed.py index ddf05b3617..d2c57ccb7e 100644 --- a/tests/avocado/machine_aspeed.py +++ b/tests/avocado/machine_aspeed.py @@ -199,6 +199,8 @@ class AST2x00Machine(QemuSystemTest): 'tmp105,bus=aspeed.i2c.bus.3,address=0x4d,id=tmp-test'); self.vm.add_args('-device', 'ds1338,bus=aspeed.i2c.bus.3,address=0x32'); + self.vm.add_args('-device', + 'i2c-echo,bus=aspeed.i2c.bus.3,address=0x42'); self.do_test_arm_aspeed_buildroot_start(image_path, '0xf00') exec_command_and_wait_for_pattern(self, @@ -217,6 +219,14 @@ class AST2x00Machine(QemuSystemTest): year = time.strftime("%Y") exec_command_and_wait_for_pattern(self, 'hwclock -f /dev/rtc1', year); + exec_command_and_wait_for_pattern(self, + 'echo slave-24c02 0x1064 > /sys/bus/i2c/devices/i2c-3/new_device', + 'i2c i2c-3: new_device: Instantiated device slave-24c02 at 0x64'); + exec_command(self, 'i2cset -y 3 0x42 0x64 0x00 0xaa i'); + time.sleep(0.1) + exec_command_and_wait_for_pattern(self, + 'hexdump /sys/bus/i2c/devices/3-1064/slave-eeprom', + '0000000 ffaa ffff ffff ffff ffff ffff ffff ffff'); self.do_test_arm_aspeed_buildroot_poweroff() diff --git a/tests/data/acpi/q35/DSDT.cxl b/tests/data/acpi/q35/DSDT.cxl index 3d18b9672d..4586b9a18b 100644 --- a/tests/data/acpi/q35/DSDT.cxl +++ b/tests/data/acpi/q35/DSDT.cxl Binary files differdiff --git a/tests/data/acpi/virt/APIC.memhp b/tests/data/acpi/virt/APIC.memhp deleted file mode 100644 index 179d274770..0000000000 --- a/tests/data/acpi/virt/APIC.memhp +++ /dev/null Binary files differdiff --git a/tests/data/acpi/virt/APIC.numamem b/tests/data/acpi/virt/APIC.numamem deleted file mode 100644 index 179d274770..0000000000 --- a/tests/data/acpi/virt/APIC.numamem +++ /dev/null Binary files differdiff --git a/tests/data/acpi/virt/DSDT.numamem b/tests/data/acpi/virt/DSDT.numamem deleted file mode 100644 index c475039907..0000000000 --- a/tests/data/acpi/virt/DSDT.numamem +++ /dev/null Binary files differdiff --git a/tests/data/acpi/virt/FACP.memhp b/tests/data/acpi/virt/FACP.memhp 
deleted file mode 100644 index ac05c35a69..0000000000 --- a/tests/data/acpi/virt/FACP.memhp +++ /dev/null Binary files differdiff --git a/tests/data/acpi/virt/FACP.numamem b/tests/data/acpi/virt/FACP.numamem deleted file mode 100644 index ac05c35a69..0000000000 --- a/tests/data/acpi/virt/FACP.numamem +++ /dev/null Binary files differdiff --git a/tests/data/acpi/virt/GTDT.memhp b/tests/data/acpi/virt/GTDT.memhp deleted file mode 100644 index 6f8cb9b8f3..0000000000 --- a/tests/data/acpi/virt/GTDT.memhp +++ /dev/null Binary files differdiff --git a/tests/data/acpi/virt/GTDT.numamem b/tests/data/acpi/virt/GTDT.numamem deleted file mode 100644 index 6f8cb9b8f3..0000000000 --- a/tests/data/acpi/virt/GTDT.numamem +++ /dev/null Binary files differdiff --git a/tests/data/acpi/virt/IORT.memhp b/tests/data/acpi/virt/IORT.memhp deleted file mode 100644 index 7efd0ce8a6..0000000000 --- a/tests/data/acpi/virt/IORT.memhp +++ /dev/null Binary files differdiff --git a/tests/data/acpi/virt/IORT.numamem b/tests/data/acpi/virt/IORT.numamem deleted file mode 100644 index 7efd0ce8a6..0000000000 --- a/tests/data/acpi/virt/IORT.numamem +++ /dev/null Binary files differdiff --git a/tests/data/acpi/virt/IORT.pxb b/tests/data/acpi/virt/IORT.pxb deleted file mode 100644 index 7efd0ce8a6..0000000000 --- a/tests/data/acpi/virt/IORT.pxb +++ /dev/null Binary files differdiff --git a/tests/data/acpi/virt/MCFG.memhp b/tests/data/acpi/virt/MCFG.memhp deleted file mode 100644 index f4ae3203a4..0000000000 --- a/tests/data/acpi/virt/MCFG.memhp +++ /dev/null Binary files differdiff --git a/tests/data/acpi/virt/MCFG.numamem b/tests/data/acpi/virt/MCFG.numamem deleted file mode 100644 index f4ae3203a4..0000000000 --- a/tests/data/acpi/virt/MCFG.numamem +++ /dev/null Binary files differdiff --git a/tests/data/acpi/virt/SPCR.memhp b/tests/data/acpi/virt/SPCR.memhp deleted file mode 100644 index 24e0a579e7..0000000000 --- a/tests/data/acpi/virt/SPCR.memhp +++ /dev/null Binary files differdiff --git a/tests/data/acpi/virt/SPCR.numamem b/tests/data/acpi/virt/SPCR.numamem deleted file mode 100644 index 24e0a579e7..0000000000 --- a/tests/data/acpi/virt/SPCR.numamem +++ /dev/null Binary files differdiff --git a/tests/qtest/migration-test.c b/tests/qtest/migration-test.c index 109bc8e7b1..d4ab3934ed 100644 --- a/tests/qtest/migration-test.c +++ b/tests/qtest/migration-test.c @@ -2572,8 +2572,14 @@ int main(int argc, char **argv) qtest_add_func("/migration/auto_converge", test_migrate_auto_converge); qtest_add_func("/migration/multifd/tcp/plain/none", test_multifd_tcp_none); - qtest_add_func("/migration/multifd/tcp/plain/cancel", - test_multifd_tcp_cancel); + /* + * This test is flaky and sometimes fails in CI and otherwise: + * don't run unless user opts in via environment variable. 
+ */ + if (getenv("QEMU_TEST_FLAKY_TESTS")) { + qtest_add_func("/migration/multifd/tcp/plain/cancel", + test_multifd_tcp_cancel); + } qtest_add_func("/migration/multifd/tcp/plain/zlib", test_multifd_tcp_zlib); #ifdef CONFIG_ZSTD diff --git a/tests/unit/test-vmstate.c b/tests/unit/test-vmstate.c index 79357b29ca..0b7d5ecd68 100644 --- a/tests/unit/test-vmstate.c +++ b/tests/unit/test-vmstate.c @@ -1073,7 +1073,6 @@ static gboolean diff_tree(gpointer key, gpointer value, gpointer data) struct match_node_data d = {tp->tree2, key, value}; g_tree_foreach(tp->tree2, tp->match_node, &d); - g_tree_remove(tp->tree1, key); return false; } @@ -1082,9 +1081,9 @@ static void compare_trees(GTree *tree1, GTree *tree2, { struct tree_cmp_data tp = {tree1, tree2, function}; + assert(g_tree_nnodes(tree1) == g_tree_nnodes(tree2)); g_tree_foreach(tree1, diff_tree, &tp); - assert(g_tree_nnodes(tree1) == 0); - assert(g_tree_nnodes(tree2) == 0); + g_tree_destroy(g_tree_ref(tree1)); } static void diff_domain(TestGTreeDomain *d1, TestGTreeDomain *d2) diff --git a/util/bufferiszero.c b/util/bufferiszero.c index 1790ded7d4..1886bc5ba4 100644 --- a/util/bufferiszero.c +++ b/util/bufferiszero.c @@ -258,8 +258,7 @@ static void __attribute__((constructor)) init_cpuid_cache(void) /* We must check that AVX is not just available, but usable. */ if ((c & bit_OSXSAVE) && (c & bit_AVX) && max >= 7) { - int bv; - __asm("xgetbv" : "=a"(bv), "=d"(d) : "c"(0)); + unsigned bv = xgetbv_low(0); __cpuid_count(7, 0, a, b, c, d); if ((bv & 0x6) == 0x6 && (b & bit_AVX2)) { cache |= CACHE_AVX2; |
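As context for the last two hunks (tcg/i386/tcg-target.c.inc and util/bufferiszero.c): both stop open-coding the XGETBV instruction with a raw ".byte 0x0f, 0x01, 0xd0" asm statement and call a shared xgetbv_low() helper instead. Below is a minimal standalone C sketch of what such a helper and its guarded use can look like; only the name xgetbv_low and the manual encoding come from the diff, while the function body shown here, the main() driver, and the use of <cpuid.h>'s __get_cpuid()/bit_OSXSAVE for the OSXSAVE check are illustrative assumptions, not the in-tree implementation.

#include <cpuid.h>
#include <stdio.h>

/* Read the low 32 bits of extended control register XCR[c].
 * The instruction is encoded by hand, as in the patched files,
 * so that assemblers predating XGETBV still accept it. */
static inline unsigned xgetbv_low(unsigned c)
{
    unsigned a, d;
    __asm__(".byte 0x0f, 0x01, 0xd0" : "=a"(a), "=d"(d) : "c"(c));
    (void)d;
    return a;
}

int main(void)
{
    unsigned a, b, c, d;

    /* XGETBV raises #UD unless CPUID reports OSXSAVE, so check that
     * first, just as both call sites in the patch do. */
    if (__get_cpuid(1, &a, &b, &c, &d) && (c & bit_OSXSAVE)) {
        unsigned bv = xgetbv_low(0);
        /* XCR0 bits 1 and 2 must both be set before AVX may be used. */
        printf("XCR0 = %#x; AVX state %s enabled by the OS\n",
               bv, (bv & 6) == 6 ? "is" : "is not");
    } else {
        printf("OSXSAVE not reported; XGETBV would fault\n");
    }
    return 0;
}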