281 files changed, 5323 insertions, 2997 deletions
diff --git a/MAINTAINERS b/MAINTAINERS index 2c14c670c5..0844f5da19 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1473,7 +1473,6 @@ F: tests/functional/test_ppc_40p.py sPAPR (pseries) M: Nicholas Piggin <npiggin@gmail.com> R: Daniel Henrique Barboza <danielhb413@gmail.com> -R: David Gibson <david@gibson.dropbear.id.au> R: Harsh Prateek Bora <harshpb@linux.ibm.com> L: qemu-ppc@nongnu.org S: Odd Fixes @@ -1497,7 +1496,6 @@ F: tests/functional/test_ppc64_hv.py F: tests/functional/test_ppc64_tuxrun.py PowerNV (Non-Virtualized) -M: Cédric Le Goater <clg@kaod.org> M: Nicholas Piggin <npiggin@gmail.com> R: Frédéric Barrat <fbarrat@linux.ibm.com> L: qemu-ppc@nongnu.org @@ -1507,8 +1505,10 @@ F: hw/ppc/pnv* F: hw/intc/pnv* F: hw/intc/xics_pnv.c F: hw/pci-host/pnv* +F: hw/ssi/pnv_spi.c F: include/hw/ppc/pnv* F: include/hw/pci-host/pnv* +F: include/hw/ssi/pnv_spi* F: pc-bios/skiboot.lid F: tests/qtest/pnv* F: tests/functional/test_ppc64_powernv.py @@ -1563,7 +1563,6 @@ F: tests/functional/test_ppc_amiga.py Virtual Open Firmware (VOF) M: Alexey Kardashevskiy <aik@ozlabs.ru> -R: David Gibson <david@gibson.dropbear.id.au> L: qemu-ppc@nongnu.org S: Odd Fixes F: hw/ppc/spapr_vof* @@ -2639,12 +2638,12 @@ F: tests/qtest/fw_cfg-test.c T: git https://github.com/philmd/qemu.git fw_cfg-next XIVE -M: Cédric Le Goater <clg@kaod.org> R: Frédéric Barrat <fbarrat@linux.ibm.com> L: qemu-ppc@nongnu.org S: Odd Fixes F: hw/*/*xive* F: include/hw/*/*xive* +F: tests/qtest/*xive* F: docs/*/*xive* Renesas peripherals diff --git a/Makefile b/Makefile index 917c9a34d1..b65b0bd41a 100644 --- a/Makefile +++ b/Makefile @@ -187,11 +187,6 @@ SUBDIR_RULES=$(foreach t, all clean distclean, $(addsuffix /$(t), $(SUBDIRS))) $(SUBDIR_RULES): $(call quiet-command,$(MAKE) $(SUBDIR_MAKEFLAGS) -C $(dir $@) V="$(V)" TARGET_DIR="$(dir $@)" $(notdir $@),) -ifneq ($(filter contrib/plugins, $(SUBDIRS)),) -.PHONY: plugins -plugins: contrib/plugins/all -endif - .PHONY: recurse-all recurse-clean recurse-all: $(addsuffix /all, $(SUBDIRS)) recurse-clean: $(addsuffix /clean, $(SUBDIRS)) @@ -307,11 +302,6 @@ help: $(call print-help,cscope,Generate cscope index) $(call print-help,sparse,Run sparse on the QEMU source) @echo '' -ifneq ($(filter contrib/plugins, $(SUBDIRS)),) - @echo 'Plugin targets:' - $(call print-help,plugins,Build the example TCG plugins) - @echo '' -endif @echo 'Cleaning targets:' $(call print-help,clean,Remove most generated files but keep the config) $(call print-help,distclean,Remove all generated files) diff --git a/bsd-user/main.c b/bsd-user/main.c index cc980e6f40..61ca73c478 100644 --- a/bsd-user/main.c +++ b/bsd-user/main.c @@ -60,6 +60,7 @@ uintptr_t qemu_host_page_size; intptr_t qemu_host_page_mask; static bool opt_one_insn_per_tb; +static unsigned long opt_tb_size; uintptr_t guest_base; bool have_guest_base; /* @@ -169,6 +170,7 @@ static void usage(void) " (use '-d help' for a list of log items)\n" "-D logfile write logs to 'logfile' (default stderr)\n" "-one-insn-per-tb run with one guest instruction per emulated TB\n" + "-tb-size size TCG translation block cache size\n" "-strace log system calls\n" "-trace [[enable=]<pattern>][,events=<file>][,file=<file>]\n" " specify tracing options\n" @@ -387,6 +389,11 @@ int main(int argc, char **argv) seed_optarg = optarg; } else if (!strcmp(r, "one-insn-per-tb")) { opt_one_insn_per_tb = true; + } else if (!strcmp(r, "tb-size")) { + r = argv[optind++]; + if (qemu_strtoul(r, NULL, 0, &opt_tb_size)) { + usage(); + } } else if (!strcmp(r, "strace")) { do_strace = 1; } else if 
(!strcmp(r, "trace")) { @@ -452,6 +459,8 @@ int main(int argc, char **argv) accel_init_interfaces(ac); object_property_set_bool(OBJECT(accel), "one-insn-per-tb", opt_one_insn_per_tb, &error_abort); + object_property_set_int(OBJECT(accel), "tb-size", + opt_tb_size, &error_abort); ac->init_machine(NULL); } @@ -601,6 +610,7 @@ int main(int argc, char **argv) init_task_state(ts); ts->info = info; ts->bprm = &bprm; + ts->ts_tid = qemu_get_thread_id(); cpu->opaque = ts; target_set_brk(info->brk); diff --git a/bsd-user/x86_64/target_arch_thread.h b/bsd-user/x86_64/target_arch_thread.h index 52c28906d6..7739bb2154 100644 --- a/bsd-user/x86_64/target_arch_thread.h +++ b/bsd-user/x86_64/target_arch_thread.h @@ -31,7 +31,7 @@ static inline void target_thread_init(struct target_pt_regs *regs, struct image_info *infop) { regs->rax = 0; - regs->rsp = infop->start_stack; + regs->rsp = ((infop->start_stack - 8) & ~0xfUL) + 8; regs->rip = infop->entry; regs->rdi = infop->start_stack; } diff --git a/configs/devices/microblaze-softmmu/default.mak b/configs/devices/microblaze-softmmu/default.mak index 583e3959bb..7894106465 100644 --- a/configs/devices/microblaze-softmmu/default.mak +++ b/configs/devices/microblaze-softmmu/default.mak @@ -2,5 +2,3 @@ # Boards are selected by default, uncomment to keep out of the build. # CONFIG_PETALOGIX_S3ADSP1800=n -# CONFIG_PETALOGIX_ML605=n -# CONFIG_XLNX_ZYNQMP_PMU=n diff --git a/configs/devices/microblazeel-softmmu/default.mak b/configs/devices/microblazeel-softmmu/default.mak index 29f7f13816..4c1086435b 100644 --- a/configs/devices/microblazeel-softmmu/default.mak +++ b/configs/devices/microblazeel-softmmu/default.mak @@ -1,3 +1,6 @@ # Default configuration for microblazeel-softmmu -include ../microblaze-softmmu/default.mak +# Boards are selected by default, uncomment to keep out of the build. 
+# CONFIG_PETALOGIX_S3ADSP1800=n +# CONFIG_PETALOGIX_ML605=n +# CONFIG_XLNX_ZYNQMP_PMU=n diff --git a/configure b/configure index f24940ca77..096b1fddb7 100755 --- a/configure +++ b/configure @@ -1077,7 +1077,6 @@ if test "$plugins" != "no" && test $host_bits -eq 64; then plugins="no" else plugins=yes - subdirs="$subdirs contrib/plugins" fi fi @@ -1708,7 +1707,6 @@ LINKS="$LINKS .gdbinit scripts" # scripts needed by relative path in .gdbinit LINKS="$LINKS tests/avocado tests/data" LINKS="$LINKS tests/qemu-iotests/check tests/qemu-iotests/Makefile" LINKS="$LINKS python" -LINKS="$LINKS contrib/plugins/Makefile " for f in $LINKS ; do if [ -e "$source_path/$f" ]; then symlink "$source_path/$f" "$f" @@ -1794,22 +1792,6 @@ if test "$default_targets" = "yes"; then echo "CONFIG_DEFAULT_TARGETS=y" >> $config_host_mak fi -# contrib/plugins configuration -echo "# Automatically generated by configure - do not modify" > contrib/plugins/$config_host_mak -echo "SRC_PATH=$source_path/contrib/plugins" >> contrib/plugins/$config_host_mak -echo "PKG_CONFIG=${pkg_config}" >> contrib/plugins/$config_host_mak -echo "CC=$cc $CPU_CFLAGS" >> contrib/plugins/$config_host_mak -echo "CFLAGS=${CFLAGS-$default_cflags} $EXTRA_CFLAGS" >> contrib/plugins/$config_host_mak -if test "$host_os" = windows; then - echo "DLLTOOL=$dlltool" >> contrib/plugins/$config_host_mak -fi -if test "$host_os" = darwin; then - echo "CONFIG_DARWIN=y" >> contrib/plugins/$config_host_mak -fi -if test "$host_os" = windows; then - echo "CONFIG_WIN32=y" >> contrib/plugins/$config_host_mak -fi - # tests/tcg configuration mkdir -p tests/tcg echo "# Automatically generated by configure - do not modify" > tests/tcg/$config_host_mak diff --git a/contrib/plugins/Makefile b/contrib/plugins/Makefile deleted file mode 100644 index bbddd4800f..0000000000 --- a/contrib/plugins/Makefile +++ /dev/null @@ -1,87 +0,0 @@ -# -*- Mode: makefile -*- -# -# This Makefile example is fairly independent from the main makefile -# so users can take and adapt it for their build. We only really -# include config-host.mak so we don't have to repeat probing for -# programs that the main configure has already done for us. -# - -include config-host.mak - -TOP_SRC_PATH = $(SRC_PATH)/../.. - -VPATH += $(SRC_PATH) - -NAMES := -NAMES += bbv -NAMES += execlog -NAMES += hotblocks -NAMES += hotpages -NAMES += howvec - -# The lockstep example communicates using unix sockets, -# and can't be easily made to work on windows. -ifneq ($(CONFIG_WIN32),y) -NAMES += lockstep -endif - -NAMES += hwprofile -NAMES += cache -NAMES += drcov -NAMES += ips -NAMES += stoptrigger -NAMES += cflow - -ifeq ($(CONFIG_WIN32),y) -SO_SUFFIX := .dll -LDLIBS += $(shell $(PKG_CONFIG) --libs glib-2.0) -else -SO_SUFFIX := .so -endif - -SONAMES := $(addsuffix $(SO_SUFFIX),$(addprefix lib,$(NAMES))) - -# The main QEMU uses Glib extensively so it is perfectly fine to use it -# in plugins (which many example do). 
-PLUGIN_CFLAGS := $(shell $(PKG_CONFIG) --cflags glib-2.0) -PLUGIN_CFLAGS += -fPIC -Wall -PLUGIN_CFLAGS += -I$(TOP_SRC_PATH)/include/qemu - -# Helper that honours V=1 so we get some output when compiling -quiet-@ = $(if $(V),,@$(if $1,printf " %-7s %s\n" "$(strip $1)" "$(strip $2)" && )) -quiet-command = $(call quiet-@,$2,$3)$1 - -# for including , in command strings -COMMA := , - -all: $(SONAMES) - -%.o: %.c - $(call quiet-command, \ - $(CC) $(CFLAGS) $(PLUGIN_CFLAGS) -c -o $@ $<, \ - BUILD, plugin $@) - -ifeq ($(CONFIG_WIN32),y) -lib%$(SO_SUFFIX): %.o win32_linker.o ../../plugins/libqemu_plugin_api.a - $(call quiet-command, \ - $(CC) -shared -o $@ $^ $(LDLIBS), \ - LINK, plugin $@) -else ifeq ($(CONFIG_DARWIN),y) -lib%$(SO_SUFFIX): %.o - $(call quiet-command, \ - $(CC) -bundle -Wl$(COMMA)-undefined$(COMMA)dynamic_lookup -o $@ $^ $(LDLIBS), \ - LINK, plugin $@) -else -lib%$(SO_SUFFIX): %.o - $(call quiet-command, \ - $(CC) -shared -o $@ $^ $(LDLIBS), \ - LINK, plugin $@) -endif - - -clean distclean: - rm -f *.o *$(SO_SUFFIX) *.d - rm -Rf .libs - -.PHONY: all clean -.SECONDARY: diff --git a/contrib/plugins/cflow.c b/contrib/plugins/cflow.c index 6faa55d10d..b39974d1cf 100644 --- a/contrib/plugins/cflow.c +++ b/contrib/plugins/cflow.c @@ -136,7 +136,7 @@ static void plugin_exit(qemu_plugin_id_t id, void *p) g_autoptr(GString) result = g_string_new("collected "); GList *data; GCompareFunc sort = &hottest; - int n = 0; + int i = 0; g_mutex_lock(&node_lock); g_string_append_printf(result, "%d control flow nodes in the hash table\n", @@ -162,8 +162,8 @@ static void plugin_exit(qemu_plugin_id_t id, void *p) data = g_list_sort(data, sort); for (GList *l = data; - l != NULL && n < topn; - l = l->next, n++) { + l != NULL && i < topn; + l = l->next, i++) { NodeData *n = l->data; const char *type = n->mid_count ? 
"sync fault" : "branch"; g_string_append_printf(result, " addr: 0x%"PRIx64 " %s: %s (%s)\n", diff --git a/contrib/plugins/meson.build b/contrib/plugins/meson.build new file mode 100644 index 0000000000..63a32c2b4f --- /dev/null +++ b/contrib/plugins/meson.build @@ -0,0 +1,28 @@ +contrib_plugins = ['bbv', 'cache', 'cflow', 'drcov', 'execlog', 'hotblocks', + 'hotpages', 'howvec', 'hwprofile', 'ips', 'stoptrigger'] +if host_os != 'windows' + # lockstep uses socket.h + contrib_plugins += 'lockstep' +endif + +t = [] +if get_option('plugins') + foreach i : contrib_plugins + if host_os == 'windows' + t += shared_module(i, files(i + '.c') + 'win32_linker.c', + include_directories: '../../include/qemu', + link_depends: [win32_qemu_plugin_api_lib], + link_args: ['-Lplugins', '-lqemu_plugin_api'], + dependencies: glib) + else + t += shared_module(i, files(i + '.c'), + include_directories: '../../include/qemu', + dependencies: glib) + endif + endforeach +endif +if t.length() > 0 + alias_target('contrib-plugins', t) +else + run_target('contrib-plugins', command: find_program('true')) +endif diff --git a/crypto/hash-gcrypt.c b/crypto/hash-gcrypt.c index 73533a4949..af61c4e75d 100644 --- a/crypto/hash-gcrypt.c +++ b/crypto/hash-gcrypt.c @@ -34,13 +34,16 @@ static int qcrypto_hash_alg_map[QCRYPTO_HASH_ALGO__MAX] = { [QCRYPTO_HASH_ALGO_SHA384] = GCRY_MD_SHA384, [QCRYPTO_HASH_ALGO_SHA512] = GCRY_MD_SHA512, [QCRYPTO_HASH_ALGO_RIPEMD160] = GCRY_MD_RMD160, +#ifdef CONFIG_CRYPTO_SM3 + [QCRYPTO_HASH_ALGO_SM3] = GCRY_MD_SM3, +#endif }; gboolean qcrypto_hash_supports(QCryptoHashAlgo alg) { if (alg < G_N_ELEMENTS(qcrypto_hash_alg_map) && qcrypto_hash_alg_map[alg] != GCRY_MD_NONE) { - return true; + return gcry_md_test_algo(qcrypto_hash_alg_map[alg]) == 0; } return false; } @@ -49,7 +52,7 @@ static QCryptoHash *qcrypto_gcrypt_hash_new(QCryptoHashAlgo alg, Error **errp) { QCryptoHash *hash; - int ret; + gcry_error_t ret; hash = g_new(QCryptoHash, 1); hash->alg = alg; @@ -57,7 +60,7 @@ QCryptoHash *qcrypto_gcrypt_hash_new(QCryptoHashAlgo alg, Error **errp) ret = gcry_md_open((gcry_md_hd_t *) hash->opaque, qcrypto_hash_alg_map[alg], 0); - if (ret < 0) { + if (ret != 0) { error_setg(errp, "Unable to initialize hash algorithm: %s", gcry_strerror(ret)); diff --git a/crypto/hash-nettle.c b/crypto/hash-nettle.c index c78624b347..53f68301ef 100644 --- a/crypto/hash-nettle.c +++ b/crypto/hash-nettle.c @@ -26,6 +26,9 @@ #include <nettle/md5.h> #include <nettle/sha.h> #include <nettle/ripemd160.h> +#ifdef CONFIG_CRYPTO_SM3 +#include <nettle/sm3.h> +#endif typedef void (*qcrypto_nettle_init)(void *ctx); typedef void (*qcrypto_nettle_write)(void *ctx, @@ -43,6 +46,9 @@ union qcrypto_hash_ctx { struct sha384_ctx sha384; struct sha512_ctx sha512; struct ripemd160_ctx ripemd160; +#ifdef CONFIG_CRYPTO_SM3 + struct sm3_ctx sm3; +#endif }; struct qcrypto_hash_alg { @@ -93,6 +99,14 @@ struct qcrypto_hash_alg { .result = (qcrypto_nettle_result)ripemd160_digest, .len = RIPEMD160_DIGEST_SIZE, }, +#ifdef CONFIG_CRYPTO_SM3 + [QCRYPTO_HASH_ALGO_SM3] = { + .init = (qcrypto_nettle_init)sm3_init, + .write = (qcrypto_nettle_write)sm3_update, + .result = (qcrypto_nettle_result)sm3_digest, + .len = SM3_DIGEST_SIZE, + }, +#endif }; gboolean qcrypto_hash_supports(QCryptoHashAlgo alg) diff --git a/crypto/hash.c b/crypto/hash.c index 0c8548c568..7513769e42 100644 --- a/crypto/hash.c +++ b/crypto/hash.c @@ -33,6 +33,9 @@ static size_t qcrypto_hash_alg_size[QCRYPTO_HASH_ALGO__MAX] = { [QCRYPTO_HASH_ALGO_SHA384] = QCRYPTO_HASH_DIGEST_LEN_SHA384, 
[QCRYPTO_HASH_ALGO_SHA512] = QCRYPTO_HASH_DIGEST_LEN_SHA512, [QCRYPTO_HASH_ALGO_RIPEMD160] = QCRYPTO_HASH_DIGEST_LEN_RIPEMD160, +#ifdef CONFIG_CRYPTO_SM3 + [QCRYPTO_HASH_ALGO_SM3] = QCRYPTO_HASH_DIGEST_LEN_SM3, +#endif }; size_t qcrypto_hash_digest_len(QCryptoHashAlgo alg) diff --git a/crypto/hmac-gcrypt.c b/crypto/hmac-gcrypt.c index 19990cb6ed..5273086eb9 100644 --- a/crypto/hmac-gcrypt.c +++ b/crypto/hmac-gcrypt.c @@ -26,6 +26,9 @@ static int qcrypto_hmac_alg_map[QCRYPTO_HASH_ALGO__MAX] = { [QCRYPTO_HASH_ALGO_SHA384] = GCRY_MAC_HMAC_SHA384, [QCRYPTO_HASH_ALGO_SHA512] = GCRY_MAC_HMAC_SHA512, [QCRYPTO_HASH_ALGO_RIPEMD160] = GCRY_MAC_HMAC_RMD160, +#ifdef CONFIG_CRYPTO_SM3 + [QCRYPTO_HASH_ALGO_SM3] = GCRY_MAC_HMAC_SM3, +#endif }; typedef struct QCryptoHmacGcrypt QCryptoHmacGcrypt; @@ -37,7 +40,7 @@ bool qcrypto_hmac_supports(QCryptoHashAlgo alg) { if (alg < G_N_ELEMENTS(qcrypto_hmac_alg_map) && qcrypto_hmac_alg_map[alg] != GCRY_MAC_NONE) { - return true; + return gcry_mac_test_algo(qcrypto_hmac_alg_map[alg]) == 0; } return false; diff --git a/crypto/hmac-nettle.c b/crypto/hmac-nettle.c index 54dd75d5ff..dd5b2ab7a1 100644 --- a/crypto/hmac-nettle.c +++ b/crypto/hmac-nettle.c @@ -38,6 +38,9 @@ struct QCryptoHmacNettle { struct hmac_sha256_ctx sha256_ctx; /* equals hmac_sha224_ctx */ struct hmac_sha512_ctx sha512_ctx; /* equals hmac_sha384_ctx */ struct hmac_ripemd160_ctx ripemd160_ctx; +#ifdef CONFIG_CRYPTO_SM3 + struct hmac_sm3_ctx ctx; +#endif } u; }; @@ -89,6 +92,14 @@ struct qcrypto_nettle_hmac_alg { .digest = (qcrypto_nettle_hmac_digest)hmac_ripemd160_digest, .len = RIPEMD160_DIGEST_SIZE, }, +#ifdef CONFIG_CRYPTO_SM3 + [QCRYPTO_HASH_ALGO_SM3] = { + .setkey = (qcrypto_nettle_hmac_setkey)hmac_sm3_set_key, + .update = (qcrypto_nettle_hmac_update)hmac_sm3_update, + .digest = (qcrypto_nettle_hmac_digest)hmac_sm3_digest, + .len = SM3_DIGEST_SIZE, + }, +#endif }; bool qcrypto_hmac_supports(QCryptoHashAlgo alg) diff --git a/crypto/pbkdf-gcrypt.c b/crypto/pbkdf-gcrypt.c index 76bbb55f7a..e89b8b1c76 100644 --- a/crypto/pbkdf-gcrypt.c +++ b/crypto/pbkdf-gcrypt.c @@ -33,6 +33,9 @@ bool qcrypto_pbkdf2_supports(QCryptoHashAlgo hash) case QCRYPTO_HASH_ALGO_SHA384: case QCRYPTO_HASH_ALGO_SHA512: case QCRYPTO_HASH_ALGO_RIPEMD160: +#ifdef CONFIG_CRYPTO_SM3 + case QCRYPTO_HASH_ALGO_SM3: +#endif return qcrypto_hash_supports(hash); default: return false; @@ -54,6 +57,9 @@ int qcrypto_pbkdf2(QCryptoHashAlgo hash, [QCRYPTO_HASH_ALGO_SHA384] = GCRY_MD_SHA384, [QCRYPTO_HASH_ALGO_SHA512] = GCRY_MD_SHA512, [QCRYPTO_HASH_ALGO_RIPEMD160] = GCRY_MD_RMD160, +#ifdef CONFIG_CRYPTO_SM3 + [QCRYPTO_HASH_ALGO_SM3] = GCRY_MD_SM3, +#endif }; int ret; diff --git a/crypto/pbkdf-nettle.c b/crypto/pbkdf-nettle.c index 93e686c2c6..3ef9c1b52c 100644 --- a/crypto/pbkdf-nettle.c +++ b/crypto/pbkdf-nettle.c @@ -34,6 +34,9 @@ bool qcrypto_pbkdf2_supports(QCryptoHashAlgo hash) case QCRYPTO_HASH_ALGO_SHA384: case QCRYPTO_HASH_ALGO_SHA512: case QCRYPTO_HASH_ALGO_RIPEMD160: +#ifdef CONFIG_CRYPTO_SM3 + case QCRYPTO_HASH_ALGO_SM3: +#endif return true; default: return false; @@ -55,6 +58,9 @@ int qcrypto_pbkdf2(QCryptoHashAlgo hash, struct hmac_sha384_ctx sha384; struct hmac_sha512_ctx sha512; struct hmac_ripemd160_ctx ripemd160; +#ifdef CONFIG_CRYPTO_SM3 + struct hmac_sm3_ctx sm3; +#endif } ctx; if (iterations > UINT_MAX) { @@ -106,6 +112,13 @@ int qcrypto_pbkdf2(QCryptoHashAlgo hash, PBKDF2(&ctx.ripemd160, hmac_ripemd160_update, hmac_ripemd160_digest, RIPEMD160_DIGEST_SIZE, iterations, nsalt, salt, nout, out); break; +#ifdef 
CONFIG_CRYPTO_SM3 + case QCRYPTO_HASH_ALGO_SM3: + hmac_sm3_set_key(&ctx.sm3, nkey, key); + PBKDF2(&ctx.sm3, hmac_sm3_update, hmac_sm3_digest, + SM3_DIGEST_SIZE, iterations, nsalt, salt, nout, out); + break; +#endif default: error_setg_errno(errp, ENOSYS, diff --git a/crypto/secret_common.c b/crypto/secret_common.c index 2c141107a5..dbda998940 100644 --- a/crypto/secret_common.c +++ b/crypto/secret_common.c @@ -191,15 +191,6 @@ qcrypto_secret_complete(UserCreatable *uc, Error **errp) } -static bool -qcrypto_secret_prop_get_loaded(Object *obj, - Error **errp G_GNUC_UNUSED) -{ - QCryptoSecretCommon *secret = QCRYPTO_SECRET_COMMON(obj); - return secret->rawdata != NULL; -} - - static void qcrypto_secret_prop_set_format(Object *obj, int value, @@ -278,9 +269,6 @@ qcrypto_secret_class_init(ObjectClass *oc, void *data) ucc->complete = qcrypto_secret_complete; - object_class_property_add_bool(oc, "loaded", - qcrypto_secret_prop_get_loaded, - NULL); object_class_property_add_enum(oc, "format", "QCryptoSecretFormat", &QCryptoSecretFormat_lookup, diff --git a/crypto/tlscredsanon.c b/crypto/tlscredsanon.c index c0d23a0ef3..476cf89c96 100644 --- a/crypto/tlscredsanon.c +++ b/crypto/tlscredsanon.c @@ -127,37 +127,6 @@ qcrypto_tls_creds_anon_complete(UserCreatable *uc, Error **errp) } -#ifdef CONFIG_GNUTLS - - -static bool -qcrypto_tls_creds_anon_prop_get_loaded(Object *obj, - Error **errp G_GNUC_UNUSED) -{ - QCryptoTLSCredsAnon *creds = QCRYPTO_TLS_CREDS_ANON(obj); - - if (creds->parent_obj.endpoint == QCRYPTO_TLS_CREDS_ENDPOINT_SERVER) { - return creds->data.server != NULL; - } else { - return creds->data.client != NULL; - } -} - - -#else /* ! CONFIG_GNUTLS */ - - -static bool -qcrypto_tls_creds_anon_prop_get_loaded(Object *obj G_GNUC_UNUSED, - Error **errp G_GNUC_UNUSED) -{ - return false; -} - - -#endif /* ! CONFIG_GNUTLS */ - - static void qcrypto_tls_creds_anon_finalize(Object *obj) { @@ -173,10 +142,6 @@ qcrypto_tls_creds_anon_class_init(ObjectClass *oc, void *data) UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc); ucc->complete = qcrypto_tls_creds_anon_complete; - - object_class_property_add_bool(oc, "loaded", - qcrypto_tls_creds_anon_prop_get_loaded, - NULL); } diff --git a/crypto/tlscredspsk.c b/crypto/tlscredspsk.c index 0d6b71a37c..aa270d7988 100644 --- a/crypto/tlscredspsk.c +++ b/crypto/tlscredspsk.c @@ -206,37 +206,6 @@ qcrypto_tls_creds_psk_complete(UserCreatable *uc, Error **errp) } -#ifdef CONFIG_GNUTLS - - -static bool -qcrypto_tls_creds_psk_prop_get_loaded(Object *obj, - Error **errp G_GNUC_UNUSED) -{ - QCryptoTLSCredsPSK *creds = QCRYPTO_TLS_CREDS_PSK(obj); - - if (creds->parent_obj.endpoint == QCRYPTO_TLS_CREDS_ENDPOINT_SERVER) { - return creds->data.server != NULL; - } else { - return creds->data.client != NULL; - } -} - - -#else /* ! CONFIG_GNUTLS */ - - -static bool -qcrypto_tls_creds_psk_prop_get_loaded(Object *obj G_GNUC_UNUSED, - Error **errp G_GNUC_UNUSED) -{ - return false; -} - - -#endif /* ! 
CONFIG_GNUTLS */ - - static void qcrypto_tls_creds_psk_finalize(Object *obj) { @@ -273,9 +242,6 @@ qcrypto_tls_creds_psk_class_init(ObjectClass *oc, void *data) ucc->complete = qcrypto_tls_creds_psk_complete; - object_class_property_add_bool(oc, "loaded", - qcrypto_tls_creds_psk_prop_get_loaded, - NULL); object_class_property_add_str(oc, "username", qcrypto_tls_creds_psk_prop_get_username, qcrypto_tls_creds_psk_prop_set_username); diff --git a/crypto/tlscredsx509.c b/crypto/tlscredsx509.c index d14313925d..24ec584922 100644 --- a/crypto/tlscredsx509.c +++ b/crypto/tlscredsx509.c @@ -695,33 +695,6 @@ qcrypto_tls_creds_x509_complete(UserCreatable *uc, Error **errp) } -#ifdef CONFIG_GNUTLS - - -static bool -qcrypto_tls_creds_x509_prop_get_loaded(Object *obj, - Error **errp G_GNUC_UNUSED) -{ - QCryptoTLSCredsX509 *creds = QCRYPTO_TLS_CREDS_X509(obj); - - return creds->data != NULL; -} - - -#else /* ! CONFIG_GNUTLS */ - - -static bool -qcrypto_tls_creds_x509_prop_get_loaded(Object *obj G_GNUC_UNUSED, - Error **errp G_GNUC_UNUSED) -{ - return false; -} - - -#endif /* ! CONFIG_GNUTLS */ - - static void qcrypto_tls_creds_x509_prop_set_sanity(Object *obj, bool value, @@ -838,9 +811,6 @@ qcrypto_tls_creds_x509_class_init(ObjectClass *oc, void *data) ucc->complete = qcrypto_tls_creds_x509_complete; - object_class_property_add_bool(oc, "loaded", - qcrypto_tls_creds_x509_prop_get_loaded, - NULL); object_class_property_add_bool(oc, "sanity-check", qcrypto_tls_creds_x509_prop_get_sanity, qcrypto_tls_creds_x509_prop_set_sanity); diff --git a/docs/about/deprecated.rst b/docs/about/deprecated.rst index ff404d44f8..d8dc29d0a4 100644 --- a/docs/about/deprecated.rst +++ b/docs/about/deprecated.rst @@ -255,14 +255,6 @@ These old machine types are quite neglected nowadays and thus might have various pitfalls with regards to live migration. Use a newer machine type instead. -``pseries-2.1`` up to ``pseries-2.12`` (since 9.0) -'''''''''''''''''''''''''''''''''''''''''''''''''' - -Older pseries machines before version 3.0 have undergone many changes -to correct issues, mostly regarding migration compatibility. These are -no longer maintained and removing them will make the code easier to -read and maintain. Use versions 3.0 and above as a replacement. - PPC 405 ``ref405ep`` machine (since 9.1) '''''''''''''''''''''''''''''''''''''''' @@ -279,6 +271,12 @@ BMC and a witherspoon like OpenPOWER system. It was used for bring up of the AST2600 SoC in labs. It can be easily replaced by the ``rainier-bmc`` machine which is a real product. +Big-Endian variants of MicroBlaze ``petalogix-ml605`` and ``xlnx-zynqmp-pmu`` machines (since 9.2) +'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' + +Both ``petalogix-ml605`` and ``xlnx-zynqmp-pmu`` were added for little endian +CPUs. Big endian support is not tested. + Backend options --------------- diff --git a/docs/about/removed-features.rst b/docs/about/removed-features.rst index 912e0a1fcf..ee6455aeee 100644 --- a/docs/about/removed-features.rst +++ b/docs/about/removed-features.rst @@ -355,13 +355,13 @@ The ``-writeconfig`` option was not able to serialize the entire contents of the QEMU command line. It is thus considered a failed experiment and removed without a replacement. 
-``loaded`` property of ``secret`` and ``secret_keyring`` objects (removed in 7.1) -''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' +``loaded`` property of secret and TLS credential objects (removed in 9.2) +''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' The ``loaded=on`` option in the command line or QMP ``object-add`` either had no effect (if ``loaded`` was the last option) or caused options to be effectively ignored as if they were not given. The property is therefore -useless and should simply be removed. +useless and has been removed. ``opened`` property of ``rng-*`` objects (removed in 7.1) ''''''''''''''''''''''''''''''''''''''''''''''''''''''''' diff --git a/docs/interop/vhost-user.rst b/docs/interop/vhost-user.rst index d8419fd2f1..2e50f2ddfa 100644 --- a/docs/interop/vhost-user.rst +++ b/docs/interop/vhost-user.rst @@ -167,6 +167,8 @@ A vring address description Note that a ring address is an IOVA if ``VIRTIO_F_IOMMU_PLATFORM`` has been negotiated. Otherwise it is a user address. +.. _memory_region_description: + Memory region description ^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -180,7 +182,7 @@ Memory region description :user address: a 64-bit user address -:mmap offset: 64-bit offset where region starts in the mapped memory +:mmap offset: a 64-bit offset where region starts in the mapped memory When the ``VHOST_USER_PROTOCOL_F_XEN_MMAP`` protocol feature has been successfully negotiated, the memory region description contains two extra @@ -190,7 +192,7 @@ fields at the end. | guest address | size | user address | mmap offset | xen mmap flags | domid | +---------------+------+--------------+-------------+----------------+-------+ -:xen mmap flags: 32-bit bit field +:xen mmap flags: a 32-bit bit field - Bit 0 is set for Xen foreign memory mapping. - Bit 1 is set for Xen grant memory mapping. @@ -211,7 +213,7 @@ Single memory region description :padding: 64-bit -A region is represented by Memory region description. +:region: region is represented by :ref:`Memory region description <memory_region_description>`. Multiple Memory regions description ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -224,7 +226,7 @@ Multiple Memory regions description :padding: 32-bit -A region is represented by Memory region description. +:regions: regions field contains 8 regions of type :ref:`Memory region description <memory_region_description>`. Log description ^^^^^^^^^^^^^^^ @@ -233,9 +235,9 @@ Log description | log size | log offset | +----------+------------+ -:log size: size of area used for logging +:log size: a 64-bit size of area used for logging -:log offset: offset from start of supplied file descriptor where +:log offset: a 64-bit offset from start of supplied file descriptor where logging starts (i.e. where guest address 0 would be logged) @@ -382,7 +384,7 @@ the kernel implementation. The communication consists of the *front-end* sending message requests and the *back-end* sending message replies. Most of the requests don't require -replies. Here is a list of the ones that do: +replies, except for the following requests: * ``VHOST_USER_GET_FEATURES`` * ``VHOST_USER_GET_PROTOCOL_FEATURES`` @@ -1239,11 +1241,11 @@ Front-end message types (*a vring descriptor index for split virtqueues* vs. *vring descriptor indices for packed virtqueues*). - When and as long as all of a device’s vrings are stopped, it is + When and as long as all of a device's vrings are stopped, it is *suspended*, see :ref:`Suspended device state <suspended_device_state>`. 
- The request payload’s *num* field is currently reserved and must be + The request payload's *num* field is currently reserved and must be set to 0. ``VHOST_USER_SET_VRING_KICK`` @@ -1662,7 +1664,7 @@ Front-end message types :reply payload: ``u64`` Front-end and back-end negotiate a channel over which to transfer the - back-end’s internal state during migration. Either side (front-end or + back-end's internal state during migration. Either side (front-end or back-end) may create the channel. The nature of this channel is not restricted or defined in this document, but whichever side creates it must create a file descriptor that is provided to the respectively @@ -1714,7 +1716,7 @@ Front-end message types :request payload: N/A :reply payload: ``u64`` - After transferring the back-end’s internal state during migration (see + After transferring the back-end's internal state during migration (see the :ref:`Migrating back-end state <migrating_backend_state>` section), check whether the back-end was able to successfully fully process the state. diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst index 35f52a54b1..a2a388f091 100644 --- a/docs/system/arm/emulation.rst +++ b/docs/system/arm/emulation.rst @@ -26,6 +26,7 @@ the following architecture extensions: - FEAT_BF16 (AArch64 BFloat16 instructions) - FEAT_BTI (Branch Target Identification) - FEAT_CCIDX (Extended cache index) +- FEAT_CMOW (Control for cache maintenance permission) - FEAT_CRC32 (CRC32 instructions) - FEAT_Crypto (Cryptographic Extension) - FEAT_CSV2 (Cache speculation variant 2) diff --git a/docs/system/ppc/pseries.rst b/docs/system/ppc/pseries.rst index a876d897b6..bbc51aa7fc 100644 --- a/docs/system/ppc/pseries.rst +++ b/docs/system/ppc/pseries.rst @@ -14,10 +14,19 @@ virtualization capabilities. Supported devices ================= - * Multi processor support for many Power processors generations: POWER7, - POWER7+, POWER8, POWER8NVL, POWER9, and Power10. Support for POWER5+ exists, - but its state is unknown. - * Interrupt Controller, XICS (POWER8) and XIVE (POWER9 and Power10) + * Multi processor support for many Power processors generations: + - POWER7, POWER7+ + - POWER8, POWER8NVL + - POWER9 + - Power10 + - Power11 + - Support for POWER5+ also exists, works with correct kernel/userspace + * Interrupt Controller + - XICS (POWER8) + - XIVE (Supported by below:) + - POWER9 + - Power10 + - Power11 * vPHB PCIe Host bridge. * vscsi and vnet devices, compatible with the same devices available on a PowerVM hypervisor with VIOS managing LPARs. diff --git a/fpu/softfloat-specialize.c.inc b/fpu/softfloat-specialize.c.inc index 4e279b9bc4..b5a3208050 100644 --- a/fpu/softfloat-specialize.c.inc +++ b/fpu/softfloat-specialize.c.inc @@ -390,118 +390,80 @@ bool float32_is_signaling_nan(float32 a_, float_status *status) static int pickNaN(FloatClass a_cls, FloatClass b_cls, bool aIsLargerSignificand, float_status *status) { -#if defined(TARGET_ARM) || defined(TARGET_MIPS) || defined(TARGET_HPPA) || \ - defined(TARGET_LOONGARCH64) || defined(TARGET_S390X) - /* ARM mandated NaN propagation rules (see FPProcessNaNs()), take - * the first of: - * 1. A if it is signaling - * 2. B if it is signaling - * 3. A (quiet) - * 4. B (quiet) - * A signaling NaN is always quietened before returning it. - */ - /* According to MIPS specifications, if one of the two operands is - * a sNaN, a new qNaN has to be generated. This is done in - * floatXX_silence_nan(). 
For qNaN inputs the specifications - * says: "When possible, this QNaN result is one of the operand QNaN - * values." In practice it seems that most implementations choose - * the first operand if both operands are qNaN. In short this gives - * the following rules: - * 1. A if it is signaling - * 2. B if it is signaling - * 3. A (quiet) - * 4. B (quiet) - * A signaling NaN is always silenced before returning it. - */ - if (is_snan(a_cls)) { - return 0; - } else if (is_snan(b_cls)) { - return 1; - } else if (is_qnan(a_cls)) { - return 0; - } else { - return 1; - } -#elif defined(TARGET_PPC) || defined(TARGET_M68K) - /* PowerPC propagation rules: - * 1. A if it sNaN or qNaN - * 2. B if it sNaN or qNaN - * A signaling NaN is always silenced before returning it. - */ - /* M68000 FAMILY PROGRAMMER'S REFERENCE MANUAL - * 3.4 FLOATING-POINT INSTRUCTION DETAILS - * If either operand, but not both operands, of an operation is a - * nonsignaling NaN, then that NaN is returned as the result. If both - * operands are nonsignaling NaNs, then the destination operand - * nonsignaling NaN is returned as the result. - * If either operand to an operation is a signaling NaN (SNaN), then the - * SNaN bit is set in the FPSR EXC byte. If the SNaN exception enable bit - * is set in the FPCR ENABLE byte, then the exception is taken and the - * destination is not modified. If the SNaN exception enable bit is not - * set, setting the SNaN bit in the operand to a one converts the SNaN to - * a nonsignaling NaN. The operation then continues as described in the - * preceding paragraph for nonsignaling NaNs. - */ - if (is_nan(a_cls)) { - return 0; - } else { - return 1; - } -#elif defined(TARGET_SPARC) - /* Prefer SNaN over QNaN, order B then A. */ - if (is_snan(b_cls)) { - return 1; - } else if (is_snan(a_cls)) { - return 0; - } else if (is_qnan(b_cls)) { - return 1; - } else { - return 0; - } -#elif defined(TARGET_XTENSA) /* - * Xtensa has two NaN propagation modes. - * Which one is active is controlled by float_status::use_first_nan. + * We guarantee not to require the target to tell us how to + * pick a NaN if we're always returning the default NaN. + * But if we're not in default-NaN mode then the target must + * specify via set_float_2nan_prop_rule(). */ - if (status->use_first_nan) { - if (is_nan(a_cls)) { + assert(!status->default_nan_mode); + + switch (status->float_2nan_prop_rule) { + case float_2nan_prop_s_ab: + if (is_snan(a_cls)) { + return 0; + } else if (is_snan(b_cls)) { + return 1; + } else if (is_qnan(a_cls)) { return 0; } else { return 1; } - } else { - if (is_nan(b_cls)) { + break; + case float_2nan_prop_s_ba: + if (is_snan(b_cls)) { + return 1; + } else if (is_snan(a_cls)) { + return 0; + } else if (is_qnan(b_cls)) { return 1; } else { return 0; } - } -#else - /* This implements x87 NaN propagation rules: - * SNaN + QNaN => return the QNaN - * two SNaNs => return the one with the larger significand, silenced - * two QNaNs => return the one with the larger significand - * SNaN and a non-NaN => return the SNaN, silenced - * QNaN and a non-NaN => return the QNaN - * - * If we get down to comparing significands and they are the same, - * return the NaN with the positive sign bit (if any). - */ - if (is_snan(a_cls)) { - if (is_snan(b_cls)) { - return aIsLargerSignificand ? 0 : 1; + break; + case float_2nan_prop_ab: + if (is_nan(a_cls)) { + return 0; + } else { + return 1; } - return is_qnan(b_cls) ? 
1 : 0; - } else if (is_qnan(a_cls)) { - if (is_snan(b_cls) || !is_qnan(b_cls)) { + break; + case float_2nan_prop_ba: + if (is_nan(b_cls)) { + return 1; + } else { return 0; + } + break; + case float_2nan_prop_x87: + /* + * This implements x87 NaN propagation rules: + * SNaN + QNaN => return the QNaN + * two SNaNs => return the one with the larger significand, silenced + * two QNaNs => return the one with the larger significand + * SNaN and a non-NaN => return the SNaN, silenced + * QNaN and a non-NaN => return the QNaN + * + * If we get down to comparing significands and they are the same, + * return the NaN with the positive sign bit (if any). + */ + if (is_snan(a_cls)) { + if (is_snan(b_cls)) { + return aIsLargerSignificand ? 0 : 1; + } + return is_qnan(b_cls) ? 1 : 0; + } else if (is_qnan(a_cls)) { + if (is_snan(b_cls) || !is_qnan(b_cls)) { + return 0; + } else { + return aIsLargerSignificand ? 0 : 1; + } } else { - return aIsLargerSignificand ? 0 : 1; + return 1; } - } else { - return 1; + default: + g_assert_not_reached(); } -#endif } /*---------------------------------------------------------------------------- diff --git a/hw/acpi/acpi_generic_initiator.c b/hw/acpi/acpi_generic_initiator.c deleted file mode 100644 index 17b9a052f5..0000000000 --- a/hw/acpi/acpi_generic_initiator.c +++ /dev/null @@ -1,148 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved - */ - -#include "qemu/osdep.h" -#include "hw/acpi/acpi_generic_initiator.h" -#include "hw/acpi/aml-build.h" -#include "hw/boards.h" -#include "hw/pci/pci_device.h" -#include "qemu/error-report.h" - -typedef struct AcpiGenericInitiatorClass { - ObjectClass parent_class; -} AcpiGenericInitiatorClass; - -OBJECT_DEFINE_TYPE_WITH_INTERFACES(AcpiGenericInitiator, acpi_generic_initiator, - ACPI_GENERIC_INITIATOR, OBJECT, - { TYPE_USER_CREATABLE }, - { NULL }) - -OBJECT_DECLARE_SIMPLE_TYPE(AcpiGenericInitiator, ACPI_GENERIC_INITIATOR) - -static void acpi_generic_initiator_init(Object *obj) -{ - AcpiGenericInitiator *gi = ACPI_GENERIC_INITIATOR(obj); - - gi->node = MAX_NODES; - gi->pci_dev = NULL; -} - -static void acpi_generic_initiator_finalize(Object *obj) -{ - AcpiGenericInitiator *gi = ACPI_GENERIC_INITIATOR(obj); - - g_free(gi->pci_dev); -} - -static void acpi_generic_initiator_set_pci_device(Object *obj, const char *val, - Error **errp) -{ - AcpiGenericInitiator *gi = ACPI_GENERIC_INITIATOR(obj); - - gi->pci_dev = g_strdup(val); -} - -static void acpi_generic_initiator_set_node(Object *obj, Visitor *v, - const char *name, void *opaque, - Error **errp) -{ - AcpiGenericInitiator *gi = ACPI_GENERIC_INITIATOR(obj); - MachineState *ms = MACHINE(qdev_get_machine()); - uint32_t value; - - if (!visit_type_uint32(v, name, &value, errp)) { - return; - } - - if (value >= MAX_NODES) { - error_printf("%s: Invalid NUMA node specified\n", - TYPE_ACPI_GENERIC_INITIATOR); - exit(1); - } - - gi->node = value; - ms->numa_state->nodes[gi->node].has_gi = true; -} - -static void acpi_generic_initiator_class_init(ObjectClass *oc, void *data) -{ - object_class_property_add_str(oc, "pci-dev", NULL, - acpi_generic_initiator_set_pci_device); - object_class_property_add(oc, "node", "int", NULL, - acpi_generic_initiator_set_node, NULL, NULL); -} - -/* - * ACPI 6.3: - * Table 5-78 Generic Initiator Affinity Structure - */ -static void -build_srat_generic_pci_initiator_affinity(GArray *table_data, int node, - PCIDeviceHandle *handle) -{ - uint8_t index; - - 
build_append_int_noprefix(table_data, 5, 1); /* Type */ - build_append_int_noprefix(table_data, 32, 1); /* Length */ - build_append_int_noprefix(table_data, 0, 1); /* Reserved */ - build_append_int_noprefix(table_data, 1, 1); /* Device Handle Type: PCI */ - build_append_int_noprefix(table_data, node, 4); /* Proximity Domain */ - - /* Device Handle - PCI */ - build_append_int_noprefix(table_data, handle->segment, 2); - build_append_int_noprefix(table_data, handle->bdf, 2); - for (index = 0; index < 12; index++) { - build_append_int_noprefix(table_data, 0, 1); - } - - build_append_int_noprefix(table_data, GEN_AFFINITY_ENABLED, 4); /* Flags */ - build_append_int_noprefix(table_data, 0, 4); /* Reserved */ -} - -static int build_all_acpi_generic_initiators(Object *obj, void *opaque) -{ - MachineState *ms = MACHINE(qdev_get_machine()); - AcpiGenericInitiator *gi; - GArray *table_data = opaque; - PCIDeviceHandle dev_handle; - PCIDevice *pci_dev; - Object *o; - - if (!object_dynamic_cast(obj, TYPE_ACPI_GENERIC_INITIATOR)) { - return 0; - } - - gi = ACPI_GENERIC_INITIATOR(obj); - if (gi->node >= ms->numa_state->num_nodes) { - error_printf("%s: Specified node %d is invalid.\n", - TYPE_ACPI_GENERIC_INITIATOR, gi->node); - exit(1); - } - - o = object_resolve_path_type(gi->pci_dev, TYPE_PCI_DEVICE, NULL); - if (!o) { - error_printf("%s: Specified device must be a PCI device.\n", - TYPE_ACPI_GENERIC_INITIATOR); - exit(1); - } - - pci_dev = PCI_DEVICE(o); - - dev_handle.segment = 0; - dev_handle.bdf = PCI_BUILD_BDF(pci_bus_num(pci_get_bus(pci_dev)), - pci_dev->devfn); - - build_srat_generic_pci_initiator_affinity(table_data, - gi->node, &dev_handle); - - return 0; -} - -void build_srat_generic_pci_initiator(GArray *table_data) -{ - object_child_foreach_recursive(object_get_root(), - build_all_acpi_generic_initiators, - table_data); -} diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c index 34e0ddbde8..6a76626177 100644 --- a/hw/acpi/aml-build.c +++ b/hw/acpi/aml-build.c @@ -1938,6 +1938,89 @@ void build_srat_memory(GArray *table_data, uint64_t base, } /* + * ACPI Spec Revision 6.3 + * Table 5-80 Device Handle - PCI + */ +static void build_append_srat_pci_device_handle(GArray *table_data, + uint16_t segment, + uint8_t bus, uint8_t devfn) +{ + /* PCI segment number */ + build_append_int_noprefix(table_data, segment, 2); + /* PCI Bus Device Function */ + build_append_int_noprefix(table_data, bus, 1); + build_append_int_noprefix(table_data, devfn, 1); + /* Reserved */ + build_append_int_noprefix(table_data, 0, 12); +} + +static void build_append_srat_acpi_device_handle(GArray *table_data, + const char *hid, + uint32_t uid) +{ + assert(strlen(hid) == 8); + /* Device Handle - ACPI */ + for (int i = 0; i < sizeof(hid); i++) { + build_append_int_noprefix(table_data, hid[i], 1); + } + build_append_int_noprefix(table_data, uid, 4); + build_append_int_noprefix(table_data, 0, 4); +} + +/* + * ACPI spec, Revision 6.3 + * 5.2.16.6 Generic Initiator Affinity Structure + * With PCI Device Handle. 
+ */ +void build_srat_pci_generic_initiator(GArray *table_data, uint32_t node, + uint16_t segment, uint8_t bus, + uint8_t devfn) +{ + /* Type */ + build_append_int_noprefix(table_data, 5, 1); + /* Length */ + build_append_int_noprefix(table_data, 32, 1); + /* Reserved */ + build_append_int_noprefix(table_data, 0, 1); + /* Device Handle Type: PCI */ + build_append_int_noprefix(table_data, 1, 1); + /* Proximity Domain */ + build_append_int_noprefix(table_data, node, 4); + /* Device Handle */ + build_append_srat_pci_device_handle(table_data, segment, bus, devfn); + /* Flags - GI Enabled */ + build_append_int_noprefix(table_data, 1, 4); + /* Reserved */ + build_append_int_noprefix(table_data, 0, 4); +} + +/* + * ACPI spec, Revision 6.5 + * 5.2.16.7 Generic Port Affinity Structure + * With ACPI Device Handle. + */ +void build_srat_acpi_generic_port(GArray *table_data, uint32_t node, + const char *hid, uint32_t uid) +{ + /* Type */ + build_append_int_noprefix(table_data, 6, 1); + /* Length */ + build_append_int_noprefix(table_data, 32, 1); + /* Reserved */ + build_append_int_noprefix(table_data, 0, 1); + /* Device Handle Type: ACPI */ + build_append_int_noprefix(table_data, 0, 1); + /* Proximity Domain */ + build_append_int_noprefix(table_data, node, 4); + /* Device Handle */ + build_append_srat_acpi_device_handle(table_data, hid, uid); + /* Flags - GP Enabled */ + build_append_int_noprefix(table_data, 1, 4); + /* Reserved */ + build_append_int_noprefix(table_data, 0, 4); +} + +/* * ACPI spec 5.2.17 System Locality Distance Information Table * (Revision 2.0 or later) */ diff --git a/hw/acpi/cpu.c b/hw/acpi/cpu.c index 5cb60ca8bc..23443f09a5 100644 --- a/hw/acpi/cpu.c +++ b/hw/acpi/cpu.c @@ -50,6 +50,18 @@ void acpi_cpu_ospm_status(CPUHotplugState *cpu_st, ACPIOSTInfoList ***list) } } +static bool check_cpu_enabled_status(DeviceState *dev) +{ + CPUClass *k = dev ? CPU_GET_CLASS(dev) : NULL; + CPUState *cpu = CPU(dev); + + if (cpu && (!k->cpu_enabled_status || k->cpu_enabled_status(cpu))) { + return true; + } + + return false; +} + static uint64_t cpu_hotplug_rd(void *opaque, hwaddr addr, unsigned size) { uint64_t val = 0; @@ -63,10 +75,11 @@ static uint64_t cpu_hotplug_rd(void *opaque, hwaddr addr, unsigned size) cdev = &cpu_st->devs[cpu_st->selector]; switch (addr) { case ACPI_CPU_FLAGS_OFFSET_RW: /* pack and return is_* fields */ - val |= cdev->cpu ? 1 : 0; + val |= check_cpu_enabled_status(DEVICE(cdev->cpu)) ? 1 : 0; val |= cdev->is_inserting ? 2 : 0; val |= cdev->is_removing ? 4 : 0; val |= cdev->fw_remove ? 16 : 0; + val |= cdev->cpu ? 32 : 0; trace_cpuhp_acpi_read_flags(cpu_st->selector, val); break; case ACPI_CPU_CMD_DATA_OFFSET_RW: @@ -233,6 +246,17 @@ void cpu_hotplug_hw_init(MemoryRegion *as, Object *owner, memory_region_add_subregion(as, base_addr, &state->ctrl_reg); } +static bool should_remain_acpi_present(DeviceState *dev) +{ + CPUClass *k = CPU_GET_CLASS(dev); + /* + * A system may contain CPUs that are always present on one die, NUMA node, + * or socket, yet may be non-present on another simultaneously. Check from + * architecture specific code. 
+ */ + return k->cpu_persistent_status && k->cpu_persistent_status(CPU(dev)); +} + static AcpiCpuStatus *get_cpu_status(CPUHotplugState *cpu_st, DeviceState *dev) { CPUClass *k = CPU_GET_CLASS(dev); @@ -289,7 +313,9 @@ void acpi_cpu_unplug_cb(CPUHotplugState *cpu_st, return; } - cdev->cpu = NULL; + if (!should_remain_acpi_present(dev)) { + cdev->cpu = NULL; + } } static const VMStateDescription vmstate_cpuhp_sts = { @@ -336,6 +362,7 @@ const VMStateDescription vmstate_cpu_hotplug = { #define CPU_REMOVE_EVENT "CRMV" #define CPU_EJECT_EVENT "CEJ0" #define CPU_FW_EJECT_EVENT "CEJF" +#define CPU_PRESENT "CPRS" void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts, build_madt_cpu_fn build_madt_cpu, hwaddr base_addr, @@ -396,7 +423,9 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts, aml_append(field, aml_named_field(CPU_EJECT_EVENT, 1)); /* tell firmware to do device eject, write only */ aml_append(field, aml_named_field(CPU_FW_EJECT_EVENT, 1)); - aml_append(field, aml_reserved_field(3)); + /* 1 if present, read only */ + aml_append(field, aml_named_field(CPU_PRESENT, 1)); + aml_append(field, aml_reserved_field(2)); aml_append(field, aml_named_field(CPU_COMMAND, 8)); aml_append(cpu_ctrl_dev, field); @@ -426,6 +455,7 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts, Aml *ctrl_lock = aml_name("%s.%s", cphp_res_path, CPU_LOCK); Aml *cpu_selector = aml_name("%s.%s", cphp_res_path, CPU_SELECTOR); Aml *is_enabled = aml_name("%s.%s", cphp_res_path, CPU_ENABLED); + Aml *is_present = aml_name("%s.%s", cphp_res_path, CPU_PRESENT); Aml *cpu_cmd = aml_name("%s.%s", cphp_res_path, CPU_COMMAND); Aml *cpu_data = aml_name("%s.%s", cphp_res_path, CPU_DATA); Aml *ins_evt = aml_name("%s.%s", cphp_res_path, CPU_INSERT_EVENT); @@ -454,13 +484,26 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts, { Aml *idx = aml_arg(0); Aml *sta = aml_local(0); + Aml *ifctx2; + Aml *else_ctx; aml_append(method, aml_acquire(ctrl_lock, 0xFFFF)); aml_append(method, aml_store(idx, cpu_selector)); aml_append(method, aml_store(zero, sta)); - ifctx = aml_if(aml_equal(is_enabled, one)); + ifctx = aml_if(aml_equal(is_present, one)); { - aml_append(ifctx, aml_store(aml_int(0xF), sta)); + ifctx2 = aml_if(aml_equal(is_enabled, one)); + { + /* cpu is present and enabled */ + aml_append(ifctx2, aml_store(aml_int(0xF), sta)); + } + aml_append(ifctx, ifctx2); + else_ctx = aml_else(); + { + /* cpu is present but disabled */ + aml_append(else_ctx, aml_store(aml_int(0xD), sta)); + } + aml_append(ifctx, else_ctx); } aml_append(method, ifctx); aml_append(method, aml_release(ctrl_lock)); diff --git a/hw/acpi/generic_event_device.c b/hw/acpi/generic_event_device.c index d00f5a6c1c..663d9cb093 100644 --- a/hw/acpi/generic_event_device.c +++ b/hw/acpi/generic_event_device.c @@ -331,6 +331,24 @@ static const VMStateDescription vmstate_memhp_state = { } }; +static bool cpuhp_needed(void *opaque) +{ + MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine()); + + return mc->has_hotpluggable_cpus; +} + +static const VMStateDescription vmstate_cpuhp_state = { + .name = "acpi-ged/cpuhp", + .version_id = 1, + .minimum_version_id = 1, + .needed = cpuhp_needed, + .fields = (VMStateField[]) { + VMSTATE_CPU_HOTPLUG(cpuhp_state, AcpiGedState), + VMSTATE_END_OF_LIST() + } +}; + static const VMStateDescription vmstate_ged_state = { .name = "acpi-ged-state", .version_id = 1, @@ -379,6 +397,7 @@ static const VMStateDescription vmstate_acpi_ged = { }, 
.subsections = (const VMStateDescription * const []) { &vmstate_memhp_state, + &vmstate_cpuhp_state, &vmstate_ghes_state, NULL } diff --git a/hw/acpi/meson.build b/hw/acpi/meson.build index 7f8ccc9b7a..c8854f4d48 100644 --- a/hw/acpi/meson.build +++ b/hw/acpi/meson.build @@ -1,6 +1,5 @@ acpi_ss = ss.source_set() acpi_ss.add(files( - 'acpi_generic_initiator.c', 'acpi_interface.c', 'aml-build.c', 'bios-linker-loader.c', diff --git a/hw/acpi/pci.c b/hw/acpi/pci.c index 20b70dcd81..f88f450af3 100644 --- a/hw/acpi/pci.c +++ b/hw/acpi/pci.c @@ -24,8 +24,14 @@ */ #include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qom/object_interfaces.h" +#include "qapi/error.h" +#include "hw/boards.h" #include "hw/acpi/aml-build.h" #include "hw/acpi/pci.h" +#include "hw/pci/pci_bridge.h" +#include "hw/pci/pci_device.h" #include "hw/pci/pcie_host.h" /* @@ -59,3 +65,239 @@ void build_mcfg(GArray *table_data, BIOSLinker *linker, AcpiMcfgInfo *info, acpi_table_end(linker, &table); } + +typedef struct AcpiGenericInitiator { + /* private */ + Object parent; + + /* public */ + char *pci_dev; + uint32_t node; +} AcpiGenericInitiator; + +typedef struct AcpiGenericInitiatorClass { + ObjectClass parent_class; +} AcpiGenericInitiatorClass; + +#define TYPE_ACPI_GENERIC_INITIATOR "acpi-generic-initiator" + +OBJECT_DEFINE_TYPE_WITH_INTERFACES(AcpiGenericInitiator, acpi_generic_initiator, + ACPI_GENERIC_INITIATOR, OBJECT, + { TYPE_USER_CREATABLE }, + { NULL }) + +OBJECT_DECLARE_SIMPLE_TYPE(AcpiGenericInitiator, ACPI_GENERIC_INITIATOR) + +static void acpi_generic_initiator_init(Object *obj) +{ + AcpiGenericInitiator *gi = ACPI_GENERIC_INITIATOR(obj); + + gi->node = MAX_NODES; + gi->pci_dev = NULL; +} + +static void acpi_generic_initiator_finalize(Object *obj) +{ + AcpiGenericInitiator *gi = ACPI_GENERIC_INITIATOR(obj); + + g_free(gi->pci_dev); +} + +static void acpi_generic_initiator_set_pci_device(Object *obj, const char *val, + Error **errp) +{ + AcpiGenericInitiator *gi = ACPI_GENERIC_INITIATOR(obj); + + gi->pci_dev = g_strdup(val); +} + +static void acpi_generic_initiator_set_node(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + AcpiGenericInitiator *gi = ACPI_GENERIC_INITIATOR(obj); + MachineState *ms = MACHINE(qdev_get_machine()); + uint32_t value; + + if (!visit_type_uint32(v, name, &value, errp)) { + return; + } + + if (value >= MAX_NODES) { + error_printf("%s: Invalid NUMA node specified\n", + TYPE_ACPI_GENERIC_INITIATOR); + exit(1); + } + + gi->node = value; + ms->numa_state->nodes[gi->node].has_gi = true; +} + +static void acpi_generic_initiator_class_init(ObjectClass *oc, void *data) +{ + object_class_property_add_str(oc, "pci-dev", NULL, + acpi_generic_initiator_set_pci_device); + object_class_property_set_description(oc, "pci-dev", + "PCI device to associate with the node"); + object_class_property_add(oc, "node", "int", NULL, + acpi_generic_initiator_set_node, NULL, NULL); + object_class_property_set_description(oc, "node", + "NUMA node associated with the PCI device"); +} + +static int build_acpi_generic_initiator(Object *obj, void *opaque) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + AcpiGenericInitiator *gi; + GArray *table_data = opaque; + int32_t devfn; + uint8_t bus; + Object *o; + + if (!object_dynamic_cast(obj, TYPE_ACPI_GENERIC_INITIATOR)) { + return 0; + } + + gi = ACPI_GENERIC_INITIATOR(obj); + if (gi->node >= ms->numa_state->num_nodes) { + error_printf("%s: Specified node %d is invalid.\n", + TYPE_ACPI_GENERIC_INITIATOR, gi->node); + exit(1); + } 
+ + o = object_resolve_path_type(gi->pci_dev, TYPE_PCI_DEVICE, NULL); + if (!o) { + error_printf("%s: Specified device must be a PCI device.\n", + TYPE_ACPI_GENERIC_INITIATOR); + exit(1); + } + + bus = object_property_get_uint(o, "busnr", &error_fatal); + devfn = object_property_get_uint(o, "addr", &error_fatal); + /* devfn is constrained in PCI to be 8 bit but storage is an int32_t */ + assert(devfn >= 0 && devfn < PCI_DEVFN_MAX); + + build_srat_pci_generic_initiator(table_data, gi->node, 0, bus, devfn); + + return 0; +} + +typedef struct AcpiGenericPort { + /* private */ + Object parent; + + /* public */ + char *pci_bus; + uint32_t node; +} AcpiGenericPort; + +typedef struct AcpiGenericPortClass { + ObjectClass parent_class; +} AcpiGenericPortClass; + +#define TYPE_ACPI_GENERIC_PORT "acpi-generic-port" + +OBJECT_DEFINE_TYPE_WITH_INTERFACES(AcpiGenericPort, acpi_generic_port, + ACPI_GENERIC_PORT, OBJECT, + { TYPE_USER_CREATABLE }, + { NULL }) + +OBJECT_DECLARE_SIMPLE_TYPE(AcpiGenericPort, ACPI_GENERIC_PORT) + +static void acpi_generic_port_init(Object *obj) +{ + AcpiGenericPort *gp = ACPI_GENERIC_PORT(obj); + + gp->node = MAX_NODES; + gp->pci_bus = NULL; +} + +static void acpi_generic_port_finalize(Object *obj) +{ + AcpiGenericPort *gp = ACPI_GENERIC_PORT(obj); + + g_free(gp->pci_bus); +} + +static void acpi_generic_port_set_pci_bus(Object *obj, const char *val, + Error **errp) +{ + AcpiGenericPort *gp = ACPI_GENERIC_PORT(obj); + + gp->pci_bus = g_strdup(val); +} + +static void acpi_generic_port_set_node(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + AcpiGenericPort *gp = ACPI_GENERIC_PORT(obj); + uint32_t value; + + if (!visit_type_uint32(v, name, &value, errp)) { + return; + } + + if (value >= MAX_NODES) { + error_printf("%s: Invalid NUMA node specified\n", + TYPE_ACPI_GENERIC_INITIATOR); + exit(1); + } + + gp->node = value; +} + +static void acpi_generic_port_class_init(ObjectClass *oc, void *data) +{ + object_class_property_add_str(oc, "pci-bus", NULL, + acpi_generic_port_set_pci_bus); + object_class_property_set_description(oc, "pci-bus", + "PCI Bus of the host bridge associated with this GP affinity structure"); + object_class_property_add(oc, "node", "int", NULL, + acpi_generic_port_set_node, NULL, NULL); + object_class_property_set_description(oc, "node", + "The NUMA node like ID to index HMAT/SLIT NUMA properties involving GP"); +} + +static int build_acpi_generic_port(Object *obj, void *opaque) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + const char *hid = "ACPI0016"; + GArray *table_data = opaque; + AcpiGenericPort *gp; + uint32_t uid; + Object *o; + + if (!object_dynamic_cast(obj, TYPE_ACPI_GENERIC_PORT)) { + return 0; + } + + gp = ACPI_GENERIC_PORT(obj); + + if (gp->node >= ms->numa_state->num_nodes) { + error_printf("%s: node %d is invalid.\n", + TYPE_ACPI_GENERIC_PORT, gp->node); + exit(1); + } + + o = object_resolve_path_type(gp->pci_bus, TYPE_PXB_CXL_BUS, NULL); + if (!o) { + error_printf("%s: device must be a CXL host bridge.\n", + TYPE_ACPI_GENERIC_PORT); + exit(1); + } + + uid = object_property_get_uint(o, "acpi_uid", &error_fatal); + build_srat_acpi_generic_port(table_data, gp->node, hid, uid); + + return 0; +} + +void build_srat_generic_affinity_structures(GArray *table_data) +{ + object_child_foreach_recursive(object_get_root(), + build_acpi_generic_initiator, + table_data); + object_child_foreach_recursive(object_get_root(), build_acpi_generic_port, + table_data); +} diff --git a/hw/arm/Kconfig b/hw/arm/Kconfig index 
e7fd9338d1..1b25e73578 100644 --- a/hw/arm/Kconfig +++ b/hw/arm/Kconfig @@ -539,6 +539,7 @@ config ASPEED_SOC select PMBUS select MAX31785 select FSI_APB2OPB_ASPEED + select AT24C config MPS2 bool diff --git a/hw/arm/aspeed.c b/hw/arm/aspeed.c index b4b1ce9efb..6ca145362c 100644 --- a/hw/arm/aspeed.c +++ b/hw/arm/aspeed.c @@ -338,10 +338,20 @@ static void sdhci_attach_drive(SDHCIState *sdhci, DriveInfo *dinfo, bool emmc, return; } card = qdev_new(emmc ? TYPE_EMMC : TYPE_SD_CARD); - if (emmc) { + + /* + * Force the boot properties of the eMMC device only when the + * machine is strapped to boot from eMMC. Without these + * settings, the machine would not boot. + * + * This also allows the machine to use an eMMC device without + * boot areas when booting from the flash device (or -kernel) + * Ideally, the device and its properties should be defined on + * the command line. + */ + if (emmc && boot_emmc) { qdev_prop_set_uint64(card, "boot-partition-size", 1 * MiB); - qdev_prop_set_uint8(card, "boot-config", - boot_emmc ? 0x1 << 3 : 0x0); + qdev_prop_set_uint8(card, "boot-config", 0x1 << 3); } qdev_prop_set_drive_err(card, "drive", blk_by_legacy_dinfo(dinfo), &error_fatal); @@ -1594,18 +1604,20 @@ static void aspeed_minibmc_machine_init(MachineState *machine) connect_serial_hds_to_uarts(bmc); qdev_realize(DEVICE(bmc->soc), NULL, &error_abort); - aspeed_board_init_flashes(&bmc->soc->fmc, - bmc->fmc_model ? bmc->fmc_model : amc->fmc_model, - amc->num_cs, - 0); + if (defaults_enabled()) { + aspeed_board_init_flashes(&bmc->soc->fmc, + bmc->fmc_model ? bmc->fmc_model : amc->fmc_model, + amc->num_cs, + 0); - aspeed_board_init_flashes(&bmc->soc->spi[0], - bmc->spi_model ? bmc->spi_model : amc->spi_model, - amc->num_cs, amc->num_cs); + aspeed_board_init_flashes(&bmc->soc->spi[0], + bmc->spi_model ? bmc->spi_model : amc->spi_model, + amc->num_cs, amc->num_cs); - aspeed_board_init_flashes(&bmc->soc->spi[1], - bmc->spi_model ? bmc->spi_model : amc->spi_model, - amc->num_cs, (amc->num_cs * 2)); + aspeed_board_init_flashes(&bmc->soc->spi[1], + bmc->spi_model ? 
bmc->spi_model : amc->spi_model, + amc->num_cs, (amc->num_cs * 2)); + } if (amc->i2c_init) { amc->i2c_init(bmc); diff --git a/hw/arm/aspeed_ast27x0.c b/hw/arm/aspeed_ast27x0.c index dca660eb6b..63d1fcb086 100644 --- a/hw/arm/aspeed_ast27x0.c +++ b/hw/arm/aspeed_ast27x0.c @@ -13,6 +13,7 @@ #include "qapi/error.h" #include "hw/misc/unimp.h" #include "hw/arm/aspeed_soc.h" +#include "hw/arm/bsa.h" #include "qemu/module.h" #include "qemu/error-report.h" #include "hw/i2c/aspeed_i2c.h" @@ -63,9 +64,10 @@ static const hwaddr aspeed_soc_ast2700_memmap[] = { [ASPEED_DEV_ADC] = 0x14C00000, [ASPEED_DEV_I2C] = 0x14C0F000, [ASPEED_DEV_GPIO] = 0x14C0B000, + [ASPEED_DEV_RTC] = 0x12C0F000, }; -#define AST2700_MAX_IRQ 288 +#define AST2700_MAX_IRQ 256 /* Shared Peripheral Interrupt values below are offset by -32 from datasheet */ static const int aspeed_soc_ast2700_irqmap[] = { @@ -376,6 +378,8 @@ static void aspeed_soc_ast2700_init(Object *obj) snprintf(typename, sizeof(typename), "aspeed.gpio-%s", socname); object_initialize_child(obj, "gpio", &s->gpio, typename); + + object_initialize_child(obj, "rtc", &s->rtc, TYPE_ASPEED_RTC); } /* @@ -402,7 +406,7 @@ static bool aspeed_soc_ast2700_gic_realize(DeviceState *dev, Error **errp) gicdev = DEVICE(&a->gic); qdev_prop_set_uint32(gicdev, "revision", 3); qdev_prop_set_uint32(gicdev, "num-cpu", sc->num_cpus); - qdev_prop_set_uint32(gicdev, "num-irq", AST2700_MAX_IRQ); + qdev_prop_set_uint32(gicdev, "num-irq", AST2700_MAX_IRQ + GIC_INTERNAL); redist_region_count = qlist_new(); qlist_append_int(redist_region_count, sc->num_cpus); @@ -416,28 +420,27 @@ static bool aspeed_soc_ast2700_gic_realize(DeviceState *dev, Error **errp) for (i = 0; i < sc->num_cpus; i++) { DeviceState *cpudev = DEVICE(&a->cpu[i]); - int NUM_IRQS = 256, ARCH_GIC_MAINT_IRQ = 9, VIRTUAL_PMU_IRQ = 7; - int ppibase = NUM_IRQS + i * GIC_INTERNAL + GIC_NR_SGIS; + int intidbase = AST2700_MAX_IRQ + i * GIC_INTERNAL; const int timer_irq[] = { - [GTIMER_PHYS] = 14, - [GTIMER_VIRT] = 11, - [GTIMER_HYP] = 10, - [GTIMER_SEC] = 13, + [GTIMER_PHYS] = ARCH_TIMER_NS_EL1_IRQ, + [GTIMER_VIRT] = ARCH_TIMER_VIRT_IRQ, + [GTIMER_HYP] = ARCH_TIMER_NS_EL2_IRQ, + [GTIMER_SEC] = ARCH_TIMER_S_EL1_IRQ, }; int j; for (j = 0; j < ARRAY_SIZE(timer_irq); j++) { qdev_connect_gpio_out(cpudev, j, - qdev_get_gpio_in(gicdev, ppibase + timer_irq[j])); + qdev_get_gpio_in(gicdev, intidbase + timer_irq[j])); } qemu_irq irq = qdev_get_gpio_in(gicdev, - ppibase + ARCH_GIC_MAINT_IRQ); + intidbase + ARCH_GIC_MAINT_IRQ); qdev_connect_gpio_out_named(cpudev, "gicv3-maintenance-interrupt", 0, irq); qdev_connect_gpio_out_named(cpudev, "pmu-interrupt", 0, - qdev_get_gpio_in(gicdev, ppibase + VIRTUAL_PMU_IRQ)); + qdev_get_gpio_in(gicdev, intidbase + VIRTUAL_PMU_IRQ)); sysbus_connect_irq(gicbusdev, i, qdev_get_gpio_in(cpudev, ARM_CPU_IRQ)); sysbus_connect_irq(gicbusdev, i + sc->num_cpus, @@ -670,6 +673,14 @@ static void aspeed_soc_ast2700_realize(DeviceState *dev, Error **errp) sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio), 0, aspeed_soc_get_irq(s, ASPEED_DEV_GPIO)); + /* RTC */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->rtc), errp)) { + return; + } + aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->rtc), 0, sc->memmap[ASPEED_DEV_RTC]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->rtc), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_RTC)); + create_unimplemented_device("ast2700.dpmcu", 0x11000000, 0x40000); create_unimplemented_device("ast2700.iomem0", 0x12000000, 0x01000000); create_unimplemented_device("ast2700.iomem1", 0x14000000, 0x01000000); diff --git 
a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c index f76fb117ad..620992c92c 100644 --- a/hw/arm/virt-acpi-build.c +++ b/hw/arm/virt-acpi-build.c @@ -57,7 +57,6 @@ #include "migration/vmstate.h" #include "hw/acpi/ghes.h" #include "hw/acpi/viot.h" -#include "hw/acpi/acpi_generic_initiator.h" #include "hw/virtio/virtio-acpi.h" #include "target/arm/multiprocessing.h" @@ -511,7 +510,7 @@ build_srat(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) } } - build_srat_generic_pci_initiator(table_data); + build_srat_generic_affinity_structures(table_data); if (ms->nvdimms_state->is_enabled) { nvdimm_build_srat(table_data); diff --git a/hw/block/pflash_cfi01.c b/hw/block/pflash_cfi01.c index 7b6ec64442..21a81b44f0 100644 --- a/hw/block/pflash_cfi01.c +++ b/hw/block/pflash_cfi01.c @@ -47,7 +47,6 @@ #include "qemu/bitops.h" #include "qemu/host-utils.h" #include "qemu/log.h" -#include "qemu/module.h" #include "qemu/option.h" #include "hw/sysbus.h" #include "migration/vmstate.h" @@ -947,20 +946,16 @@ static void pflash_cfi01_class_init(ObjectClass *klass, void *data) set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); } - -static const TypeInfo pflash_cfi01_info = { - .name = TYPE_PFLASH_CFI01, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(PFlashCFI01), - .class_init = pflash_cfi01_class_init, +static const TypeInfo pflash_cfi01_types[] = { + { + .name = TYPE_PFLASH_CFI01, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(PFlashCFI01), + .class_init = pflash_cfi01_class_init, + }, }; -static void pflash_cfi01_register_types(void) -{ - type_register_static(&pflash_cfi01_info); -} - -type_init(pflash_cfi01_register_types) +DEFINE_TYPES(pflash_cfi01_types) PFlashCFI01 *pflash_cfi01_register(hwaddr base, const char *name, diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c index 5b7f46bbb0..7996e49821 100644 --- a/hw/block/vhost-user-blk.c +++ b/hw/block/vhost-user-blk.c @@ -90,27 +90,39 @@ static void vhost_user_blk_set_config(VirtIODevice *vdev, const uint8_t *config) s->blkcfg.wce = blkcfg->wce; } +static int vhost_user_blk_sync_config(DeviceState *dev, Error **errp) +{ + int ret; + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserBlk *s = VHOST_USER_BLK(vdev); + + ret = vhost_dev_get_config(&s->dev, (uint8_t *)&s->blkcfg, + vdev->config_len, errp); + if (ret < 0) { + return ret; + } + + memcpy(vdev->config, &s->blkcfg, vdev->config_len); + virtio_notify_config(vdev); + + return 0; +} + static int vhost_user_blk_handle_config_change(struct vhost_dev *dev) { int ret; - VirtIODevice *vdev = dev->vdev; - VHostUserBlk *s = VHOST_USER_BLK(dev->vdev); Error *local_err = NULL; if (!dev->started) { return 0; } - ret = vhost_dev_get_config(dev, (uint8_t *)&s->blkcfg, - vdev->config_len, &local_err); + ret = vhost_user_blk_sync_config(DEVICE(dev->vdev), &local_err); if (ret < 0) { error_report_err(local_err); return ret; } - memcpy(dev->vdev->config, &s->blkcfg, vdev->config_len); - virtio_notify_config(dev->vdev); - return 0; } @@ -579,6 +591,7 @@ static void vhost_user_blk_class_init(ObjectClass *klass, void *data) device_class_set_props(dc, vhost_user_blk_properties); dc->vmsd = &vmstate_vhost_user_blk; + dc->sync_config = vhost_user_blk_sync_config; set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); vdc->realize = vhost_user_blk_device_realize; vdc->unrealize = vhost_user_blk_device_unrealize; diff --git a/hw/core/machine-smp.c b/hw/core/machine-smp.c index 5d8d7edcbd..640b2114b4 100644 --- a/hw/core/machine-smp.c +++ b/hw/core/machine-smp.c @@ -261,6 
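The vhost-user-blk hunk above factors the config refresh into one helper that is installed both as the new sync_config hook and as the body of the config-change handler. A standalone sketch of that shape, with every name invented:

#include <string.h>

struct blk_config { unsigned long capacity; unsigned char wce; };

struct blk_dev {
    struct blk_config cached;
    int started;
};

/* stand-in for fetching the configuration space from the backend */
static int backend_get_config(struct blk_config *out)
{
    out->capacity = 2048;
    out->wce = 1;
    return 0;
}

/* One helper refreshes the cached config; both the explicit sync request
 * and the backend's config-change event funnel through it. */
static int blk_sync_config(struct blk_dev *d)
{
    struct blk_config fresh;

    if (backend_get_config(&fresh) < 0) {
        return -1;
    }
    memcpy(&d->cached, &fresh, sizeof(fresh));
    /* the real device would notify the guest here */
    return 0;
}

static int blk_handle_config_change(struct blk_dev *d)
{
    return d->started ? blk_sync_config(d) : 0;
}
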
+261,72 @@ void machine_parse_smp_config(MachineState *ms, } } +static bool machine_check_topo_support(MachineState *ms, + CpuTopologyLevel topo, + Error **errp) +{ + MachineClass *mc = MACHINE_GET_CLASS(ms); + + if ((topo == CPU_TOPOLOGY_LEVEL_MODULE && !mc->smp_props.modules_supported) || + (topo == CPU_TOPOLOGY_LEVEL_CLUSTER && !mc->smp_props.clusters_supported) || + (topo == CPU_TOPOLOGY_LEVEL_DIE && !mc->smp_props.dies_supported) || + (topo == CPU_TOPOLOGY_LEVEL_BOOK && !mc->smp_props.books_supported) || + (topo == CPU_TOPOLOGY_LEVEL_DRAWER && !mc->smp_props.drawers_supported)) { + error_setg(errp, + "Invalid topology level: %s. " + "The topology level is not supported by this machine", + CpuTopologyLevel_str(topo)); + return false; + } + + return true; +} + +bool machine_parse_smp_cache(MachineState *ms, + const SmpCachePropertiesList *caches, + Error **errp) +{ + MachineClass *mc = MACHINE_GET_CLASS(ms); + const SmpCachePropertiesList *node; + DECLARE_BITMAP(caches_bitmap, CACHE_LEVEL_AND_TYPE__MAX); + + for (node = caches; node; node = node->next) { + /* Prohibit users from repeating settings. */ + if (test_bit(node->value->cache, caches_bitmap)) { + error_setg(errp, + "Invalid cache properties: %s. " + "The cache properties are duplicated", + CacheLevelAndType_str(node->value->cache)); + return false; + } + + machine_set_cache_topo_level(ms, node->value->cache, + node->value->topology); + set_bit(node->value->cache, caches_bitmap); + } + + for (int i = 0; i < CACHE_LEVEL_AND_TYPE__MAX; i++) { + const SmpCacheProperties *props = &ms->smp_cache.props[i]; + + /* + * Reject non "default" topology level if the cache isn't + * supported by the machine. + */ + if (props->topology != CPU_TOPOLOGY_LEVEL_DEFAULT && + !mc->smp_props.cache_supported[props->cache]) { + error_setg(errp, + "%s cache topology not supported by this machine", + CacheLevelAndType_str(node->value->cache)); + return false; + } + + if (!machine_check_topo_support(ms, props->topology, errp)) { + return false; + } + } + return true; +} + unsigned int machine_topo_get_cores_per_socket(const MachineState *ms) { return ms->smp.cores * ms->smp.modules * ms->smp.clusters * ms->smp.dies; @@ -270,3 +336,63 @@ unsigned int machine_topo_get_threads_per_socket(const MachineState *ms) { return ms->smp.threads * machine_topo_get_cores_per_socket(ms); } + +CpuTopologyLevel machine_get_cache_topo_level(const MachineState *ms, + CacheLevelAndType cache) +{ + return ms->smp_cache.props[cache].topology; +} + +void machine_set_cache_topo_level(MachineState *ms, CacheLevelAndType cache, + CpuTopologyLevel level) +{ + ms->smp_cache.props[cache].topology = level; +} + +/* + * When both cache1 and cache2 are configured with specific topology levels + * (not default level), is cache1's topology level higher than cache2? + */ +static bool smp_cache_topo_cmp(const SmpCache *smp_cache, + CacheLevelAndType cache1, + CacheLevelAndType cache2) +{ + /* + * Before comparing, the "default" topology level should be replaced + * with the specific level. + */ + assert(smp_cache->props[cache1].topology != CPU_TOPOLOGY_LEVEL_DEFAULT); + + return smp_cache->props[cache1].topology > smp_cache->props[cache2].topology; +} + +/* + * Currently, we have no way to expose the arch-specific default cache model + * because the cache model is sometimes related to the CPU model (e.g., i386). 
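machine_parse_smp_cache() above rejects a cache that is listed twice by marking each one in a bitmap as it is parsed. The same idea in a standalone sketch, with a placeholder enum standing in for the QAPI-generated one:

#include <stdbool.h>
#include <stddef.h>

enum cache_id { CACHE_L1D, CACHE_L1I, CACHE_L2, CACHE_L3, CACHE_MAX };

/* Walk the user-supplied list once; the first repeated cache id fails. */
static bool caches_are_unique(const enum cache_id *cfg, size_t n)
{
    bool seen[CACHE_MAX] = { false };

    for (size_t i = 0; i < n; i++) {
        if (seen[cfg[i]]) {
            return false;   /* duplicated cache property */
        }
        seen[cfg[i]] = true;
    }
    return true;
}
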
+ * + * We can only check the correctness of the cache topology after the arch loads + * the user-configured cache model from MachineState and consumes the special + * "default" level by replacing it with the specific level. + */ +bool machine_check_smp_cache(const MachineState *ms, Error **errp) +{ + if (smp_cache_topo_cmp(&ms->smp_cache, CACHE_LEVEL_AND_TYPE_L1D, + CACHE_LEVEL_AND_TYPE_L2) || + smp_cache_topo_cmp(&ms->smp_cache, CACHE_LEVEL_AND_TYPE_L1I, + CACHE_LEVEL_AND_TYPE_L2)) { + error_setg(errp, + "Invalid smp cache topology. " + "L2 cache topology level shouldn't be lower than L1 cache"); + return false; + } + + if (smp_cache_topo_cmp(&ms->smp_cache, CACHE_LEVEL_AND_TYPE_L2, + CACHE_LEVEL_AND_TYPE_L3)) { + error_setg(errp, + "Invalid smp cache topology. " + "L3 cache topology level shouldn't be lower than L2 cache"); + return false; + } + + return true; +} diff --git a/hw/core/machine.c b/hw/core/machine.c index 222799bc46..a35c4a8fae 100644 --- a/hw/core/machine.c +++ b/hw/core/machine.c @@ -11,10 +11,12 @@ */ #include "qemu/osdep.h" +#include "qemu/units.h" #include "qemu/accel.h" #include "sysemu/replay.h" #include "hw/boards.h" #include "hw/loader.h" +#include "qemu/error-report.h" #include "qapi/error.h" #include "qapi/qapi-visit-machine.h" #include "qemu/madvise.h" @@ -34,7 +36,9 @@ #include "hw/virtio/virtio-iommu.h" #include "audio/audio.h" -GlobalProperty hw_compat_9_1[] = {}; +GlobalProperty hw_compat_9_1[] = { + { TYPE_PCI_DEVICE, "x-pcie-ext-tag", "false" }, +}; const size_t hw_compat_9_1_len = G_N_ELEMENTS(hw_compat_9_1); GlobalProperty hw_compat_9_0[] = { @@ -281,33 +285,6 @@ GlobalProperty hw_compat_2_4[] = { }; const size_t hw_compat_2_4_len = G_N_ELEMENTS(hw_compat_2_4); -GlobalProperty hw_compat_2_3[] = { - { "virtio-blk-pci", "any_layout", "off" }, - { "virtio-balloon-pci", "any_layout", "off" }, - { "virtio-serial-pci", "any_layout", "off" }, - { "virtio-9p-pci", "any_layout", "off" }, - { "virtio-rng-pci", "any_layout", "off" }, - { TYPE_PCI_DEVICE, "x-pcie-lnksta-dllla", "off" }, - { "migration", "send-configuration", "off" }, - { "migration", "send-section-footer", "off" }, - { "migration", "store-global-state", "off" }, -}; -const size_t hw_compat_2_3_len = G_N_ELEMENTS(hw_compat_2_3); - -GlobalProperty hw_compat_2_2[] = {}; -const size_t hw_compat_2_2_len = G_N_ELEMENTS(hw_compat_2_2); - -GlobalProperty hw_compat_2_1[] = { - { "intel-hda", "old_msi_addr", "on" }, - { "VGA", "qemu-extended-regs", "off" }, - { "secondary-vga", "qemu-extended-regs", "off" }, - { "virtio-scsi-pci", "any_layout", "off" }, - { "usb-mouse", "usb_version", "1" }, - { "usb-kbd", "usb_version", "1" }, - { "virtio-pci", "virtio-pci-bus-master-bug-migration", "on" }, -}; -const size_t hw_compat_2_1_len = G_N_ELEMENTS(hw_compat_2_1); - MachineState *current_machine; static char *machine_get_kernel(Object *obj, Error **errp) @@ -932,6 +909,40 @@ static void machine_set_smp(Object *obj, Visitor *v, const char *name, machine_parse_smp_config(ms, config, errp); } +static void machine_get_smp_cache(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + MachineState *ms = MACHINE(obj); + SmpCache *cache = &ms->smp_cache; + SmpCachePropertiesList *head = NULL; + SmpCachePropertiesList **tail = &head; + + for (int i = 0; i < CACHE_LEVEL_AND_TYPE__MAX; i++) { + SmpCacheProperties *node = g_new(SmpCacheProperties, 1); + + node->cache = cache->props[i].cache; + node->topology = cache->props[i].topology; + QAPI_LIST_APPEND(tail, node); + } + + 
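machine_check_smp_cache() above only compares levels once the "default" placeholder has been resolved, and then requires L1 <= L2 <= L3 in topology order. A standalone sketch of that rule with an illustrative level enum (the real ordering comes from the QAPI CpuTopologyLevel definition):

#include <stdbool.h>
#include <assert.h>

enum topo_level { TOPO_DEFAULT, TOPO_THREAD, TOPO_CORE, TOPO_MODULE,
                  TOPO_CLUSTER, TOPO_DIE, TOPO_SOCKET };

static bool topo_not_lower(enum topo_level outer, enum topo_level inner)
{
    /* both sides must already be resolved from the "default" placeholder */
    assert(outer != TOPO_DEFAULT && inner != TOPO_DEFAULT);
    return outer >= inner;
}

/* Valid only if L2 is not below either L1 cache and L3 is not below L2. */
static bool check_cache_topo(enum topo_level l1d, enum topo_level l1i,
                             enum topo_level l2, enum topo_level l3)
{
    return topo_not_lower(l2, l1d) &&
           topo_not_lower(l2, l1i) &&
           topo_not_lower(l3, l2);
}
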
visit_type_SmpCachePropertiesList(v, name, &head, errp); + qapi_free_SmpCachePropertiesList(head); +} + +static void machine_set_smp_cache(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + MachineState *ms = MACHINE(obj); + SmpCachePropertiesList *caches; + + if (!visit_type_SmpCachePropertiesList(v, name, &caches, errp)) { + return; + } + + machine_parse_smp_cache(ms, caches, errp); + qapi_free_SmpCachePropertiesList(caches); +} + static void machine_get_boot(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { @@ -1092,6 +1103,11 @@ static void machine_class_init(ObjectClass *oc, void *data) object_class_property_set_description(oc, "smp", "CPU topology"); + object_class_property_add(oc, "smp-cache", "SmpCachePropertiesWrapper", + machine_get_smp_cache, machine_set_smp_cache, NULL, NULL); + object_class_property_set_description(oc, "smp-cache", + "Cache properties list for SMP machine"); + object_class_property_add(oc, "phandle-start", "int", machine_get_phandle_start, machine_set_phandle_start, NULL, NULL); @@ -1230,6 +1246,11 @@ static void machine_initfn(Object *obj) ms->smp.cores = 1; ms->smp.threads = 1; + for (int i = 0; i < CACHE_LEVEL_AND_TYPE__MAX; i++) { + ms->smp_cache.props[i].cache = (CacheLevelAndType)i; + ms->smp_cache.props[i].topology = CPU_TOPOLOGY_LEVEL_DEFAULT; + } + machine_copy_boot_config(ms, &(BootConfiguration){ 0 }); } diff --git a/hw/cxl/cxl-mailbox-utils.c b/hw/cxl/cxl-mailbox-utils.c index 9258e48f95..2d4d62c454 100644 --- a/hw/cxl/cxl-mailbox-utils.c +++ b/hw/cxl/cxl-mailbox-utils.c @@ -151,6 +151,9 @@ static CXLRetCode cmd_tunnel_management_cmd(const struct cxl_cmd *cmd, in = (void *)payload_in; out = (void *)payload_out; + if (len_in < sizeof(*in)) { + return CXL_MBOX_INVALID_PAYLOAD_LENGTH; + } /* Enough room for minimum sized message - no payload */ if (in->size < sizeof(in->ccimessage)) { return CXL_MBOX_INVALID_PAYLOAD_LENGTH; @@ -266,6 +269,12 @@ static CXLRetCode cmd_events_clear_records(const struct cxl_cmd *cmd, CXLClearEventPayload *pl; pl = (CXLClearEventPayload *)payload_in; + + if (len_in < sizeof(*pl) || + len_in < sizeof(*pl) + sizeof(*pl->handle) * pl->nr_recs) { + return CXL_MBOX_INVALID_PAYLOAD_LENGTH; + } + *len_out = 0; return cxl_event_clear_records(cxlds, pl); } @@ -374,7 +383,7 @@ static CXLRetCode cmd_infostat_identify(const struct cxl_cmd *cmd, uint16_t pcie_subsys_vid; uint16_t pcie_subsys_id; uint64_t sn; - uint8_t max_message_size; + uint8_t max_message_size; uint8_t component_type; } QEMU_PACKED *is_identify; QEMU_BUILD_BUG_ON(sizeof(*is_identify) != 18); @@ -521,6 +530,9 @@ static CXLRetCode cmd_get_physical_port_state(const struct cxl_cmd *cmd, in = (struct cxl_fmapi_get_phys_port_state_req_pl *)payload_in; out = (struct cxl_fmapi_get_phys_port_state_resp_pl *)payload_out; + if (len_in < sizeof(*in)) { + return CXL_MBOX_INVALID_PAYLOAD_LENGTH; + } /* Check if what was requested can fit */ if (sizeof(*out) + sizeof(*out->ports) * in->num_ports > cci->payload_max) { return CXL_MBOX_INVALID_INPUT; @@ -649,9 +661,9 @@ static CXLRetCode cmd_firmware_update_get_info(const struct cxl_cmd *cmd, } QEMU_PACKED *fw_info; QEMU_BUILD_BUG_ON(sizeof(*fw_info) != 0x50); - if ((cxl_dstate->vmem_size < CXL_CAPACITY_MULTIPLIER) || - (cxl_dstate->pmem_size < CXL_CAPACITY_MULTIPLIER) || - (ct3d->dc.total_capacity < CXL_CAPACITY_MULTIPLIER)) { + if (!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER) || + !QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER) || + 
!QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER)) { return CXL_MBOX_INTERNAL_ERROR; } @@ -699,6 +711,10 @@ static CXLRetCode cmd_firmware_update_transfer(const struct cxl_cmd *cmd, } QEMU_PACKED *fw_transfer = (void *)payload_in; size_t offset, length; + if (len < sizeof(*fw_transfer)) { + return CXL_MBOX_INVALID_PAYLOAD_LENGTH; + } + if (fw_transfer->action == CXL_FW_XFER_ACTION_ABORT) { /* * At this point there aren't any on-going transfers @@ -927,24 +943,28 @@ static CXLRetCode cmd_logs_get_log(const struct cxl_cmd *cmd, get_log = (void *)payload_in; + if (get_log->length > cci->payload_max) { + return CXL_MBOX_INVALID_INPUT; + } + + if (!qemu_uuid_is_equal(&get_log->uuid, &cel_uuid)) { + return CXL_MBOX_INVALID_LOG; + } + /* * CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h) * The device shall return Invalid Input if the Offset or Length * fields attempt to access beyond the size of the log as reported by Get - * Supported Logs. + * Supported Log. * - * The CEL buffer is large enough to fit all commands in the emulation, so - * the only possible failure would be if the mailbox itself isn't big - * enough. + * Only valid for there to be one entry per opcode, but the length + offset + * may still be greater than that if the inputs are not valid and so access + * beyond the end of cci->cel_log. */ - if (get_log->offset + get_log->length > cci->payload_max) { + if ((uint64_t)get_log->offset + get_log->length >= sizeof(cci->cel_log)) { return CXL_MBOX_INVALID_INPUT; } - if (!qemu_uuid_is_equal(&get_log->uuid, &cel_uuid)) { - return CXL_MBOX_INVALID_LOG; - } - /* Store off everything to local variables so we can wipe out the payload */ *len_out = get_log->length; @@ -1133,10 +1153,8 @@ static CXLRetCode cmd_features_get_supported(const struct cxl_cmd *cmd, (struct CXLSupportedFeatureEntry) { .uuid = ecs_uuid, .feat_index = index, - .get_feat_size = CXL_ECS_NUM_MEDIA_FRUS * - sizeof(CXLMemECSReadAttrs), - .set_feat_size = CXL_ECS_NUM_MEDIA_FRUS * - sizeof(CXLMemECSWriteAttrs), + .get_feat_size = sizeof(CXLMemECSReadAttrs), + .set_feat_size = sizeof(CXLMemECSWriteAttrs), .attr_flags = CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE, .get_feat_version = CXL_ECS_GET_FEATURE_VERSION, .set_feat_version = CXL_ECS_SET_FEATURE_VERSION, @@ -1204,13 +1222,10 @@ static CXLRetCode cmd_features_get_feature(const struct cxl_cmd *cmd, (uint8_t *)&ct3d->patrol_scrub_attrs + get_feature->offset, bytes_to_copy); } else if (qemu_uuid_is_equal(&get_feature->uuid, &ecs_uuid)) { - if (get_feature->offset >= CXL_ECS_NUM_MEDIA_FRUS * - sizeof(CXLMemECSReadAttrs)) { + if (get_feature->offset >= sizeof(CXLMemECSReadAttrs)) { return CXL_MBOX_INVALID_INPUT; } - bytes_to_copy = CXL_ECS_NUM_MEDIA_FRUS * - sizeof(CXLMemECSReadAttrs) - - get_feature->offset; + bytes_to_copy = sizeof(CXLMemECSReadAttrs) - get_feature->offset; bytes_to_copy = MIN(bytes_to_copy, get_feature->count); memcpy(payload_out, (uint8_t *)&ct3d->ecs_attrs + get_feature->offset, @@ -1243,6 +1258,9 @@ static CXLRetCode cmd_features_set_feature(const struct cxl_cmd *cmd, CXLType3Dev *ct3d; uint16_t count; + if (len_in < sizeof(*hdr)) { + return CXL_MBOX_INVALID_PAYLOAD_LENGTH; + } if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) { return CXL_MBOX_UNSUPPORTED; @@ -1277,6 +1295,11 @@ static CXLRetCode cmd_features_set_feature(const struct cxl_cmd *cmd, ps_set_feature = (void *)payload_in; ps_write_attrs = &ps_set_feature->feat_data; + + if ((uint32_t)hdr->offset + bytes_to_copy > + sizeof(ct3d->patrol_scrub_wr_attrs)) { + return 
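The Get Log rework above first checks the requested length against the mailbox payload size, then widens the 32-bit offset before adding the length so the sum cannot wrap, and bounds the result against the CEL log itself. The core of that check reduces to the following standalone sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

static bool get_log_range_ok(uint32_t offset, uint32_t length, size_t log_size)
{
    /* mirrors the hunk: widen before adding, request must stay inside the
     * log rather than inside the mailbox payload buffer */
    return (uint64_t)offset + length < log_size;
}
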
CXL_MBOX_INVALID_PAYLOAD_LENGTH; + } memcpy((uint8_t *)&ct3d->patrol_scrub_wr_attrs + hdr->offset, ps_write_attrs, bytes_to_copy); @@ -1299,18 +1322,22 @@ static CXLRetCode cmd_features_set_feature(const struct cxl_cmd *cmd, ecs_set_feature = (void *)payload_in; ecs_write_attrs = ecs_set_feature->feat_data; - memcpy((uint8_t *)ct3d->ecs_wr_attrs + hdr->offset, + + if ((uint32_t)hdr->offset + bytes_to_copy > + sizeof(ct3d->ecs_wr_attrs)) { + return CXL_MBOX_INVALID_PAYLOAD_LENGTH; + } + memcpy((uint8_t *)&ct3d->ecs_wr_attrs + hdr->offset, ecs_write_attrs, bytes_to_copy); set_feat_info->data_size += bytes_to_copy; if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER || data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER) { + ct3d->ecs_attrs.ecs_log_cap = ct3d->ecs_wr_attrs.ecs_log_cap; for (count = 0; count < CXL_ECS_NUM_MEDIA_FRUS; count++) { - ct3d->ecs_attrs[count].ecs_log_cap = - ct3d->ecs_wr_attrs[count].ecs_log_cap; - ct3d->ecs_attrs[count].ecs_config = - ct3d->ecs_wr_attrs[count].ecs_config & 0x1F; + ct3d->ecs_attrs.fru_attrs[count].ecs_config = + ct3d->ecs_wr_attrs.fru_attrs[count].ecs_config & 0x1F; } } } else { @@ -1324,7 +1351,7 @@ static CXLRetCode cmd_features_set_feature(const struct cxl_cmd *cmd, if (qemu_uuid_is_equal(&hdr->uuid, &patrol_scrub_uuid)) { memset(&ct3d->patrol_scrub_wr_attrs, 0, set_feat_info->data_size); } else if (qemu_uuid_is_equal(&hdr->uuid, &ecs_uuid)) { - memset(ct3d->ecs_wr_attrs, 0, set_feat_info->data_size); + memset(&ct3d->ecs_wr_attrs, 0, set_feat_info->data_size); } set_feat_info->data_transfer_flag = 0; set_feat_info->data_saved_across_reset = false; @@ -1445,7 +1472,7 @@ static CXLRetCode cmd_ccls_get_lsa(const struct cxl_cmd *cmd, } QEMU_PACKED *get_lsa; CXLType3Dev *ct3d = CXL_TYPE3(cci->d); CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d); - uint32_t offset, length; + uint64_t offset, length; get_lsa = (void *)payload_in; offset = get_lsa->offset; @@ -1479,8 +1506,8 @@ static CXLRetCode cmd_ccls_set_lsa(const struct cxl_cmd *cmd, const size_t hdr_len = offsetof(struct set_lsa_pl, data); *len_out = 0; - if (!len_in) { - return CXL_MBOX_SUCCESS; + if (len_in < hdr_len) { + return CXL_MBOX_INVALID_PAYLOAD_LENGTH; } if (set_lsa_payload->offset + len_in > cvc->get_lsa_size(ct3d) + hdr_len) { @@ -2233,6 +2260,7 @@ static CXLRetCode cmd_dcd_get_dyn_cap_ext_list(const struct cxl_cmd *cmd, stw_le_p(&out_rec->shared_seq, ent->shared_seq); record_done++; + out_rec++; if (record_done == record_count) { break; } @@ -2470,11 +2498,20 @@ static CXLRetCode cmd_dcd_add_dyn_cap_rsp(const struct cxl_cmd *cmd, uint64_t dpa, len; CXLRetCode ret; + if (len_in < sizeof(*in)) { + return CXL_MBOX_INVALID_PAYLOAD_LENGTH; + } + if (in->num_entries_updated == 0) { cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending); return CXL_MBOX_SUCCESS; } + if (len_in < + sizeof(*in) + sizeof(*in->updated_entries) * in->num_entries_updated) { + return CXL_MBOX_INVALID_PAYLOAD_LENGTH; + } + /* Adding extents causes exceeding device's extent tracking ability. 
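The dynamic-capacity hunks above validate variable-length payloads in two steps: first make sure the fixed header fits, and only then trust the entry count it carries to check the variable part. A standalone sketch with invented types; the explicit 64-bit widening is the sketch's own precaution, not something the patch needs on the hosts it targets:

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

struct req_hdr { uint32_t num_entries; /* followed by the entries */ };
struct req_entry { uint64_t dpa; uint64_t len; };

static bool payload_len_ok(size_t len_in, const struct req_hdr *hdr)
{
    if (len_in < sizeof(*hdr)) {
        return false;                  /* header itself is truncated */
    }
    return (uint64_t)len_in >=
           sizeof(*hdr) + (uint64_t)hdr->num_entries * sizeof(struct req_entry);
}
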
*/ if (in->num_entries_updated + ct3d->dc.total_extent_count > CXL_NUM_EXTENTS_SUPPORTED) { @@ -2629,10 +2666,19 @@ static CXLRetCode cmd_dcd_release_dyn_cap(const struct cxl_cmd *cmd, uint32_t updated_list_size; CXLRetCode ret; + if (len_in < sizeof(*in)) { + return CXL_MBOX_INVALID_PAYLOAD_LENGTH; + } + if (in->num_entries_updated == 0) { return CXL_MBOX_INVALID_INPUT; } + if (len_in < + sizeof(*in) + sizeof(*in->updated_entries) * in->num_entries_updated) { + return CXL_MBOX_INVALID_PAYLOAD_LENGTH; + } + ret = cxl_detect_malformed_extent_list(ct3d, in); if (ret != CXL_MBOX_SUCCESS) { return ret; @@ -2879,7 +2925,8 @@ static void bg_timercb(void *opaque) } } else { /* estimate only */ - cci->bg.complete_pct = 100 * now / total_time; + cci->bg.complete_pct = + 100 * (now - cci->bg.starttime) / cci->bg.runtime; timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ); } diff --git a/hw/gpio/mpc8xxx.c b/hw/gpio/mpc8xxx.c index 63b7a5c881..a3c1d2fbf4 100644 --- a/hw/gpio/mpc8xxx.c +++ b/hw/gpio/mpc8xxx.c @@ -23,7 +23,6 @@ #include "hw/irq.h" #include "hw/sysbus.h" #include "migration/vmstate.h" -#include "qemu/module.h" #include "qom/object.h" #define TYPE_MPC8XXX_GPIO "mpc8xxx_gpio" @@ -208,17 +207,14 @@ static void mpc8xxx_gpio_class_init(ObjectClass *klass, void *data) device_class_set_legacy_reset(dc, mpc8xxx_gpio_reset); } -static const TypeInfo mpc8xxx_gpio_info = { - .name = TYPE_MPC8XXX_GPIO, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(MPC8XXXGPIOState), - .instance_init = mpc8xxx_gpio_initfn, - .class_init = mpc8xxx_gpio_class_init, +static const TypeInfo mpc8xxx_gpio_types[] = { + { + .name = TYPE_MPC8XXX_GPIO, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(MPC8XXXGPIOState), + .instance_init = mpc8xxx_gpio_initfn, + .class_init = mpc8xxx_gpio_class_init, + }, }; -static void mpc8xxx_gpio_register_types(void) -{ - type_register_static(&mpc8xxx_gpio_info); -} - -type_init(mpc8xxx_gpio_register_types) +DEFINE_TYPES(mpc8xxx_gpio_types) diff --git a/hw/i2c/mpc_i2c.c b/hw/i2c/mpc_i2c.c index 2467d1a9aa..913d044ac1 100644 --- a/hw/i2c/mpc_i2c.c +++ b/hw/i2c/mpc_i2c.c @@ -20,10 +20,10 @@ #include "qemu/osdep.h" #include "hw/i2c/i2c.h" #include "hw/irq.h" -#include "qemu/module.h" #include "hw/sysbus.h" #include "migration/vmstate.h" #include "qom/object.h" +#include "trace.h" /* #define DEBUG_I2C */ @@ -224,8 +224,8 @@ static uint64_t mpc_i2c_read(void *opaque, hwaddr addr, unsigned size) break; } - DPRINTF("%s: addr " HWADDR_FMT_plx " %02" PRIx32 "\n", __func__, - addr, value); + trace_mpc_i2c_read(addr, value); + return (uint64_t)value; } @@ -234,8 +234,8 @@ static void mpc_i2c_write(void *opaque, hwaddr addr, { MPCI2CState *s = opaque; - DPRINTF("%s: addr " HWADDR_FMT_plx " val %08" PRIx64 "\n", __func__, - addr, value); + trace_mpc_i2c_write(addr, value); + switch (addr) { case MPC_I2C_ADR: s->adr = value & CADR_MASK; @@ -344,16 +344,13 @@ static void mpc_i2c_class_init(ObjectClass *klass, void *data) dc->desc = "MPC I2C Controller"; } -static const TypeInfo mpc_i2c_type_info = { - .name = TYPE_MPC_I2C, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(MPCI2CState), - .class_init = mpc_i2c_class_init, +static const TypeInfo mpc_i2c_types[] = { + { + .name = TYPE_MPC_I2C, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(MPCI2CState), + .class_init = mpc_i2c_class_init, + }, }; -static void mpc_i2c_register_types(void) -{ - type_register_static(&mpc_i2c_type_info); -} - -type_init(mpc_i2c_register_types) +DEFINE_TYPES(mpc_i2c_types) diff 
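The bg_timercb() fix above bases the completion estimate on the time elapsed since the background operation started rather than on the absolute clock value. A standalone sketch of the corrected arithmetic; the clamping is an addition of the sketch, not of the patch:

#include <stdint.h>

static unsigned int bg_complete_pct(int64_t now, int64_t start, int64_t runtime)
{
    int64_t pct;

    if (runtime <= 0) {
        return 100;                       /* nothing left to wait for */
    }
    pct = 100 * (now - start) / runtime;  /* elapsed over expected runtime */
    if (pct < 0) {
        pct = 0;
    }
    return pct > 100 ? 100 : (unsigned int)pct;
}
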
--git a/hw/i2c/smbus_eeprom.c b/hw/i2c/smbus_eeprom.c index 9e62c27a1a..e3e96d4a2d 100644 --- a/hw/i2c/smbus_eeprom.c +++ b/hw/i2c/smbus_eeprom.c @@ -151,19 +151,16 @@ static void smbus_eeprom_class_initfn(ObjectClass *klass, void *data) dc->user_creatable = false; } -static const TypeInfo smbus_eeprom_info = { - .name = TYPE_SMBUS_EEPROM, - .parent = TYPE_SMBUS_DEVICE, - .instance_size = sizeof(SMBusEEPROMDevice), - .class_init = smbus_eeprom_class_initfn, +static const TypeInfo smbus_eeprom_types[] = { + { + .name = TYPE_SMBUS_EEPROM, + .parent = TYPE_SMBUS_DEVICE, + .instance_size = sizeof(SMBusEEPROMDevice), + .class_init = smbus_eeprom_class_initfn, + }, }; -static void smbus_eeprom_register_types(void) -{ - type_register_static(&smbus_eeprom_info); -} - -type_init(smbus_eeprom_register_types) +DEFINE_TYPES(smbus_eeprom_types) void smbus_eeprom_init_one(I2CBus *smbus, uint8_t address, uint8_t *eeprom_buf) { diff --git a/hw/i2c/trace-events b/hw/i2c/trace-events index 6900e06eda..f708a7ace1 100644 --- a/hw/i2c/trace-events +++ b/hw/i2c/trace-events @@ -35,6 +35,11 @@ aspeed_i2c_bus_write(uint32_t busid, uint64_t offset, unsigned size, uint64_t va aspeed_i2c_bus_send(const char *mode, int i, int count, uint8_t byte) "%s send %d/%d 0x%02x" aspeed_i2c_bus_recv(const char *mode, int i, int count, uint8_t byte) "%s recv %d/%d 0x%02x" +# mpc_i2c.c + +mpc_i2c_read(uint64_t addr, uint32_t value) "[0x%" PRIx64 "] -> 0x%02" PRIx32 +mpc_i2c_write(uint64_t addr, uint32_t value) "[0x%" PRIx64 "] <- 0x%02" PRIx32 + # npcm7xx_smbus.c npcm7xx_smbus_read(const char *id, uint64_t offset, uint64_t value, unsigned size) "%s offset: 0x%04" PRIx64 " value: 0x%02" PRIx64 " size: %u" diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c index 4967aa7459..9fcc2897b8 100644 --- a/hw/i386/acpi-build.c +++ b/hw/i386/acpi-build.c @@ -68,7 +68,6 @@ #include "hw/acpi/utils.h" #include "hw/acpi/pci.h" #include "hw/acpi/cxl.h" -#include "hw/acpi/acpi_generic_initiator.h" #include "qom/qom-qobject.h" #include "hw/i386/amd_iommu.h" @@ -741,7 +740,8 @@ static Aml *build_prt(bool is_pci0_prt) int pin; method = aml_method("_PRT", 0, AML_NOTSERIALIZED); - rt_pkg = aml_varpackage(nroutes); + assert(nroutes < 256); + rt_pkg = aml_package(nroutes); for (pin = 0; pin < nroutes; pin++) { Aml *pkg = aml_package(4); @@ -1476,6 +1476,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, QLIST_FOREACH(bus, &bus->child, sibling) { uint8_t bus_num = pci_bus_num(bus); uint8_t numa_node = pci_bus_numa_node(bus); + uint32_t uid; /* look only for expander root buses */ if (!pci_bus_is_root(bus)) { @@ -1486,6 +1487,8 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, root_bus_limit = bus_num - 1; } + uid = object_property_get_uint(OBJECT(bus), "acpi_uid", + &error_fatal); scope = aml_scope("\\_SB"); if (pci_bus_is_cxl(bus)) { @@ -1493,7 +1496,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, } else { dev = aml_device("PC%.02X", bus_num); } - aml_append(dev, aml_name_decl("_UID", aml_int(bus_num))); + aml_append(dev, aml_name_decl("_UID", aml_int(uid))); aml_append(dev, aml_name_decl("_BBN", aml_int(bus_num))); if (pci_bus_is_cxl(bus)) { struct Aml *aml_pkg = aml_package(2); @@ -1971,7 +1974,7 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine) build_srat_memory(table_data, 0, 0, 0, MEM_AFFINITY_NOFLAGS); } - build_srat_generic_pci_initiator(table_data); + build_srat_generic_affinity_structures(table_data); /* * Entry is required for Windows to enable memory hotplug in OS @@ -2321,7 +2324,7 @@ 
build_amd_iommu(GArray *table_data, BIOSLinker *linker, const char *oem_id, /* Capability offset */ build_append_int_noprefix(table_data, s->pci.capab_offset, 2); /* IOMMU base address */ - build_append_int_noprefix(table_data, s->mmio.addr, 8); + build_append_int_noprefix(table_data, s->mr_mmio.addr, 8); /* PCI Segment Group */ build_append_int_noprefix(table_data, 0, 2); /* IOMMU info */ @@ -2356,7 +2359,7 @@ build_amd_iommu(GArray *table_data, BIOSLinker *linker, const char *oem_id, /* Capability offset */ build_append_int_noprefix(table_data, s->pci.capab_offset, 2); /* IOMMU base address */ - build_append_int_noprefix(table_data, s->mmio.addr, 8); + build_append_int_noprefix(table_data, s->mr_mmio.addr, 8); /* PCI Segment Group */ build_append_int_noprefix(table_data, 0, 2); /* IOMMU info */ diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c index 464f0b666e..13af7211e1 100644 --- a/hw/i386/amd_iommu.c +++ b/hw/i386/amd_iommu.c @@ -32,6 +32,7 @@ #include "trace.h" #include "hw/i386/apic-msidef.h" #include "hw/qdev-properties.h" +#include "kvm/kvm_i386.h" /* used AMD-Vi MMIO registers */ const char *amdvi_mmio_low[] = { @@ -60,8 +61,9 @@ struct AMDVIAddressSpace { uint8_t bus_num; /* bus number */ uint8_t devfn; /* device function */ AMDVIState *iommu_state; /* AMDVI - one per machine */ - MemoryRegion root; /* AMDVI Root memory map region */ + MemoryRegion root; /* AMDVI Root memory map region */ IOMMUMemoryRegion iommu; /* Device's address translation region */ + MemoryRegion iommu_nodma; /* Alias of shared nodma memory region */ MemoryRegion iommu_ir; /* Device's interrupt remapping region */ AddressSpace as; /* device's corresponding address space */ }; @@ -430,6 +432,12 @@ static void amdvi_complete_ppr(AMDVIState *s, uint64_t *cmd) trace_amdvi_ppr_exec(); } +static void amdvi_intremap_inval_notify_all(AMDVIState *s, bool global, + uint32_t index, uint32_t mask) +{ + x86_iommu_iec_notify_all(X86_IOMMU_DEVICE(s), global, index, mask); +} + static void amdvi_inval_all(AMDVIState *s, uint64_t *cmd) { if (extract64(cmd[0], 0, 60) || cmd[1]) { @@ -437,6 +445,9 @@ static void amdvi_inval_all(AMDVIState *s, uint64_t *cmd) s->cmdbuf + s->cmdbuf_head); } + /* Notify global invalidation */ + amdvi_intremap_inval_notify_all(s, true, 0, 0); + amdvi_iotlb_reset(s); trace_amdvi_all_inval(); } @@ -485,6 +496,9 @@ static void amdvi_inval_inttable(AMDVIState *s, uint64_t *cmd) return; } + /* Notify global invalidation */ + amdvi_intremap_inval_notify_all(s, true, 0, 0); + trace_amdvi_intr_inval(); } @@ -1412,6 +1426,7 @@ static AddressSpace *amdvi_host_dma_iommu(PCIBus *bus, void *opaque, int devfn) AMDVIState *s = opaque; AMDVIAddressSpace **iommu_as, *amdvi_dev_as; int bus_num = pci_bus_num(bus); + X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s); iommu_as = s->address_spaces[bus_num]; @@ -1436,13 +1451,13 @@ static AddressSpace *amdvi_host_dma_iommu(PCIBus *bus, void *opaque, int devfn) * Memory region relationships looks like (Address range shows * only lower 32 bits to make it short in length...): * - * |-----------------+-------------------+----------| - * | Name | Address range | Priority | - * |-----------------+-------------------+----------+ - * | amdvi_root | 00000000-ffffffff | 0 | - * | amdvi_iommu | 00000000-ffffffff | 1 | - * | amdvi_iommu_ir | fee00000-feefffff | 64 | - * |-----------------+-------------------+----------| + * |--------------------+-------------------+----------| + * | Name | Address range | Priority | + * |--------------------+-------------------+----------+ + * 
| amdvi-root | 00000000-ffffffff | 0 | + * | amdvi-iommu_nodma | 00000000-ffffffff | 0 | + * | amdvi-iommu_ir | fee00000-feefffff | 1 | + * |--------------------+-------------------+----------| */ memory_region_init_iommu(&amdvi_dev_as->iommu, sizeof(amdvi_dev_as->iommu), @@ -1452,16 +1467,34 @@ static AddressSpace *amdvi_host_dma_iommu(PCIBus *bus, void *opaque, int devfn) memory_region_init(&amdvi_dev_as->root, OBJECT(s), "amdvi_root", UINT64_MAX); address_space_init(&amdvi_dev_as->as, &amdvi_dev_as->root, name); - memory_region_init_io(&amdvi_dev_as->iommu_ir, OBJECT(s), - &amdvi_ir_ops, s, "amd_iommu_ir", - AMDVI_INT_ADDR_SIZE); - memory_region_add_subregion_overlap(&amdvi_dev_as->root, - AMDVI_INT_ADDR_FIRST, - &amdvi_dev_as->iommu_ir, - 64); memory_region_add_subregion_overlap(&amdvi_dev_as->root, 0, MEMORY_REGION(&amdvi_dev_as->iommu), - 1); + 0); + + /* Build the DMA Disabled alias to shared memory */ + memory_region_init_alias(&amdvi_dev_as->iommu_nodma, OBJECT(s), + "amdvi-sys", &s->mr_sys, 0, + memory_region_size(&s->mr_sys)); + memory_region_add_subregion_overlap(&amdvi_dev_as->root, 0, + &amdvi_dev_as->iommu_nodma, + 0); + /* Build the Interrupt Remapping alias to shared memory */ + memory_region_init_alias(&amdvi_dev_as->iommu_ir, OBJECT(s), + "amdvi-ir", &s->mr_ir, 0, + memory_region_size(&s->mr_ir)); + memory_region_add_subregion_overlap(MEMORY_REGION(&amdvi_dev_as->iommu), + AMDVI_INT_ADDR_FIRST, + &amdvi_dev_as->iommu_ir, 1); + + if (!x86_iommu->pt_supported) { + memory_region_set_enabled(&amdvi_dev_as->iommu_nodma, false); + memory_region_set_enabled(MEMORY_REGION(&amdvi_dev_as->iommu), + true); + } else { + memory_region_set_enabled(MEMORY_REGION(&amdvi_dev_as->iommu), + false); + memory_region_set_enabled(&amdvi_dev_as->iommu_nodma, true); + } } return &iommu_as[devfn]->as; } @@ -1598,10 +1631,37 @@ static void amdvi_sysbus_realize(DeviceState *dev, Error **errp) x86ms->ioapic_as = amdvi_host_dma_iommu(bus, s, AMDVI_IOAPIC_SB_DEVID); /* set up MMIO */ - memory_region_init_io(&s->mmio, OBJECT(s), &mmio_mem_ops, s, "amdvi-mmio", - AMDVI_MMIO_SIZE); + memory_region_init_io(&s->mr_mmio, OBJECT(s), &mmio_mem_ops, s, + "amdvi-mmio", AMDVI_MMIO_SIZE); memory_region_add_subregion(get_system_memory(), AMDVI_BASE_ADDR, - &s->mmio); + &s->mr_mmio); + + /* Create the share memory regions by all devices */ + memory_region_init(&s->mr_sys, OBJECT(s), "amdvi-sys", UINT64_MAX); + + /* set up the DMA disabled memory region */ + memory_region_init_alias(&s->mr_nodma, OBJECT(s), + "amdvi-nodma", get_system_memory(), 0, + memory_region_size(get_system_memory())); + memory_region_add_subregion_overlap(&s->mr_sys, 0, + &s->mr_nodma, 0); + + /* set up the Interrupt Remapping memory region */ + memory_region_init_io(&s->mr_ir, OBJECT(s), &amdvi_ir_ops, + s, "amdvi-ir", AMDVI_INT_ADDR_SIZE); + memory_region_add_subregion_overlap(&s->mr_sys, AMDVI_INT_ADDR_FIRST, + &s->mr_ir, 1); + + /* AMD IOMMU with x2APIC mode requires xtsup=on */ + if (x86ms->apic_id_limit > 255 && !s->xtsup) { + error_report("AMD IOMMU with x2APIC confguration requires xtsup=on"); + exit(EXIT_FAILURE); + } + if (s->xtsup && kvm_irqchip_is_split() && !kvm_enable_x2apic()) { + error_report("AMD IOMMU xtsup=on requires support on the KVM side"); + exit(EXIT_FAILURE); + } + pci_setup_iommu(bus, &amdvi_iommu_ops, s); amdvi_init(s); } diff --git a/hw/i386/amd_iommu.h b/hw/i386/amd_iommu.h index 73619fe9ea..e0dac4d9a9 100644 --- a/hw/i386/amd_iommu.h +++ b/hw/i386/amd_iommu.h @@ -353,7 +353,10 @@ struct AMDVIState { uint32_t 
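The amdvi_sysbus_realize() hunk above adds two configuration checks; the first rejects machines whose APIC ID limit exceeds 255 unless xtsup is enabled, since such IDs only exist with x2APIC. A standalone sketch of that condition, with illustrative names:

#include <stdbool.h>
#include <stdint.h>

/* APIC IDs above 255 cannot be expressed without x2APIC, so such a
 * configuration is only valid when the IOMMU's XT support is enabled. */
static bool amd_iommu_apic_config_ok(uint32_t apic_id_limit, bool xtsup)
{
    return apic_id_limit <= 255 || xtsup;
}
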
pprlog_head; /* ppr log head */ uint32_t pprlog_tail; /* ppr log tail */ - MemoryRegion mmio; /* MMIO region */ + MemoryRegion mr_mmio; /* MMIO region */ + MemoryRegion mr_sys; + MemoryRegion mr_nodma; + MemoryRegion mr_ir; uint8_t mmior[AMDVI_MMIO_SIZE]; /* read/write MMIO */ uint8_t w1cmask[AMDVI_MMIO_SIZE]; /* read/write 1 clear mask */ uint8_t romask[AMDVI_MMIO_SIZE]; /* MMIO read/only mask */ diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c index 08fe218935..4c0d1d7d47 100644 --- a/hw/i386/intel_iommu.c +++ b/hw/i386/intel_iommu.c @@ -2532,15 +2532,51 @@ static bool vtd_get_inv_desc(IntelIOMMUState *s, return true; } +static bool vtd_inv_desc_reserved_check(IntelIOMMUState *s, + VTDInvDesc *inv_desc, + uint64_t mask[4], bool dw, + const char *func_name, + const char *desc_type) +{ + if (s->iq_dw) { + if (inv_desc->val[0] & mask[0] || inv_desc->val[1] & mask[1] || + inv_desc->val[2] & mask[2] || inv_desc->val[3] & mask[3]) { + error_report("%s: invalid %s desc val[3]: 0x%"PRIx64 + " val[2]: 0x%"PRIx64" val[1]=0x%"PRIx64 + " val[0]=0x%"PRIx64" (reserved nonzero)", + func_name, desc_type, inv_desc->val[3], + inv_desc->val[2], inv_desc->val[1], + inv_desc->val[0]); + return false; + } + } else { + if (dw) { + error_report("%s: 256-bit %s desc in 128-bit invalidation queue", + func_name, desc_type); + return false; + } + + if (inv_desc->lo & mask[0] || inv_desc->hi & mask[1]) { + error_report("%s: invalid %s desc: hi=%"PRIx64", lo=%"PRIx64 + " (reserved nonzero)", func_name, desc_type, + inv_desc->hi, inv_desc->lo); + return false; + } + } + + return true; +} + static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc) { - if ((inv_desc->hi & VTD_INV_DESC_WAIT_RSVD_HI) || - (inv_desc->lo & VTD_INV_DESC_WAIT_RSVD_LO)) { - error_report_once("%s: invalid wait desc: hi=%"PRIx64", lo=%"PRIx64 - " (reserved nonzero)", __func__, inv_desc->hi, - inv_desc->lo); + uint64_t mask[4] = {VTD_INV_DESC_WAIT_RSVD_LO, VTD_INV_DESC_WAIT_RSVD_HI, + VTD_INV_DESC_ALL_ONE, VTD_INV_DESC_ALL_ONE}; + + if (!vtd_inv_desc_reserved_check(s, inv_desc, mask, false, + __func__, "wait")) { return false; } + if (inv_desc->lo & VTD_INV_DESC_WAIT_SW) { /* Status Write */ uint32_t status_data = (uint32_t)(inv_desc->lo >> @@ -2574,13 +2610,14 @@ static bool vtd_process_context_cache_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc) { uint16_t sid, fmask; + uint64_t mask[4] = {VTD_INV_DESC_CC_RSVD, VTD_INV_DESC_ALL_ONE, + VTD_INV_DESC_ALL_ONE, VTD_INV_DESC_ALL_ONE}; - if ((inv_desc->lo & VTD_INV_DESC_CC_RSVD) || inv_desc->hi) { - error_report_once("%s: invalid cc inv desc: hi=%"PRIx64", lo=%"PRIx64 - " (reserved nonzero)", __func__, inv_desc->hi, - inv_desc->lo); + if (!vtd_inv_desc_reserved_check(s, inv_desc, mask, false, + __func__, "cc inv")) { return false; } + switch (inv_desc->lo & VTD_INV_DESC_CC_G) { case VTD_INV_DESC_CC_DOMAIN: trace_vtd_inv_desc_cc_domain( @@ -2610,12 +2647,11 @@ static bool vtd_process_iotlb_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc) uint16_t domain_id; uint8_t am; hwaddr addr; + uint64_t mask[4] = {VTD_INV_DESC_IOTLB_RSVD_LO, VTD_INV_DESC_IOTLB_RSVD_HI, + VTD_INV_DESC_ALL_ONE, VTD_INV_DESC_ALL_ONE}; - if ((inv_desc->lo & VTD_INV_DESC_IOTLB_RSVD_LO) || - (inv_desc->hi & VTD_INV_DESC_IOTLB_RSVD_HI)) { - error_report_once("%s: invalid iotlb inv desc: hi=0x%"PRIx64 - ", lo=0x%"PRIx64" (reserved bits unzero)", - __func__, inv_desc->hi, inv_desc->lo); + if (!vtd_inv_desc_reserved_check(s, inv_desc, mask, false, + __func__, "iotlb inv")) { return false; } @@ -2656,6 +2692,14 @@ 
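vtd_inv_desc_reserved_check() above centralizes the reserved-bit test: an invalidation descriptor is four 64-bit words in 256-bit mode and two in 128-bit mode, and any bit set under the per-word reserved mask invalidates it. The core of the test, as a standalone sketch:

#include <stdbool.h>
#include <stdint.h>

static bool desc_reserved_bits_clear(const uint64_t val[4],
                                     const uint64_t mask[4], bool dw256)
{
    int nwords = dw256 ? 4 : 2;   /* 256-bit vs 128-bit descriptor */

    for (int i = 0; i < nwords; i++) {
        if (val[i] & mask[i]) {
            return false;         /* reserved bit set */
        }
    }
    return true;
}
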
static bool vtd_process_iotlb_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc) static bool vtd_process_inv_iec_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc) { + uint64_t mask[4] = {VTD_INV_DESC_IEC_RSVD, VTD_INV_DESC_ALL_ONE, + VTD_INV_DESC_ALL_ONE, VTD_INV_DESC_ALL_ONE}; + + if (!vtd_inv_desc_reserved_check(s, inv_desc, mask, false, + __func__, "iec inv")) { + return false; + } + trace_vtd_inv_desc_iec(inv_desc->iec.granularity, inv_desc->iec.index, inv_desc->iec.index_mask); @@ -2705,19 +2749,19 @@ static bool vtd_process_device_iotlb_desc(IntelIOMMUState *s, hwaddr addr; uint16_t sid; bool size; + uint64_t mask[4] = {VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO, + VTD_INV_DESC_DEVICE_IOTLB_RSVD_HI, + VTD_INV_DESC_ALL_ONE, VTD_INV_DESC_ALL_ONE}; + + if (!vtd_inv_desc_reserved_check(s, inv_desc, mask, false, + __func__, "dev-iotlb inv")) { + return false; + } addr = VTD_INV_DESC_DEVICE_IOTLB_ADDR(inv_desc->hi); sid = VTD_INV_DESC_DEVICE_IOTLB_SID(inv_desc->lo); size = VTD_INV_DESC_DEVICE_IOTLB_SIZE(inv_desc->hi); - if ((inv_desc->lo & VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO) || - (inv_desc->hi & VTD_INV_DESC_DEVICE_IOTLB_RSVD_HI)) { - error_report_once("%s: invalid dev-iotlb inv desc: hi=%"PRIx64 - ", lo=%"PRIx64" (reserved nonzero)", __func__, - inv_desc->hi, inv_desc->lo); - return false; - } - /* * Using sid is OK since the guest should have finished the * initialization of both the bus and device. @@ -2847,6 +2891,7 @@ static void vtd_handle_iqt_write(IntelIOMMUState *s) if (s->iq_dw && (val & VTD_IQT_QT_256_RSV_BIT)) { error_report_once("%s: RSV bit is set: val=0x%"PRIx64, __func__, val); + vtd_handle_inv_queue_error(s); return; } s->iq_tail = VTD_IQT_QT(s->iq_dw, val); @@ -3372,6 +3417,7 @@ static Property vtd_properties[] = { DEFINE_PROP_BOOL("x-pasid-mode", IntelIOMMUState, pasid, false), DEFINE_PROP_BOOL("dma-drain", IntelIOMMUState, dma_drain, true), DEFINE_PROP_BOOL("dma-translation", IntelIOMMUState, dma_translation, true), + DEFINE_PROP_BOOL("stale-tm", IntelIOMMUState, stale_tm, false), DEFINE_PROP_END_OF_LIST(), }; @@ -4138,15 +4184,15 @@ static void vtd_init(IntelIOMMUState *s) */ vtd_spte_rsvd[0] = ~0ULL; vtd_spte_rsvd[1] = VTD_SPTE_PAGE_L1_RSVD_MASK(s->aw_bits, - x86_iommu->dt_supported); + x86_iommu->dt_supported && s->stale_tm); vtd_spte_rsvd[2] = VTD_SPTE_PAGE_L2_RSVD_MASK(s->aw_bits); vtd_spte_rsvd[3] = VTD_SPTE_PAGE_L3_RSVD_MASK(s->aw_bits); vtd_spte_rsvd[4] = VTD_SPTE_PAGE_L4_RSVD_MASK(s->aw_bits); vtd_spte_rsvd_large[2] = VTD_SPTE_LPAGE_L2_RSVD_MASK(s->aw_bits, - x86_iommu->dt_supported); + x86_iommu->dt_supported && s->stale_tm); vtd_spte_rsvd_large[3] = VTD_SPTE_LPAGE_L3_RSVD_MASK(s->aw_bits, - x86_iommu->dt_supported); + x86_iommu->dt_supported && s->stale_tm); if (s->scalable_mode || s->snoop_control) { vtd_spte_rsvd[1] &= ~VTD_SPTE_SNP; diff --git a/hw/i386/intel_iommu_internal.h b/hw/i386/intel_iommu_internal.h index 13d5d129ae..4323fc5d6d 100644 --- a/hw/i386/intel_iommu_internal.h +++ b/hw/i386/intel_iommu_internal.h @@ -356,6 +356,7 @@ union VTDInvDesc { typedef union VTDInvDesc VTDInvDesc; /* Masks for struct VTDInvDesc */ +#define VTD_INV_DESC_ALL_ONE -1ULL #define VTD_INV_DESC_TYPE(val) ((((val) >> 5) & 0x70ULL) | \ ((val) & 0xfULL)) #define VTD_INV_DESC_CC 0x1 /* Context-cache Invalidate Desc */ @@ -409,11 +410,14 @@ typedef union VTDInvDesc VTDInvDesc; #define VTD_INV_DESC_DEVICE_IOTLB_RSVD_HI 0xffeULL #define VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO 0xffff0000ffe0f1f0 +/* Masks for Interrupt Entry Invalidate Descriptor */ +#define VTD_INV_DESC_IEC_RSVD 0xffff000007fff1e0ULL 
+ /* Rsvd field masks for spte */ #define VTD_SPTE_SNP 0x800ULL -#define VTD_SPTE_PAGE_L1_RSVD_MASK(aw, dt_supported) \ - dt_supported ? \ +#define VTD_SPTE_PAGE_L1_RSVD_MASK(aw, stale_tm) \ + stale_tm ? \ (0x800ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM | VTD_SL_TM)) : \ (0x800ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM)) #define VTD_SPTE_PAGE_L2_RSVD_MASK(aw) \ @@ -423,12 +427,12 @@ typedef union VTDInvDesc VTDInvDesc; #define VTD_SPTE_PAGE_L4_RSVD_MASK(aw) \ (0x880ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM)) -#define VTD_SPTE_LPAGE_L2_RSVD_MASK(aw, dt_supported) \ - dt_supported ? \ +#define VTD_SPTE_LPAGE_L2_RSVD_MASK(aw, stale_tm) \ + stale_tm ? \ (0x1ff800ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM | VTD_SL_TM)) : \ (0x1ff800ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM)) -#define VTD_SPTE_LPAGE_L3_RSVD_MASK(aw, dt_supported) \ - dt_supported ? \ +#define VTD_SPTE_LPAGE_L3_RSVD_MASK(aw, stale_tm) \ + stale_tm ? \ (0x3ffff800ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM | VTD_SL_TM)) : \ (0x3ffff800ULL | ~(VTD_HAW_MASK(aw) | VTD_SL_IGN_COM)) diff --git a/hw/i386/pc.c b/hw/i386/pc.c index 2047633e4c..830614d930 100644 --- a/hw/i386/pc.c +++ b/hw/i386/pc.c @@ -82,6 +82,7 @@ GlobalProperty pc_compat_9_1[] = { { "ICH9-LPC", "x-smi-swsmi-timer", "off" }, { "ICH9-LPC", "x-smi-periodic-timer", "off" }, + { TYPE_INTEL_IOMMU_DEVICE, "stale-tm", "on" }, }; const size_t pc_compat_9_1_len = G_N_ELEMENTS(pc_compat_9_1); diff --git a/hw/i386/x86-common.c b/hw/i386/x86-common.c index b86c38212e..bc360a9ea4 100644 --- a/hw/i386/x86-common.c +++ b/hw/i386/x86-common.c @@ -273,12 +273,12 @@ void x86_cpu_pre_plug(HotplugHandler *hotplug_dev, if (ms->smp.modules > 1) { env->nr_modules = ms->smp.modules; - set_bit(CPU_TOPO_LEVEL_MODULE, env->avail_cpu_topo); + set_bit(CPU_TOPOLOGY_LEVEL_MODULE, env->avail_cpu_topo); } if (ms->smp.dies > 1) { env->nr_dies = ms->smp.dies; - set_bit(CPU_TOPO_LEVEL_DIE, env->avail_cpu_topo); + set_bit(CPU_TOPOLOGY_LEVEL_DIE, env->avail_cpu_topo); } /* diff --git a/hw/intc/pnv_xive2.c b/hw/intc/pnv_xive2.c index 78609105a8..834d32287b 100644 --- a/hw/intc/pnv_xive2.c +++ b/hw/intc/pnv_xive2.c @@ -490,6 +490,23 @@ static int pnv_xive2_write_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx, word_number); } +static int pnv_xive2_get_nvgc(Xive2Router *xrtr, bool crowd, + uint8_t blk, uint32_t idx, + Xive2Nvgc *nvgc) +{ + return pnv_xive2_vst_read(PNV_XIVE2(xrtr), crowd ? VST_NVC : VST_NVG, + blk, idx, nvgc); +} + +static int pnv_xive2_write_nvgc(Xive2Router *xrtr, bool crowd, + uint8_t blk, uint32_t idx, + Xive2Nvgc *nvgc) +{ + return pnv_xive2_vst_write(PNV_XIVE2(xrtr), crowd ? VST_NVC : VST_NVG, + blk, idx, nvgc, + XIVE_VST_WORD_ALL); +} + static int pnv_xive2_nxc_to_table_type(uint8_t nxc_type, uint32_t *table_type) { switch (nxc_type) { @@ -2407,6 +2424,8 @@ static void pnv_xive2_class_init(ObjectClass *klass, void *data) xrc->write_end = pnv_xive2_write_end; xrc->get_nvp = pnv_xive2_get_nvp; xrc->write_nvp = pnv_xive2_write_nvp; + xrc->get_nvgc = pnv_xive2_get_nvgc; + xrc->write_nvgc = pnv_xive2_write_nvgc; xrc->get_config = pnv_xive2_get_config; xrc->get_block_id = pnv_xive2_get_block_id; @@ -2497,8 +2516,9 @@ void pnv_xive2_pic_print_info(PnvXive2 *xive, GString *buf) Xive2Eas eas; Xive2End end; Xive2Nvp nvp; + Xive2Nvgc nvgc; int i; - uint64_t xive_nvp_per_subpage; + uint64_t entries_per_subpage; g_string_append_printf(buf, "XIVE[%x] Source %08x .. 
%08x\n", blk, srcno0, srcno0 + nr_esbs - 1); @@ -2530,10 +2550,28 @@ void pnv_xive2_pic_print_info(PnvXive2 *xive, GString *buf) g_string_append_printf(buf, "XIVE[%x] #%d NVPT %08x .. %08x\n", chip_id, blk, 0, XIVE2_NVP_COUNT - 1); - xive_nvp_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVP); - for (i = 0; i < XIVE2_NVP_COUNT; i += xive_nvp_per_subpage) { + entries_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVP); + for (i = 0; i < XIVE2_NVP_COUNT; i += entries_per_subpage) { while (!xive2_router_get_nvp(xrtr, blk, i, &nvp)) { xive2_nvp_pic_print_info(&nvp, i++, buf); } } + + g_string_append_printf(buf, "XIVE[%x] #%d NVGT %08x .. %08x\n", + chip_id, blk, 0, XIVE2_NVP_COUNT - 1); + entries_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVG); + for (i = 0; i < XIVE2_NVP_COUNT; i += entries_per_subpage) { + while (!xive2_router_get_nvgc(xrtr, false, blk, i, &nvgc)) { + xive2_nvgc_pic_print_info(&nvgc, i++, buf); + } + } + + g_string_append_printf(buf, "XIVE[%x] #%d NVCT %08x .. %08x\n", + chip_id, blk, 0, XIVE2_NVP_COUNT - 1); + entries_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVC); + for (i = 0; i < XIVE2_NVP_COUNT; i += entries_per_subpage) { + while (!xive2_router_get_nvgc(xrtr, true, blk, i, &nvgc)) { + xive2_nvgc_pic_print_info(&nvgc, i++, buf); + } + } } diff --git a/hw/intc/spapr_xive_kvm.c b/hw/intc/spapr_xive_kvm.c index 5789062379..7a86197fc9 100644 --- a/hw/intc/spapr_xive_kvm.c +++ b/hw/intc/spapr_xive_kvm.c @@ -720,7 +720,7 @@ int kvmppc_xive_connect(SpaprInterruptController *intc, uint32_t nr_servers, { SpaprXive *xive = SPAPR_XIVE(intc); XiveSource *xsrc = &xive->source; - size_t esb_len = xive_source_esb_len(xsrc); + uint64_t esb_len = xive_source_esb_len(xsrc); size_t tima_len = 4ull << TM_SHIFT; CPUState *cs; int fd; @@ -824,7 +824,7 @@ void kvmppc_xive_disconnect(SpaprInterruptController *intc) { SpaprXive *xive = SPAPR_XIVE(intc); XiveSource *xsrc; - size_t esb_len; + uint64_t esb_len; assert(xive->fd != -1); diff --git a/hw/intc/xics.c b/hw/intc/xics.c index 6f4d5271ea..e893363dc9 100644 --- a/hw/intc/xics.c +++ b/hw/intc/xics.c @@ -335,22 +335,6 @@ static void icp_realize(DeviceState *dev, Error **errp) return; } } - /* - * The way that pre_2_10_icp is handling is really, really hacky. - * We used to have here this call: - * - * vmstate_register(NULL, icp->cs->cpu_index, &vmstate_icp_server, icp); - * - * But we were doing: - * pre_2_10_vmstate_register_dummy_icp() - * this vmstate_register() - * pre_2_10_vmstate_unregister_dummy_icp() - * - * So for a short amount of time we had to vmstate entries with - * the same name. This fixes it. 
- */ - vmstate_replace_hack_for_ppc(NULL, icp->cs->cpu_index, - &vmstate_icp_server, icp); } static void icp_unrealize(DeviceState *dev) diff --git a/hw/intc/xive.c b/hw/intc/xive.c index 5a02dd8e02..245e4d181a 100644 --- a/hw/intc/xive.c +++ b/hw/intc/xive.c @@ -74,33 +74,48 @@ static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring) if (regs[TM_NSR] & mask) { uint8_t cppr = regs[TM_PIPR]; + uint8_t alt_ring; + uint8_t *alt_regs; + + /* POOL interrupt uses IPB in QW2, POOL ring */ + if ((ring == TM_QW3_HV_PHYS) && (nsr & (TM_QW3_NSR_HE_POOL << 6))) { + alt_ring = TM_QW2_HV_POOL; + } else { + alt_ring = ring; + } + alt_regs = &tctx->regs[alt_ring]; regs[TM_CPPR] = cppr; /* Reset the pending buffer bit */ - regs[TM_IPB] &= ~xive_priority_to_ipb(cppr); - regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]); + alt_regs[TM_IPB] &= ~xive_priority_to_ipb(cppr); /* Drop Exception bit */ regs[TM_NSR] &= ~mask; - trace_xive_tctx_accept(tctx->cs->cpu_index, ring, - regs[TM_IPB], regs[TM_PIPR], + trace_xive_tctx_accept(tctx->cs->cpu_index, alt_ring, + alt_regs[TM_IPB], regs[TM_PIPR], regs[TM_CPPR], regs[TM_NSR]); } - return (nsr << 8) | regs[TM_CPPR]; + return ((uint64_t)nsr << 8) | regs[TM_CPPR]; } static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring) { + /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */ + uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring; + uint8_t *alt_regs = &tctx->regs[alt_ring]; uint8_t *regs = &tctx->regs[ring]; - if (regs[TM_PIPR] < regs[TM_CPPR]) { + if (alt_regs[TM_PIPR] < alt_regs[TM_CPPR]) { switch (ring) { case TM_QW1_OS: regs[TM_NSR] |= TM_QW1_NSR_EO; break; + case TM_QW2_HV_POOL: + alt_regs[TM_NSR] = (TM_QW3_NSR_HE_POOL << 6); + break; case TM_QW3_HV_PHYS: regs[TM_NSR] |= (TM_QW3_NSR_HE_PHYS << 6); break; @@ -108,26 +123,27 @@ static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring) g_assert_not_reached(); } trace_xive_tctx_notify(tctx->cs->cpu_index, ring, - regs[TM_IPB], regs[TM_PIPR], - regs[TM_CPPR], regs[TM_NSR]); + regs[TM_IPB], alt_regs[TM_PIPR], + alt_regs[TM_CPPR], alt_regs[TM_NSR]); qemu_irq_raise(xive_tctx_output(tctx, ring)); } } -void xive_tctx_reset_os_signal(XiveTCTX *tctx) +void xive_tctx_reset_signal(XiveTCTX *tctx, uint8_t ring) { /* - * Lower the External interrupt. Used when pulling an OS - * context. It is necessary to avoid catching it in the hypervisor - * context. It should be raised again when re-pushing the OS - * context. + * Lower the External interrupt. Used when pulling a context. It is + * necessary to avoid catching it in the higher privilege context. It + * should be raised again when re-pushing the lower privilege context. */ - qemu_irq_lower(xive_tctx_output(tctx, TM_QW1_OS)); + qemu_irq_lower(xive_tctx_output(tctx, ring)); } static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) { uint8_t *regs = &tctx->regs[ring]; + uint8_t pipr_min; + uint8_t ring_min; trace_xive_tctx_set_cppr(tctx->cs->cpu_index, ring, regs[TM_IPB], regs[TM_PIPR], @@ -139,8 +155,37 @@ static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) tctx->regs[ring + TM_CPPR] = cppr; + /* + * Recompute the PIPR based on local pending interrupts. The PHYS + * ring must take the minimum of both the PHYS and POOL PIPR values. 
+ */ + pipr_min = ipb_to_pipr(regs[TM_IPB]); + ring_min = ring; + + /* PHYS updates also depend on POOL values */ + if (ring == TM_QW3_HV_PHYS) { + uint8_t *pool_regs = &tctx->regs[TM_QW2_HV_POOL]; + + /* POOL values only matter if POOL ctx is valid */ + if (pool_regs[TM_WORD2] & 0x80) { + + uint8_t pool_pipr = ipb_to_pipr(pool_regs[TM_IPB]); + + /* + * Determine highest priority interrupt and + * remember which ring has it. + */ + if (pool_pipr < pipr_min) { + pipr_min = pool_pipr; + ring_min = TM_QW2_HV_POOL; + } + } + } + + regs[TM_PIPR] = pipr_min; + /* CPPR has changed, check if we need to raise a pending exception */ - xive_tctx_notify(tctx, ring); + xive_tctx_notify(tctx, ring_min); } void xive_tctx_ipb_update(XiveTCTX *tctx, uint8_t ring, uint8_t ipb) @@ -179,6 +224,17 @@ static uint64_t xive_tm_pull_pool_ctx(XivePresenter *xptr, XiveTCTX *tctx, return qw2w2; } +static uint64_t xive_tm_pull_phys_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, unsigned size) +{ + uint8_t qw3b8_prev = tctx->regs[TM_QW3_HV_PHYS + TM_WORD2]; + uint8_t qw3b8; + + qw3b8 = qw3b8_prev & ~TM_QW3B8_VT; + tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = qw3b8; + return qw3b8; +} + static void xive_tm_vt_push(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, uint64_t value, unsigned size) { @@ -207,14 +263,14 @@ static uint64_t xive_tm_vt_poll(XivePresenter *xptr, XiveTCTX *tctx, static const uint8_t xive_tm_hw_view[] = { 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */ 3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS */ - 0, 0, 3, 3, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */ + 0, 0, 3, 3, 0, 3, 3, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */ 3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 3, 3, 3, 0, /* QW-3 PHYS */ }; static const uint8_t xive_tm_hv_view[] = { 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */ 3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS */ - 0, 0, 3, 3, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */ + 0, 0, 3, 3, 0, 3, 3, 0, 0, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */ 3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 0, 0, 0, 0, /* QW-3 PHYS */ }; @@ -341,6 +397,19 @@ static void xive_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx, xive_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff); } +static void xive_tctx_set_lgs(XiveTCTX *tctx, uint8_t ring, uint8_t lgs) +{ + uint8_t *regs = &tctx->regs[ring]; + + regs[TM_LGS] = lgs; +} + +static void xive_tm_set_os_lgs(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size) +{ + xive_tctx_set_lgs(tctx, TM_QW1_OS, value & 0xff); +} + /* * Adjust the IPB to allow a CPU to process event queues of other * priorities during one physical interrupt cycle. 
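The CPPR update above recomputes the presented priority as the best (numerically lowest) of the PHYS and POOL pending priorities, remembering which ring supplied it so the exception is raised on that ring. The selection logic, as a standalone sketch with illustrative names:

#include <stdint.h>
#include <stdbool.h>

enum ring { RING_POOL, RING_PHYS };

struct best { uint8_t pipr; enum ring ring; };

static struct best best_pending(uint8_t phys_pipr, uint8_t pool_pipr,
                                bool pool_ctx_valid)
{
    struct best b = { phys_pipr, RING_PHYS };

    /* POOL only competes when its context is valid, as in the hunk */
    if (pool_ctx_valid && pool_pipr < b.pipr) {
        b.pipr = pool_pipr;
        b.ring = RING_POOL;
    }
    return b;
}
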
@@ -400,7 +469,7 @@ static uint64_t xive_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, qw1w2_new = xive_set_field32(TM_QW1W2_VO, qw1w2, 0); xive_tctx_set_os_cam(tctx, qw1w2_new); - xive_tctx_reset_os_signal(tctx); + xive_tctx_reset_signal(tctx, TM_QW1_OS); return qw1w2; } @@ -488,20 +557,34 @@ static const XiveTmOp xive_tm_operations[] = { * MMIOs below 2K : raw values and special operations without side * effects */ - { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, NULL }, - { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive_tm_push_os_ctx, NULL }, - { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, NULL }, - { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, NULL }, - { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, xive_tm_vt_poll }, + { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, + NULL }, + { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive_tm_push_os_ctx, + NULL }, + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, + NULL }, + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, + NULL }, + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, + xive_tm_vt_poll }, /* MMIOs above 2K : special operations with side effects */ - { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, xive_tm_ack_os_reg }, - { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, NULL }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL, xive_tm_pull_os_ctx }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL, xive_tm_pull_os_ctx }, - { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, xive_tm_ack_hv_reg }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, xive_tm_pull_pool_ctx }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, xive_tm_pull_pool_ctx }, + { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, + xive_tm_ack_os_reg }, + { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, + NULL }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL, + xive_tm_pull_os_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL, + xive_tm_pull_os_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, + xive_tm_ack_hv_reg }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, + xive_tm_pull_pool_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, + xive_tm_pull_pool_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX, 1, NULL, + xive_tm_pull_phys_ctx }, }; static const XiveTmOp xive2_tm_operations[] = { @@ -509,20 +592,50 @@ static const XiveTmOp xive2_tm_operations[] = { * MMIOs below 2K : raw values and special operations without side * effects */ - { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, NULL }, - { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive2_tm_push_os_ctx, NULL }, - { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, NULL }, - { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, NULL }, - { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, xive_tm_vt_poll }, + { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, + NULL }, + { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive2_tm_push_os_ctx, + NULL }, + { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 8, xive2_tm_push_os_ctx, + NULL }, + { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_LGS, 1, xive_tm_set_os_lgs, + NULL }, + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, + NULL }, + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, + NULL }, + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, + xive_tm_vt_poll }, + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + 
TM_T, 1, xive2_tm_set_hv_target, + NULL }, /* MMIOs above 2K : special operations with side effects */ - { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, xive_tm_ack_os_reg }, - { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, NULL }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL, xive2_tm_pull_os_ctx }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL, xive2_tm_pull_os_ctx }, - { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, xive_tm_ack_hv_reg }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, xive_tm_pull_pool_ctx }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, xive_tm_pull_pool_ctx }, + { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, + xive_tm_ack_os_reg }, + { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, + NULL }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX_G2, 4, NULL, + xive2_tm_pull_os_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL, + xive2_tm_pull_os_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL, + xive2_tm_pull_os_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, + xive_tm_ack_hv_reg }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX_G2, 4, NULL, + xive_tm_pull_pool_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, + xive_tm_pull_pool_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, + xive_tm_pull_pool_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX_OL, 1, xive2_tm_pull_os_ctx_ol, + NULL }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX_G2, 4, NULL, + xive_tm_pull_phys_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX, 1, NULL, + xive_tm_pull_phys_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX_OL, 1, xive2_tm_pull_phys_ctx_ol, + NULL }, }; static const XiveTmOp *xive_tm_find_op(XivePresenter *xptr, hwaddr offset, @@ -718,6 +831,10 @@ void xive_tctx_reset(XiveTCTX *tctx) tctx->regs[TM_QW1_OS + TM_LSMFB] = 0xFF; tctx->regs[TM_QW1_OS + TM_ACK_CNT] = 0xFF; tctx->regs[TM_QW1_OS + TM_AGE] = 0xFF; + if (!(xive_presenter_get_config(tctx->xptr) & + XIVE_PRESENTER_GEN1_TIMA_OS)) { + tctx->regs[TM_QW1_OS + TM_OGEN] = 2; + } /* * Initialize PIPR to 0xFF to avoid phantom interrupts when the @@ -1242,7 +1359,7 @@ static void xive_source_reset(void *dev) static void xive_source_realize(DeviceState *dev, Error **errp) { XiveSource *xsrc = XIVE_SOURCE(dev); - size_t esb_len = xive_source_esb_len(xsrc); + uint64_t esb_len = xive_source_esb_len(xsrc); assert(xsrc->xive); diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c index 1f150685bf..d1df35e9b3 100644 --- a/hw/intc/xive2.c +++ b/hw/intc/xive2.c @@ -26,6 +26,43 @@ uint32_t xive2_router_get_config(Xive2Router *xrtr) return xrc->get_config(xrtr); } +static int xive2_router_get_block_id(Xive2Router *xrtr) +{ + Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); + + return xrc->get_block_id(xrtr); +} + +static uint64_t xive2_nvp_reporting_addr(Xive2Nvp *nvp) +{ + uint64_t cache_addr; + + cache_addr = xive_get_field32(NVP2_W6_REPORTING_LINE, nvp->w6) << 24 | + xive_get_field32(NVP2_W7_REPORTING_LINE, nvp->w7); + cache_addr <<= 8; /* aligned on a cache line pair */ + return cache_addr; +} + +static uint32_t xive2_nvgc_get_backlog(Xive2Nvgc *nvgc, uint8_t priority) +{ + uint32_t val = 0; + uint8_t *ptr, i; + + if (priority > 7) { + return 0; + } + + /* + * The per-priority backlog counters are 24-bit and the structure + * is stored in big endian + */ + ptr = (uint8_t *)&nvgc->w2 + priority * 3; + for (i = 0; i < 3; i++, ptr++) { + val = (val << 8) + *ptr; + } + return val; +} + void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, GString *buf) { if 
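
The xive2_nvgc_get_backlog() helper added just above assembles a 24-bit, big-endian, per-priority counter out of three consecutive bytes of the NVGC structure. A minimal standalone sketch of that byte-by-byte accumulation, using illustrative names rather than the QEMU types:

#include <assert.h>
#include <stdint.h>

/*
 * Read a 24-bit big-endian counter stored at ptr; this is the closed form
 * of the shift-and-accumulate loop in xive2_nvgc_get_backlog().
 */
static uint32_t be24_read(const uint8_t *ptr)
{
    return ((uint32_t)ptr[0] << 16) | ((uint32_t)ptr[1] << 8) | ptr[2];
}

int main(void)
{
    const uint8_t raw[3] = { 0x01, 0x02, 0x03 };

    assert(be24_read(raw) == 0x010203);
    return 0;
}
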
(!xive2_eas_is_valid(eas)) { @@ -144,14 +181,20 @@ void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx, GString *buf) { uint8_t eq_blk = xive_get_field32(NVP2_W5_VP_END_BLOCK, nvp->w5); uint32_t eq_idx = xive_get_field32(NVP2_W5_VP_END_INDEX, nvp->w5); + uint64_t cache_line = xive2_nvp_reporting_addr(nvp); if (!xive2_nvp_is_valid(nvp)) { return; } - g_string_append_printf(buf, " %08x end:%02x/%04x IPB:%02x", + g_string_append_printf(buf, " %08x end:%02x/%04x IPB:%02x PGoFirst:%02x", nvp_idx, eq_blk, eq_idx, - xive_get_field32(NVP2_W2_IPB, nvp->w2)); + xive_get_field32(NVP2_W2_IPB, nvp->w2), + xive_get_field32(NVP2_W0_PGOFIRST, nvp->w0)); + if (cache_line) { + g_string_append_printf(buf, " reporting CL:%016"PRIx64, cache_line); + } + /* * When the NVP is HW controlled, more fields are updated */ @@ -166,6 +209,23 @@ void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx, GString *buf) g_string_append_c(buf, '\n'); } +void xive2_nvgc_pic_print_info(Xive2Nvgc *nvgc, uint32_t nvgc_idx, GString *buf) +{ + uint8_t i; + + if (!xive2_nvgc_is_valid(nvgc)) { + return; + } + + g_string_append_printf(buf, " %08x PGoNext:%02x bklog: ", nvgc_idx, + xive_get_field32(NVGC2_W0_PGONEXT, nvgc->w0)); + for (i = 0; i <= XIVE_PRIORITY_MAX; i++) { + g_string_append_printf(buf, "[%d]=0x%x ", + i, xive2_nvgc_get_backlog(nvgc, i)); + } + g_string_append_printf(buf, "\n"); +} + static void xive2_end_enqueue(Xive2End *end, uint32_t data) { uint64_t qaddr_base = xive2_end_qaddr(end); @@ -210,13 +270,14 @@ static void xive2_end_enqueue(Xive2End *end, uint32_t data) * the NVP by changing the H bit while the context is enabled */ -static void xive2_tctx_save_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx, - uint8_t nvp_blk, uint32_t nvp_idx) +static void xive2_tctx_save_ctx(Xive2Router *xrtr, XiveTCTX *tctx, + uint8_t nvp_blk, uint32_t nvp_idx, + uint8_t ring) { CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env; uint32_t pir = env->spr_cb[SPR_PIR].default_value; Xive2Nvp nvp; - uint8_t *regs = &tctx->regs[TM_QW1_OS]; + uint8_t *regs = &tctx->regs[ring]; if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) { qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n", @@ -261,44 +322,190 @@ static void xive2_tctx_save_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx, xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 1); } -static void xive2_os_cam_decode(uint32_t cam, uint8_t *nvp_blk, - uint32_t *nvp_idx, bool *vo, bool *ho) +static void xive2_cam_decode(uint32_t cam, uint8_t *nvp_blk, + uint32_t *nvp_idx, bool *valid, bool *hw) { *nvp_blk = xive2_nvp_blk(cam); *nvp_idx = xive2_nvp_idx(cam); - *vo = !!(cam & TM2_QW1W2_VO); - *ho = !!(cam & TM2_QW1W2_HO); + *valid = !!(cam & TM2_W2_VALID); + *hw = !!(cam & TM2_W2_HW); } -uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, - hwaddr offset, unsigned size) +/* + * Encode the HW CAM line with 7bit or 8bit thread id. The thread id + * width and block id width is configurable at the IC level. 
+ * + * chipid << 24 | 0000 0000 0000 0000 1 threadid (7Bit) + * chipid << 24 | 0000 0000 0000 0001 threadid (8Bit) + */ +static uint32_t xive2_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx) { Xive2Router *xrtr = XIVE2_ROUTER(xptr); - uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]); - uint32_t qw1w2_new; - uint32_t cam = be32_to_cpu(qw1w2); + CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env; + uint32_t pir = env->spr_cb[SPR_PIR].default_value; + uint8_t blk = xive2_router_get_block_id(xrtr); + uint8_t tid_shift = + xive2_router_get_config(xrtr) & XIVE2_THREADID_8BITS ? 8 : 7; + uint8_t tid_mask = (1 << tid_shift) - 1; + + return xive2_nvp_cam_line(blk, 1 << tid_shift | (pir & tid_mask)); +} + +static uint64_t xive2_tm_pull_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, unsigned size, uint8_t ring) +{ + Xive2Router *xrtr = XIVE2_ROUTER(xptr); + uint32_t target_ringw2 = xive_tctx_word2(&tctx->regs[ring]); + uint32_t cam = be32_to_cpu(target_ringw2); uint8_t nvp_blk; uint32_t nvp_idx; - bool vo; + uint8_t cur_ring; + bool valid; bool do_save; - xive2_os_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_save); + xive2_cam_decode(cam, &nvp_blk, &nvp_idx, &valid, &do_save); - if (!vo) { + if (!valid) { qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVP %x/%x !?\n", nvp_blk, nvp_idx); } - /* Invalidate CAM line */ - qw1w2_new = xive_set_field32(TM2_QW1W2_VO, qw1w2, 0); - memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2_new, 4); + /* Invalidate CAM line of requested ring and all lower rings */ + for (cur_ring = TM_QW0_USER; cur_ring <= ring; + cur_ring += XIVE_TM_RING_SIZE) { + uint32_t ringw2 = xive_tctx_word2(&tctx->regs[cur_ring]); + uint32_t ringw2_new = xive_set_field32(TM2_QW1W2_VO, ringw2, 0); + memcpy(&tctx->regs[cur_ring + TM_WORD2], &ringw2_new, 4); + } if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE && do_save) { - xive2_tctx_save_os_ctx(xrtr, tctx, nvp_blk, nvp_idx); + xive2_tctx_save_ctx(xrtr, tctx, nvp_blk, nvp_idx, ring); + } + + /* + * Lower external interrupt line of requested ring and below except for + * USER, which doesn't exist. + */ + for (cur_ring = TM_QW1_OS; cur_ring <= ring; + cur_ring += XIVE_TM_RING_SIZE) { + xive_tctx_reset_signal(tctx, cur_ring); + } + return target_ringw2; +} + +uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, unsigned size) +{ + return xive2_tm_pull_ctx(xptr, tctx, offset, size, TM_QW1_OS); +} + +#define REPORT_LINE_GEN1_SIZE 16 + +static void xive2_tm_report_line_gen1(XiveTCTX *tctx, uint8_t *data, + uint8_t size) +{ + uint8_t *regs = tctx->regs; + + g_assert(size == REPORT_LINE_GEN1_SIZE); + memset(data, 0, size); + /* + * See xive architecture for description of what is saved. It is + * hand-picked information to fit in 16 bytes. 
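
The comment above describes the HW CAM line layout that xive2_tctx_hw_cam_line() builds: block id in the top byte, a forced '1' just above the thread id, and a 7- or 8-bit thread id taken from the PIR. A small sketch of that encoding, assuming xive2_nvp_cam_line(blk, idx) simply packs blk into bits 31:24 and idx into the low bits:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-in for xive2_nvp_cam_line(): blk in bits 31:24. */
static uint32_t cam_line(uint8_t blk, uint32_t idx)
{
    return ((uint32_t)blk << 24) | idx;
}

/* Encode the HW CAM line for one thread, with 7- or 8-bit thread ids. */
static uint32_t hw_cam_line(uint8_t blk, uint32_t pir, bool tid_8bit)
{
    unsigned tid_shift = tid_8bit ? 8 : 7;
    uint32_t tid_mask = (1u << tid_shift) - 1;

    return cam_line(blk, (1u << tid_shift) | (pir & tid_mask));
}

int main(void)
{
    /* chip 2, thread 0x45, 7-bit ids: 0x02 << 24 | 1 << 7 | 0x45 */
    assert(hw_cam_line(2, 0x45, false) == 0x020000c5);
    /* same thread with 8-bit ids: 0x02 << 24 | 1 << 8 | 0x45 */
    assert(hw_cam_line(2, 0x45, true) == 0x02000145);
    return 0;
}
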
+ */ + data[0x0] = regs[TM_QW3_HV_PHYS + TM_NSR]; + data[0x1] = regs[TM_QW3_HV_PHYS + TM_CPPR]; + data[0x2] = regs[TM_QW3_HV_PHYS + TM_IPB]; + data[0x3] = regs[TM_QW2_HV_POOL + TM_IPB]; + data[0x4] = regs[TM_QW1_OS + TM_ACK_CNT]; + data[0x5] = regs[TM_QW3_HV_PHYS + TM_LGS]; + data[0x6] = 0xFF; + data[0x7] = regs[TM_QW3_HV_PHYS + TM_WORD2] & 0x80; + data[0x7] |= (regs[TM_QW2_HV_POOL + TM_WORD2] & 0x80) >> 1; + data[0x7] |= (regs[TM_QW1_OS + TM_WORD2] & 0x80) >> 2; + data[0x7] |= (regs[TM_QW3_HV_PHYS + TM_WORD2] & 0x3); + data[0x8] = regs[TM_QW1_OS + TM_NSR]; + data[0x9] = regs[TM_QW1_OS + TM_CPPR]; + data[0xA] = regs[TM_QW1_OS + TM_IPB]; + data[0xB] = regs[TM_QW1_OS + TM_LGS]; + if (regs[TM_QW0_USER + TM_WORD2] & 0x80) { + /* + * Logical server extension, except VU bit replaced by EB bit + * from NSR + */ + data[0xC] = regs[TM_QW0_USER + TM_WORD2]; + data[0xC] &= ~0x80; + data[0xC] |= regs[TM_QW0_USER + TM_NSR] & 0x80; + data[0xD] = regs[TM_QW0_USER + TM_WORD2 + 1]; + data[0xE] = regs[TM_QW0_USER + TM_WORD2 + 2]; + data[0xF] = regs[TM_QW0_USER + TM_WORD2 + 3]; } +} + +static void xive2_tm_pull_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, + unsigned size, uint8_t ring) +{ + Xive2Router *xrtr = XIVE2_ROUTER(xptr); + uint32_t hw_cam, nvp_idx, xive2_cfg, reserved; + uint8_t nvp_blk; + Xive2Nvp nvp; + uint64_t phys_addr; + MemTxResult result; - xive_tctx_reset_os_signal(tctx); - return qw1w2; + hw_cam = xive2_tctx_hw_cam_line(xptr, tctx); + nvp_blk = xive2_nvp_blk(hw_cam); + nvp_idx = xive2_nvp_idx(hw_cam); + + if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n", + nvp_blk, nvp_idx); + return; + } + + if (!xive2_nvp_is_valid(&nvp)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n", + nvp_blk, nvp_idx); + return; + } + + xive2_cfg = xive2_router_get_config(xrtr); + + phys_addr = xive2_nvp_reporting_addr(&nvp) + 0x80; /* odd line */ + if (xive2_cfg & XIVE2_GEN1_TIMA_OS) { + uint8_t pull_ctxt[REPORT_LINE_GEN1_SIZE]; + + xive2_tm_report_line_gen1(tctx, pull_ctxt, REPORT_LINE_GEN1_SIZE); + result = dma_memory_write(&address_space_memory, phys_addr, + pull_ctxt, REPORT_LINE_GEN1_SIZE, + MEMTXATTRS_UNSPECIFIED); + assert(result == MEMTX_OK); + } else { + result = dma_memory_write(&address_space_memory, phys_addr, + &tctx->regs, sizeof(tctx->regs), + MEMTXATTRS_UNSPECIFIED); + assert(result == MEMTX_OK); + reserved = 0xFFFFFFFF; + result = dma_memory_write(&address_space_memory, phys_addr + 12, + &reserved, sizeof(reserved), + MEMTXATTRS_UNSPECIFIED); + assert(result == MEMTX_OK); + } + + /* the rest is similar to pull context to registers */ + xive2_tm_pull_ctx(xptr, tctx, offset, size, ring); +} + +void xive2_tm_pull_os_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size) +{ + xive2_tm_pull_ctx_ol(xptr, tctx, offset, value, size, TM_QW1_OS); +} + + +void xive2_tm_pull_phys_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size) +{ + xive2_tm_pull_ctx_ol(xptr, tctx, offset, value, size, TM_QW3_HV_PHYS); } static uint8_t xive2_tctx_restore_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx, @@ -390,17 +597,31 @@ static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx, void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, uint64_t value, unsigned size) { - uint32_t cam = value; - uint32_t qw1w2 = cpu_to_be32(cam); + uint32_t cam; + uint32_t qw1w2; + uint64_t qw1dw1; uint8_t nvp_blk; uint32_t 
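
xive2_tm_report_line_gen1() above squeezes the interesting TIMA state into 16 bytes; the densest byte is data[0x7], which collects the top bit of each ring's WORD2 plus the low two bits of the PHYS WORD2. A hedged sketch of just that packing step, taking plain bytes in place of the TIMA register file:

#include <assert.h>
#include <stdint.h>

/*
 * Pack bit 7 of the PHYS, POOL and OS WORD2 bytes into bits 7, 6 and 5,
 * and keep the low two bits of the PHYS WORD2, as data[0x7] does.
 */
static uint8_t pack_word2_bits(uint8_t phys_w2, uint8_t pool_w2, uint8_t os_w2)
{
    uint8_t b = phys_w2 & 0x80;

    b |= (pool_w2 & 0x80) >> 1;
    b |= (os_w2 & 0x80) >> 2;
    b |= phys_w2 & 0x3;
    return b;
}

int main(void)
{
    /* top bit set in all three rings, PHYS WORD2 low bits 0b10 */
    assert(pack_word2_bits(0x82, 0x80, 0x80) == 0xe2);
    return 0;
}
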
nvp_idx; bool vo; bool do_restore; - xive2_os_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_restore); - /* First update the thead context */ - memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4); + switch (size) { + case 4: + cam = value; + qw1w2 = cpu_to_be32(cam); + memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4); + break; + case 8: + cam = value >> 32; + qw1dw1 = cpu_to_be64(value); + memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1dw1, 8); + break; + default: + g_assert_not_reached(); + } + + xive2_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_restore); /* Check the interrupt pending bits */ if (vo) { @@ -409,6 +630,19 @@ void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, } } +static void xive2_tctx_set_target(XiveTCTX *tctx, uint8_t ring, uint8_t target) +{ + uint8_t *regs = &tctx->regs[ring]; + + regs[TM_T] = target; +} + +void xive2_tm_set_hv_target(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size) +{ + xive2_tctx_set_target(tctx, TM_QW3_HV_PHYS, value & 0xff); +} + /* * XIVE Router (aka. Virtualization Controller or IVRE) */ @@ -471,31 +705,22 @@ int xive2_router_write_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx, return xrc->write_nvp(xrtr, nvp_blk, nvp_idx, nvp, word_number); } -static int xive2_router_get_block_id(Xive2Router *xrtr) +int xive2_router_get_nvgc(Xive2Router *xrtr, bool crowd, + uint8_t nvgc_blk, uint32_t nvgc_idx, + Xive2Nvgc *nvgc) { Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); - return xrc->get_block_id(xrtr); + return xrc->get_nvgc(xrtr, crowd, nvgc_blk, nvgc_idx, nvgc); } -/* - * Encode the HW CAM line with 7bit or 8bit thread id. The thread id - * width and block id width is configurable at the IC level. - * - * chipid << 24 | 0000 0000 0000 0000 1 threadid (7Bit) - * chipid << 24 | 0000 0000 0000 0001 threadid (8Bit) - */ -static uint32_t xive2_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx) +int xive2_router_write_nvgc(Xive2Router *xrtr, bool crowd, + uint8_t nvgc_blk, uint32_t nvgc_idx, + Xive2Nvgc *nvgc) { - Xive2Router *xrtr = XIVE2_ROUTER(xptr); - CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env; - uint32_t pir = env->spr_cb[SPR_PIR].default_value; - uint8_t blk = xive2_router_get_block_id(xrtr); - uint8_t tid_shift = - xive2_router_get_config(xrtr) & XIVE2_THREADID_8BITS ? 
8 : 7; - uint8_t tid_mask = (1 << tid_shift) - 1; + Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); - return xive2_nvp_cam_line(blk, 1 << tid_shift | (pir & tid_mask)); + return xrc->write_nvgc(xrtr, crowd, nvgc_blk, nvgc_idx, nvgc); } /* diff --git a/hw/mem/cxl_type3.c b/hw/mem/cxl_type3.c index 235ac40aeb..5cf754b38f 100644 --- a/hw/mem/cxl_type3.c +++ b/hw/mem/cxl_type3.c @@ -17,6 +17,7 @@ #include "hw/mem/pc-dimm.h" #include "hw/pci/pci.h" #include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" #include "qapi/error.h" #include "qemu/log.h" #include "qemu/module.h" @@ -919,16 +920,15 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp) ct3d->patrol_scrub_attrs.scrub_flags = CXL_MEMDEV_PS_ENABLE_DEFAULT; /* Set default value for DDR5 ECS read attributes */ + ct3d->ecs_attrs.ecs_log_cap = CXL_ECS_LOG_ENTRY_TYPE_DEFAULT; for (count = 0; count < CXL_ECS_NUM_MEDIA_FRUS; count++) { - ct3d->ecs_attrs[count].ecs_log_cap = - CXL_ECS_LOG_ENTRY_TYPE_DEFAULT; - ct3d->ecs_attrs[count].ecs_cap = + ct3d->ecs_attrs.fru_attrs[count].ecs_cap = CXL_ECS_REALTIME_REPORT_CAP_DEFAULT; - ct3d->ecs_attrs[count].ecs_config = + ct3d->ecs_attrs.fru_attrs[count].ecs_config = CXL_ECS_THRESHOLD_COUNT_DEFAULT | (CXL_ECS_MODE_DEFAULT << 3); /* Reserved */ - ct3d->ecs_attrs[count].ecs_flags = 0; + ct3d->ecs_attrs.fru_attrs[count].ecs_flags = 0; } return; @@ -1200,6 +1200,7 @@ static void ct3d_reset(DeviceState *dev) uint32_t *reg_state = ct3d->cxl_cstate.crb.cache_mem_registers; uint32_t *write_msk = ct3d->cxl_cstate.crb.cache_mem_regs_write_mask; + pcie_cap_fill_link_ep_usp(PCI_DEVICE(dev), ct3d->width, ct3d->speed); cxl_component_register_init_common(reg_state, write_msk, CXL2_TYPE3_DEVICE); cxl_device_register_init_t3(ct3d); @@ -1229,6 +1230,10 @@ static Property ct3_props[] = { DEFINE_PROP_UINT8("num-dc-regions", CXLType3Dev, dc.num_regions, 0), DEFINE_PROP_LINK("volatile-dc-memdev", CXLType3Dev, dc.host_dc, TYPE_MEMORY_BACKEND, HostMemoryBackend *), + DEFINE_PROP_PCIE_LINK_SPEED("x-speed", CXLType3Dev, + speed, PCIE_LINK_SPEED_32), + DEFINE_PROP_PCIE_LINK_WIDTH("x-width", CXLType3Dev, + width, PCIE_LINK_WIDTH_16), DEFINE_PROP_END_OF_LIST(), }; @@ -1375,9 +1380,7 @@ void qmp_cxl_inject_poison(const char *path, uint64_t start, uint64_t length, ct3d = CXL_TYPE3(obj); QLIST_FOREACH(p, &ct3d->poison_list, node) { - if (((start >= p->start) && (start < p->start + p->length)) || - ((start + length > p->start) && - (start + length <= p->start + p->length))) { + if ((start < p->start + p->length) && (start + length > p->start)) { error_setg(errp, "Overlap with existing poisoned region not supported"); return; @@ -2060,11 +2063,11 @@ static void qmp_cxl_process_dynamic_capacity_prescriptive(const char *path, stw_le_p(&dCap.host_id, hid); /* only valid for DC_REGION_CONFIG_UPDATED event */ dCap.updated_region_id = 0; - dCap.flags = 0; for (i = 0; i < num_extents; i++) { memcpy(&dCap.dynamic_capacity_extent, &extents[i], sizeof(CXLDCExtentRaw)); + dCap.flags = 0; if (i < num_extents - 1) { /* Set "More" flag */ dCap.flags |= BIT(0); diff --git a/hw/microblaze/petalogix_ml605_mmu.c b/hw/microblaze/petalogix_ml605_mmu.c index b4183c5267..61e47d8398 100644 --- a/hw/microblaze/petalogix_ml605_mmu.c +++ b/hw/microblaze/petalogix_ml605_mmu.c @@ -90,7 +90,7 @@ petalogix_ml605_init(MachineState *machine) object_property_set_int(OBJECT(cpu), "use-fpu", 1, &error_abort); object_property_set_bool(OBJECT(cpu), "dcache-writeback", true, &error_abort); - object_property_set_bool(OBJECT(cpu), "endianness", true, 
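
The reworked check in qmp_cxl_inject_poison() above replaces two asymmetric range tests with the standard overlap condition for half-open intervals: two ranges intersect exactly when each one starts before the other one ends. A small self-contained sketch of that test (the helper name here is illustrative, not a QEMU API):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* [a, a + alen) and [b, b + blen) overlap iff each starts before the
 * other ends. */
static bool ranges_overlap_sketch(uint64_t a, uint64_t alen,
                                  uint64_t b, uint64_t blen)
{
    return (a < b + blen) && (a + alen > b);
}

int main(void)
{
    assert(ranges_overlap_sketch(0x100, 0x40, 0x120, 0x10));   /* contained */
    assert(ranges_overlap_sketch(0x100, 0x40, 0x130, 0x40));   /* partial */
    assert(!ranges_overlap_sketch(0x100, 0x40, 0x140, 0x40));  /* adjacent */
    return 0;
}
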
&error_abort); + object_property_set_bool(OBJECT(cpu), "little-endian", true, &error_abort); qdev_realize(DEVICE(cpu), NULL, &error_abort); /* Attach emulated BRAM through the LMB. */ @@ -213,7 +213,12 @@ petalogix_ml605_init(MachineState *machine) static void petalogix_ml605_machine_init(MachineClass *mc) { - mc->desc = "PetaLogix linux refdesign for xilinx ml605 little endian"; +#if TARGET_BIG_ENDIAN + mc->desc = "PetaLogix linux refdesign for xilinx ml605 (big endian)"; + mc->deprecation_reason = "big endian support is not tested"; +#else + mc->desc = "PetaLogix linux refdesign for xilinx ml605 (little endian)"; +#endif mc->init = petalogix_ml605_init; } diff --git a/hw/microblaze/petalogix_s3adsp1800_mmu.c b/hw/microblaze/petalogix_s3adsp1800_mmu.c index dad46bd7f9..6c0f5c6c65 100644 --- a/hw/microblaze/petalogix_s3adsp1800_mmu.c +++ b/hw/microblaze/petalogix_s3adsp1800_mmu.c @@ -55,6 +55,9 @@ #define ETHLITE_IRQ 1 #define UARTLITE_IRQ 3 +#define TYPE_PETALOGIX_S3ADSP1800_MACHINE \ + MACHINE_TYPE_NAME("petalogix-s3adsp1800") + static void petalogix_s3adsp1800_init(MachineState *machine) { @@ -71,6 +74,8 @@ petalogix_s3adsp1800_init(MachineState *machine) cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU)); object_property_set_str(OBJECT(cpu), "version", "7.10.d", &error_abort); + object_property_set_bool(OBJECT(cpu), "little-endian", + !TARGET_BIG_ENDIAN, &error_abort); qdev_realize(DEVICE(cpu), NULL, &error_abort); /* Attach emulated BRAM through the LMB. */ @@ -122,7 +127,7 @@ petalogix_s3adsp1800_init(MachineState *machine) sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, ETHLITE_BASEADDR); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[ETHLITE_IRQ]); - create_unimplemented_device("gpio", GPIO_BASEADDR, 0x10000); + create_unimplemented_device("xps_gpio", GPIO_BASEADDR, 0x10000); microblaze_load_kernel(cpu, ddr_base, ram_size, machine->initrd_filename, @@ -130,11 +135,21 @@ petalogix_s3adsp1800_init(MachineState *machine) NULL); } -static void petalogix_s3adsp1800_machine_init(MachineClass *mc) +static void petalogix_s3adsp1800_machine_class_init(ObjectClass *oc, void *data) { + MachineClass *mc = MACHINE_CLASS(oc); + mc->desc = "PetaLogix linux refdesign for xilinx Spartan 3ADSP1800"; mc->init = petalogix_s3adsp1800_init; mc->is_default = true; } -DEFINE_MACHINE("petalogix-s3adsp1800", petalogix_s3adsp1800_machine_init) +static const TypeInfo petalogix_s3adsp1800_machine_types[] = { + { + .name = TYPE_PETALOGIX_S3ADSP1800_MACHINE, + .parent = TYPE_MACHINE, + .class_init = petalogix_s3adsp1800_machine_class_init, + }, +}; + +DEFINE_TYPES(petalogix_s3adsp1800_machine_types) diff --git a/hw/microblaze/xlnx-zynqmp-pmu.c b/hw/microblaze/xlnx-zynqmp-pmu.c index 1bfc9641d2..567aad47bf 100644 --- a/hw/microblaze/xlnx-zynqmp-pmu.c +++ b/hw/microblaze/xlnx-zynqmp-pmu.c @@ -90,7 +90,7 @@ static void xlnx_zynqmp_pmu_soc_realize(DeviceState *dev, Error **errp) object_property_set_bool(OBJECT(&s->cpu), "use-pcmp-instr", true, &error_abort); object_property_set_bool(OBJECT(&s->cpu), "use-mmu", false, &error_abort); - object_property_set_bool(OBJECT(&s->cpu), "endianness", true, + object_property_set_bool(OBJECT(&s->cpu), "little-endian", true, &error_abort); object_property_set_str(OBJECT(&s->cpu), "version", "8.40.b", &error_abort); @@ -181,9 +181,13 @@ static void xlnx_zynqmp_pmu_init(MachineState *machine) static void xlnx_zynqmp_pmu_machine_init(MachineClass *mc) { - mc->desc = "Xilinx ZynqMP PMU machine"; +#if TARGET_BIG_ENDIAN + mc->desc = "Xilinx ZynqMP PMU machine (big endian)"; + 
mc->deprecation_reason = "big endian support is not tested"; +#else + mc->desc = "Xilinx ZynqMP PMU machine (little endian)"; +#endif mc->init = xlnx_zynqmp_pmu_init; } DEFINE_MACHINE("xlnx-zynqmp-pmu", xlnx_zynqmp_pmu_machine_init) - diff --git a/hw/net/fsl_etsec/etsec.c b/hw/net/fsl_etsec/etsec.c index 3fdd16ef2e..d8076e7be4 100644 --- a/hw/net/fsl_etsec/etsec.c +++ b/hw/net/fsl_etsec/etsec.c @@ -36,7 +36,6 @@ #include "registers.h" #include "qapi/error.h" #include "qemu/log.h" -#include "qemu/module.h" /* #define HEX_DUMP */ /* #define DEBUG_REGISTER */ @@ -431,17 +430,14 @@ static void etsec_class_init(ObjectClass *klass, void *data) dc->user_creatable = true; } -static const TypeInfo etsec_info = { - .name = TYPE_ETSEC_COMMON, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(eTSEC), - .class_init = etsec_class_init, - .instance_init = etsec_instance_init, +static const TypeInfo etsec_types[] = { + { + .name = TYPE_ETSEC_COMMON, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(eTSEC), + .class_init = etsec_class_init, + .instance_init = etsec_instance_init, + }, }; -static void etsec_register_types(void) -{ - type_register_static(&etsec_info); -} - -type_init(etsec_register_types) +DEFINE_TYPES(etsec_types) diff --git a/hw/net/fsl_etsec/miim.c b/hw/net/fsl_etsec/miim.c index b48d2cb57b..4e9169907a 100644 --- a/hw/net/fsl_etsec/miim.c +++ b/hw/net/fsl_etsec/miim.c @@ -29,13 +29,6 @@ /* #define DEBUG_MIIM */ -#define MIIM_CONTROL 0 -#define MIIM_STATUS 1 -#define MIIM_PHY_ID_1 2 -#define MIIM_PHY_ID_2 3 -#define MIIM_T2_STATUS 10 -#define MIIM_EXT_STATUS 15 - static void miim_read_cycle(eTSEC *etsec) { uint8_t phy; @@ -47,14 +40,14 @@ static void miim_read_cycle(eTSEC *etsec) addr = etsec->regs[MIIMADD].value & 0x1F; switch (addr) { - case MIIM_CONTROL: + case MII_BMCR: value = etsec->phy_control; break; - case MIIM_STATUS: + case MII_BMSR: value = etsec->phy_status; break; - case MIIM_T2_STATUS: - value = 0x1800; /* Local and remote receivers OK */ + case MII_STAT1000: + value = MII_STAT1000_LOK | MII_STAT1000_ROK; break; default: value = 0x0; @@ -84,8 +77,8 @@ static void miim_write_cycle(eTSEC *etsec) #endif switch (addr) { - case MIIM_CONTROL: - etsec->phy_control = value & ~(0x8100); + case MII_BMCR: + etsec->phy_control = value & ~(MII_BMCR_RESET | MII_BMCR_FD); break; default: break; diff --git a/hw/net/npcm_gmac.c b/hw/net/npcm_gmac.c index 6fa6bece61..685905f9e2 100644 --- a/hw/net/npcm_gmac.c +++ b/hw/net/npcm_gmac.c @@ -546,9 +546,8 @@ static void gmac_try_send_next_packet(NPCMGMACState *gmac) /* 1 = DMA Owned, 0 = Software Owned */ if (!(tx_desc.tdes0 & TX_DESC_TDES0_OWN)) { - qemu_log_mask(LOG_GUEST_ERROR, - "TX Descriptor @ 0x%x is owned by software\n", - desc_addr); + trace_npcm_gmac_tx_desc_owner(DEVICE(gmac)->canonical_path, + desc_addr); gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_TU; gmac_dma_set_state(gmac, NPCM_DMA_STATUS_TX_PROCESS_STATE_SHIFT, NPCM_DMA_STATUS_TX_SUSPENDED_STATE); diff --git a/hw/net/trace-events b/hw/net/trace-events index 91a3d0c054..d0f1d8c0fb 100644 --- a/hw/net/trace-events +++ b/hw/net/trace-events @@ -478,6 +478,7 @@ npcm_gmac_packet_received(const char* name, uint32_t len) "%s: Reception finishe npcm_gmac_packet_sent(const char* name, uint16_t len) "%s: TX packet sent!, length: 0x%04" PRIX16 npcm_gmac_debug_desc_data(const char* name, void* addr, uint32_t des0, uint32_t des1, uint32_t des2, uint32_t des3)"%s: Address: %p Descriptor 0: 0x%04" PRIX32 " Descriptor 1: 0x%04" PRIX32 "Descriptor 2: 0x%04" PRIX32 " 
Descriptor 3: 0x%04" PRIX32 npcm_gmac_packet_tx_desc_data(const char* name, uint32_t tdes0, uint32_t tdes1) "%s: Tdes0: 0x%04" PRIX32 " Tdes1: 0x%04" PRIX32 +npcm_gmac_tx_desc_owner(const char* name, uint32_t desc_addr) "%s: TX Descriptor @0x%04" PRIX32 " is owned by software" # npcm_pcs.c npcm_pcs_reg_read(const char *name, uint16_t indirect_access_baes, uint64_t offset, uint16_t value) "%s: IND: 0x%02" PRIx16 " offset: 0x%04" PRIx64 " value: 0x%04" PRIx16 diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c index f4e89203c1..8e4612e035 100644 --- a/hw/nvme/ctrl.c +++ b/hw/nvme/ctrl.c @@ -5692,6 +5692,33 @@ static uint16_t nvme_identify_sec_ctrl_list(NvmeCtrl *n, NvmeRequest *req) return nvme_c2h(n, (uint8_t *)&list, sizeof(list), req); } +static uint16_t nvme_identify_ns_ind(NvmeCtrl *n, NvmeRequest *req, bool alloc) +{ + NvmeNamespace *ns; + NvmeIdentify *c = (NvmeIdentify *)&req->cmd; + uint32_t nsid = le32_to_cpu(c->nsid); + + trace_pci_nvme_identify_ns_ind(nsid); + + if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) { + return NVME_INVALID_NSID | NVME_DNR; + } + + ns = nvme_ns(n, nsid); + if (unlikely(!ns)) { + if (alloc) { + ns = nvme_subsys_ns(n->subsys, nsid); + if (!ns) { + return nvme_rpt_empty_id_struct(n, req); + } + } else { + return nvme_rpt_empty_id_struct(n, req); + } + } + + return nvme_c2h(n, (uint8_t *)&ns->id_ns_ind, sizeof(NvmeIdNsInd), req); +} + static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req, bool active) { @@ -5946,6 +5973,10 @@ static uint16_t nvme_identify(NvmeCtrl *n, NvmeRequest *req) return nvme_identify_sec_ctrl_list(n, req); case NVME_ID_CNS_CS_NS: return nvme_identify_ns_csi(n, req, true); + case NVME_ID_CNS_CS_IND_NS: + return nvme_identify_ns_ind(n, req, false); + case NVME_ID_CNS_CS_IND_NS_ALLOCATED: + return nvme_identify_ns_ind(n, req, true); case NVME_ID_CNS_CS_NS_PRESENT: return nvme_identify_ns_csi(n, req, false); case NVME_ID_CNS_CTRL: diff --git a/hw/nvme/dif.c b/hw/nvme/dif.c index 01b19c3373..2805128498 100644 --- a/hw/nvme/dif.c +++ b/hw/nvme/dif.c @@ -575,11 +575,6 @@ uint16_t nvme_dif_rw(NvmeCtrl *n, NvmeRequest *req) uint8_t *mbuf, *end; int16_t pil = ns->lbaf.ms - nvme_pi_tuple_size(ns); - status = nvme_check_prinfo(ns, prinfo, slba, reftag); - if (status) { - goto err; - } - flags = 0; ctx->mdata.bounce = g_malloc0(mlen); diff --git a/hw/nvme/ns.c b/hw/nvme/ns.c index ea8db175db..526e15aa80 100644 --- a/hw/nvme/ns.c +++ b/hw/nvme/ns.c @@ -30,6 +30,7 @@ void nvme_ns_init_format(NvmeNamespace *ns) { NvmeIdNs *id_ns = &ns->id_ns; + NvmeIdNsNvm *id_ns_nvm = &ns->id_ns_nvm; BlockDriverInfo bdi; int npdg, ret; int64_t nlbas; @@ -55,6 +56,8 @@ void nvme_ns_init_format(NvmeNamespace *ns) } id_ns->npda = id_ns->npdg = npdg - 1; + id_ns_nvm->npdal = npdg; + id_ns_nvm->npdgl = npdg; } static int nvme_ns_init(NvmeNamespace *ns, Error **errp) @@ -62,6 +65,7 @@ static int nvme_ns_init(NvmeNamespace *ns, Error **errp) static uint64_t ns_count; NvmeIdNs *id_ns = &ns->id_ns; NvmeIdNsNvm *id_ns_nvm = &ns->id_ns_nvm; + NvmeIdNsInd *id_ns_ind = &ns->id_ns_ind; uint8_t ds; uint16_t ms; int i; @@ -72,10 +76,12 @@ static int nvme_ns_init(NvmeNamespace *ns, Error **errp) ns->id_ns.dlfeat = 0x1; /* support DULBE and I/O optimization fields */ - id_ns->nsfeat |= (0x4 | 0x10); + id_ns->nsfeat |= (NVME_ID_NS_NSFEAT_DAE | NVME_ID_NS_NSFEAT_OPTPERF_ALL); if (ns->params.shared) { - id_ns->nmic |= NVME_NMIC_NS_SHARED; + id_ns->nmic |= NVME_ID_NS_IND_NMIC_SHRNS; + id_ns_ind->nmic = NVME_ID_NS_IND_NMIC_SHRNS; + id_ns_ind->nstat = 
NVME_ID_NS_IND_NSTAT_NRDY; } /* Substitute a missing EUI-64 by an autogenerated one */ @@ -770,6 +776,7 @@ static void nvme_ns_realize(DeviceState *dev, Error **errp) subsys->namespaces[nsid] = ns; ns->id_ns.endgid = cpu_to_le16(0x1); + ns->id_ns_ind.endgrpid = cpu_to_le16(0x1); if (ns->params.detached) { return; diff --git a/hw/nvme/nvme.h b/hw/nvme/nvme.h index 7566b316d1..7242206910 100644 --- a/hw/nvme/nvme.h +++ b/hw/nvme/nvme.h @@ -233,6 +233,7 @@ typedef struct NvmeNamespace { int64_t moff; NvmeIdNs id_ns; NvmeIdNsNvm id_ns_nvm; + NvmeIdNsInd id_ns_ind; NvmeLBAF lbaf; unsigned int nlbaf; size_t lbasz; diff --git a/hw/nvme/trace-events b/hw/nvme/trace-events index 3a67680c6a..6be0bfa1c1 100644 --- a/hw/nvme/trace-events +++ b/hw/nvme/trace-events @@ -56,6 +56,7 @@ pci_nvme_identify(uint16_t cid, uint8_t cns, uint16_t ctrlid, uint8_t csi) "cid pci_nvme_identify_ctrl(void) "identify controller" pci_nvme_identify_ctrl_csi(uint8_t csi) "identify controller, csi=0x%"PRIx8"" pci_nvme_identify_ns(uint32_t ns) "nsid %"PRIu32"" +pci_nvme_identify_ns_ind(uint32_t nsid) "nsid %"PRIu32"" pci_nvme_identify_ctrl_list(uint8_t cns, uint16_t cntid) "cns 0x%"PRIx8" cntid %"PRIu16"" pci_nvme_identify_pri_ctrl_cap(uint16_t cntlid) "identify primary controller capabilities cntlid=%"PRIu16"" pci_nvme_identify_sec_ctrl_list(uint16_t cntlid, uint8_t numcntl) "identify secondary controller list cntlid=%"PRIu16" numcntl=%"PRIu8"" diff --git a/hw/pci-bridge/cxl_downstream.c b/hw/pci-bridge/cxl_downstream.c index 4b42984360..c347ac06f3 100644 --- a/hw/pci-bridge/cxl_downstream.c +++ b/hw/pci-bridge/cxl_downstream.c @@ -13,6 +13,8 @@ #include "hw/pci/msi.h" #include "hw/pci/pcie.h" #include "hw/pci/pcie_port.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" #include "hw/cxl/cxl.h" #include "qapi/error.h" @@ -210,24 +212,20 @@ static void cxl_dsp_exitfn(PCIDevice *d) pci_bridge_exitfn(d); } -static void cxl_dsp_instance_post_init(Object *obj) -{ - PCIESlot *s = PCIE_SLOT(obj); - - if (!s->speed) { - s->speed = QEMU_PCI_EXP_LNK_2_5GT; - } - - if (!s->width) { - s->width = QEMU_PCI_EXP_LNK_X1; - } -} +static Property cxl_dsp_props[] = { + DEFINE_PROP_PCIE_LINK_SPEED("x-speed", PCIESlot, + speed, PCIE_LINK_SPEED_64), + DEFINE_PROP_PCIE_LINK_WIDTH("x-width", PCIESlot, + width, PCIE_LINK_WIDTH_16), + DEFINE_PROP_END_OF_LIST() +}; static void cxl_dsp_class_init(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PCIDeviceClass *k = PCI_DEVICE_CLASS(oc); + device_class_set_props(dc, cxl_dsp_props); k->config_write = cxl_dsp_config_write; k->realize = cxl_dsp_realize; k->exit = cxl_dsp_exitfn; @@ -243,7 +241,6 @@ static const TypeInfo cxl_dsp_info = { .name = TYPE_CXL_DSP, .instance_size = sizeof(CXLDownstreamPort), .parent = TYPE_PCIE_SLOT, - .instance_post_init = cxl_dsp_instance_post_init, .class_init = cxl_dsp_class_init, .interfaces = (InterfaceInfo[]) { { INTERFACE_PCIE_DEVICE }, diff --git a/hw/pci-bridge/cxl_root_port.c b/hw/pci-bridge/cxl_root_port.c index 2dd10239bd..5e2156d7ba 100644 --- a/hw/pci-bridge/cxl_root_port.c +++ b/hw/pci-bridge/cxl_root_port.c @@ -24,6 +24,7 @@ #include "hw/pci/pcie_port.h" #include "hw/pci/msi.h" #include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" #include "hw/sysbus.h" #include "qapi/error.h" #include "hw/cxl/cxl.h" @@ -206,6 +207,10 @@ static Property gen_rp_props[] = { -1), DEFINE_PROP_SIZE("pref64-reserve", CXLRootPort, res_reserve.mem_pref_64, -1), + DEFINE_PROP_PCIE_LINK_SPEED("x-speed", PCIESlot, + speed, 
PCIE_LINK_SPEED_64), + DEFINE_PROP_PCIE_LINK_WIDTH("x-width", PCIESlot, + width, PCIE_LINK_WIDTH_32), DEFINE_PROP_END_OF_LIST() }; diff --git a/hw/pci-bridge/cxl_upstream.c b/hw/pci-bridge/cxl_upstream.c index a5a39cc524..55f8b0053f 100644 --- a/hw/pci-bridge/cxl_upstream.c +++ b/hw/pci-bridge/cxl_upstream.c @@ -11,6 +11,7 @@ #include "qemu/osdep.h" #include "qemu/log.h" #include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" #include "hw/pci/msi.h" #include "hw/pci/pcie.h" #include "hw/pci/pcie_port.h" @@ -100,6 +101,7 @@ static void cxl_usp_reset(DeviceState *qdev) pci_bridge_reset(qdev); pcie_cap_deverr_reset(d); + pcie_cap_fill_link_ep_usp(d, usp->width, usp->speed); latch_registers(usp); } @@ -363,6 +365,10 @@ static void cxl_usp_exitfn(PCIDevice *d) static Property cxl_upstream_props[] = { DEFINE_PROP_UINT64("sn", CXLUpstreamPort, sn, UI64_NULL), DEFINE_PROP_STRING("cdat", CXLUpstreamPort, cxl_cstate.cdat.filename), + DEFINE_PROP_PCIE_LINK_SPEED("x-speed", CXLUpstreamPort, + speed, PCIE_LINK_SPEED_32), + DEFINE_PROP_PCIE_LINK_WIDTH("x-width", CXLUpstreamPort, + width, PCIE_LINK_WIDTH_16), DEFINE_PROP_END_OF_LIST() }; diff --git a/hw/pci-bridge/pci_expander_bridge.c b/hw/pci-bridge/pci_expander_bridge.c index dfaea6cbf4..07d411cff5 100644 --- a/hw/pci-bridge/pci_expander_bridge.c +++ b/hw/pci-bridge/pci_expander_bridge.c @@ -38,7 +38,6 @@ DECLARE_INSTANCE_CHECKER(PXBBus, PXB_BUS, DECLARE_INSTANCE_CHECKER(PXBBus, PXB_PCIE_BUS, TYPE_PXB_PCIE_BUS) -#define TYPE_PXB_CXL_BUS "pxb-cxl-bus" DECLARE_INSTANCE_CHECKER(PXBBus, PXB_CXL_BUS, TYPE_PXB_CXL_BUS) @@ -85,12 +84,25 @@ static uint16_t pxb_bus_numa_node(PCIBus *bus) return pxb->numa_node; } +static void prop_pxb_uid_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + uint32_t uid = pci_bus_num(PCI_BUS(obj)); + + visit_type_uint32(v, name, &uid, errp); +} + static void pxb_bus_class_init(ObjectClass *class, void *data) { PCIBusClass *pbc = PCI_BUS_CLASS(class); pbc->bus_num = pxb_bus_num; pbc->numa_node = pxb_bus_numa_node; + + object_class_property_add(class, "acpi_uid", "uint32", + prop_pxb_uid_get, NULL, NULL, NULL); + object_class_property_set_description(class, "acpi_uid", + "ACPI Unique ID used to distinguish this PCI Host Bridge / ACPI00016"); } static const TypeInfo pxb_bus_info = { @@ -318,7 +330,7 @@ static gint pxb_compare(gconstpointer a, gconstpointer b) 0; } -static void pxb_dev_realize_common(PCIDevice *dev, enum BusType type, +static bool pxb_dev_realize_common(PCIDevice *dev, enum BusType type, Error **errp) { PXBDev *pxb = PXB_DEV(dev); @@ -330,13 +342,13 @@ static void pxb_dev_realize_common(PCIDevice *dev, enum BusType type, if (ms->numa_state == NULL) { error_setg(errp, "NUMA is not supported by this machine-type"); - return; + return false; } if (pxb->numa_node != NUMA_NODE_UNASSIGNED && pxb->numa_node >= ms->numa_state->num_nodes) { error_setg(errp, "Illegal numa node %d", pxb->numa_node); - return; + return false; } if (dev->qdev.id && *dev->qdev.id) { @@ -382,12 +394,13 @@ static void pxb_dev_realize_common(PCIDevice *dev, enum BusType type, pci_config_set_class(dev->config, PCI_CLASS_BRIDGE_HOST); pxb_dev_list = g_list_insert_sorted(pxb_dev_list, pxb, pxb_compare); - return; + return true; err_register_bus: object_unref(OBJECT(bds)); object_unparent(OBJECT(bus)); object_unref(OBJECT(ds)); + return false; } static void pxb_dev_realize(PCIDevice *dev, Error **errp) @@ -488,7 +501,9 @@ static void pxb_cxl_dev_realize(PCIDevice *dev, Error **errp) return; } - 
pxb_dev_realize_common(dev, CXL, errp); + if (!pxb_dev_realize_common(dev, CXL, errp)) { + return; + } pxb_cxl_dev_reset(DEVICE(dev)); } diff --git a/hw/pci-host/gpex-acpi.c b/hw/pci-host/gpex-acpi.c index 391fabb8a8..e8b4c64c5f 100644 --- a/hw/pci-host/gpex-acpi.c +++ b/hw/pci-host/gpex-acpi.c @@ -141,6 +141,7 @@ void acpi_dsdt_add_gpex(Aml *scope, struct GPEXConfig *cfg) QLIST_FOREACH(bus, &bus->child, sibling) { uint8_t bus_num = pci_bus_num(bus); uint8_t numa_node = pci_bus_numa_node(bus); + uint32_t uid; bool is_cxl = pci_bus_is_cxl(bus); if (!pci_bus_is_root(bus)) { @@ -156,6 +157,8 @@ void acpi_dsdt_add_gpex(Aml *scope, struct GPEXConfig *cfg) nr_pcie_buses = bus_num; } + uid = object_property_get_uint(OBJECT(bus), "acpi_uid", + &error_fatal); dev = aml_device("PC%.02X", bus_num); if (is_cxl) { struct Aml *pkg = aml_package(2); @@ -168,7 +171,7 @@ void acpi_dsdt_add_gpex(Aml *scope, struct GPEXConfig *cfg) aml_append(dev, aml_name_decl("_CID", aml_string("PNP0A03"))); } aml_append(dev, aml_name_decl("_BBN", aml_int(bus_num))); - aml_append(dev, aml_name_decl("_UID", aml_int(bus_num))); + aml_append(dev, aml_name_decl("_UID", aml_int(uid))); aml_append(dev, aml_name_decl("_STR", aml_unicode("pxb Device"))); aml_append(dev, aml_name_decl("_CCA", aml_int(1))); if (numa_node != NUMA_NODE_UNASSIGNED) { diff --git a/hw/pci-host/ppce500.c b/hw/pci-host/ppce500.c index 95b983b2b3..b70631045a 100644 --- a/hw/pci-host/ppce500.c +++ b/hw/pci-host/ppce500.c @@ -22,7 +22,6 @@ #include "hw/pci/pci_device.h" #include "hw/pci/pci_host.h" #include "qemu/bswap.h" -#include "qemu/module.h" #include "hw/pci-host/ppce500.h" #include "qom/object.h" @@ -475,7 +474,7 @@ static void e500_pcihost_realize(DeviceState *dev, Error **errp) address_space_init(&s->bm_as, &s->bm, "pci-bm"); pci_setup_iommu(b, &ppce500_iommu_ops, s); - pci_create_simple(b, 0, "e500-host-bridge"); + pci_create_simple(b, 0, TYPE_PPC_E500_PCI_BRIDGE); memory_region_init(&s->container, OBJECT(h), "pci-container", PCIE500_ALL_SIZE); memory_region_init_io(&h->conf_mem, OBJECT(h), &pci_host_conf_be_ops, h, @@ -508,17 +507,6 @@ static void e500_host_bridge_class_init(ObjectClass *klass, void *data) dc->user_creatable = false; } -static const TypeInfo e500_host_bridge_info = { - .name = TYPE_PPC_E500_PCI_BRIDGE, - .parent = TYPE_PCI_DEVICE, - .instance_size = sizeof(PPCE500PCIBridgeState), - .class_init = e500_host_bridge_class_init, - .interfaces = (InterfaceInfo[]) { - { INTERFACE_CONVENTIONAL_PCI_DEVICE }, - { }, - }, -}; - static Property pcihost_properties[] = { DEFINE_PROP_UINT32("first_slot", PPCE500PCIState, first_slot, 0x11), DEFINE_PROP_UINT32("first_pin_irq", PPCE500PCIState, first_pin_irq, 0x1), @@ -535,17 +523,23 @@ static void e500_pcihost_class_init(ObjectClass *klass, void *data) dc->vmsd = &vmstate_ppce500_pci; } -static const TypeInfo e500_pcihost_info = { - .name = TYPE_PPC_E500_PCI_HOST_BRIDGE, - .parent = TYPE_PCI_HOST_BRIDGE, - .instance_size = sizeof(PPCE500PCIState), - .class_init = e500_pcihost_class_init, +static const TypeInfo e500_pci_types[] = { + { + .name = TYPE_PPC_E500_PCI_BRIDGE, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PPCE500PCIBridgeState), + .class_init = e500_host_bridge_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, + }, + { + .name = TYPE_PPC_E500_PCI_HOST_BRIDGE, + .parent = TYPE_PCI_HOST_BRIDGE, + .instance_size = sizeof(PPCE500PCIState), + .class_init = e500_pcihost_class_init, + }, }; -static void e500_pci_register_types(void) -{ 
- type_register_static(&e500_pcihost_info); - type_register_static(&e500_host_bridge_info); -} - -type_init(e500_pci_register_types) +DEFINE_TYPES(e500_pci_types) diff --git a/hw/pci/pci.c b/hw/pci/pci.c index 87da35ca9b..1416ae202c 100644 --- a/hw/pci/pci.c +++ b/hw/pci/pci.c @@ -67,6 +67,19 @@ static char *pcibus_get_fw_dev_path(DeviceState *dev); static void pcibus_reset_hold(Object *obj, ResetType type); static bool pcie_has_upstream_port(PCIDevice *dev); +static void prop_pci_busnr_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + uint8_t busnr = pci_dev_bus_num(PCI_DEVICE(obj)); + + visit_type_uint8(v, name, &busnr, errp); +} + +static const PropertyInfo prop_pci_busnr = { + .name = "busnr", + .get = prop_pci_busnr_get, +}; + static Property pci_props[] = { DEFINE_PROP_PCI_DEVFN("addr", PCIDevice, devfn, -1), DEFINE_PROP_STRING("romfile", PCIDevice, romfile), @@ -87,6 +100,9 @@ static Property pci_props[] = { QEMU_PCIE_ARI_NEXTFN_1_BITNR, false), DEFINE_PROP_SIZE32("x-max-bounce-buffer-size", PCIDevice, max_bounce_buffer_size, DEFAULT_MAX_BOUNCE_BUFFER_SIZE), + DEFINE_PROP_BIT("x-pcie-ext-tag", PCIDevice, cap_present, + QEMU_PCIE_EXT_TAG_BITNR, true), + { .name = "busnr", .info = &prop_pci_busnr }, DEFINE_PROP_END_OF_LIST() }; diff --git a/hw/pci/pci_bridge.c b/hw/pci/pci_bridge.c index 6a4e38856d..2c7bb1a525 100644 --- a/hw/pci/pci_bridge.c +++ b/hw/pci/pci_bridge.c @@ -380,9 +380,12 @@ void pci_bridge_initfn(PCIDevice *dev, const char *typename) sec_bus->map_irq = br->map_irq ? br->map_irq : pci_swizzle_map_irq_fn; sec_bus->address_space_mem = &br->address_space_mem; memory_region_init(&br->address_space_mem, OBJECT(br), "pci_bridge_pci", UINT64_MAX); + address_space_init(&br->as_mem, &br->address_space_mem, + "pci_bridge_pci_mem"); sec_bus->address_space_io = &br->address_space_io; memory_region_init(&br->address_space_io, OBJECT(br), "pci_bridge_io", 4 * GiB); + address_space_init(&br->as_io, &br->address_space_io, "pci_bridge_pci_io"); pci_bridge_region_init(br); QLIST_INIT(&sec_bus->child); QLIST_INSERT_HEAD(&parent->child, sec_bus, sibling); @@ -399,6 +402,8 @@ void pci_bridge_exitfn(PCIDevice *pci_dev) PCIBridge *s = PCI_BRIDGE(pci_dev); assert(QLIST_EMPTY(&s->sec_bus.child)); QLIST_REMOVE(&s->sec_bus, sibling); + address_space_destroy(&s->as_mem); + address_space_destroy(&s->as_io); pci_bridge_region_del(s, &s->windows); pci_bridge_region_cleanup(s, &s->windows); /* object_unparent() is called automatically during device deletion */ diff --git a/hw/pci/pcie.c b/hw/pci/pcie.c index 4b2f0805c6..0b455c8654 100644 --- a/hw/pci/pcie.c +++ b/hw/pci/pcie.c @@ -86,7 +86,13 @@ pcie_cap_v1_fill(PCIDevice *dev, uint8_t port, uint8_t type, uint8_t version) * Specification, Revision 1.1., or subsequent PCI Express Base * Specification revisions. 
*/ - pci_set_long(exp_cap + PCI_EXP_DEVCAP, PCI_EXP_DEVCAP_RBER); + uint32_t devcap = PCI_EXP_DEVCAP_RBER; + + if (dev->cap_present & QEMU_PCIE_EXT_TAG) { + devcap = PCI_EXP_DEVCAP_RBER | PCI_EXP_DEVCAP_EXT_TAG; + } + + pci_set_long(exp_cap + PCI_EXP_DEVCAP, devcap); pci_set_long(exp_cap + PCI_EXP_LNKCAP, (port << PCI_EXP_LNKCAP_PN_SHIFT) | @@ -105,46 +111,18 @@ pcie_cap_v1_fill(PCIDevice *dev, uint8_t port, uint8_t type, uint8_t version) pci_set_word(cmask + PCI_EXP_LNKSTA, 0); } -static void pcie_cap_fill_slot_lnk(PCIDevice *dev) +/* Includes setting the target speed default */ +static void pcie_cap_fill_lnk(uint8_t *exp_cap, PCIExpLinkWidth width, + PCIExpLinkSpeed speed) { - PCIESlot *s = (PCIESlot *)object_dynamic_cast(OBJECT(dev), TYPE_PCIE_SLOT); - uint8_t *exp_cap = dev->config + dev->exp.exp_cap; - - /* Skip anything that isn't a PCIESlot */ - if (!s) { - return; - } - /* Clear and fill LNKCAP from what was configured above */ pci_long_test_and_clear_mask(exp_cap + PCI_EXP_LNKCAP, PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS); pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP, - QEMU_PCI_EXP_LNKCAP_MLW(s->width) | - QEMU_PCI_EXP_LNKCAP_MLS(s->speed)); - - /* - * Link bandwidth notification is required for all root ports and - * downstream ports supporting links wider than x1 or multiple link - * speeds. - */ - if (s->width > QEMU_PCI_EXP_LNK_X1 || - s->speed > QEMU_PCI_EXP_LNK_2_5GT) { - pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP, - PCI_EXP_LNKCAP_LBNC); - } - - if (s->speed > QEMU_PCI_EXP_LNK_2_5GT) { - /* - * Hot-plug capable downstream ports and downstream ports supporting - * link speeds greater than 5GT/s must hardwire PCI_EXP_LNKCAP_DLLLARC - * to 1b. PCI_EXP_LNKCAP_DLLLARC implies PCI_EXP_LNKSTA_DLLLA, which - * we also hardwire to 1b here. 2.5GT/s hot-plug slots should also - * technically implement this, but it's not done here for compatibility. - */ - pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP, - PCI_EXP_LNKCAP_DLLLARC); - /* the PCI_EXP_LNKSTA_DLLLA will be set in the hotplug function */ + QEMU_PCI_EXP_LNKCAP_MLW(width) | + QEMU_PCI_EXP_LNKCAP_MLS(speed)); + if (speed > QEMU_PCI_EXP_LNK_2_5GT) { /* * Target Link Speed defaults to the highest link speed supported by * the component. 2.5GT/s devices are permitted to hardwire to zero. @@ -152,7 +130,7 @@ static void pcie_cap_fill_slot_lnk(PCIDevice *dev) pci_word_test_and_clear_mask(exp_cap + PCI_EXP_LNKCTL2, PCI_EXP_LNKCTL2_TLS); pci_word_test_and_set_mask(exp_cap + PCI_EXP_LNKCTL2, - QEMU_PCI_EXP_LNKCAP_MLS(s->speed) & + QEMU_PCI_EXP_LNKCAP_MLS(speed) & PCI_EXP_LNKCTL2_TLS); } @@ -161,27 +139,82 @@ static void pcie_cap_fill_slot_lnk(PCIDevice *dev) * actually a reference to the highest bit supported in this register. * We assume the device supports all link speeds. 
*/ - if (s->speed > QEMU_PCI_EXP_LNK_5GT) { + if (speed > QEMU_PCI_EXP_LNK_5GT) { pci_long_test_and_clear_mask(exp_cap + PCI_EXP_LNKCAP2, ~0U); pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP2, PCI_EXP_LNKCAP2_SLS_2_5GB | PCI_EXP_LNKCAP2_SLS_5_0GB | PCI_EXP_LNKCAP2_SLS_8_0GB); - if (s->speed > QEMU_PCI_EXP_LNK_8GT) { + if (speed > QEMU_PCI_EXP_LNK_8GT) { pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP2, PCI_EXP_LNKCAP2_SLS_16_0GB); } - if (s->speed > QEMU_PCI_EXP_LNK_16GT) { + if (speed > QEMU_PCI_EXP_LNK_16GT) { pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP2, PCI_EXP_LNKCAP2_SLS_32_0GB); } - if (s->speed > QEMU_PCI_EXP_LNK_32GT) { + if (speed > QEMU_PCI_EXP_LNK_32GT) { pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP2, PCI_EXP_LNKCAP2_SLS_64_0GB); } } } +void pcie_cap_fill_link_ep_usp(PCIDevice *dev, PCIExpLinkWidth width, + PCIExpLinkSpeed speed) +{ + uint8_t *exp_cap = dev->config + dev->exp.exp_cap; + + /* + * For an end point or USP need to set the current status as well + * as the capabilities. + */ + pci_long_test_and_clear_mask(exp_cap + PCI_EXP_LNKSTA, + PCI_EXP_LNKSTA_CLS | PCI_EXP_LNKSTA_NLW); + pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKSTA, + QEMU_PCI_EXP_LNKSTA_NLW(width) | + QEMU_PCI_EXP_LNKSTA_CLS(speed)); + + pcie_cap_fill_lnk(exp_cap, width, speed); +} + +static void pcie_cap_fill_slot_lnk(PCIDevice *dev) +{ + PCIESlot *s = (PCIESlot *)object_dynamic_cast(OBJECT(dev), TYPE_PCIE_SLOT); + uint8_t *exp_cap = dev->config + dev->exp.exp_cap; + + /* Skip anything that isn't a PCIESlot */ + if (!s) { + return; + } + + /* + * Link bandwidth notification is required for all root ports and + * downstream ports supporting links wider than x1 or multiple link + * speeds. + */ + if (s->width > QEMU_PCI_EXP_LNK_X1 || + s->speed > QEMU_PCI_EXP_LNK_2_5GT) { + pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP, + PCI_EXP_LNKCAP_LBNC); + } + + if (s->speed > QEMU_PCI_EXP_LNK_2_5GT) { + /* + * Hot-plug capable downstream ports and downstream ports supporting + * link speeds greater than 5GT/s must hardwire PCI_EXP_LNKCAP_DLLLARC + * to 1b. PCI_EXP_LNKCAP_DLLLARC implies PCI_EXP_LNKSTA_DLLLA, which + * we also hardwire to 1b here. 2.5GT/s hot-plug slots should also + * technically implement this, but it's not done here for compatibility. 
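
For link speeds above 5GT/s, pcie_cap_fill_lnk() above turns on every Supported Link Speeds bit in LNKCAP2 up to and including the configured maximum. A sketch of that cumulative construction with illustrative speed and bit names (the real register bit values live in the PCIe spec and pci_regs.h); the sketch applies the rule uniformly, whereas the patch leaves LNKCAP2 untouched for 2.5/5GT/s-only devices, which the spec permits:

#include <assert.h>
#include <stdint.h>

/* Illustrative generation ordering, not the real macros. */
enum link_speed { SPD_2_5GT = 1, SPD_5GT, SPD_8GT, SPD_16GT, SPD_32GT, SPD_64GT };

static uint32_t supported_speeds_vector(enum link_speed max)
{
    uint32_t v = 0;

    /* each generation implies support for all the slower ones */
    for (int s = SPD_2_5GT; s <= max; s++) {
        v |= 1u << (s - 1);
    }
    return v;
}

int main(void)
{
    assert(supported_speeds_vector(SPD_2_5GT) == 0x01);
    assert(supported_speeds_vector(SPD_16GT) == 0x0f);
    assert(supported_speeds_vector(SPD_64GT) == 0x3f);
    return 0;
}
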
+ */ + pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP, + PCI_EXP_LNKCAP_DLLLARC); + /* the PCI_EXP_LNKSTA_DLLLA will be set in the hotplug function */ + } + + pcie_cap_fill_lnk(exp_cap, s->width, s->speed); +} + int pcie_cap_init(PCIDevice *dev, uint8_t offset, uint8_t type, uint8_t port, Error **errp) diff --git a/hw/ppc/e500.c b/hw/ppc/e500.c index b760c6d6a2..46261223f3 100644 --- a/hw/ppc/e500.c +++ b/hw/ppc/e500.c @@ -203,6 +203,8 @@ static void dt_i2c_create(void *fdt, const char *soc, const char *mpic, qemu_fdt_setprop_cells(fdt, i2c, "cell-index", 0); qemu_fdt_setprop_cells(fdt, i2c, "interrupts", irq0, 0x2); qemu_fdt_setprop_phandle(fdt, i2c, "interrupt-parent", mpic); + qemu_fdt_setprop_cell(fdt, i2c, "#size-cells", 0); + qemu_fdt_setprop_cell(fdt, i2c, "#address-cells", 1); qemu_fdt_setprop_string(fdt, "/aliases", alias, i2c); g_free(i2c); @@ -721,11 +723,21 @@ static int ppce500_prep_device_tree(PPCE500MachineState *machine, kernel_base, kernel_size, true); } -hwaddr booke206_page_size_to_tlb(uint64_t size) +static hwaddr booke206_page_size_to_tlb(uint64_t size) { return 63 - clz64(size / KiB); } +void booke206_set_tlb(ppcmas_tlb_t *tlb, target_ulong va, hwaddr pa, + hwaddr len) +{ + tlb->mas1 = booke206_page_size_to_tlb(len) << MAS1_TSIZE_SHIFT; + tlb->mas1 |= MAS1_VALID; + tlb->mas2 = va & TARGET_PAGE_MASK; + tlb->mas7_3 = pa & TARGET_PAGE_MASK; + tlb->mas7_3 |= MAS3_UR | MAS3_UW | MAS3_UX | MAS3_SR | MAS3_SW | MAS3_SX; +} + static int booke206_initial_map_tsize(CPUPPCState *env) { struct boot_info *bi = env->load_info; @@ -751,25 +763,6 @@ static uint64_t mmubooke_initial_mapsize(CPUPPCState *env) return (1ULL << 10 << tsize); } -/* Create -kernel TLB entries for BookE. */ -static void mmubooke_create_initial_mapping(CPUPPCState *env) -{ - ppcmas_tlb_t *tlb = booke206_get_tlbm(env, 1, 0, 0); - hwaddr size; - int ps; - - ps = booke206_initial_map_tsize(env); - size = (ps << MAS1_TSIZE_SHIFT); - tlb->mas1 = MAS1_VALID | size; - tlb->mas2 = 0; - tlb->mas7_3 = 0; - tlb->mas7_3 |= MAS3_UR | MAS3_UW | MAS3_UX | MAS3_SR | MAS3_SW | MAS3_SX; - -#ifdef CONFIG_KVM - env->tlb_dirty = true; -#endif -} - static void ppce500_cpu_reset_sec(void *opaque) { PowerPCCPU *cpu = opaque; @@ -786,6 +779,8 @@ static void ppce500_cpu_reset(void *opaque) CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; struct boot_info *bi = env->load_info; + uint64_t map_size = mmubooke_initial_mapsize(env); + ppcmas_tlb_t *tlb = booke206_get_tlbm(env, 1, 0, 0); cpu_reset(cs); @@ -796,11 +791,15 @@ static void ppce500_cpu_reset(void *opaque) env->gpr[4] = 0; env->gpr[5] = 0; env->gpr[6] = EPAPR_MAGIC; - env->gpr[7] = mmubooke_initial_mapsize(env); + env->gpr[7] = map_size; env->gpr[8] = 0; env->gpr[9] = 0; env->nip = bi->entry; - mmubooke_create_initial_mapping(env); + /* create initial mapping */ + booke206_set_tlb(tlb, 0, 0, map_size); +#ifdef CONFIG_KVM + env->tlb_dirty = true; +#endif } static DeviceState *ppce500_init_mpic_qemu(PPCE500MachineState *pms, @@ -832,7 +831,7 @@ static DeviceState *ppce500_init_mpic_qemu(PPCE500MachineState *pms, } static DeviceState *ppce500_init_mpic_kvm(const PPCE500MachineClass *pmc, - IrqLines *irqs, Error **errp) + Error **errp) { #ifdef CONFIG_KVM DeviceState *dev; @@ -872,7 +871,7 @@ static DeviceState *ppce500_init_mpic(PPCE500MachineState *pms, Error *err = NULL; if (kvm_kernel_irqchip_allowed()) { - dev = ppce500_init_mpic_kvm(pmc, irqs, &err); + dev = ppce500_init_mpic_kvm(pmc, &err); } if (kvm_kernel_irqchip_required() && !dev) { error_reportf_err(err, @@ 
-1024,7 +1023,7 @@ void ppce500_init(MachineState *machine) sysbus_connect_irq(s, 0, qdev_get_gpio_in(mpicdev, MPC8544_I2C_IRQ)); memory_region_add_subregion(ccsr_addr_space, MPC8544_I2C_REGS_OFFSET, sysbus_mmio_get_region(s, 0)); - i2c = (I2CBus *)qdev_get_child_bus(dev, "i2c"); + i2c = I2C_BUS(qdev_get_child_bus(dev, "i2c")); i2c_slave_create_simple(i2c, "ds1338", RTC_REGS_OFFSET); /* eSDHC */ @@ -1073,7 +1072,7 @@ void ppce500_init(MachineState *machine) memory_region_add_subregion(ccsr_addr_space, MPC8544_PCI_REGS_OFFSET, sysbus_mmio_get_region(s, 0)); - pci_bus = (PCIBus *)qdev_get_child_bus(dev, "pci.0"); + pci_bus = PCI_BUS(qdev_get_child_bus(dev, "pci.0")); if (!pci_bus) printf("couldn't create PCI controller!\n"); diff --git a/hw/ppc/e500.h b/hw/ppc/e500.h index 8c09ef92e4..01db102625 100644 --- a/hw/ppc/e500.h +++ b/hw/ppc/e500.h @@ -41,8 +41,6 @@ struct PPCE500MachineClass { void ppce500_init(MachineState *machine); -hwaddr booke206_page_size_to_tlb(uint64_t size); - #define TYPE_PPCE500_MACHINE "ppce500-base-machine" OBJECT_DECLARE_TYPE(PPCE500MachineState, PPCE500MachineClass, PPCE500_MACHINE) diff --git a/hw/ppc/mpc8544_guts.c b/hw/ppc/mpc8544_guts.c index e3540b0281..e3c51458e6 100644 --- a/hw/ppc/mpc8544_guts.c +++ b/hw/ppc/mpc8544_guts.c @@ -18,7 +18,6 @@ */ #include "qemu/osdep.h" -#include "qemu/module.h" #include "qemu/log.h" #include "sysemu/runstate.h" #include "cpu.h" @@ -29,6 +28,12 @@ #define MPC8544_GUTS_RSTCR_RESET 0x02 #define MPC8544_GUTS_ADDR_PORPLLSR 0x00 +REG32(GUTS_PORPLLSR, 0x00) + FIELD(GUTS_PORPLLSR, E500_1_RATIO, 24, 6) + FIELD(GUTS_PORPLLSR, E500_0_RATIO, 16, 6) + FIELD(GUTS_PORPLLSR, DDR_RATIO, 9, 5) + FIELD(GUTS_PORPLLSR, PLAT_RATIO, 1, 5) + #define MPC8544_GUTS_ADDR_PORBMSR 0x04 #define MPC8544_GUTS_ADDR_PORIMPSCR 0x08 #define MPC8544_GUTS_ADDR_PORDEVSR 0x0C @@ -75,6 +80,12 @@ static uint64_t mpc8544_guts_read(void *opaque, hwaddr addr, addr &= MPC8544_GUTS_MMIO_SIZE - 1; switch (addr) { + case MPC8544_GUTS_ADDR_PORPLLSR: + value = FIELD_DP32(value, GUTS_PORPLLSR, E500_1_RATIO, 6); /* 3:1 */ + value = FIELD_DP32(value, GUTS_PORPLLSR, E500_0_RATIO, 6); /* 3:1 */ + value = FIELD_DP32(value, GUTS_PORPLLSR, DDR_RATIO, 12); /* 12:1 */ + value = FIELD_DP32(value, GUTS_PORPLLSR, PLAT_RATIO, 6); /* 6:1 */ + break; case MPC8544_GUTS_ADDR_PVR: value = env->spr[SPR_PVR]; break; @@ -129,16 +140,13 @@ static void mpc8544_guts_initfn(Object *obj) sysbus_init_mmio(d, &s->iomem); } -static const TypeInfo mpc8544_guts_info = { - .name = TYPE_MPC8544_GUTS, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(GutsState), - .instance_init = mpc8544_guts_initfn, +static const TypeInfo mpc8544_guts_types[] = { + { + .name = TYPE_MPC8544_GUTS, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(GutsState), + .instance_init = mpc8544_guts_initfn, + }, }; -static void mpc8544_guts_register_types(void) -{ - type_register_static(&mpc8544_guts_info); -} - -type_init(mpc8544_guts_register_types) +DEFINE_TYPES(mpc8544_guts_types) diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c index 795acc289f..f0f0d7567d 100644 --- a/hw/ppc/pnv.c +++ b/hw/ppc/pnv.c @@ -736,21 +736,27 @@ static void pnv_reset(MachineState *machine, ResetType type) } } - fdt = pnv_dt_create(machine); - - /* Pack resulting tree */ - _FDT((fdt_pack(fdt))); + if (machine->fdt) { + fdt = machine->fdt; + } else { + fdt = pnv_dt_create(machine); + /* Pack resulting tree */ + _FDT((fdt_pack(fdt))); + } qemu_fdt_dumpdtb(fdt, fdt_totalsize(fdt)); cpu_physical_memory_write(PNV_FDT_ADDR, fdt, fdt_totalsize(fdt)); - /* 
- * Set machine->fdt for 'dumpdtb' QMP/HMP command. Free - * the existing machine->fdt to avoid leaking it during - * a reset. - */ - g_free(machine->fdt); - machine->fdt = fdt; + /* Update machine->fdt with latest fdt */ + if (machine->fdt != fdt) { + /* + * Set machine->fdt for 'dumpdtb' QMP/HMP command. Free + * the existing machine->fdt to avoid leaking it during + * a reset. + */ + g_free(machine->fdt); + machine->fdt = fdt; + } } static ISABus *pnv_chip_power8_isa_create(PnvChip *chip, Error **errp) @@ -952,6 +958,14 @@ static void pnv_init(MachineState *machine) g_free(sz); exit(EXIT_FAILURE); } + + /* checks for invalid option combinations */ + if (machine->dtb && (strlen(machine->kernel_cmdline) != 0)) { + error_report("-append and -dtb cannot be used together, as passed" + " command line is ignored in case of custom dtb"); + exit(EXIT_FAILURE); + } + memory_region_add_subregion(get_system_memory(), 0, machine->ram); /* @@ -1003,6 +1017,21 @@ static void pnv_init(MachineState *machine) } } + /* load dtb if passed */ + if (machine->dtb) { + int fdt_size; + + warn_report("with manually passed dtb, some options like '-append'" + " will get ignored and the dtb passed will be used as-is"); + + /* read the file 'machine->dtb', and load it into 'fdt' buffer */ + machine->fdt = load_device_tree(machine->dtb, &fdt_size); + if (!machine->fdt) { + error_report("Could not load dtb '%s'", machine->dtb); + exit(1); + } + } + /* MSIs are supported on this platform */ msi_nonbroken = true; diff --git a/hw/ppc/pnv_adu.c b/hw/ppc/pnv_adu.c index 81b7d6e526..f636dedf79 100644 --- a/hw/ppc/pnv_adu.c +++ b/hw/ppc/pnv_adu.c @@ -116,6 +116,12 @@ static void pnv_adu_xscom_write(void *opaque, hwaddr addr, uint64_t val, uint32_t lpc_size = lpc_cmd_size(adu); uint64_t data = 0; + if (!is_power_of_2(lpc_size) || lpc_size > sizeof(data)) { + qemu_log_mask(LOG_GUEST_ERROR, "ADU: Unsupported LPC access " + "size:%" PRId32 "\n", lpc_size); + break; + } + pnv_lpc_opb_read(adu->lpc, lpc_addr, (void *)&data, lpc_size); /* @@ -135,6 +141,12 @@ static void pnv_adu_xscom_write(void *opaque, hwaddr addr, uint64_t val, uint32_t lpc_size = lpc_cmd_size(adu); uint64_t data; + if (!is_power_of_2(lpc_size) || lpc_size > sizeof(data)) { + qemu_log_mask(LOG_GUEST_ERROR, "ADU: Unsupported LPC access " + "size:%" PRId32 "\n", lpc_size); + break; + } + data = cpu_to_be64(val) >> ((lpc_addr & 7) * 8); /* See above */ pnv_lpc_opb_write(adu->lpc, lpc_addr, (void *)&data, lpc_size); } diff --git a/hw/ppc/pnv_lpc.c b/hw/ppc/pnv_lpc.c index f8aad955b5..8c203d2059 100644 --- a/hw/ppc/pnv_lpc.c +++ b/hw/ppc/pnv_lpc.c @@ -427,21 +427,27 @@ static void pnv_lpc_eval_serirq_routes(PnvLpcController *lpc) int irq; if (!lpc->psi_has_serirq) { - if ((lpc->opb_irq_route0 & PPC_BITMASK(8, 13)) || - (lpc->opb_irq_route1 & PPC_BITMASK(4, 31))) { + if ((lpc->opb_irq_route0 & PPC_BITMASK32(8, 13)) || + (lpc->opb_irq_route1 & PPC_BITMASK32(4, 31))) { qemu_log_mask(LOG_GUEST_ERROR, "OPB: setting serirq routing on POWER8 system, ignoring.\n"); } return; } + /* + * Each of the ISA irqs is routed to one of the 4 SERIRQ irqs with 2 + * bits, split across 2 OPB registers. 
+ */ for (irq = 0; irq <= 13; irq++) { - int serirq = (lpc->opb_irq_route1 >> (31 - 5 - (irq * 2))) & 0x3; + int serirq = extract32(lpc->opb_irq_route1, + PPC_BIT32_NR(5 + irq * 2), 2); lpc->irq_to_serirq_route[irq] = serirq; } for (irq = 14; irq < ISA_NUM_IRQS; irq++) { - int serirq = (lpc->opb_irq_route0 >> (31 - 9 - (irq * 2))) & 0x3; + int serirq = extract32(lpc->opb_irq_route0, + PPC_BIT32_NR(9 + (irq - 14) * 2), 2); lpc->irq_to_serirq_route[irq] = serirq; } } diff --git a/hw/ppc/ppc.c b/hw/ppc/ppc.c index fde4619412..b86b5847de 100644 --- a/hw/ppc/ppc.c +++ b/hw/ppc/ppc.c @@ -728,7 +728,9 @@ static inline int64_t __cpu_ppc_load_decr(CPUPPCState *env, int64_t now, int64_t decr; n = ns_to_tb(tb_env->decr_freq, now); - if (next > n && tb_env->flags & PPC_TIMER_BOOKE) { + + /* BookE timers stop when reaching 0. */ + if (next < n && tb_env->flags & PPC_TIMER_BOOKE) { decr = 0; } else { decr = next - n; diff --git a/hw/ppc/ppc440_bamboo.c b/hw/ppc/ppc440_bamboo.c index 96d9ce65c2..a55f108434 100644 --- a/hw/ppc/ppc440_bamboo.c +++ b/hw/ppc/ppc440_bamboo.c @@ -110,29 +110,6 @@ static int bamboo_load_device_tree(MachineState *machine, return 0; } -/* Create reset TLB entries for BookE, spanning the 32bit addr space. */ -static void mmubooke_create_initial_mapping(CPUPPCState *env, - target_ulong va, - hwaddr pa) -{ - ppcemb_tlb_t *tlb = &env->tlb.tlbe[0]; - - tlb->attr = 0; - tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4); - tlb->size = 1U << 31; /* up to 0x80000000 */ - tlb->EPN = va & TARGET_PAGE_MASK; - tlb->RPN = pa & TARGET_PAGE_MASK; - tlb->PID = 0; - - tlb = &env->tlb.tlbe[1]; - tlb->attr = 0; - tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4); - tlb->size = 1U << 31; /* up to 0xffffffff */ - tlb->EPN = 0x80000000 & TARGET_PAGE_MASK; - tlb->RPN = 0x80000000 & TARGET_PAGE_MASK; - tlb->PID = 0; -} - static void main_cpu_reset(void *opaque) { PowerPCCPU *cpu = opaque; @@ -143,8 +120,9 @@ static void main_cpu_reset(void *opaque) env->gpr[3] = FDT_ADDR; env->nip = entry; - /* Create a mapping for the kernel. */ - mmubooke_create_initial_mapping(env, 0, 0); + /* Create a mapping spanning the 32bit addr space. 
*/ + booke_set_tlb(&env->tlb.tlbe[0], 0, 0, 1U << 31); + booke_set_tlb(&env->tlb.tlbe[1], 0x80000000, 0x80000000, 1U << 31); } static void bamboo_init(MachineState *machine) diff --git a/hw/ppc/ppc_booke.c b/hw/ppc/ppc_booke.c index ca22da196a..c8849e66ff 100644 --- a/hw/ppc/ppc_booke.c +++ b/hw/ppc/ppc_booke.c @@ -31,6 +31,16 @@ #include "hw/loader.h" #include "kvm_ppc.h" +void booke_set_tlb(ppcemb_tlb_t *tlb, target_ulong va, hwaddr pa, + target_ulong size) +{ + tlb->attr = 0; + tlb->prot = PAGE_RWX << 4 | PAGE_VALID; + tlb->size = size; + tlb->EPN = va & TARGET_PAGE_MASK; + tlb->RPN = pa & TARGET_PAGE_MASK; + tlb->PID = 0; +} /* Timer Control Register */ diff --git a/hw/ppc/ppce500_spin.c b/hw/ppc/ppce500_spin.c index e08739a443..93b16320d4 100644 --- a/hw/ppc/ppce500_spin.c +++ b/hw/ppc/ppce500_spin.c @@ -33,6 +33,7 @@ #include "hw/hw.h" #include "hw/sysbus.h" #include "sysemu/hw_accel.h" +#include "hw/ppc/ppc.h" #include "e500.h" #include "qom/object.h" @@ -70,30 +71,12 @@ static void spin_reset(DeviceState *dev) } } -static void mmubooke_create_initial_mapping(CPUPPCState *env, - target_ulong va, - hwaddr pa, - hwaddr len) -{ - ppcmas_tlb_t *tlb = booke206_get_tlbm(env, 1, 0, 1); - hwaddr size; - - size = (booke206_page_size_to_tlb(len) << MAS1_TSIZE_SHIFT); - tlb->mas1 = MAS1_VALID | size; - tlb->mas2 = (va & TARGET_PAGE_MASK) | MAS2_M; - tlb->mas7_3 = pa & TARGET_PAGE_MASK; - tlb->mas7_3 |= MAS3_UR | MAS3_UW | MAS3_UX | MAS3_SR | MAS3_SW | MAS3_SX; -#ifdef CONFIG_KVM - env->tlb_dirty = true; -#endif -} - static void spin_kick(CPUState *cs, run_on_cpu_data data) { CPUPPCState *env = cpu_env(cs); SpinInfo *curspin = data.host_ptr; - hwaddr map_size = 64 * MiB; - hwaddr map_start; + hwaddr map_start, map_size = 64 * MiB; + ppcmas_tlb_t *tlb = booke206_get_tlbm(env, 1, 0, 1); cpu_synchronize_state(cs); stl_p(&curspin->pir, env->spr[SPR_BOOKE_PIR]); @@ -107,7 +90,12 @@ static void spin_kick(CPUState *cs, run_on_cpu_data data) env->gpr[9] = 0; map_start = ldq_p(&curspin->addr) & ~(map_size - 1); - mmubooke_create_initial_mapping(env, 0, map_start, map_size); + /* create initial mapping */ + booke206_set_tlb(tlb, 0, map_start, map_size); + tlb->mas2 |= MAS2_M; +#ifdef CONFIG_KVM + env->tlb_dirty = true; +#endif cs->halted = 0; cs->exception_index = -1; diff --git a/hw/ppc/sam460ex.c b/hw/ppc/sam460ex.c index 1fce093ac8..78e2a46e75 100644 --- a/hw/ppc/sam460ex.c +++ b/hw/ppc/sam460ex.c @@ -213,38 +213,6 @@ static int sam460ex_load_device_tree(MachineState *machine, return fdt_size; } -/* Create reset TLB entries for BookE, mapping only the flash memory. */ -static void mmubooke_create_initial_mapping_uboot(CPUPPCState *env) -{ - ppcemb_tlb_t *tlb = &env->tlb.tlbe[0]; - - /* on reset the flash is mapped by a shadow TLB, - * but since we don't implement them we need to use - * the same values U-Boot will use to avoid a fault. - */ - tlb->attr = 0; - tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4); - tlb->size = 0x10000000; /* up to 0xffffffff */ - tlb->EPN = 0xf0000000 & TARGET_PAGE_MASK; - tlb->RPN = (0xf0000000 & TARGET_PAGE_MASK) | 0x4; - tlb->PID = 0; -} - -/* Create reset TLB entries for BookE, spanning the 32bit addr space. 
*/ -static void mmubooke_create_initial_mapping(CPUPPCState *env, - target_ulong va, - hwaddr pa) -{ - ppcemb_tlb_t *tlb = &env->tlb.tlbe[0]; - - tlb->attr = 0; - tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4); - tlb->size = 1 << 31; /* up to 0x80000000 */ - tlb->EPN = va & TARGET_PAGE_MASK; - tlb->RPN = pa & TARGET_PAGE_MASK; - tlb->PID = 0; -} - static void main_cpu_reset(void *opaque) { PowerPCCPU *cpu = opaque; @@ -253,20 +221,27 @@ static void main_cpu_reset(void *opaque) cpu_reset(CPU(cpu)); - /* either we have a kernel to boot or we jump to U-Boot */ + /* + * On reset the flash is mapped by a shadow TLB, but since we + * don't implement them we need to use the same values U-Boot + * will use to avoid a fault. + * either we have a kernel to boot or we jump to U-Boot + */ if (bi->entry != UBOOT_ENTRY) { env->gpr[1] = (16 * MiB) - 8; env->gpr[3] = FDT_ADDR; env->nip = bi->entry; /* Create a mapping for the kernel. */ - mmubooke_create_initial_mapping(env, 0, 0); + booke_set_tlb(&env->tlb.tlbe[0], 0, 0, 1 << 31); env->gpr[6] = tswap32(EPAPR_MAGIC); env->gpr[7] = (16 * MiB) - 8; /* bi->ima_size; */ } else { env->nip = UBOOT_ENTRY; - mmubooke_create_initial_mapping_uboot(env); + /* Create a mapping for U-Boot. */ + booke_set_tlb(&env->tlb.tlbe[0], 0xf0000000, 0xf0000000, 0x10000000); + env->tlb.tlbe[0].RPN |= 4; } } diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index 2c10a70a48..5c02037c56 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -132,61 +132,6 @@ static bool spapr_is_thread0_in_vcore(SpaprMachineState *spapr, return spapr_get_vcpu_id(cpu) % spapr->vsmt == 0; } -static bool pre_2_10_vmstate_dummy_icp_needed(void *opaque) -{ - /* Dummy entries correspond to unused ICPState objects in older QEMUs, - * and newer QEMUs don't even have them. In both cases, we don't want - * to send anything on the wire. - */ - return false; -} - -static const VMStateDescription pre_2_10_vmstate_dummy_icp = { - /* - * Hack ahead. We can't have two devices with the same name and - * instance id. So I rename this to pass make check. - * Real help from people who knows the hardware is needed. - */ - .name = "icp/server", - .version_id = 1, - .minimum_version_id = 1, - .needed = pre_2_10_vmstate_dummy_icp_needed, - .fields = (const VMStateField[]) { - VMSTATE_UNUSED(4), /* uint32_t xirr */ - VMSTATE_UNUSED(1), /* uint8_t pending_priority */ - VMSTATE_UNUSED(1), /* uint8_t mfrr */ - VMSTATE_END_OF_LIST() - }, -}; - -/* - * See comment in hw/intc/xics.c:icp_realize() - * - * You have to remove vmstate_replace_hack_for_ppc() when you remove - * the machine types that need the following function. - */ -static void pre_2_10_vmstate_register_dummy_icp(int i) -{ - vmstate_register(NULL, i, &pre_2_10_vmstate_dummy_icp, - (void *)(uintptr_t) i); -} - -/* - * See comment in hw/intc/xics.c:icp_realize() - * - * You have to remove vmstate_replace_hack_for_ppc() when you remove - * the machine types that need the following function. 
- */ -static void pre_2_10_vmstate_unregister_dummy_icp(int i) -{ - /* - * This used to be: - * - * vmstate_unregister(NULL, &pre_2_10_vmstate_dummy_icp, - * (void *)(uintptr_t) i); - */ -} - int spapr_max_server_number(SpaprMachineState *spapr) { MachineState *ms = MACHINE(spapr); @@ -682,7 +627,6 @@ static int spapr_dt_dynamic_reconfiguration_memory(SpaprMachineState *spapr, static int spapr_dt_memory(SpaprMachineState *spapr, void *fdt) { MachineState *machine = MACHINE(spapr); - SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); hwaddr mem_start, node_size; int i, nb_nodes = machine->numa_state->num_nodes; NodeInfo *nodes = machine->numa_state->nodes; @@ -724,7 +668,6 @@ static int spapr_dt_memory(SpaprMachineState *spapr, void *fdt) if (spapr_ovec_test(spapr->ov5_cas, OV5_DRCONF_MEMORY)) { int ret; - g_assert(smc->dr_lmb_enabled); ret = spapr_dt_dynamic_reconfiguration_memory(spapr, fdt); if (ret) { return ret; @@ -1307,9 +1250,7 @@ void *spapr_build_fdt(SpaprMachineState *spapr, bool reset, size_t space) spapr_dt_cpus(fdt, spapr); /* ibm,drc-indexes and friends */ - if (smc->dr_lmb_enabled) { - root_drc_type_mask |= SPAPR_DR_CONNECTOR_TYPE_LMB; - } + root_drc_type_mask |= SPAPR_DR_CONNECTOR_TYPE_LMB; if (smc->dr_phb_enabled) { root_drc_type_mask |= SPAPR_DR_CONNECTOR_TYPE_PHB; } @@ -2715,7 +2656,6 @@ static void spapr_init_cpus(SpaprMachineState *spapr) { MachineState *machine = MACHINE(spapr); MachineClass *mc = MACHINE_GET_CLASS(machine); - SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine); const char *type = spapr_get_cpu_core_type(machine->cpu_type); const CPUArchIdList *possible_cpus; unsigned int smp_cpus = machine->smp.cpus; @@ -2744,15 +2684,6 @@ static void spapr_init_cpus(SpaprMachineState *spapr) boot_cores_nr = possible_cpus->len; } - if (smc->pre_2_10_has_unused_icps) { - for (i = 0; i < spapr_max_server_number(spapr); i++) { - /* Dummy entries get deregistered when real ICPState objects - * are registered during CPU core hotplug. 
- */ - pre_2_10_vmstate_register_dummy_icp(i); - } - } - for (i = 0; i < possible_cpus->len; i++) { int core_id = i * smp_threads; @@ -2929,10 +2860,8 @@ static void spapr_machine_init(MachineState *machine) spapr->ov5 = spapr_ovec_new(); spapr->ov5_cas = spapr_ovec_new(); - if (smc->dr_lmb_enabled) { - spapr_ovec_set(spapr->ov5, OV5_DRCONF_MEMORY); - spapr_validate_node_memory(machine, &error_fatal); - } + spapr_ovec_set(spapr->ov5, OV5_DRCONF_MEMORY); + spapr_validate_node_memory(machine, &error_fatal); spapr_ovec_set(spapr->ov5, OV5_FORM1_AFFINITY); @@ -3016,9 +2945,7 @@ static void spapr_machine_init(MachineState *machine) machine_memory_devices_init(machine, device_mem_base, device_mem_size); } - if (smc->dr_lmb_enabled) { - spapr_create_lmb_dr_connectors(spapr); - } + spapr_create_lmb_dr_connectors(spapr); if (mc->nvdimm_supported) { spapr_create_nvdimm_dr_connectors(spapr); @@ -3078,11 +3005,7 @@ static void spapr_machine_init(MachineState *machine) } if (machine->usb) { - if (smc->use_ohci_by_default) { - pci_create_simple(phb->bus, -1, "pci-ohci"); - } else { - pci_create_simple(phb->bus, -1, "nec-usb-xhci"); - } + pci_create_simple(phb->bus, -1, "nec-usb-xhci"); if (has_vga) { USBBus *usb_bus; @@ -3662,7 +3585,6 @@ static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev) static void spapr_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { - const SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(hotplug_dev); SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev); bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM); PCDIMMDevice *dimm = PC_DIMM(dev); @@ -3671,11 +3593,6 @@ static void spapr_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, Object *memdev; hwaddr pagesize; - if (!smc->dr_lmb_enabled) { - error_setg(errp, "Memory hotplug not supported for this machine"); - return; - } - size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &local_err); if (local_err) { error_propagate(errp, local_err); @@ -3932,21 +3849,9 @@ void spapr_core_release(DeviceState *dev) static void spapr_core_unplug(HotplugHandler *hotplug_dev, DeviceState *dev) { MachineState *ms = MACHINE(hotplug_dev); - SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(ms); CPUCore *cc = CPU_CORE(dev); CPUArchId *core_slot = spapr_find_cpu_slot(ms, cc->core_id, NULL); - if (smc->pre_2_10_has_unused_icps) { - SpaprCpuCore *sc = SPAPR_CPU_CORE(OBJECT(dev)); - int i; - - for (i = 0; i < cc->nr_threads; i++) { - CPUState *cs = CPU(sc->threads[i]); - - pre_2_10_vmstate_register_dummy_icp(cs->cpu_index); - } - } - assert(core_slot); core_slot->cpu = NULL; qdev_unrealize(dev); @@ -4027,7 +3932,6 @@ static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev) { SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); MachineClass *mc = MACHINE_GET_CLASS(spapr); - SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); SpaprCpuCore *core = SPAPR_CPU_CORE(OBJECT(dev)); CPUCore *cc = CPU_CORE(dev); SpaprDrc *drc; @@ -4077,12 +3981,6 @@ static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev) } } - if (smc->pre_2_10_has_unused_icps) { - for (i = 0; i < cc->nr_threads; i++) { - CPUState *cs = CPU(core->threads[i]); - pre_2_10_vmstate_unregister_dummy_icp(cs->cpu_index); - } - } } static void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, @@ -4713,7 +4611,6 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data) hc->unplug_request = spapr_machine_device_unplug_request; hc->unplug = 
spapr_machine_device_unplug; - smc->dr_lmb_enabled = true; smc->update_dt_enabled = true; mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power10_v2.0"); mc->has_hotpluggable_cpus = true; @@ -4834,8 +4731,6 @@ static void spapr_machine_latest_class_options(MachineClass *mc) DEFINE_SPAPR_MACHINE_IMPL(true, major, minor) #define DEFINE_SPAPR_MACHINE(major, minor) \ DEFINE_SPAPR_MACHINE_IMPL(false, major, minor) -#define DEFINE_SPAPR_MACHINE_TAGGED(major, minor, tag) \ - DEFINE_SPAPR_MACHINE_IMPL(false, major, minor, _, tag) /* * pseries-9.2 @@ -5120,278 +5015,6 @@ static void spapr_machine_3_0_class_options(MachineClass *mc) DEFINE_SPAPR_MACHINE(3, 0); -/* - * pseries-2.12 - */ -static void spapr_machine_2_12_class_options(MachineClass *mc) -{ - SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); - static GlobalProperty compat[] = { - { TYPE_POWERPC_CPU, "pre-3.0-migration", "on" }, - { TYPE_SPAPR_CPU_CORE, "pre-3.0-migration", "on" }, - }; - - spapr_machine_3_0_class_options(mc); - compat_props_add(mc->compat_props, hw_compat_2_12, hw_compat_2_12_len); - compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); - - /* We depend on kvm_enabled() to choose a default value for the - * hpt-max-page-size capability. Of course we can't do it here - * because this is too early and the HW accelerator isn't initialized - * yet. Postpone this to machine init (see default_caps_with_cpu()). - */ - smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = 0; -} - -DEFINE_SPAPR_MACHINE(2, 12); - -static void spapr_machine_2_12_sxxm_class_options(MachineClass *mc) -{ - SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); - - spapr_machine_2_12_class_options(mc); - smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_WORKAROUND; - smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_WORKAROUND; - smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_FIXED_CCD; -} - -DEFINE_SPAPR_MACHINE_TAGGED(2, 12, sxxm); - -/* - * pseries-2.11 - */ - -static void spapr_machine_2_11_class_options(MachineClass *mc) -{ - SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); - - spapr_machine_2_12_class_options(mc); - smc->default_caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_ON; - compat_props_add(mc->compat_props, hw_compat_2_11, hw_compat_2_11_len); -} - -DEFINE_SPAPR_MACHINE(2, 11); - -/* - * pseries-2.10 - */ - -static void spapr_machine_2_10_class_options(MachineClass *mc) -{ - spapr_machine_2_11_class_options(mc); - compat_props_add(mc->compat_props, hw_compat_2_10, hw_compat_2_10_len); -} - -DEFINE_SPAPR_MACHINE(2, 10); - -/* - * pseries-2.9 - */ - -static void spapr_machine_2_9_class_options(MachineClass *mc) -{ - SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); - static GlobalProperty compat[] = { - { TYPE_POWERPC_CPU, "pre-2.10-migration", "on" }, - }; - - spapr_machine_2_10_class_options(mc); - compat_props_add(mc->compat_props, hw_compat_2_9, hw_compat_2_9_len); - compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); - smc->pre_2_10_has_unused_icps = true; - smc->resize_hpt_default = SPAPR_RESIZE_HPT_DISABLED; -} - -DEFINE_SPAPR_MACHINE(2, 9); - -/* - * pseries-2.8 - */ - -static void spapr_machine_2_8_class_options(MachineClass *mc) -{ - static GlobalProperty compat[] = { - { TYPE_SPAPR_PCI_HOST_BRIDGE, "pcie-extended-configuration-space", "off" }, - }; - - spapr_machine_2_9_class_options(mc); - compat_props_add(mc->compat_props, hw_compat_2_8, hw_compat_2_8_len); - compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); - mc->numa_mem_align_shift = 23; -} - -DEFINE_SPAPR_MACHINE(2, 8); - -/* - * pseries-2.7 - */ 
- -static bool phb_placement_2_7(SpaprMachineState *spapr, uint32_t index, - uint64_t *buid, hwaddr *pio, - hwaddr *mmio32, hwaddr *mmio64, - unsigned n_dma, uint32_t *liobns, Error **errp) -{ - /* Legacy PHB placement for pseries-2.7 and earlier machine types */ - const uint64_t base_buid = 0x800000020000000ULL; - const hwaddr phb_spacing = 0x1000000000ULL; /* 64 GiB */ - const hwaddr mmio_offset = 0xa0000000; /* 2 GiB + 512 MiB */ - const hwaddr pio_offset = 0x80000000; /* 2 GiB */ - const uint32_t max_index = 255; - const hwaddr phb0_alignment = 0x10000000000ULL; /* 1 TiB */ - - uint64_t ram_top = MACHINE(spapr)->ram_size; - hwaddr phb0_base, phb_base; - int i; - - /* Do we have device memory? */ - if (MACHINE(spapr)->device_memory) { - /* Can't just use maxram_size, because there may be an - * alignment gap between normal and device memory regions - */ - ram_top = MACHINE(spapr)->device_memory->base + - memory_region_size(&MACHINE(spapr)->device_memory->mr); - } - - phb0_base = QEMU_ALIGN_UP(ram_top, phb0_alignment); - - if (index > max_index) { - error_setg(errp, "\"index\" for PAPR PHB is too large (max %u)", - max_index); - return false; - } - - *buid = base_buid + index; - for (i = 0; i < n_dma; ++i) { - liobns[i] = SPAPR_PCI_LIOBN(index, i); - } - - phb_base = phb0_base + index * phb_spacing; - *pio = phb_base + pio_offset; - *mmio32 = phb_base + mmio_offset; - /* - * We don't set the 64-bit MMIO window, relying on the PHB's - * fallback behaviour of automatically splitting a large "32-bit" - * window into contiguous 32-bit and 64-bit windows - */ - - return true; -} - -static void spapr_machine_2_7_class_options(MachineClass *mc) -{ - SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); - static GlobalProperty compat[] = { - { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem_win_size", "0xf80000000", }, - { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem64_win_size", "0", }, - { TYPE_POWERPC_CPU, "pre-2.8-migration", "on", }, - { TYPE_SPAPR_PCI_HOST_BRIDGE, "pre-2.8-migration", "on", }, - }; - - spapr_machine_2_8_class_options(mc); - mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power7_v2.3"); - mc->default_machine_opts = "modern-hotplug-events=off"; - compat_props_add(mc->compat_props, hw_compat_2_7, hw_compat_2_7_len); - compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); - smc->phb_placement = phb_placement_2_7; -} - -DEFINE_SPAPR_MACHINE(2, 7); - -/* - * pseries-2.6 - */ - -static void spapr_machine_2_6_class_options(MachineClass *mc) -{ - static GlobalProperty compat[] = { - { TYPE_SPAPR_PCI_HOST_BRIDGE, "ddw", "off" }, - }; - - spapr_machine_2_7_class_options(mc); - mc->has_hotpluggable_cpus = false; - compat_props_add(mc->compat_props, hw_compat_2_6, hw_compat_2_6_len); - compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); -} - -DEFINE_SPAPR_MACHINE(2, 6); - -/* - * pseries-2.5 - */ - -static void spapr_machine_2_5_class_options(MachineClass *mc) -{ - SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); - static GlobalProperty compat[] = { - { "spapr-vlan", "use-rx-buffer-pools", "off" }, - }; - - spapr_machine_2_6_class_options(mc); - smc->use_ohci_by_default = true; - compat_props_add(mc->compat_props, hw_compat_2_5, hw_compat_2_5_len); - compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); -} - -DEFINE_SPAPR_MACHINE(2, 5); - -/* - * pseries-2.4 - */ - -static void spapr_machine_2_4_class_options(MachineClass *mc) -{ - SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); - - spapr_machine_2_5_class_options(mc); - smc->dr_lmb_enabled = false; - 
compat_props_add(mc->compat_props, hw_compat_2_4, hw_compat_2_4_len); -} - -DEFINE_SPAPR_MACHINE(2, 4); - -/* - * pseries-2.3 - */ - -static void spapr_machine_2_3_class_options(MachineClass *mc) -{ - static GlobalProperty compat[] = { - { "spapr-pci-host-bridge", "dynamic-reconfiguration", "off" }, - }; - spapr_machine_2_4_class_options(mc); - compat_props_add(mc->compat_props, hw_compat_2_3, hw_compat_2_3_len); - compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); -} -DEFINE_SPAPR_MACHINE(2, 3); - -/* - * pseries-2.2 - */ - -static void spapr_machine_2_2_class_options(MachineClass *mc) -{ - static GlobalProperty compat[] = { - { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem_win_size", "0x20000000" }, - }; - - spapr_machine_2_3_class_options(mc); - compat_props_add(mc->compat_props, hw_compat_2_2, hw_compat_2_2_len); - compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat)); - mc->default_machine_opts = "modern-hotplug-events=off,suppress-vmdesc=on"; -} -DEFINE_SPAPR_MACHINE(2, 2); - -/* - * pseries-2.1 - */ - -static void spapr_machine_2_1_class_options(MachineClass *mc) -{ - spapr_machine_2_2_class_options(mc); - compat_props_add(mc->compat_props, hw_compat_2_1, hw_compat_2_1_len); -} -DEFINE_SPAPR_MACHINE(2, 1); - static void spapr_machine_register_types(void) { type_register_static(&spapr_machine_info); diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c index 4642245168..ada439e831 100644 --- a/hw/ppc/spapr_cpu_core.c +++ b/hw/ppc/spapr_cpu_core.c @@ -197,9 +197,7 @@ static void spapr_unrealize_vcpu(PowerPCCPU *cpu, SpaprCpuCore *sc) { CPUPPCState *env = &cpu->env; - if (!sc->pre_3_0_migration) { - vmstate_unregister(NULL, &vmstate_spapr_cpu_state, cpu->machine_data); - } + vmstate_unregister(NULL, &vmstate_spapr_cpu_state, cpu->machine_data); spapr_irq_cpu_intc_destroy(SPAPR_MACHINE(qdev_get_machine()), cpu); cpu_ppc_tb_free(env); qdev_unrealize(DEVICE(cpu)); @@ -285,10 +283,8 @@ static bool spapr_realize_vcpu(PowerPCCPU *cpu, SpaprMachineState *spapr, return false; } - if (!sc->pre_3_0_migration) { - vmstate_register(NULL, cs->cpu_index, &vmstate_spapr_cpu_state, - cpu->machine_data); - } + vmstate_register(NULL, cs->cpu_index, &vmstate_spapr_cpu_state, + cpu->machine_data); return true; } @@ -366,8 +362,6 @@ static void spapr_cpu_core_realize(DeviceState *dev, Error **errp) static Property spapr_cpu_core_properties[] = { DEFINE_PROP_INT32("node-id", SpaprCpuCore, node_id, CPU_UNSET_NUMA_NODE_ID), - DEFINE_PROP_BOOL("pre-3.0-migration", SpaprCpuCore, pre_3_0_migration, - false), DEFINE_PROP_END_OF_LIST() }; @@ -411,6 +405,7 @@ static const TypeInfo spapr_cpu_core_type_infos[] = { DEFINE_SPAPR_CPU_CORE_TYPE("power9_v2.0"), DEFINE_SPAPR_CPU_CORE_TYPE("power9_v2.2"), DEFINE_SPAPR_CPU_CORE_TYPE("power10_v2.0"), + DEFINE_SPAPR_CPU_CORE_TYPE("power11_v2.0"), #ifdef CONFIG_KVM DEFINE_SPAPR_CPU_CORE_TYPE("host"), #endif diff --git a/hw/ppc/spapr_nested.c b/hw/ppc/spapr_nested.c index c02785756c..7def8eb73b 100644 --- a/hw/ppc/spapr_nested.c +++ b/hw/ppc/spapr_nested.c @@ -771,6 +771,7 @@ static void copy_logical_pvr(void *a, void *b, bool set) if (*pvr_logical_ptr) { switch (*pvr_logical_ptr) { + case CPU_POWERPC_LOGICAL_3_10_P11: case CPU_POWERPC_LOGICAL_3_10: pcr = PCR_COMPAT_3_10 | PCR_COMPAT_3_00; break; @@ -982,6 +983,7 @@ struct guest_state_element_type guest_state_element_types[] = { GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_FSCR, fscr), GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PSPB, pspb), GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_CTRL, ctrl), + 
GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DPDES, dpdes), GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_VRSAVE, vrsave), GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DAR, dar), GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_DSISR, dsisr), @@ -1184,6 +1186,12 @@ static target_ulong h_guest_get_capabilities(PowerPCCPU *cpu, return H_PARAMETER; } + /* P11 capabilities */ + if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10_P11, 0, + spapr->max_compat_pvr)) { + env->gpr[4] |= H_GUEST_CAPABILITIES_P11_MODE; + } + /* P10 capabilities */ if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10, 0, spapr->max_compat_pvr)) { @@ -1226,7 +1234,10 @@ static target_ulong h_guest_set_capabilities(PowerPCCPU *cpu, env->gpr[4] = 1; /* set R5 to the first supported Power Processor Mode */ - if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10, 0, + if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10_P11, 0, + spapr->max_compat_pvr)) { + env->gpr[5] = H_GUEST_CAP_P11_MODE_BMAP; + } else if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10, 0, spapr->max_compat_pvr)) { env->gpr[5] = H_GUEST_CAP_P10_MODE_BMAP; } else if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0, diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c index 5c0024bef9..7e24084673 100644 --- a/hw/ppc/spapr_pci.c +++ b/hw/ppc/spapr_pci.c @@ -1237,10 +1237,6 @@ static void add_drcs(SpaprPhbState *phb, PCIBus *bus) int i; uint8_t chassis; - if (!phb->dr_enabled) { - return; - } - chassis = chassis_from_bus(bus); if (pci_bus_is_root(bus)) { @@ -1260,10 +1256,6 @@ static void remove_drcs(SpaprPhbState *phb, PCIBus *bus) int i; uint8_t chassis; - if (!phb->dr_enabled) { - return; - } - chassis = chassis_from_bus(bus); for (i = PCI_SLOT_MAX * PCI_FUNC_MAX - 1; i >= 0; i--) { @@ -1548,17 +1540,6 @@ static void spapr_pci_pre_plug(HotplugHandler *plug_handler, PCIBus *bus = PCI_BUS(qdev_get_parent_bus(DEVICE(pdev))); uint32_t slotnr = PCI_SLOT(pdev->devfn); - if (!phb->dr_enabled) { - /* if this is a hotplug operation initiated by the user - * we need to let them know it's not enabled - */ - if (plugged_dev->hotplugged) { - error_setg(errp, "Bus '%s' does not support hotplugging", - phb->parent_obj.bus->qbus.name); - return; - } - } - if (IS_PCI_BRIDGE(plugged_dev)) { if (!bridge_has_valid_chassis_nr(OBJECT(plugged_dev), errp)) { return; @@ -1591,14 +1572,6 @@ static void spapr_pci_plug(HotplugHandler *plug_handler, SpaprDrc *drc = drc_from_dev(phb, pdev); uint32_t slotnr = PCI_SLOT(pdev->devfn); - /* - * If DR is disabled we don't need to do anything in the case of - * hotplug or coldplug callbacks. 
- */ - if (!phb->dr_enabled) { - return; - } - g_assert(drc); if (IS_PCI_BRIDGE(plugged_dev)) { @@ -1673,12 +1646,6 @@ static void spapr_pci_unplug_request(HotplugHandler *plug_handler, PCIDevice *pdev = PCI_DEVICE(plugged_dev); SpaprDrc *drc = drc_from_dev(phb, pdev); - if (!phb->dr_enabled) { - error_setg(errp, "Bus '%s' does not support hotplugging", - phb->parent_obj.bus->qbus.name); - return; - } - g_assert(drc); g_assert(drc->dev == plugged_dev); @@ -1847,30 +1814,15 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp) assert(sphb->index != (uint32_t)-1); /* checked in spapr_phb_pre_plug() */ - if (sphb->mem64_win_size != 0) { - if (sphb->mem_win_size > SPAPR_PCI_MEM32_WIN_SIZE) { - error_setg(errp, "32-bit memory window of size 0x%"HWADDR_PRIx - " (max 2 GiB)", sphb->mem_win_size); - return; - } - - /* 64-bit window defaults to identity mapping */ - sphb->mem64_win_pciaddr = sphb->mem64_win_addr; - } else if (sphb->mem_win_size > SPAPR_PCI_MEM32_WIN_SIZE) { - /* - * For compatibility with old configuration, if no 64-bit MMIO - * window is specified, but the ordinary (32-bit) memory - * window is specified as > 2GiB, we treat it as a 2GiB 32-bit - * window, with a 64-bit MMIO window following on immediately - * afterwards - */ - sphb->mem64_win_size = sphb->mem_win_size - SPAPR_PCI_MEM32_WIN_SIZE; - sphb->mem64_win_addr = sphb->mem_win_addr + SPAPR_PCI_MEM32_WIN_SIZE; - sphb->mem64_win_pciaddr = - SPAPR_PCI_MEM_WIN_BUS_OFFSET + SPAPR_PCI_MEM32_WIN_SIZE; - sphb->mem_win_size = SPAPR_PCI_MEM32_WIN_SIZE; + if (sphb->mem_win_size > SPAPR_PCI_MEM32_WIN_SIZE) { + error_setg(errp, "32-bit memory window of size 0x%"HWADDR_PRIx + " (max 2 GiB)", sphb->mem_win_size); + return; } + /* 64-bit window defaults to identity mapping */ + sphb->mem64_win_pciaddr = sphb->mem64_win_addr; + if (spapr_pci_find_phb(spapr, sphb->buid)) { SpaprPhbState *s; @@ -2089,8 +2041,6 @@ static Property spapr_phb_properties[] = { SPAPR_PCI_MEM64_WIN_SIZE), DEFINE_PROP_UINT64("io_win_size", SpaprPhbState, io_win_size, SPAPR_PCI_IO_WIN_SIZE), - DEFINE_PROP_BOOL("dynamic-reconfiguration", SpaprPhbState, dr_enabled, - true), /* Default DMA window is 0..1GB */ DEFINE_PROP_UINT64("dma_win_addr", SpaprPhbState, dma_win_addr, 0), DEFINE_PROP_UINT64("dma_win_size", SpaprPhbState, dma_win_size, 0x40000000), @@ -2101,8 +2051,6 @@ static Property spapr_phb_properties[] = { (1ULL << 12) | (1ULL << 16) | (1ULL << 21) | (1ULL << 24)), DEFINE_PROP_UINT32("numa_node", SpaprPhbState, numa_node, -1), - DEFINE_PROP_BOOL("pre-2.8-migration", SpaprPhbState, - pre_2_8_migration, false), DEFINE_PROP_BOOL("pcie-extended-configuration-space", SpaprPhbState, pcie_ecs, true), DEFINE_PROP_BOOL("pre-5.1-associativity", SpaprPhbState, @@ -2140,20 +2088,6 @@ static int spapr_pci_pre_save(void *opaque) gpointer key, value; int i; - if (sphb->pre_2_8_migration) { - sphb->mig_liobn = sphb->dma_liobn[0]; - sphb->mig_mem_win_addr = sphb->mem_win_addr; - sphb->mig_mem_win_size = sphb->mem_win_size; - sphb->mig_io_win_addr = sphb->io_win_addr; - sphb->mig_io_win_size = sphb->io_win_size; - - if ((sphb->mem64_win_size != 0) - && (sphb->mem64_win_addr - == (sphb->mem_win_addr + sphb->mem_win_size))) { - sphb->mig_mem_win_size += sphb->mem64_win_size; - } - } - g_free(sphb->msi_devs); sphb->msi_devs = NULL; sphb->msi_devs_num = g_hash_table_size(sphb->msi); @@ -2200,13 +2134,6 @@ static int spapr_pci_post_load(void *opaque, int version_id) return 0; } -static bool pre_2_8_migration(void *opaque, int version_id) -{ - SpaprPhbState *sphb = opaque; - 
- return sphb->pre_2_8_migration; -} - static const VMStateDescription vmstate_spapr_pci = { .name = "spapr_pci", .version_id = 2, @@ -2216,11 +2143,6 @@ static const VMStateDescription vmstate_spapr_pci = { .post_load = spapr_pci_post_load, .fields = (const VMStateField[]) { VMSTATE_UINT64_EQUAL(buid, SpaprPhbState, NULL), - VMSTATE_UINT32_TEST(mig_liobn, SpaprPhbState, pre_2_8_migration), - VMSTATE_UINT64_TEST(mig_mem_win_addr, SpaprPhbState, pre_2_8_migration), - VMSTATE_UINT64_TEST(mig_mem_win_size, SpaprPhbState, pre_2_8_migration), - VMSTATE_UINT64_TEST(mig_io_win_addr, SpaprPhbState, pre_2_8_migration), - VMSTATE_UINT64_TEST(mig_io_win_size, SpaprPhbState, pre_2_8_migration), VMSTATE_STRUCT_ARRAY(lsi_table, SpaprPhbState, PCI_NUM_PINS, 0, vmstate_spapr_pci_lsi, SpaprPciLsi), VMSTATE_INT32(msi_devs_num, SpaprPhbState), diff --git a/hw/ppc/virtex_ml507.c b/hw/ppc/virtex_ml507.c index 235281e939..f378e5c4a9 100644 --- a/hw/ppc/virtex_ml507.c +++ b/hw/ppc/virtex_ml507.c @@ -67,29 +67,6 @@ static struct boot_info void *vfdt; } boot_info; -/* Create reset TLB entries for BookE, spanning the 32bit addr space. */ -static void mmubooke_create_initial_mapping(CPUPPCState *env, - target_ulong va, - hwaddr pa) -{ - ppcemb_tlb_t *tlb = &env->tlb.tlbe[0]; - - tlb->attr = 0; - tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4); - tlb->size = 1U << 31; /* up to 0x80000000 */ - tlb->EPN = va & TARGET_PAGE_MASK; - tlb->RPN = pa & TARGET_PAGE_MASK; - tlb->PID = 0; - - tlb = &env->tlb.tlbe[1]; - tlb->attr = 0; - tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4); - tlb->size = 1U << 31; /* up to 0xffffffff */ - tlb->EPN = 0x80000000 & TARGET_PAGE_MASK; - tlb->RPN = 0x80000000 & TARGET_PAGE_MASK; - tlb->PID = 0; -} - static PowerPCCPU *ppc440_init_xilinx(const char *cpu_type, uint32_t sysclk) { PowerPCCPU *cpu; @@ -139,8 +116,9 @@ static void main_cpu_reset(void *opaque) env->gpr[3] = bi->fdt; env->nip = bi->bootstrap_pc; - /* Create a mapping for the kernel. */ - mmubooke_create_initial_mapping(env, 0, 0); + /* Create a mapping spanning the 32bit addr space. */ + booke_set_tlb(&env->tlb.tlbe[0], 0, 0, 1U << 31); + booke_set_tlb(&env->tlb.tlbe[1], 0x80000000, 0x80000000, 1U << 31); env->gpr[6] = tswap32(EPAPR_MAGIC); env->gpr[7] = bi->ima_size; } diff --git a/hw/riscv/riscv-iommu.c b/hw/riscv/riscv-iommu.c index feb650549a..12f01a75f5 100644 --- a/hw/riscv/riscv-iommu.c +++ b/hw/riscv/riscv-iommu.c @@ -183,8 +183,25 @@ static void riscv_iommu_pri(RISCVIOMMUState *s, } } -/* Portable implementation of pext_u64, bit-mask extraction. */ -static uint64_t _pext_u64(uint64_t val, uint64_t ext) +/* + * Discards all bits from 'val' whose matching bits in the same + * positions in the mask 'ext' are zeros, and packs the remaining + * bits from 'val' contiguously at the least-significant end of the + * result, keeping the same bit order as 'val' and filling any + * other bits at the most-significant end of the result with zeros. + * + * For example, for the following 'val' and 'ext', the return 'ret' + * will be: + * + * val = a b c d e f g h + * ext = 1 0 1 0 0 1 1 0 + * ret = 0 0 0 0 a c f g + * + * This function, taken from the riscv-iommu 1.0 spec, section 2.3.3 + * "Process to translate addresses of MSIs", is similar to bit manip + * function PEXT (Parallel bits extract) from x86. 
+ */ +static uint64_t riscv_iommu_pext_u64(uint64_t val, uint64_t ext) { uint64_t ret = 0; uint64_t rot = 1; @@ -528,7 +545,7 @@ static MemTxResult riscv_iommu_msi_write(RISCVIOMMUState *s, int cause; /* Interrupt File Number */ - intn = _pext_u64(PPN_DOWN(gpa), ctx->msi_addr_mask); + intn = riscv_iommu_pext_u64(PPN_DOWN(gpa), ctx->msi_addr_mask); if (intn >= 256) { /* Interrupt file number out of range */ res = MEMTX_ACCESS_ERROR; diff --git a/hw/rtc/ds1338.c b/hw/rtc/ds1338.c index a5fe221418..c993182ae4 100644 --- a/hw/rtc/ds1338.c +++ b/hw/rtc/ds1338.c @@ -14,9 +14,9 @@ #include "hw/i2c/i2c.h" #include "migration/vmstate.h" #include "qemu/bcd.h" -#include "qemu/module.h" #include "qom/object.h" #include "sysemu/rtc.h" +#include "trace.h" /* Size of NVRAM including both the user-accessible area and the * secondary register area. @@ -126,6 +126,9 @@ static uint8_t ds1338_recv(I2CSlave *i2c) uint8_t res; res = s->nvram[s->ptr]; + + trace_ds1338_recv(s->ptr, res); + inc_regptr(s); return res; } @@ -134,6 +137,8 @@ static int ds1338_send(I2CSlave *i2c, uint8_t data) { DS1338State *s = DS1338(i2c); + trace_ds1338_send(s->ptr, data); + if (s->addr_byte) { s->ptr = data & (NVRAM_SIZE - 1); s->addr_byte = false; @@ -227,16 +232,13 @@ static void ds1338_class_init(ObjectClass *klass, void *data) dc->vmsd = &vmstate_ds1338; } -static const TypeInfo ds1338_info = { - .name = TYPE_DS1338, - .parent = TYPE_I2C_SLAVE, - .instance_size = sizeof(DS1338State), - .class_init = ds1338_class_init, +static const TypeInfo ds1338_types[] = { + { + .name = TYPE_DS1338, + .parent = TYPE_I2C_SLAVE, + .instance_size = sizeof(DS1338State), + .class_init = ds1338_class_init, + }, }; -static void ds1338_register_types(void) -{ - type_register_static(&ds1338_info); -} - -type_init(ds1338_register_types) +DEFINE_TYPES(ds1338_types) diff --git a/hw/rtc/trace-events b/hw/rtc/trace-events index ebb311a5b0..8012afe102 100644 --- a/hw/rtc/trace-events +++ b/hw/rtc/trace-events @@ -22,6 +22,10 @@ pl031_set_alarm(uint32_t ticks) "alarm set for %u ticks" aspeed_rtc_read(uint64_t addr, uint64_t value) "addr 0x%02" PRIx64 " value 0x%08" PRIx64 aspeed_rtc_write(uint64_t addr, uint64_t value) "addr 0x%02" PRIx64 " value 0x%08" PRIx64 +# ds1338.c +ds1338_recv(uint32_t addr, uint8_t value) "[0x%" PRIx32 "] -> 0x%02" PRIx8 +ds1338_send(uint32_t addr, uint8_t value) "[0x%" PRIx32 "] <- 0x%02" PRIx8 + # m48t59.c m48txx_nvram_io_read(uint64_t addr, uint64_t value) "io read addr:0x%04" PRIx64 " value:0x%02" PRIx64 m48txx_nvram_io_write(uint64_t addr, uint64_t value) "io write addr:0x%04" PRIx64 " value:0x%02" PRIx64 diff --git a/hw/sd/aspeed_sdhci.c b/hw/sd/aspeed_sdhci.c index 427e5336a8..98d5460905 100644 --- a/hw/sd/aspeed_sdhci.c +++ b/hw/sd/aspeed_sdhci.c @@ -24,8 +24,10 @@ #define ASPEED_SDHCI_DEBOUNCE_RESET 0x00000005 #define ASPEED_SDHCI_BUS 0x08 #define ASPEED_SDHCI_SDIO_140 0x10 +#define ASPEED_SDHCI_SDIO_144 0x14 #define ASPEED_SDHCI_SDIO_148 0x18 #define ASPEED_SDHCI_SDIO_240 0x20 +#define ASPEED_SDHCI_SDIO_244 0x24 #define ASPEED_SDHCI_SDIO_248 0x28 #define ASPEED_SDHCI_WP_POL 0xec #define ASPEED_SDHCI_CARD_DET 0xf0 @@ -35,21 +37,27 @@ static uint64_t aspeed_sdhci_read(void *opaque, hwaddr addr, unsigned int size) { - uint32_t val = 0; + uint64_t val = 0; AspeedSDHCIState *sdhci = opaque; switch (addr) { case ASPEED_SDHCI_SDIO_140: - val = (uint32_t)sdhci->slots[0].capareg; + val = extract64(sdhci->slots[0].capareg, 0, 32); + break; + case ASPEED_SDHCI_SDIO_144: + val = extract64(sdhci->slots[0].capareg, 32, 32); break; 
case ASPEED_SDHCI_SDIO_148: - val = (uint32_t)sdhci->slots[0].maxcurr; + val = extract64(sdhci->slots[0].maxcurr, 0, 32); break; case ASPEED_SDHCI_SDIO_240: - val = (uint32_t)sdhci->slots[1].capareg; + val = extract64(sdhci->slots[1].capareg, 0, 32); + break; + case ASPEED_SDHCI_SDIO_244: + val = extract64(sdhci->slots[1].capareg, 32, 32); break; case ASPEED_SDHCI_SDIO_248: - val = (uint32_t)sdhci->slots[1].maxcurr; + val = extract64(sdhci->slots[1].maxcurr, 0, 32); break; default: if (addr < ASPEED_SDHCI_REG_SIZE) { @@ -61,9 +69,9 @@ static uint64_t aspeed_sdhci_read(void *opaque, hwaddr addr, unsigned int size) } } - trace_aspeed_sdhci_read(addr, size, (uint64_t) val); + trace_aspeed_sdhci_read(addr, size, val); - return (uint64_t)val; + return val; } static void aspeed_sdhci_write(void *opaque, hwaddr addr, uint64_t val, @@ -79,16 +87,26 @@ static void aspeed_sdhci_write(void *opaque, hwaddr addr, uint64_t val, sdhci->regs[TO_REG(addr)] = (uint32_t)val & ~ASPEED_SDHCI_INFO_RESET; break; case ASPEED_SDHCI_SDIO_140: - sdhci->slots[0].capareg = (uint64_t)(uint32_t)val; + sdhci->slots[0].capareg = deposit64(sdhci->slots[0].capareg, 0, 32, val); + break; + case ASPEED_SDHCI_SDIO_144: + sdhci->slots[0].capareg = deposit64(sdhci->slots[0].capareg, 32, 32, val); break; case ASPEED_SDHCI_SDIO_148: - sdhci->slots[0].maxcurr = (uint64_t)(uint32_t)val; + sdhci->slots[0].maxcurr = deposit64(sdhci->slots[0].maxcurr, + 0, 32, val); break; case ASPEED_SDHCI_SDIO_240: - sdhci->slots[1].capareg = (uint64_t)(uint32_t)val; + sdhci->slots[1].capareg = deposit64(sdhci->slots[1].capareg, + 0, 32, val); + break; + case ASPEED_SDHCI_SDIO_244: + sdhci->slots[1].capareg = deposit64(sdhci->slots[1].capareg, + 32, 32, val); break; case ASPEED_SDHCI_SDIO_248: - sdhci->slots[1].maxcurr = (uint64_t)(uint32_t)val; + sdhci->slots[1].maxcurr = deposit64(sdhci->slots[0].maxcurr, + 0, 32, val); break; default: if (addr < ASPEED_SDHCI_REG_SIZE) { diff --git a/hw/sd/sd.c b/hw/sd/sd.c index b2e2d58e01..f9bd03f3fd 100644 --- a/hw/sd/sd.c +++ b/hw/sd/sd.c @@ -834,7 +834,9 @@ static void sd_reset(DeviceState *dev) sect = 0; } size = sect << HWBLOCK_SHIFT; - size -= sd_bootpart_offset(sd); + if (sd_is_emmc(sd)) { + size -= sd->boot_part_size * 2; + } sect = sd_addr_to_wpnum(size) + 1; diff --git a/hw/sd/sdhci.c b/hw/sd/sdhci.c index ed01499391..dbe5c2340c 100644 --- a/hw/sd/sdhci.c +++ b/hw/sd/sdhci.c @@ -37,7 +37,6 @@ #include "migration/vmstate.h" #include "sdhci-internal.h" #include "qemu/log.h" -#include "qemu/module.h" #include "trace.h" #include "qom/object.h" @@ -1598,15 +1597,6 @@ static void sdhci_sysbus_class_init(ObjectClass *klass, void *data) sdhci_common_class_init(klass, data); } -static const TypeInfo sdhci_sysbus_info = { - .name = TYPE_SYSBUS_SDHCI, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(SDHCIState), - .instance_init = sdhci_sysbus_init, - .instance_finalize = sdhci_sysbus_finalize, - .class_init = sdhci_sysbus_class_init, -}; - /* --- qdev bus master --- */ static void sdhci_bus_class_init(ObjectClass *klass, void *data) @@ -1617,13 +1607,6 @@ static void sdhci_bus_class_init(ObjectClass *klass, void *data) sbc->set_readonly = sdhci_set_readonly; } -static const TypeInfo sdhci_bus_info = { - .name = TYPE_SDHCI_BUS, - .parent = TYPE_SD_BUS, - .instance_size = sizeof(SDBus), - .class_init = sdhci_bus_class_init, -}; - /* --- qdev i.MX eSDHC --- */ #define USDHC_MIX_CTRL 0x48 @@ -1882,12 +1865,6 @@ static void imx_usdhc_init(Object *obj) s->quirks = SDHCI_QUIRK_NO_BUSY_IRQ; } -static const 
TypeInfo imx_usdhc_info = { - .name = TYPE_IMX_USDHC, - .parent = TYPE_SYSBUS_SDHCI, - .instance_init = imx_usdhc_init, -}; - /* --- qdev Samsung s3c --- */ #define S3C_SDHCI_CONTROL2 0x80 @@ -1946,18 +1923,31 @@ static void sdhci_s3c_init(Object *obj) s->io_ops = &sdhci_s3c_mmio_ops; } -static const TypeInfo sdhci_s3c_info = { - .name = TYPE_S3C_SDHCI , - .parent = TYPE_SYSBUS_SDHCI, - .instance_init = sdhci_s3c_init, +static const TypeInfo sdhci_types[] = { + { + .name = TYPE_SDHCI_BUS, + .parent = TYPE_SD_BUS, + .instance_size = sizeof(SDBus), + .class_init = sdhci_bus_class_init, + }, + { + .name = TYPE_SYSBUS_SDHCI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SDHCIState), + .instance_init = sdhci_sysbus_init, + .instance_finalize = sdhci_sysbus_finalize, + .class_init = sdhci_sysbus_class_init, + }, + { + .name = TYPE_IMX_USDHC, + .parent = TYPE_SYSBUS_SDHCI, + .instance_init = imx_usdhc_init, + }, + { + .name = TYPE_S3C_SDHCI, + .parent = TYPE_SYSBUS_SDHCI, + .instance_init = sdhci_s3c_init, + }, }; -static void sdhci_register_types(void) -{ - type_register_static(&sdhci_sysbus_info); - type_register_static(&sdhci_bus_info); - type_register_static(&imx_usdhc_info); - type_register_static(&sdhci_s3c_info); -} - -type_init(sdhci_register_types) +DEFINE_TYPES(sdhci_types) diff --git a/hw/sensor/tmp105.c b/hw/sensor/tmp105.c index 9d7b911f59..ef2824f3e1 100644 --- a/hw/sensor/tmp105.c +++ b/hw/sensor/tmp105.c @@ -27,6 +27,7 @@ #include "qapi/visitor.h" #include "qemu/module.h" #include "hw/registerfields.h" +#include "trace.h" FIELD(CONFIG, SHUTDOWN_MODE, 0, 1) FIELD(CONFIG, THERMOSTAT_MODE, 1, 1) @@ -150,17 +151,21 @@ static void tmp105_read(TMP105State *s) s->buf[s->len++] = ((uint16_t) s->limit[1]) >> 0; break; } + + trace_tmp105_read(s->i2c.address, s->pointer); } static void tmp105_write(TMP105State *s) { + trace_tmp105_write(s->i2c.address, s->pointer); + switch (s->pointer & 3) { case TMP105_REG_TEMPERATURE: break; case TMP105_REG_CONFIG: if (FIELD_EX8(s->buf[0] & ~s->config, CONFIG, SHUTDOWN_MODE)) { - printf("%s: TMP105 shutdown\n", __func__); + trace_tmp105_write_shutdown(s->i2c.address); } s->config = FIELD_DP8(s->buf[0], CONFIG, ONE_SHOT, 0); s->faults = tmp105_faultq[FIELD_EX8(s->config, CONFIG, FAULT_QUEUE)]; diff --git a/hw/sensor/trace-events b/hw/sensor/trace-events new file mode 100644 index 0000000000..a3fe54fa6d --- /dev/null +++ b/hw/sensor/trace-events @@ -0,0 +1,6 @@ +# See docs/devel/tracing.rst for syntax documentation. 
+ +# tmp105.c +tmp105_read(uint8_t dev, uint8_t addr) "device: 0x%02x, addr: 0x%02x" +tmp105_write(uint8_t dev, uint8_t addr) "device: 0x%02x, addr 0x%02x" +tmp105_write_shutdown(uint8_t dev) "device: 0x%02x" diff --git a/hw/sensor/trace.h b/hw/sensor/trace.h new file mode 100644 index 0000000000..e4721560b0 --- /dev/null +++ b/hw/sensor/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_sensor.h" diff --git a/hw/ssi/pnv_spi.c b/hw/ssi/pnv_spi.c index 9e7207bf7c..c21b2ebb3c 100644 --- a/hw/ssi/pnv_spi.c +++ b/hw/ssi/pnv_spi.c @@ -53,8 +53,8 @@ static PnvXferBuffer *pnv_spi_xfer_buffer_new(void) static void pnv_spi_xfer_buffer_free(PnvXferBuffer *payload) { - free(payload->data); - free(payload); + g_free(payload->data); + g_free(payload); } static uint8_t *pnv_spi_xfer_buffer_write_ptr(PnvXferBuffer *payload, @@ -217,6 +217,9 @@ static void transfer(PnvSpi *s, PnvXferBuffer *payload) PnvXferBuffer *rsp_payload = NULL; rsp_payload = pnv_spi_xfer_buffer_new(); + if (!rsp_payload) { + return; + } for (int offset = 0; offset < payload->len; offset += s->transfer_len) { tx = 0; for (int i = 0; i < s->transfer_len; i++) { @@ -235,9 +238,8 @@ static void transfer(PnvSpi *s, PnvXferBuffer *payload) (rx >> (8 * (s->transfer_len - 1) - i * 8)) & 0xFF; } } - if (rsp_payload != NULL) { - spi_response(s, s->N1_bits, rsp_payload); - } + spi_response(s, s->N1_bits, rsp_payload); + pnv_spi_xfer_buffer_free(rsp_payload); } static inline uint8_t get_seq_index(PnvSpi *s) diff --git a/hw/timer/aspeed_timer.c b/hw/timer/aspeed_timer.c index b1f860ecfb..149f7cc5a6 100644 --- a/hw/timer/aspeed_timer.c +++ b/hw/timer/aspeed_timer.c @@ -276,7 +276,8 @@ static void aspeed_timer_set_value(AspeedTimerCtrlState *s, int timer, int reg, old_reload = t->reload; t->reload = calculate_min_ticks(t, value); - /* If the reload value was not previously set, or zero, and + /* + * If the reload value was not previously set, or zero, and * the current value is valid, try to start the timer if it is * enabled. */ @@ -312,7 +313,8 @@ static void aspeed_timer_set_value(AspeedTimerCtrlState *s, int timer, int reg, } } -/* Control register operations are broken out into helpers that can be +/* + * Control register operations are broken out into helpers that can be * explicitly called on aspeed_timer_reset(), but also from * aspeed_timer_ctrl_op(). */ @@ -396,7 +398,8 @@ static void aspeed_timer_set_ctrl(AspeedTimerCtrlState *s, uint32_t reg) AspeedTimer *t; const uint8_t enable_mask = BIT(op_enable); - /* Handle a dependency between the 'enable' and remaining three + /* + * Handle a dependency between the 'enable' and remaining three * configuration bits - i.e. if more than one bit in the control set has * changed, including the 'enable' bit, then we want either disable the * timer and perform configuration, or perform configuration and then @@ -577,12 +580,11 @@ static void aspeed_2600_timer_write(AspeedTimerCtrlState *s, hwaddr offset, switch (offset) { case 0x34: - s->irq_sts &= tv; + s->irq_sts &= ~tv; break; case 0x3C: aspeed_timer_set_ctrl(s, s->ctrl & ~tv); break; - case 0x38: default: qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n", @@ -623,7 +625,8 @@ static void aspeed_timer_reset(DeviceState *dev) for (i = 0; i < ASPEED_TIMER_NR_TIMERS; i++) { AspeedTimer *t = &s->timers[i]; - /* Explicitly call helpers to avoid any conditional behaviour through + /* + * Explicitly call helpers to avoid any conditional behaviour through * aspeed_timer_set_ctrl(). 
*/ aspeed_timer_ctrl_enable(t, false); diff --git a/hw/timer/imx_gpt.c b/hw/timer/imx_gpt.c index 23b3d79bdb..2663a9d9ef 100644 --- a/hw/timer/imx_gpt.c +++ b/hw/timer/imx_gpt.c @@ -18,19 +18,12 @@ #include "migration/vmstate.h" #include "qemu/module.h" #include "qemu/log.h" +#include "trace.h" #ifndef DEBUG_IMX_GPT #define DEBUG_IMX_GPT 0 #endif -#define DPRINTF(fmt, args...) \ - do { \ - if (DEBUG_IMX_GPT) { \ - fprintf(stderr, "[%s]%s: " fmt , TYPE_IMX_GPT, \ - __func__, ##args); \ - } \ - } while (0) - static const char *imx_gpt_reg_name(uint32_t reg) { switch (reg) { @@ -145,7 +138,7 @@ static void imx_gpt_set_freq(IMXGPTState *s) s->freq = imx_ccm_get_clock_frequency(s->ccm, s->clocks[clksrc]) / (1 + s->pr); - DPRINTF("Setting clksrc %d to frequency %d\n", clksrc, s->freq); + trace_imx_gpt_set_freq(clksrc, s->freq); if (s->freq) { ptimer_set_freq(s->timer, s->freq); @@ -317,7 +310,7 @@ static uint64_t imx_gpt_read(void *opaque, hwaddr offset, unsigned size) break; } - DPRINTF("(%s) = 0x%08x\n", imx_gpt_reg_name(offset >> 2), reg_value); + trace_imx_gpt_read(imx_gpt_reg_name(offset >> 2), reg_value); return reg_value; } @@ -384,8 +377,7 @@ static void imx_gpt_write(void *opaque, hwaddr offset, uint64_t value, IMXGPTState *s = IMX_GPT(opaque); uint32_t oldreg; - DPRINTF("(%s, value = 0x%08x)\n", imx_gpt_reg_name(offset >> 2), - (uint32_t)value); + trace_imx_gpt_write(imx_gpt_reg_name(offset >> 2), (uint32_t)value); switch (offset >> 2) { case 0: @@ -485,7 +477,7 @@ static void imx_gpt_timeout(void *opaque) { IMXGPTState *s = IMX_GPT(opaque); - DPRINTF("\n"); + trace_imx_gpt_timeout(); s->sr |= s->next_int; s->next_int = 0; diff --git a/hw/timer/trace-events b/hw/timer/trace-events index f48a712801..5cfc369fba 100644 --- a/hw/timer/trace-events +++ b/hw/timer/trace-events @@ -49,6 +49,12 @@ cmsdk_apb_dualtimer_read(uint64_t offset, uint64_t data, unsigned size) "CMSDK A cmsdk_apb_dualtimer_write(uint64_t offset, uint64_t data, unsigned size) "CMSDK APB dualtimer write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" cmsdk_apb_dualtimer_reset(void) "CMSDK APB dualtimer: reset" +# imx_gpt.c +imx_gpt_set_freq(uint32_t clksrc, uint32_t freq) "Setting clksrc %u to %u Hz" +imx_gpt_read(const char *name, uint64_t value) "%s -> 0x%08" PRIx64 +imx_gpt_write(const char *name, uint64_t value) "%s <- 0x%08" PRIx64 +imx_gpt_timeout(void) "" + # npcm7xx_timer.c npcm7xx_timer_read(const char *id, uint64_t offset, uint64_t value) " %s offset: 0x%04" PRIx64 " value 0x%08" PRIx64 npcm7xx_timer_write(const char *id, uint64_t offset, uint64_t value) "%s offset: 0x%04" PRIx64 " value 0x%08" PRIx64 diff --git a/hw/usb/hcd-ehci-sysbus.c b/hw/usb/hcd-ehci-sysbus.c index 2b1652f7a8..eb7df93ac2 100644 --- a/hw/usb/hcd-ehci-sysbus.c +++ b/hw/usb/hcd-ehci-sysbus.c @@ -19,7 +19,6 @@ #include "hw/qdev-properties.h" #include "hw/usb/hcd-ehci.h" #include "migration/vmstate.h" -#include "qemu/module.h" static const VMStateDescription vmstate_ehci_sysbus = { .name = "ehci-sysbus", @@ -97,17 +96,6 @@ static void ehci_sysbus_class_init(ObjectClass *klass, void *data) set_bit(DEVICE_CATEGORY_USB, dc->categories); } -static const TypeInfo ehci_type_info = { - .name = TYPE_SYS_BUS_EHCI, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(EHCISysBusState), - .instance_init = ehci_sysbus_init, - .instance_finalize = ehci_sysbus_finalize, - .abstract = true, - .class_init = ehci_sysbus_class_init, - .class_size = sizeof(SysBusEHCIClass), -}; - static void ehci_platform_class_init(ObjectClass *oc, void *data) { 
SysBusEHCIClass *sec = SYS_BUS_EHCI_CLASS(oc); @@ -118,12 +106,6 @@ static void ehci_platform_class_init(ObjectClass *oc, void *data) set_bit(DEVICE_CATEGORY_USB, dc->categories); } -static const TypeInfo ehci_platform_type_info = { - .name = TYPE_PLATFORM_EHCI, - .parent = TYPE_SYS_BUS_EHCI, - .class_init = ehci_platform_class_init, -}; - static void ehci_exynos4210_class_init(ObjectClass *oc, void *data) { SysBusEHCIClass *sec = SYS_BUS_EHCI_CLASS(oc); @@ -134,12 +116,6 @@ static void ehci_exynos4210_class_init(ObjectClass *oc, void *data) set_bit(DEVICE_CATEGORY_USB, dc->categories); } -static const TypeInfo ehci_exynos4210_type_info = { - .name = TYPE_EXYNOS4210_EHCI, - .parent = TYPE_SYS_BUS_EHCI, - .class_init = ehci_exynos4210_class_init, -}; - static void ehci_aw_h3_class_init(ObjectClass *oc, void *data) { SysBusEHCIClass *sec = SYS_BUS_EHCI_CLASS(oc); @@ -150,12 +126,6 @@ static void ehci_aw_h3_class_init(ObjectClass *oc, void *data) set_bit(DEVICE_CATEGORY_USB, dc->categories); } -static const TypeInfo ehci_aw_h3_type_info = { - .name = TYPE_AW_H3_EHCI, - .parent = TYPE_SYS_BUS_EHCI, - .class_init = ehci_aw_h3_class_init, -}; - static void ehci_npcm7xx_class_init(ObjectClass *oc, void *data) { SysBusEHCIClass *sec = SYS_BUS_EHCI_CLASS(oc); @@ -168,12 +138,6 @@ static void ehci_npcm7xx_class_init(ObjectClass *oc, void *data) set_bit(DEVICE_CATEGORY_USB, dc->categories); } -static const TypeInfo ehci_npcm7xx_type_info = { - .name = TYPE_NPCM7XX_EHCI, - .parent = TYPE_SYS_BUS_EHCI, - .class_init = ehci_npcm7xx_class_init, -}; - static void ehci_tegra2_class_init(ObjectClass *oc, void *data) { SysBusEHCIClass *sec = SYS_BUS_EHCI_CLASS(oc); @@ -184,12 +148,6 @@ static void ehci_tegra2_class_init(ObjectClass *oc, void *data) set_bit(DEVICE_CATEGORY_USB, dc->categories); } -static const TypeInfo ehci_tegra2_type_info = { - .name = TYPE_TEGRA2_EHCI, - .parent = TYPE_SYS_BUS_EHCI, - .class_init = ehci_tegra2_class_init, -}; - static void ehci_ppc4xx_init(Object *o) { EHCISysBusState *s = SYS_BUS_EHCI(o); @@ -207,13 +165,6 @@ static void ehci_ppc4xx_class_init(ObjectClass *oc, void *data) set_bit(DEVICE_CATEGORY_USB, dc->categories); } -static const TypeInfo ehci_ppc4xx_type_info = { - .name = TYPE_PPC4xx_EHCI, - .parent = TYPE_SYS_BUS_EHCI, - .class_init = ehci_ppc4xx_class_init, - .instance_init = ehci_ppc4xx_init, -}; - /* * Faraday FUSBH200 USB 2.0 EHCI */ @@ -282,24 +233,55 @@ static void fusbh200_ehci_class_init(ObjectClass *oc, void *data) set_bit(DEVICE_CATEGORY_USB, dc->categories); } -static const TypeInfo ehci_fusbh200_type_info = { - .name = TYPE_FUSBH200_EHCI, - .parent = TYPE_SYS_BUS_EHCI, - .instance_size = sizeof(FUSBH200EHCIState), - .instance_init = fusbh200_ehci_init, - .class_init = fusbh200_ehci_class_init, +static const TypeInfo ehci_sysbus_types[] = { + { + .name = TYPE_SYS_BUS_EHCI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(EHCISysBusState), + .instance_init = ehci_sysbus_init, + .instance_finalize = ehci_sysbus_finalize, + .abstract = true, + .class_init = ehci_sysbus_class_init, + .class_size = sizeof(SysBusEHCIClass), + }, + { + .name = TYPE_PLATFORM_EHCI, + .parent = TYPE_SYS_BUS_EHCI, + .class_init = ehci_platform_class_init, + }, + { + .name = TYPE_EXYNOS4210_EHCI, + .parent = TYPE_SYS_BUS_EHCI, + .class_init = ehci_exynos4210_class_init, + }, + { + .name = TYPE_AW_H3_EHCI, + .parent = TYPE_SYS_BUS_EHCI, + .class_init = ehci_aw_h3_class_init, + }, + { + .name = TYPE_NPCM7XX_EHCI, + .parent = TYPE_SYS_BUS_EHCI, + .class_init = 
ehci_npcm7xx_class_init, + }, + { + .name = TYPE_TEGRA2_EHCI, + .parent = TYPE_SYS_BUS_EHCI, + .class_init = ehci_tegra2_class_init, + }, + { + .name = TYPE_PPC4xx_EHCI, + .parent = TYPE_SYS_BUS_EHCI, + .class_init = ehci_ppc4xx_class_init, + .instance_init = ehci_ppc4xx_init, + }, + { + .name = TYPE_FUSBH200_EHCI, + .parent = TYPE_SYS_BUS_EHCI, + .instance_size = sizeof(FUSBH200EHCIState), + .instance_init = fusbh200_ehci_init, + .class_init = fusbh200_ehci_class_init, + }, }; -static void ehci_sysbus_register_types(void) -{ - type_register_static(&ehci_type_info); - type_register_static(&ehci_platform_type_info); - type_register_static(&ehci_exynos4210_type_info); - type_register_static(&ehci_aw_h3_type_info); - type_register_static(&ehci_npcm7xx_type_info); - type_register_static(&ehci_tegra2_type_info); - type_register_static(&ehci_ppc4xx_type_info); - type_register_static(&ehci_fusbh200_type_info); -} - -type_init(ehci_sysbus_register_types) +DEFINE_TYPES(ehci_sysbus_types) diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c index 992dc3b102..01aa11013e 100644 --- a/hw/vfio/migration.c +++ b/hw/vfio/migration.c @@ -370,6 +370,10 @@ static ssize_t vfio_save_block(QEMUFile *f, VFIOMigration *migration) * please refer to the Linux kernel VFIO uAPI. */ if (errno == ENOMSG) { + if (!migration->event_precopy_empty_hit) { + trace_vfio_save_block_precopy_empty_hit(migration->vbasedev->name); + migration->event_precopy_empty_hit = true; + } return 0; } @@ -379,6 +383,9 @@ static ssize_t vfio_save_block(QEMUFile *f, VFIOMigration *migration) return 0; } + /* Non-empty read: re-arm the trace event */ + migration->event_precopy_empty_hit = false; + qemu_put_be64(f, VFIO_MIG_FLAG_DEV_DATA_STATE); qemu_put_be64(f, data_size); qemu_put_buffer(f, migration->data_buffer, data_size); @@ -472,6 +479,9 @@ static int vfio_save_setup(QEMUFile *f, void *opaque, Error **errp) return -ENOMEM; } + migration->event_save_iterate_started = false; + migration->event_precopy_empty_hit = false; + if (vfio_precopy_supported(vbasedev)) { switch (migration->device_state) { case VFIO_DEVICE_STATE_RUNNING: @@ -602,6 +612,11 @@ static int vfio_save_iterate(QEMUFile *f, void *opaque) VFIOMigration *migration = vbasedev->migration; ssize_t data_size; + if (!migration->event_save_iterate_started) { + trace_vfio_save_iterate_start(vbasedev->name); + migration->event_save_iterate_started = true; + } + data_size = vfio_save_block(f, migration); if (data_size < 0) { return data_size; @@ -630,6 +645,8 @@ static int vfio_save_complete_precopy(QEMUFile *f, void *opaque) int ret; Error *local_err = NULL; + trace_vfio_save_complete_precopy_start(vbasedev->name); + /* We reach here with device state STOP or STOP_COPY only */ ret = vfio_migration_set_state(vbasedev, VFIO_DEVICE_STATE_STOP_COPY, VFIO_DEVICE_STATE_STOP, &local_err); diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events index 29789e8d27..cab1cf1de0 100644 --- a/hw/vfio/trace-events +++ b/hw/vfio/trace-events @@ -157,10 +157,13 @@ vfio_migration_set_device_state(const char *name, const char *state) " (%s) stat vfio_migration_set_state(const char *name, const char *new_state, const char *recover_state) " (%s) new state %s, recover state %s" vfio_migration_state_notifier(const char *name, int state) " (%s) state %d" vfio_save_block(const char *name, int data_size) " (%s) data_size %d" +vfio_save_block_precopy_empty_hit(const char *name) " (%s)" vfio_save_cleanup(const char *name) " (%s)" vfio_save_complete_precopy(const char *name, int ret) " (%s) ret %d" 
+vfio_save_complete_precopy_start(const char *name) " (%s)" vfio_save_device_config_state(const char *name) " (%s)" vfio_save_iterate(const char *name, uint64_t precopy_init_size, uint64_t precopy_dirty_size) " (%s) precopy initial size %"PRIu64" precopy dirty size %"PRIu64 +vfio_save_iterate_start(const char *name) " (%s)" vfio_save_setup(const char *name, uint64_t data_buffer_size) " (%s) data buffer size %"PRIu64 vfio_state_pending_estimate(const char *name, uint64_t precopy, uint64_t postcopy, uint64_t precopy_init_size, uint64_t precopy_dirty_size) " (%s) precopy %"PRIu64" postcopy %"PRIu64" precopy initial size %"PRIu64" precopy dirty size %"PRIu64 vfio_state_pending_exact(const char *name, uint64_t precopy, uint64_t postcopy, uint64_t stopcopy_size, uint64_t precopy_init_size, uint64_t precopy_dirty_size) " (%s) precopy %"PRIu64" postcopy %"PRIu64" stopcopy size %"PRIu64" precopy initial size %"PRIu64" precopy dirty size %"PRIu64 diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c index 00561daa06..f170f0b25b 100644 --- a/hw/virtio/vhost-user.c +++ b/hw/virtio/vhost-user.c @@ -1185,9 +1185,16 @@ static int vhost_user_set_vring_num(struct vhost_dev *dev, static void vhost_user_host_notifier_free(VhostUserHostNotifier *n) { - assert(n && n->unmap_addr); - munmap(n->unmap_addr, qemu_real_host_page_size()); - n->unmap_addr = NULL; + if (n->unmap_addr) { + munmap(n->unmap_addr, qemu_real_host_page_size()); + n->unmap_addr = NULL; + } + if (n->destroy) { + memory_region_transaction_begin(); + object_unparent(OBJECT(&n->mr)); + memory_region_transaction_commit(); + g_free(n); + } } /* @@ -1195,17 +1202,28 @@ static void vhost_user_host_notifier_free(VhostUserHostNotifier *n) * under rcu. */ static void vhost_user_host_notifier_remove(VhostUserHostNotifier *n, - VirtIODevice *vdev) + VirtIODevice *vdev, bool destroy) { + /* + * if destroy == false and n->addr == NULL, we have nothing to do. + * so, just return. + */ + if (!n || (!destroy && !n->addr)) { + return; + } + if (n->addr) { if (vdev) { + memory_region_transaction_begin(); virtio_queue_set_host_notifier_mr(vdev, n->idx, &n->mr, false); + memory_region_transaction_commit(); } assert(!n->unmap_addr); n->unmap_addr = n->addr; n->addr = NULL; - call_rcu(n, vhost_user_host_notifier_free, rcu); } + n->destroy = destroy; + call_rcu(n, vhost_user_host_notifier_free, rcu); } static int vhost_user_set_vring_base(struct vhost_dev *dev, @@ -1279,9 +1297,7 @@ static int vhost_user_get_vring_base(struct vhost_dev *dev, struct vhost_user *u = dev->opaque; VhostUserHostNotifier *n = fetch_notifier(u->user, ring->index); - if (n) { - vhost_user_host_notifier_remove(n, dev->vdev); - } + vhost_user_host_notifier_remove(n, dev->vdev, false); ret = vhost_user_write(dev, &msg, NULL, 0); if (ret < 0) { @@ -1562,7 +1578,7 @@ static int vhost_user_backend_handle_vring_host_notifier(struct vhost_dev *dev, * new mapped address. */ n = fetch_or_create_notifier(user, queue_idx); - vhost_user_host_notifier_remove(n, vdev); + vhost_user_host_notifier_remove(n, vdev, false); if (area->u64 & VHOST_USER_VRING_NOFD_MASK) { return 0; @@ -1607,9 +1623,14 @@ vhost_user_backend_handle_shared_object_add(struct vhost_dev *dev, QemuUUID uuid; memcpy(uuid.data, object->uuid, sizeof(object->uuid)); - return virtio_add_vhost_device(&uuid, dev); + return !virtio_add_vhost_device(&uuid, dev); } +/* + * Handle VHOST_USER_BACKEND_SHARED_OBJECT_REMOVE backend requests. + * + * Return: 0 on success, 1 on error. 
+ */ static int vhost_user_backend_handle_shared_object_remove(struct vhost_dev *dev, VhostUserShared *object) @@ -1623,16 +1644,16 @@ vhost_user_backend_handle_shared_object_remove(struct vhost_dev *dev, struct vhost_dev *owner = virtio_lookup_vhost_device(&uuid); if (dev != owner) { /* Not allowed to remove non-owned entries */ - return 0; + return 1; } break; } default: /* Not allowed to remove non-owned entries */ - return 0; + return 1; } - return virtio_remove_resource(&uuid); + return !virtio_remove_resource(&uuid); } static bool vhost_user_send_resp(QIOChannel *ioc, VhostUserHeader *hdr, @@ -2736,15 +2757,7 @@ static int vhost_user_set_inflight_fd(struct vhost_dev *dev, static void vhost_user_state_destroy(gpointer data) { VhostUserHostNotifier *n = (VhostUserHostNotifier *) data; - if (n) { - vhost_user_host_notifier_remove(n, NULL); - object_unparent(OBJECT(&n->mr)); - /* - * We can't free until vhost_user_host_notifier_remove has - * done it's thing so schedule the free with RCU. - */ - g_free_rcu(n, rcu); - } + vhost_user_host_notifier_remove(n, NULL, true); } bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp) @@ -2765,9 +2778,7 @@ void vhost_user_cleanup(VhostUserState *user) if (!user->chr) { return; } - memory_region_transaction_begin(); user->notifiers = (GPtrArray *) g_ptr_array_free(user->notifiers, true); - memory_region_transaction_commit(); user->chr = NULL; } diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c index 4d832fe845..5a394821da 100644 --- a/hw/virtio/virtio-pci.c +++ b/hw/virtio/virtio-pci.c @@ -2057,6 +2057,8 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp) if (modern_pio) { memory_region_init(&proxy->io_bar, OBJECT(proxy), "virtio-pci-io", 0x4); + address_space_init(&proxy->modern_cfg_io_as, &proxy->io_bar, + "virtio-pci-cfg-io-as"); pci_register_bar(&proxy->pci_dev, proxy->modern_io_bar_idx, PCI_BASE_ADDRESS_SPACE_IO, &proxy->io_bar); @@ -2180,6 +2182,9 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp) /* PCI BAR regions must be powers of 2 */ pow2ceil(proxy->notify.offset + proxy->notify.size)); + address_space_init(&proxy->modern_cfg_mem_as, &proxy->modern_bar, + "virtio-pci-cfg-mem-as"); + if (proxy->disable_legacy == ON_OFF_AUTO_AUTO) { proxy->disable_legacy = pcie_port ? 
ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; } @@ -2269,12 +2274,17 @@ static void virtio_pci_exit(PCIDevice *pci_dev) VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev); bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) && !pci_bus_is_root(pci_get_bus(pci_dev)); + bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY; msix_uninit_exclusive_bar(pci_dev); if (proxy->flags & VIRTIO_PCI_FLAG_AER && pcie_port && pci_is_express(pci_dev)) { pcie_aer_exit(pci_dev); } + address_space_destroy(&proxy->modern_cfg_mem_as); + if (modern_pio) { + address_space_destroy(&proxy->modern_cfg_io_as); + } } static void virtio_pci_reset(DeviceState *qdev) @@ -2385,6 +2395,14 @@ static void virtio_pci_dc_realize(DeviceState *qdev, Error **errp) vpciklass->parent_dc_realize(qdev, errp); } +static int virtio_pci_sync_config(DeviceState *dev, Error **errp) +{ + VirtIOPCIProxy *proxy = VIRTIO_PCI(dev); + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + + return qdev_sync_config(DEVICE(vdev), errp); +} + static void virtio_pci_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -2401,6 +2419,7 @@ static void virtio_pci_class_init(ObjectClass *klass, void *data) device_class_set_parent_realize(dc, virtio_pci_dc_realize, &vpciklass->parent_dc_realize); rc->phases.hold = virtio_pci_bus_reset_hold; + dc->sync_config = virtio_pci_sync_config; } static const TypeInfo virtio_pci_info = { diff --git a/hw/watchdog/wdt_imx2.c b/hw/watchdog/wdt_imx2.c index be63d421da..8162d58afa 100644 --- a/hw/watchdog/wdt_imx2.c +++ b/hw/watchdog/wdt_imx2.c @@ -39,7 +39,6 @@ static void imx2_wdt_expired(void *opaque) /* Perform watchdog action if watchdog is enabled */ if (s->wcr & IMX2_WDT_WCR_WDE) { - s->wrsr = IMX2_WDT_WRSR_TOUT; watchdog_perform_action(); } } diff --git a/include/block/nvme.h b/include/block/nvme.h index a37be0d0da..f4d108841b 100644 --- a/include/block/nvme.h +++ b/include/block/nvme.h @@ -1077,6 +1077,7 @@ enum NvmeIdCns { NVME_ID_CNS_CS_NS = 0x05, NVME_ID_CNS_CS_CTRL = 0x06, NVME_ID_CNS_CS_NS_ACTIVE_LIST = 0x07, + NVME_ID_CNS_CS_IND_NS = 0x08, NVME_ID_CNS_NS_PRESENT_LIST = 0x10, NVME_ID_CNS_NS_PRESENT = 0x11, NVME_ID_CNS_NS_ATTACHED_CTRL_LIST = 0x12, @@ -1087,6 +1088,7 @@ enum NvmeIdCns { NVME_ID_CNS_CS_NS_PRESENT_LIST = 0x1a, NVME_ID_CNS_CS_NS_PRESENT = 0x1b, NVME_ID_CNS_IO_COMMAND_SET = 0x1c, + NVME_ID_CNS_CS_IND_NS_ALLOCATED = 0x1f, }; typedef struct QEMU_PACKED NvmeIdCtrl { @@ -1416,9 +1418,28 @@ typedef struct QEMU_PACKED NvmeIdNsNvm { uint8_t pic; uint8_t rsvd9[3]; uint32_t elbaf[NVME_MAX_NLBAF]; - uint8_t rsvd268[3828]; + uint32_t npdgl; + uint32_t nprg; + uint32_t npra; + uint32_t nors; + uint32_t npdal; + uint8_t rsvd288[3808]; } NvmeIdNsNvm; +typedef struct QEMU_PACKED NvmeIdNsInd { + uint8_t nsfeat; + uint8_t nmic; + uint8_t rescap; + uint8_t fpi; + uint32_t anagrpid; + uint8_t nsattr; + uint8_t rsvd9; + uint16_t nvmsetid; + uint16_t endgrpid; + uint8_t nstat; + uint8_t rsvd15[4081]; +} NvmeIdNsInd; + typedef struct QEMU_PACKED NvmeIdNsDescr { uint8_t nidt; uint8_t nidl; @@ -1439,8 +1460,10 @@ enum NvmeNsIdentifierType { NVME_NIDT_CSI = 0x04, }; -enum NvmeIdNsNmic { - NVME_NMIC_NS_SHARED = 1 << 0, +enum NvmeIdNsIndependent { + NVME_ID_NS_IND_NMIC_SHRNS = 1 << 0, + NVME_ID_NS_IND_NMIC_DISNS = 1 << 1, + NVME_ID_NS_IND_NSTAT_NRDY = 1 << 0, }; enum NvmeCsi { @@ -1518,6 +1541,16 @@ enum NvmeIdNsMc { NVME_ID_NS_MC_SEPARATE = 1 << 1, }; +enum NvmeIdNsNsfeat { + NVME_ID_NS_NSFEAT_THINP = 1 << 0, + NVME_ID_NS_NSFEAT_NSABPNS = 1 << 1, + NVME_ID_NS_NSFEAT_DAE = 1 << 2, + 
NVME_ID_NS_NSFEAT_UIDREUSE = 1 << 3, + NVME_ID_NS_NSFEAT_OPTPERF_ALL = 3 << 4, + NVME_ID_NS_NSFEAT_MAM = 1 << 6, + NVME_ID_NS_NSFEAT_OPTRPERF = 1 << 7, +}; + #define NVME_ID_NS_DPS_TYPE(dps) (dps & NVME_ID_NS_DPS_TYPE_MASK) enum NvmePIFormat { @@ -1873,6 +1906,7 @@ static inline void _nvme_check_size(void) QEMU_BUILD_BUG_ON(sizeof(NvmeLBAF) != 4); QEMU_BUILD_BUG_ON(sizeof(NvmeLBAFE) != 16); QEMU_BUILD_BUG_ON(sizeof(NvmeIdNs) != 4096); + QEMU_BUILD_BUG_ON(sizeof(NvmeIdNsInd) != 4096); QEMU_BUILD_BUG_ON(sizeof(NvmeIdNsNvm) != 4096); QEMU_BUILD_BUG_ON(sizeof(NvmeIdNsZoned) != 4096); QEMU_BUILD_BUG_ON(sizeof(NvmeSglDescriptor) != 16); diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 712cac79ee..1868d4a0f7 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -31,6 +31,7 @@ #define QCRYPTO_HASH_DIGEST_LEN_SHA384 48 #define QCRYPTO_HASH_DIGEST_LEN_SHA512 64 #define QCRYPTO_HASH_DIGEST_LEN_RIPEMD160 20 +#define QCRYPTO_HASH_DIGEST_LEN_SM3 32 /* See also "QCryptoHashAlgo" defined in qapi/crypto.json */ diff --git a/include/disas/capstone.h b/include/disas/capstone.h index a11985151d..c43033f7f6 100644 --- a/include/disas/capstone.h +++ b/include/disas/capstone.h @@ -4,6 +4,7 @@ #ifdef CONFIG_CAPSTONE #define CAPSTONE_AARCH64_COMPAT_HEADER +#define CAPSTONE_SYSTEMZ_COMPAT_HEADER #include <capstone.h> #else diff --git a/include/exec/memory.h b/include/exec/memory.h index e5e865d1a9..9458e2801d 100644 --- a/include/exec/memory.h +++ b/include/exec/memory.h @@ -1104,7 +1104,14 @@ struct AddressSpace { QTAILQ_HEAD(, MemoryListener) listeners; QTAILQ_ENTRY(AddressSpace) address_spaces_link; - /* Maximum DMA bounce buffer size used for indirect memory map requests */ + /* + * Maximum DMA bounce buffer size used for indirect memory map requests. + * This limits the total size of bounce buffer allocations made for + * DMA requests to indirect memory regions within this AddressSpace. DMA + * requests that exceed the limit (e.g. due to overly large requested size + * or concurrent DMA requests having claimed too much buffer space) will be + * rejected and left to the caller to handle. + */ size_t max_bounce_buffer_size; /* Total size of bounce buffers currently allocated, atomically accessed */ size_t bounce_buffer_size; diff --git a/include/fpu/softfloat-helpers.h b/include/fpu/softfloat-helpers.h index 94cbe073ec..453188de70 100644 --- a/include/fpu/softfloat-helpers.h +++ b/include/fpu/softfloat-helpers.h @@ -75,6 +75,12 @@ static inline void set_floatx80_rounding_precision(FloatX80RoundPrec val, status->floatx80_rounding_precision = val; } +static inline void set_float_2nan_prop_rule(Float2NaNPropRule rule, + float_status *status) +{ + status->float_2nan_prop_rule = rule; +} + static inline void set_flush_to_zero(bool val, float_status *status) { status->flush_to_zero = val; @@ -126,6 +132,11 @@ get_floatx80_rounding_precision(float_status *status) return status->floatx80_rounding_precision; } +static inline Float2NaNPropRule get_float_2nan_prop_rule(float_status *status) +{ + return status->float_2nan_prop_rule; +} + static inline bool get_flush_to_zero(float_status *status) { return status->flush_to_zero; diff --git a/include/fpu/softfloat-types.h b/include/fpu/softfloat-types.h index 0884ec4ef7..8f39691dfd 100644 --- a/include/fpu/softfloat-types.h +++ b/include/fpu/softfloat-types.h @@ -171,6 +171,43 @@ typedef enum __attribute__((__packed__)) { } FloatX80RoundPrec; /* + * 2-input NaN propagation rule. 
Individual architectures have + * different rules for which input NaN is propagated to the output + * when there is more than one NaN on the input. + * + * If default_nan_mode is enabled then it is valid not to set a + * NaN propagation rule, because the softfloat code guarantees + * not to try to pick a NaN to propagate in default NaN mode. + * When not in default-NaN mode, it is an error for the target + * not to set the rule in float_status, and we will assert if + * we need to handle an input NaN and no rule was selected. + */ +typedef enum __attribute__((__packed__)) { + /* No propagation rule specified */ + float_2nan_prop_none = 0, + /* Prefer SNaN over QNaN, then operand A over B */ + float_2nan_prop_s_ab, + /* Prefer SNaN over QNaN, then operand B over A */ + float_2nan_prop_s_ba, + /* Prefer A over B regardless of SNaN vs QNaN */ + float_2nan_prop_ab, + /* Prefer B over A regardless of SNaN vs QNaN */ + float_2nan_prop_ba, + /* + * This implements x87 NaN propagation rules: + * SNaN + QNaN => return the QNaN + * two SNaNs => return the one with the larger significand, silenced + * two QNaNs => return the one with the larger significand + * SNaN and a non-NaN => return the SNaN, silenced + * QNaN and a non-NaN => return the QNaN + * + * If we get down to comparing significands and they are the same, + * return the NaN with the positive sign bit (if any). + */ + float_2nan_prop_x87, +} Float2NaNPropRule; + +/* * Floating Point Status. Individual architectures may maintain * several versions of float_status for different functions. The * correct status for the operation is then passed by reference to @@ -181,6 +218,7 @@ typedef struct float_status { uint16_t float_exception_flags; FloatRoundMode float_rounding_mode; FloatX80RoundPrec floatx80_rounding_precision; + Float2NaNPropRule float_2nan_prop_rule; bool tininess_before_rounding; /* should denormalised results go to zero and set the inexact flag? */ bool flush_to_zero; diff --git a/include/hw/acpi/acpi_generic_initiator.h b/include/hw/acpi/acpi_generic_initiator.h deleted file mode 100644 index a304bad73e..0000000000 --- a/include/hw/acpi/acpi_generic_initiator.h +++ /dev/null @@ -1,47 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved - */ - -#ifndef ACPI_GENERIC_INITIATOR_H -#define ACPI_GENERIC_INITIATOR_H - -#include "qom/object_interfaces.h" - -#define TYPE_ACPI_GENERIC_INITIATOR "acpi-generic-initiator" - -typedef struct AcpiGenericInitiator { - /* private */ - Object parent; - - /* public */ - char *pci_dev; - uint16_t node; -} AcpiGenericInitiator; - -/* - * ACPI 6.3: - * Table 5-81 Flags – Generic Initiator Affinity Structure - */ -typedef enum { - /* - * If clear, the OSPM ignores the contents of the Generic - * Initiator/Port Affinity Structure. This allows system firmware - * to populate the SRAT with a static number of structures, but only - * enable them as necessary. 
- */ - GEN_AFFINITY_ENABLED = (1 << 0), -} GenericAffinityFlags; - -/* - * ACPI 6.3: - * Table 5-80 Device Handle - PCI - */ -typedef struct PCIDeviceHandle { - uint16_t segment; - uint16_t bdf; -} PCIDeviceHandle; - -void build_srat_generic_pci_initiator(GArray *table_data); - -#endif diff --git a/include/hw/acpi/aml-build.h b/include/hw/acpi/aml-build.h index a3784155cb..4fd5da49e7 100644 --- a/include/hw/acpi/aml-build.h +++ b/include/hw/acpi/aml-build.h @@ -486,6 +486,13 @@ Aml *build_crs(PCIHostState *host, CrsRangeSet *range_set, uint32_t io_offset, void build_srat_memory(GArray *table_data, uint64_t base, uint64_t len, int node, MemoryAffinityFlags flags); +void build_srat_pci_generic_initiator(GArray *table_data, uint32_t node, + uint16_t segment, uint8_t bus, + uint8_t devfn); + +void build_srat_acpi_generic_port(GArray *table_data, uint32_t node, + const char *hid, uint32_t uid); + void build_slit(GArray *table_data, BIOSLinker *linker, MachineState *ms, const char *oem_id, const char *oem_table_id); diff --git a/include/hw/acpi/pci.h b/include/hw/acpi/pci.h index 467a99461c..6359d574fd 100644 --- a/include/hw/acpi/pci.h +++ b/include/hw/acpi/pci.h @@ -40,4 +40,7 @@ Aml *aml_pci_device_dsm(void); void build_append_pci_bus_devices(Aml *parent_scope, PCIBus *bus); void build_pci_bridge_aml(AcpiDevAmlIf *adev, Aml *scope); + +void build_srat_generic_affinity_structures(GArray *table_data); + #endif diff --git a/include/hw/boards.h b/include/hw/boards.h index 91f2edd392..36fbb9b59d 100644 --- a/include/hw/boards.h +++ b/include/hw/boards.h @@ -44,8 +44,16 @@ void machine_set_cpu_numa_node(MachineState *machine, Error **errp); void machine_parse_smp_config(MachineState *ms, const SMPConfiguration *config, Error **errp); +bool machine_parse_smp_cache(MachineState *ms, + const SmpCachePropertiesList *caches, + Error **errp); unsigned int machine_topo_get_cores_per_socket(const MachineState *ms); unsigned int machine_topo_get_threads_per_socket(const MachineState *ms); +CpuTopologyLevel machine_get_cache_topo_level(const MachineState *ms, + CacheLevelAndType cache); +void machine_set_cache_topo_level(MachineState *ms, CacheLevelAndType cache, + CpuTopologyLevel level); +bool machine_check_smp_cache(const MachineState *ms, Error **errp); void machine_memory_devices_init(MachineState *ms, hwaddr base, uint64_t size); /** @@ -146,6 +154,8 @@ typedef struct { * @books_supported - whether books are supported by the machine * @drawers_supported - whether drawers are supported by the machine * @modules_supported - whether modules are supported by the machine + * @cache_supported - whether cache (l1d, l1i, l2 and l3) configuration are + * supported by the machine */ typedef struct { bool prefer_sockets; @@ -155,6 +165,7 @@ typedef struct { bool books_supported; bool drawers_supported; bool modules_supported; + bool cache_supported[CACHE_LEVEL_AND_TYPE__MAX]; } SMPCompatProps; /** @@ -371,6 +382,10 @@ typedef struct CpuTopology { unsigned int max_cpus; } CpuTopology; +typedef struct SmpCache { + SmpCacheProperties props[CACHE_LEVEL_AND_TYPE__MAX]; +} SmpCache; + /** * MachineState: */ @@ -421,6 +436,7 @@ struct MachineState { AccelState *accelerator; CPUArchIdList *possible_cpus; CpuTopology smp; + SmpCache smp_cache; struct NVDIMMState *nvdimms_state; struct NumaState *numa_state; }; @@ -824,13 +840,4 @@ extern const size_t hw_compat_2_5_len; extern GlobalProperty hw_compat_2_4[]; extern const size_t hw_compat_2_4_len; -extern GlobalProperty hw_compat_2_3[]; -extern const size_t 
hw_compat_2_3_len; - -extern GlobalProperty hw_compat_2_2[]; -extern const size_t hw_compat_2_2_len; - -extern GlobalProperty hw_compat_2_1[]; -extern const size_t hw_compat_2_1_len; - #endif diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h index c3ca0babcb..db8a6fbc6e 100644 --- a/include/hw/core/cpu.h +++ b/include/hw/core/cpu.h @@ -158,6 +158,8 @@ struct CPUClass { void (*dump_state)(CPUState *cpu, FILE *, int flags); void (*query_cpu_fast)(CPUState *cpu, CpuInfoFast *value); int64_t (*get_arch_id)(CPUState *cpu); + bool (*cpu_persistent_status)(CPUState *cpu); + bool (*cpu_enabled_status)(CPUState *cpu); void (*set_pc)(CPUState *cpu, vaddr value); vaddr (*get_pc)(CPUState *cpu); int (*gdb_read_register)(CPUState *cpu, GByteArray *buf, int reg); diff --git a/include/hw/cxl/cxl_device.h b/include/hw/cxl/cxl_device.h index fdd0f4e62b..561b375dc8 100644 --- a/include/hw/cxl/cxl_device.h +++ b/include/hw/cxl/cxl_device.h @@ -463,18 +463,6 @@ typedef struct CXLMemPatrolScrubWriteAttrs { #define CXL_MEMDEV_PS_ENABLE_DEFAULT 0 /* CXL memory device DDR5 ECS control attributes */ -typedef struct CXLMemECSReadAttrs { - uint8_t ecs_log_cap; - uint8_t ecs_cap; - uint16_t ecs_config; - uint8_t ecs_flags; -} QEMU_PACKED CXLMemECSReadAttrs; - -typedef struct CXLMemECSWriteAttrs { - uint8_t ecs_log_cap; - uint16_t ecs_config; -} QEMU_PACKED CXLMemECSWriteAttrs; - #define CXL_ECS_GET_FEATURE_VERSION 0x01 #define CXL_ECS_SET_FEATURE_VERSION 0x01 #define CXL_ECS_LOG_ENTRY_TYPE_DEFAULT 0x01 @@ -483,6 +471,26 @@ typedef struct CXLMemECSWriteAttrs { #define CXL_ECS_MODE_DEFAULT 0 #define CXL_ECS_NUM_MEDIA_FRUS 3 /* Default */ +typedef struct CXLMemECSFRUReadAttrs { + uint8_t ecs_cap; + uint16_t ecs_config; + uint8_t ecs_flags; +} QEMU_PACKED CXLMemECSFRUReadAttrs; + +typedef struct CXLMemECSReadAttrs { + uint8_t ecs_log_cap; + CXLMemECSFRUReadAttrs fru_attrs[CXL_ECS_NUM_MEDIA_FRUS]; +} QEMU_PACKED CXLMemECSReadAttrs; + +typedef struct CXLMemECSFRUWriteAttrs { + uint16_t ecs_config; +} QEMU_PACKED CXLMemECSFRUWriteAttrs; + +typedef struct CXLMemECSWriteAttrs { + uint8_t ecs_log_cap; + CXLMemECSFRUWriteAttrs fru_attrs[CXL_ECS_NUM_MEDIA_FRUS]; +} QEMU_PACKED CXLMemECSWriteAttrs; + #define DCD_MAX_NUM_REGION 8 typedef struct CXLDCExtentRaw { @@ -549,6 +557,10 @@ struct CXLType3Dev { CXLCCI vdm_fm_owned_ld_mctp_cci; CXLCCI ld0_cci; + /* PCIe link characteristics */ + PCIExpLinkSpeed speed; + PCIExpLinkWidth width; + /* DOE */ DOECap doe_cdat; @@ -571,8 +583,8 @@ struct CXLType3Dev { CXLMemPatrolScrubReadAttrs patrol_scrub_attrs; CXLMemPatrolScrubWriteAttrs patrol_scrub_wr_attrs; /* ECS control attributes */ - CXLMemECSReadAttrs ecs_attrs[CXL_ECS_NUM_MEDIA_FRUS]; - CXLMemECSWriteAttrs ecs_wr_attrs[CXL_ECS_NUM_MEDIA_FRUS]; + CXLMemECSReadAttrs ecs_attrs; + CXLMemECSWriteAttrs ecs_wr_attrs; struct dynamic_capacity { HostMemoryBackend *host_dc; diff --git a/include/hw/i386/intel_iommu.h b/include/hw/i386/intel_iommu.h index 1eb05c29fc..d372cd396b 100644 --- a/include/hw/i386/intel_iommu.h +++ b/include/hw/i386/intel_iommu.h @@ -306,6 +306,9 @@ struct IntelIOMMUState { bool dma_translation; /* Whether DMA translation supported */ bool pasid; /* Whether to support PASID */ + /* Transient Mapping, Reserved(0) since VTD spec revision 3.2 */ + bool stale_tm; + /* * Protects IOMMU states in general. Currently it protects the * per-IOMMU IOTLB cache, and context entry cache in VTDAddressSpace. 
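Not part of the patch: a minimal sketch of how a board might consume the new smp-cache plumbing declared in the include/hw/boards.h hunk above (SmpCache, machine_check_smp_cache(), machine_get_cache_topo_level() and the SMPCompatProps cache_supported[] flags). The machine hooks below are invented for illustration, and the exact CACHE_LEVEL_AND_TYPE_* / CPU_TOPOLOGY_LEVEL_* constant spellings are assumed from the QAPI enums referenced here rather than copied from this series.

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/boards.h"

static void example_machine_class_init(ObjectClass *oc, void *data)
{
    MachineClass *mc = MACHINE_CLASS(oc);

    /* Advertise that this (hypothetical) board honours an L2 cache level. */
    mc->smp_props.cache_supported[CACHE_LEVEL_AND_TYPE_L2] = true;
}

static void example_machine_init(MachineState *ms)
{
    Error *local_err = NULL;
    CpuTopologyLevel l2_level;

    /* Reject cache settings the board did not advertise support for. */
    if (!machine_check_smp_cache(ms, &local_err)) {
        error_report_err(local_err);
        exit(1);
    }

    /* Ask where the user placed the shared L2: core, module, die, ... */
    l2_level = machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L2);
    if (l2_level == CPU_TOPOLOGY_LEVEL_DEFAULT) {
        /* Fall back to whatever default placement the board prefers. */
    }
}

The user-facing syntax that produces the SmpCachePropertiesList consumed by machine_parse_smp_cache() comes from the QAPI and option-parsing changes elsewhere in the series and is not reproduced here.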
diff --git a/include/hw/i386/topology.h b/include/hw/i386/topology.h index dff49fce11..b2c8bf2de1 100644 --- a/include/hw/i386/topology.h +++ b/include/hw/i386/topology.h @@ -39,7 +39,7 @@ * CPUID Fn8000_0008_ECX[ApicIdCoreIdSize[3:0]] is set to apicid_core_width(). */ - +#include "qapi/qapi-types-machine-common.h" #include "qemu/bitops.h" /* @@ -62,21 +62,7 @@ typedef struct X86CPUTopoInfo { unsigned threads_per_core; } X86CPUTopoInfo; -/* - * CPUTopoLevel is the general i386 topology hierarchical representation, - * ordered by increasing hierarchical relationship. - * Its enumeration value is not bound to the type value of Intel (CPUID[0x1F]) - * or AMD (CPUID[0x80000026]). - */ -enum CPUTopoLevel { - CPU_TOPO_LEVEL_INVALID, - CPU_TOPO_LEVEL_SMT, - CPU_TOPO_LEVEL_CORE, - CPU_TOPO_LEVEL_MODULE, - CPU_TOPO_LEVEL_DIE, - CPU_TOPO_LEVEL_PACKAGE, - CPU_TOPO_LEVEL_MAX, -}; +#define CPU_TOPOLOGY_LEVEL_INVALID CPU_TOPOLOGY_LEVEL__MAX /* Return the bit width needed for 'count' IDs */ static unsigned apicid_bitwidth_for_count(unsigned count) @@ -212,8 +198,8 @@ static inline apic_id_t x86_apicid_from_cpu_idx(X86CPUTopoInfo *topo_info, */ static inline bool x86_has_extended_topo(unsigned long *topo_bitmap) { - return test_bit(CPU_TOPO_LEVEL_MODULE, topo_bitmap) || - test_bit(CPU_TOPO_LEVEL_DIE, topo_bitmap); + return test_bit(CPU_TOPOLOGY_LEVEL_MODULE, topo_bitmap) || + test_bit(CPU_TOPOLOGY_LEVEL_DIE, topo_bitmap); } #endif /* HW_I386_TOPOLOGY_H */ diff --git a/include/hw/pci-bridge/cxl_upstream_port.h b/include/hw/pci-bridge/cxl_upstream_port.h index 12635139f6..f208397ffe 100644 --- a/include/hw/pci-bridge/cxl_upstream_port.h +++ b/include/hw/pci-bridge/cxl_upstream_port.h @@ -12,6 +12,10 @@ typedef struct CXLUpstreamPort { /*< public >*/ CXLComponentState cxl_cstate; CXLCCI swcci; + + PCIExpLinkSpeed speed; + PCIExpLinkWidth width; + DOECap doe_cdat; uint64_t sn; } CXLUpstreamPort; diff --git a/include/hw/pci-host/spapr.h b/include/hw/pci-host/spapr.h index 3778aac27b..0db87f1281 100644 --- a/include/hw/pci-host/spapr.h +++ b/include/hw/pci-host/spapr.h @@ -53,7 +53,6 @@ struct SpaprPhbState { uint32_t index; uint64_t buid; char *dtbusname; - bool dr_enabled; MemoryRegion memspace, iospace; hwaddr mem_win_addr, mem_win_size, mem64_win_addr, mem64_win_size; @@ -84,10 +83,6 @@ struct SpaprPhbState { bool pcie_ecs; /* Allow access to PCIe extended config space? 
*/ /* Fields for migration compatibility hacks */ - bool pre_2_8_migration; - uint32_t mig_liobn; - hwaddr mig_mem_win_addr, mig_mem_win_size; - hwaddr mig_io_win_addr, mig_io_win_size; bool pre_5_1_assoc; }; diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h index 35d4fe0bbf..135695c551 100644 --- a/include/hw/pci/pci.h +++ b/include/hw/pci/pci.h @@ -16,7 +16,7 @@ extern bool pci_available; #define PCI_BUS_NUM(x) (((x) >> 8) & 0xff) #define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f) #define PCI_FUNC(devfn) ((devfn) & 0x07) -#define PCI_BUILD_BDF(bus, devfn) ((bus << 8) | (devfn)) +#define PCI_BUILD_BDF(bus, devfn) (((bus) << 8) | (devfn)) #define PCI_BDF_TO_DEVFN(x) ((x) & 0xff) #define PCI_BUS_MAX 256 #define PCI_DEVFN_MAX 256 @@ -214,6 +214,8 @@ enum { QEMU_PCIE_ERR_UNC_MASK = (1 << QEMU_PCIE_ERR_UNC_MASK_BITNR), #define QEMU_PCIE_ARI_NEXTFN_1_BITNR 12 QEMU_PCIE_ARI_NEXTFN_1 = (1 << QEMU_PCIE_ARI_NEXTFN_1_BITNR), +#define QEMU_PCIE_EXT_TAG_BITNR 13 + QEMU_PCIE_EXT_TAG = (1 << QEMU_PCIE_EXT_TAG_BITNR), }; typedef struct PCIINTxRoute { diff --git a/include/hw/pci/pci_bridge.h b/include/hw/pci/pci_bridge.h index 5cd452115a..b0f5204d80 100644 --- a/include/hw/pci/pci_bridge.h +++ b/include/hw/pci/pci_bridge.h @@ -72,6 +72,8 @@ struct PCIBridge { */ MemoryRegion address_space_mem; MemoryRegion address_space_io; + AddressSpace as_mem; + AddressSpace as_io; PCIBridgeWindows windows; @@ -102,6 +104,7 @@ typedef struct PXBPCIEDev { PXBDev parent_obj; } PXBPCIEDev; +#define TYPE_PXB_CXL_BUS "pxb-cxl-bus" #define TYPE_PXB_DEV "pxb" OBJECT_DECLARE_SIMPLE_TYPE(PXBDev, PXB_DEV) diff --git a/include/hw/pci/pci_device.h b/include/hw/pci/pci_device.h index 91df40f989..8eaf0d58bb 100644 --- a/include/hw/pci/pci_device.h +++ b/include/hw/pci/pci_device.h @@ -168,7 +168,11 @@ struct PCIDevice { char *failover_pair_id; uint32_t acpi_index; - /* Maximum DMA bounce buffer size used for indirect memory map requests */ + /* + * Indirect DMA region bounce buffer size as configured for the device. This + * is a configuration parameter that is reflected into bus_master_as when + * realizing the device. 
+ */ uint32_t max_bounce_buffer_size; }; diff --git a/include/hw/pci/pcie.h b/include/hw/pci/pcie.h index 5eddb90976..b8d59732bc 100644 --- a/include/hw/pci/pcie.h +++ b/include/hw/pci/pcie.h @@ -141,6 +141,8 @@ void pcie_acs_reset(PCIDevice *dev); void pcie_ari_init(PCIDevice *dev, uint16_t offset); void pcie_dev_ser_num_init(PCIDevice *dev, uint16_t offset, uint64_t ser_num); void pcie_ats_init(PCIDevice *dev, uint16_t offset, bool aligned); +void pcie_cap_fill_link_ep_usp(PCIDevice *dev, PCIExpLinkWidth width, + PCIExpLinkSpeed speed); void pcie_cap_slot_pre_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp); diff --git a/include/hw/ppc/ppc.h b/include/hw/ppc/ppc.h index d5d119ea7f..8a14d623f8 100644 --- a/include/hw/ppc/ppc.h +++ b/include/hw/ppc/ppc.h @@ -116,6 +116,13 @@ enum { #define PPC_SERIAL_MM_BAUDBASE 399193 +#ifndef CONFIG_USER_ONLY +void booke206_set_tlb(ppcmas_tlb_t *tlb, target_ulong va, hwaddr pa, + hwaddr len); +void booke_set_tlb(ppcemb_tlb_t *tlb, target_ulong va, hwaddr pa, + target_ulong size); +#endif + /* ppc_booke.c */ void ppc_booke_timers_init(PowerPCCPU *cpu, uint32_t freq, uint32_t flags); #endif diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h index f6de3e9972..af4aa1cb0f 100644 --- a/include/hw/ppc/spapr.h +++ b/include/hw/ppc/spapr.h @@ -141,11 +141,8 @@ struct SpaprMachineClass { MachineClass parent_class; /*< public >*/ - bool dr_lmb_enabled; /* enable dynamic-reconfig/hotplug of LMBs */ bool dr_phb_enabled; /* enable dynamic-reconfig/hotplug of PHBs */ bool update_dt_enabled; /* enable KVMPPC_H_UPDATE_DT */ - bool use_ohci_by_default; /* use USB-OHCI instead of XHCI */ - bool pre_2_10_has_unused_icps; bool legacy_irq_allocation; uint32_t nr_xirqs; bool broken_host_serial_model; /* present real host info to the guest */ diff --git a/include/hw/ppc/spapr_cpu_core.h b/include/hw/ppc/spapr_cpu_core.h index 69a52e39b8..68f7083483 100644 --- a/include/hw/ppc/spapr_cpu_core.h +++ b/include/hw/ppc/spapr_cpu_core.h @@ -28,7 +28,6 @@ struct SpaprCpuCore { /*< public >*/ PowerPCCPU **threads; int node_id; - bool pre_3_0_migration; /* older machine don't know about SpaprCpuState */ }; struct SpaprCpuCoreClass { diff --git a/include/hw/ppc/spapr_nested.h b/include/hw/ppc/spapr_nested.h index 93ef14adcc..e420220484 100644 --- a/include/hw/ppc/spapr_nested.h +++ b/include/hw/ppc/spapr_nested.h @@ -99,7 +99,8 @@ #define GSB_VCPU_SPR_HASHKEYR 0x1050 #define GSB_VCPU_SPR_HASHPKEYR 0x1051 #define GSB_VCPU_SPR_CTRL 0x1052 - /* RESERVED 0x1053 - 0x1FFF */ +#define GSB_VCPU_SPR_DPDES 0x1053 + /* RESERVED 0x1054 - 0x1FFF */ #define GSB_VCPU_SPR_CR 0x2000 #define GSB_VCPU_SPR_PIDR 0x2001 #define GSB_VCPU_SPR_DSISR 0x2002 @@ -210,11 +211,14 @@ typedef struct SpaprMachineStateNestedGuest { #define H_GUEST_CAPABILITIES_COPY_MEM 0x8000000000000000 #define H_GUEST_CAPABILITIES_P9_MODE 0x4000000000000000 #define H_GUEST_CAPABILITIES_P10_MODE 0x2000000000000000 -#define H_GUEST_CAP_VALID_MASK (H_GUEST_CAPABILITIES_P10_MODE | \ +#define H_GUEST_CAPABILITIES_P11_MODE 0x1000000000000000 +#define H_GUEST_CAP_VALID_MASK (H_GUEST_CAPABILITIES_P11_MODE | \ + H_GUEST_CAPABILITIES_P10_MODE | \ H_GUEST_CAPABILITIES_P9_MODE) #define H_GUEST_CAP_COPY_MEM_BMAP 0 #define H_GUEST_CAP_P9_MODE_BMAP 1 #define H_GUEST_CAP_P10_MODE_BMAP 2 +#define H_GUEST_CAP_P11_MODE_BMAP 3 #define PAPR_NESTED_GUEST_MAX 4096 #define H_GUEST_DELETE_ALL_FLAG 0x8000000000000000ULL #define PAPR_NESTED_GUEST_VCPU_MAX 2048 diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h index 
28c181faa2..ebee982528 100644 --- a/include/hw/ppc/xive.h +++ b/include/hw/ppc/xive.h @@ -218,7 +218,7 @@ static inline bool xive_source_esb_has_2page(XiveSource *xsrc) xsrc->esb_shift == XIVE_ESB_4K_2PAGE; } -static inline size_t xive_source_esb_len(XiveSource *xsrc) +static inline uint64_t xive_source_esb_len(XiveSource *xsrc) { return (1ull << xsrc->esb_shift) * xsrc->nr_irqs; } @@ -533,7 +533,7 @@ Object *xive_tctx_create(Object *cpu, XivePresenter *xptr, Error **errp); void xive_tctx_reset(XiveTCTX *tctx); void xive_tctx_destroy(XiveTCTX *tctx); void xive_tctx_ipb_update(XiveTCTX *tctx, uint8_t ring, uint8_t ipb); -void xive_tctx_reset_os_signal(XiveTCTX *tctx); +void xive_tctx_reset_signal(XiveTCTX *tctx, uint8_t ring); /* * KVM XIVE device helpers diff --git a/include/hw/ppc/xive2.h b/include/hw/ppc/xive2.h index ab68f8d157..5bccf41159 100644 --- a/include/hw/ppc/xive2.h +++ b/include/hw/ppc/xive2.h @@ -53,6 +53,12 @@ typedef struct Xive2RouterClass { Xive2Nvp *nvp); int (*write_nvp)(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx, Xive2Nvp *nvp, uint8_t word_number); + int (*get_nvgc)(Xive2Router *xrtr, bool crowd, + uint8_t nvgc_blk, uint32_t nvgc_idx, + Xive2Nvgc *nvgc); + int (*write_nvgc)(Xive2Router *xrtr, bool crowd, + uint8_t nvgc_blk, uint32_t nvgc_idx, + Xive2Nvgc *nvgc); uint8_t (*get_block_id)(Xive2Router *xrtr); uint32_t (*get_config)(Xive2Router *xrtr); } Xive2RouterClass; @@ -67,6 +73,12 @@ int xive2_router_get_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx, Xive2Nvp *nvp); int xive2_router_write_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx, Xive2Nvp *nvp, uint8_t word_number); +int xive2_router_get_nvgc(Xive2Router *xrtr, bool crowd, + uint8_t nvgc_blk, uint32_t nvgc_idx, + Xive2Nvgc *nvgc); +int xive2_router_write_nvgc(Xive2Router *xrtr, bool crowd, + uint8_t nvgc_blk, uint32_t nvgc_idx, + Xive2Nvgc *nvgc); uint32_t xive2_router_get_config(Xive2Router *xrtr); void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked); @@ -107,5 +119,11 @@ void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, uint64_t value, unsigned size); uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, unsigned size); +void xive2_tm_pull_os_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size); +void xive2_tm_set_hv_target(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size); +void xive2_tm_pull_phys_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size); #endif /* PPC_XIVE2_H */ diff --git a/include/hw/ppc/xive2_regs.h b/include/hw/ppc/xive2_regs.h index 4349d009d0..1d00c8df64 100644 --- a/include/hw/ppc/xive2_regs.h +++ b/include/hw/ppc/xive2_regs.h @@ -19,16 +19,18 @@ * mode (P10), the CAM line is slightly different as the VP space was * increased. 
*/ -#define TM2_QW0W2_VU PPC_BIT32(0) +#define TM2_W2_VALID PPC_BIT32(0) +#define TM2_W2_HW PPC_BIT32(1) +#define TM2_QW0W2_VU TM2_W2_VALID #define TM2_QW0W2_LOGIC_SERV PPC_BITMASK32(4, 31) -#define TM2_QW1W2_VO PPC_BIT32(0) -#define TM2_QW1W2_HO PPC_BIT32(1) +#define TM2_QW1W2_VO TM2_W2_VALID +#define TM2_QW1W2_HO TM2_W2_HW #define TM2_QW1W2_OS_CAM PPC_BITMASK32(4, 31) -#define TM2_QW2W2_VP PPC_BIT32(0) -#define TM2_QW2W2_HP PPC_BIT32(1) +#define TM2_QW2W2_VP TM2_W2_VALID +#define TM2_QW2W2_HP TM2_W2_HW #define TM2_QW2W2_POOL_CAM PPC_BITMASK32(4, 31) -#define TM2_QW3W2_VT PPC_BIT32(0) -#define TM2_QW3W2_HT PPC_BIT32(1) +#define TM2_QW3W2_VT TM2_W2_VALID +#define TM2_QW3W2_HT TM2_W2_HW #define TM2_QW3W2_LP PPC_BIT32(6) #define TM2_QW3W2_LE PPC_BIT32(7) @@ -151,6 +153,7 @@ typedef struct Xive2Nvp { #define NVP2_W0_VALID PPC_BIT32(0) #define NVP2_W0_HW PPC_BIT32(7) #define NVP2_W0_ESC_END PPC_BIT32(25) /* 'N' bit 0:ESB 1:END */ +#define NVP2_W0_PGOFIRST PPC_BITMASK32(26, 31) uint32_t w1; #define NVP2_W1_CO PPC_BIT32(13) #define NVP2_W1_CO_PRIV PPC_BITMASK32(14, 15) @@ -171,7 +174,9 @@ typedef struct Xive2Nvp { #define NVP2_W5_VP_END_BLOCK PPC_BITMASK32(4, 7) #define NVP2_W5_VP_END_INDEX PPC_BITMASK32(8, 31) uint32_t w6; +#define NVP2_W6_REPORTING_LINE PPC_BITMASK32(4, 31) uint32_t w7; +#define NVP2_W7_REPORTING_LINE PPC_BITMASK32(0, 23) } Xive2Nvp; #define xive2_nvp_is_valid(nvp) (be32_to_cpu((nvp)->w0) & NVP2_W0_VALID) @@ -209,6 +214,7 @@ void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx, GString *buf); typedef struct Xive2Nvgc { uint32_t w0; #define NVGC2_W0_VALID PPC_BIT32(0) +#define NVGC2_W0_PGONEXT PPC_BITMASK32(26, 31) uint32_t w1; uint32_t w2; uint32_t w3; @@ -218,4 +224,9 @@ typedef struct Xive2Nvgc { uint32_t w7; } Xive2Nvgc; +#define xive2_nvgc_is_valid(nvgc) (be32_to_cpu((nvgc)->w0) & NVGC2_W0_VALID) + +void xive2_nvgc_pic_print_info(Xive2Nvgc *nvgc, uint32_t nvgc_idx, + GString *buf); + #endif /* PPC_XIVE2_REGS_H */ diff --git a/include/hw/ppc/xive_regs.h b/include/hw/ppc/xive_regs.h index b9db7abc2e..326327fc79 100644 --- a/include/hw/ppc/xive_regs.h +++ b/include/hw/ppc/xive_regs.h @@ -77,8 +77,11 @@ #define TM_LSMFB 0x3 /* - + + + */ #define TM_ACK_CNT 0x4 /* - + - - */ #define TM_INC 0x5 /* - + - + */ +#define TM_LGS 0x5 /* + + + + */ /* Rename P10 */ #define TM_AGE 0x6 /* - + - + */ +#define TM_T 0x6 /* - + - + */ /* Rename P10 */ #define TM_PIPR 0x7 /* - + - + */ +#define TM_OGEN 0xF /* - + - - */ /* P10 only */ #define TM_WORD0 0x0 #define TM_WORD1 0x4 @@ -98,6 +101,7 @@ #define TM_QW3W2_LP PPC_BIT32(6) #define TM_QW3W2_LE PPC_BIT32(7) #define TM_QW3W2_T PPC_BIT32(31) +#define TM_QW3B8_VT PPC_BIT8(0) /* * In addition to normal loads to "peek" and writes (only when invalid) @@ -114,23 +118,32 @@ * Then we have all these "special" CI ops at these offset that trigger * all sorts of side effects: */ -#define TM_SPC_ACK_EBB 0x800 /* Load8 ack EBB to reg*/ -#define TM_SPC_ACK_OS_REG 0x810 /* Load16 ack OS irq to reg */ +#define TM_SPC_ACK_EBB 0x800 /* Load8 ack EBB to reg */ +#define TM_SPC_ACK_OS_REG 0x810 /* Load16 ack OS irq to reg */ #define TM_SPC_PUSH_USR_CTX 0x808 /* Store32 Push/Validate user context */ -#define TM_SPC_PULL_USR_CTX 0x808 /* Load32 Pull/Invalidate user - * context */ -#define TM_SPC_SET_OS_PENDING 0x812 /* Store8 Set OS irq pending bit */ -#define TM_SPC_PULL_OS_CTX 0x818 /* Load32/Load64 Pull/Invalidate OS - * context to reg */ -#define TM_SPC_PULL_POOL_CTX 0x828 /* Load32/Load64 Pull/Invalidate Pool - * context to reg*/ -#define 
TM_SPC_ACK_HV_REG 0x830 /* Load16 ack HV irq to reg */ -#define TM_SPC_PULL_USR_CTX_OL 0xc08 /* Store8 Pull/Inval usr ctx to odd - * line */ -#define TM_SPC_ACK_OS_EL 0xc10 /* Store8 ack OS irq to even line */ -#define TM_SPC_ACK_HV_POOL_EL 0xc20 /* Store8 ack HV evt pool to even - * line */ -#define TM_SPC_ACK_HV_EL 0xc30 /* Store8 ack HV irq to even line */ +#define TM_SPC_PULL_USR_CTX 0x808 /* Load32 Pull/Invalidate user */ + /* context */ +#define TM_SPC_SET_OS_PENDING 0x812 /* Store8 Set OS irq pending bit */ +#define TM_SPC_PULL_OS_CTX_G2 0x810 /* Load32/Load64 Pull/Invalidate OS */ + /* context to reg */ +#define TM_SPC_PULL_OS_CTX 0x818 /* Load32/Load64 Pull/Invalidate OS */ + /* context to reg */ +#define TM_SPC_PULL_POOL_CTX_G2 0x820 /* Load32/Load64 Pull/Invalidate Pool */ + /* context to reg */ +#define TM_SPC_PULL_POOL_CTX 0x828 /* Load32/Load64 Pull/Invalidate Pool */ + /* context to reg */ +#define TM_SPC_ACK_HV_REG 0x830 /* Load16 ack HV irq to reg */ +#define TM_SPC_PULL_PHYS_CTX_G2 0x830 /* Load32 Pull phys ctx to reg */ +#define TM_SPC_PULL_PHYS_CTX 0x838 /* Load8 Pull phys ctx to reg */ +#define TM_SPC_PULL_USR_CTX_OL 0xc08 /* Store8 Pull/Inval usr ctx to odd */ + /* line */ +#define TM_SPC_ACK_OS_EL 0xc10 /* Store8 ack OS irq to even line */ +#define TM_SPC_PULL_OS_CTX_OL 0xc18 /* Pull/Invalidate OS context to */ + /* odd Thread reporting line */ +#define TM_SPC_ACK_HV_POOL_EL 0xc20 /* Store8 ack HV evt pool to even */ + /* line */ +#define TM_SPC_ACK_HV_EL 0xc30 /* Store8 ack HV irq to even line */ +#define TM_SPC_PULL_PHYS_CTX_OL 0xc38 /* Pull phys ctx to odd cache line */ /* XXX more... */ /* NSR fields for the various QW ack types */ diff --git a/include/hw/qdev-core.h b/include/hw/qdev-core.h index f9fa291cc6..5be9844412 100644 --- a/include/hw/qdev-core.h +++ b/include/hw/qdev-core.h @@ -95,6 +95,7 @@ typedef void (*DeviceUnrealize)(DeviceState *dev); typedef void (*DeviceReset)(DeviceState *dev); typedef void (*BusRealize)(BusState *bus, Error **errp); typedef void (*BusUnrealize)(BusState *bus); +typedef int (*DeviceSyncConfig)(DeviceState *dev, Error **errp); /** * struct DeviceClass - The base class for all devices. @@ -103,6 +104,9 @@ typedef void (*BusUnrealize)(BusState *bus); * property is changed to %true. * @unrealize: Callback function invoked when the #DeviceState:realized * property is changed to %false. + * @sync_config: Callback function invoked when QMP command device-sync-config + * is called. Should synchronize device configuration from host to guest part + * and notify the guest about the change. 
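 * (Illustrative sketch, not part of this patch; foo_sync_config() and
 * foo_refresh_guest_view() are hypothetical names.)  A device class wires
 * the hook up in its class_init and returns 0 on success, e.g.:
 *
 *     static int foo_sync_config(DeviceState *dev, Error **errp)
 *     {
 *         return foo_refresh_guest_view(dev, errp);
 *     }
 *
 *     dc->sync_config = foo_sync_config;
 *
 * The virtio-pci hookup added later in this series follows this shape and
 * simply forwards to qdev_sync_config() on the wrapped VirtIODevice.
 *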
* @hotpluggable: indicates if #DeviceClass is hotpluggable, available * as readonly "hotpluggable" property of #DeviceState instance * @@ -162,6 +166,7 @@ struct DeviceClass { DeviceReset legacy_reset; DeviceRealize realize; DeviceUnrealize unrealize; + DeviceSyncConfig sync_config; /** * @vmsd: device state serialisation description for @@ -547,6 +552,7 @@ bool qdev_hotplug_allowed(DeviceState *dev, Error **errp); */ HotplugHandler *qdev_get_hotplug_handler(DeviceState *dev); void qdev_unplug(DeviceState *dev, Error **errp); +int qdev_sync_config(DeviceState *dev, Error **errp); void qdev_simple_device_unplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp); void qdev_machine_creation_done(void); diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h index fed499b199..e0ce6ec3a9 100644 --- a/include/hw/vfio/vfio-common.h +++ b/include/hw/vfio/vfio-common.h @@ -73,6 +73,9 @@ typedef struct VFIOMigration { uint64_t precopy_init_size; uint64_t precopy_dirty_size; bool initial_data_sent; + + bool event_save_iterate_started; + bool event_precopy_empty_hit; } VFIOMigration; struct VFIOGroup; diff --git a/include/hw/virtio/vhost-user.h b/include/hw/virtio/vhost-user.h index 324cd8663a..9a3f238b43 100644 --- a/include/hw/virtio/vhost-user.h +++ b/include/hw/virtio/vhost-user.h @@ -54,6 +54,7 @@ typedef struct VhostUserHostNotifier { void *addr; void *unmap_addr; int idx; + bool destroy; } VhostUserHostNotifier; /** diff --git a/include/hw/virtio/virtio-pci.h b/include/hw/virtio/virtio-pci.h index 9e67ba38c7..971c5fabd4 100644 --- a/include/hw/virtio/virtio-pci.h +++ b/include/hw/virtio/virtio-pci.h @@ -147,6 +147,9 @@ struct VirtIOPCIProxy { }; MemoryRegion modern_bar; MemoryRegion io_bar; + /* address space for VirtIOPCIRegions */ + AddressSpace modern_cfg_mem_as; + AddressSpace modern_cfg_io_as; uint32_t legacy_io_bar_idx; uint32_t msix_bar_idx; uint32_t modern_io_bar_idx; diff --git a/linux-user/arm/nwfpe/fpa11.c b/linux-user/arm/nwfpe/fpa11.c index 9a93610d24..8356beb52c 100644 --- a/linux-user/arm/nwfpe/fpa11.c +++ b/linux-user/arm/nwfpe/fpa11.c @@ -51,6 +51,24 @@ void resetFPA11(void) #ifdef MAINTAIN_FPCR fpa11->fpcr = MASK_RESET; #endif + + /* + * Real FPA11 hardware does not handle NaNs, but always takes an + * exception for them to be software-emulated (ARM7500FE datasheet + * section 10.4). There is no documented architectural requirement + * for NaN propagation rules and it will depend on how the OS + * level software emulation opted to do it. We here use prop_s_ab + * which matches the later VFP hardware choice and how QEMU's + * fpa11 emulation has worked in the past. The real Linux kernel + * does something slightly different: arch/arm/nwfpe/softfloat-specialize + * propagateFloat64NaN() has the curious behaviour that it prefers + * the QNaN over the SNaN, but if both are QNaN it picks A and + * if both are SNaN it picks B. In theory we could add this as + * a NaN propagation rule, but in practice FPA11 emulation is so + * close to totally dead that it's not worth trying to match it at + * this late date. 
+ */ + set_float_2nan_prop_rule(float_2nan_prop_s_ab, &fpa11->fp_status); } void SetRoundingMode(const unsigned int opcode) diff --git a/linux-user/gen-vdso-elfn.c.inc b/linux-user/gen-vdso-elfn.c.inc index 95856eb839..b47019e136 100644 --- a/linux-user/gen-vdso-elfn.c.inc +++ b/linux-user/gen-vdso-elfn.c.inc @@ -68,28 +68,45 @@ static void elfN(search_symtab)(ElfN(Shdr) *shdr, unsigned sym_idx, void *buf, bool need_bswap) { unsigned str_idx = shdr[sym_idx].sh_link; - ElfN(Sym) *sym = buf + shdr[sym_idx].sh_offset; - unsigned sym_n = shdr[sym_idx].sh_size / sizeof(*sym); + ElfN(Sym) *target_sym = buf + shdr[sym_idx].sh_offset; + unsigned sym_n = shdr[sym_idx].sh_size / sizeof(*target_sym); const char *str = buf + shdr[str_idx].sh_offset; for (unsigned i = 0; i < sym_n; ++i) { const char *name; + ElfN(Sym) sym; + memcpy(&sym, &target_sym[i], sizeof(sym)); if (need_bswap) { - elfN(bswap_sym)(sym + i); + elfN(bswap_sym)(&sym); } - name = str + sym[i].st_name; + name = str + sym.st_name; if (sigreturn_sym && strcmp(sigreturn_sym, name) == 0) { - sigreturn_addr = sym[i].st_value; + sigreturn_addr = sym.st_value; } if (rt_sigreturn_sym && strcmp(rt_sigreturn_sym, name) == 0) { - rt_sigreturn_addr = sym[i].st_value; + rt_sigreturn_addr = sym.st_value; } } } -static void elfN(process)(FILE *outf, void *buf, bool need_bswap) +static void elfN(bswap_ps_hdrs)(ElfN(Ehdr) *ehdr) +{ + ElfN(Phdr) *phdr = (void *)ehdr + ehdr->e_phoff; + ElfN(Shdr) *shdr = (void *)ehdr + ehdr->e_shoff; + ElfN(Half) i; + + for (i = 0; i < ehdr->e_phnum; ++i) { + elfN(bswap_phdr)(&phdr[i]); + } + + for (i = 0; i < ehdr->e_shnum; ++i) { + elfN(bswap_shdr)(&shdr[i]); + } +} + +static void elfN(process)(FILE *outf, void *buf, long len, bool need_bswap) { ElfN(Ehdr) *ehdr = buf; ElfN(Phdr) *phdr; @@ -103,24 +120,14 @@ static void elfN(process)(FILE *outf, void *buf, bool need_bswap) int errors = 0; if (need_bswap) { - elfN(bswap_ehdr)(ehdr); + elfN(bswap_ehdr)(buf); + elfN(bswap_ps_hdrs)(buf); } phnum = ehdr->e_phnum; phdr = buf + ehdr->e_phoff; - if (need_bswap) { - for (unsigned i = 0; i < phnum; ++i) { - elfN(bswap_phdr)(phdr + i); - } - } - shnum = ehdr->e_shnum; shdr = buf + ehdr->e_shoff; - if (need_bswap) { - for (unsigned i = 0; i < shnum; ++i) { - elfN(bswap_shdr)(shdr + i); - } - } for (unsigned i = 0; i < shnum; ++i) { switch (shdr[i].sh_type) { case SHT_SYMTAB: @@ -154,7 +161,24 @@ static void elfN(process)(FILE *outf, void *buf, bool need_bswap) fprintf(stderr, "LOAD segment not loaded at address 0\n"); errors++; } - first_segsz = phdr[i].p_filesz; + /* + * Extend the program header to cover the entire VDSO, so that + * load_elf_vdso() loads everything, including section headers. + * + * Require that there is no .bss, since it would break this + * approach. + */ + if (phdr[i].p_filesz != phdr[i].p_memsz) { + fprintf(stderr, "LOAD segment's filesz and memsz differ\n"); + errors++; + } + if (phdr[i].p_filesz > len) { + fprintf(stderr, "LOAD segment is larger than the whole VDSO\n"); + errors++; + } + phdr[i].p_filesz = len; + phdr[i].p_memsz = len; + first_segsz = len; if (first_segsz < ehdr->e_phoff + phnum * sizeof(*phdr)) { fprintf(stderr, "LOAD segment does not cover PHDRs\n"); errors++; @@ -197,17 +221,24 @@ static void elfN(process)(FILE *outf, void *buf, bool need_bswap) output_reloc(outf, buf, &phdr[i].p_paddr); } + /* Relocate the section headers. */ + for (unsigned i = 0; i < shnum; ++i) { + output_reloc(outf, buf, &shdr[i].sh_addr); + } + /* Relocate the DYNAMIC entries. 
*/ if (dynamic_addr) { - ElfN(Dyn) *dyn = buf + dynamic_ofs; - __typeof(dyn->d_tag) tag; + ElfN(Dyn) *target_dyn = buf + dynamic_ofs; + __typeof(((ElfN(Dyn) *)target_dyn)->d_tag) tag; do { + ElfN(Dyn) dyn; + memcpy(&dyn, target_dyn, sizeof(dyn)); if (need_bswap) { - elfN(bswap_dyn)(dyn); + elfN(bswap_dyn)(&dyn); } - tag = dyn->d_tag; + tag = dyn.d_tag; switch (tag) { case DT_HASH: @@ -218,7 +249,7 @@ static void elfN(process)(FILE *outf, void *buf, bool need_bswap) case DT_PLTGOT: case DT_ADDRRNGLO ... DT_ADDRRNGHI: /* These entries store an address in the entry. */ - output_reloc(outf, buf, &dyn->d_un.d_val); + output_reloc(outf, buf, &target_dyn->d_un.d_val); break; case DT_NULL: @@ -235,7 +266,7 @@ static void elfN(process)(FILE *outf, void *buf, bool need_bswap) break; case DT_SYMENT: - if (dyn->d_un.d_val != sizeof(ElfN(Sym))) { + if (dyn.d_un.d_val != sizeof(ElfN(Sym))) { fprintf(stderr, "VDSO has incorrect dynamic symbol size\n"); errors++; } @@ -251,7 +282,7 @@ static void elfN(process)(FILE *outf, void *buf, bool need_bswap) * ??? The RISC-V toolchain will emit these even when there * are no relocations. Validate zeros. */ - if (dyn->d_un.d_val != 0) { + if (dyn.d_un.d_val != 0) { fprintf(stderr, "VDSO has dynamic relocations\n"); errors++; } @@ -287,7 +318,7 @@ static void elfN(process)(FILE *outf, void *buf, bool need_bswap) errors++; break; } - dyn++; + target_dyn++; } while (tag != DT_NULL); if (errors) { exit(EXIT_FAILURE); @@ -296,11 +327,11 @@ static void elfN(process)(FILE *outf, void *buf, bool need_bswap) /* Relocate the dynamic symbol table. */ if (dynsym_idx) { - ElfN(Sym) *sym = buf + shdr[dynsym_idx].sh_offset; - unsigned sym_n = shdr[dynsym_idx].sh_size / sizeof(*sym); + ElfN(Sym) *target_sym = buf + shdr[dynsym_idx].sh_offset; + unsigned sym_n = shdr[dynsym_idx].sh_size / sizeof(*target_sym); for (unsigned i = 0; i < sym_n; ++i) { - output_reloc(outf, buf, &sym[i].st_value); + output_reloc(outf, buf, &target_sym[i].st_value); } } @@ -311,4 +342,9 @@ static void elfN(process)(FILE *outf, void *buf, bool need_bswap) if (symtab_idx) { elfN(search_symtab)(shdr, symtab_idx, buf, need_bswap); } + + if (need_bswap) { + elfN(bswap_ps_hdrs)(buf); + elfN(bswap_ehdr)(buf); + } } diff --git a/linux-user/gen-vdso.c b/linux-user/gen-vdso.c index 31e333be80..721f38d5a3 100644 --- a/linux-user/gen-vdso.c +++ b/linux-user/gen-vdso.c @@ -132,23 +132,6 @@ int main(int argc, char **argv) fclose(inf); /* - * Write out the vdso image now, before we make local changes. - */ - - fprintf(outf, - "/* Automatically generated from linux-user/gen-vdso.c. */\n" - "\n" - "static const uint8_t %s_image[] = {", - prefix); - for (long i = 0; i < total_len; ++i) { - if (i % 12 == 0) { - fputs("\n ", outf); - } - fprintf(outf, " 0x%02x,", buf[i]); - } - fprintf(outf, "\n};\n\n"); - - /* * Identify which elf flavor we're processing. * The first 16 bytes of the file are e_ident. */ @@ -179,14 +162,17 @@ int main(int argc, char **argv) * Output relocation addresses as we go. */ - fprintf(outf, "static const unsigned %s_relocs[] = {\n", prefix); + fprintf(outf, + "/* Automatically generated by linux-user/gen-vdso.c. 
*/\n" + "\n" + "static const unsigned %s_relocs[] = {\n", prefix); switch (buf[EI_CLASS]) { case ELFCLASS32: - elf32_process(outf, buf, need_bswap); + elf32_process(outf, buf, total_len, need_bswap); break; case ELFCLASS64: - elf64_process(outf, buf, need_bswap); + elf64_process(outf, buf, total_len, need_bswap); break; default: fprintf(stderr, "%s: invalid elf EI_CLASS (%u)\n", @@ -196,6 +182,20 @@ int main(int argc, char **argv) fprintf(outf, "};\n\n"); /* end vdso_relocs. */ + /* + * Write out the vdso image now, after we made local changes. + */ + fprintf(outf, + "static const uint8_t %s_image[] = {", + prefix); + for (long i = 0; i < total_len; ++i) { + if (i % 12 == 0) { + fputs("\n ", outf); + } + fprintf(outf, " 0x%02x,", buf[i]); + } + fprintf(outf, "\n};\n\n"); + fprintf(outf, "static const VdsoImageInfo %s_image_info = {\n", prefix); fprintf(outf, " .image = %s_image,\n", prefix); fprintf(outf, " .relocs = %s_relocs,\n", prefix); diff --git a/linux-user/main.c b/linux-user/main.c index 8143a0d4b0..b09af8d436 100644 --- a/linux-user/main.c +++ b/linux-user/main.c @@ -412,6 +412,13 @@ static void handle_arg_reserved_va(const char *arg) reserved_va = val ? val - 1 : 0; } +static const char *rtsig_map = CONFIG_QEMU_RTSIG_MAP; + +static void handle_arg_rtsig_map(const char *arg) +{ + rtsig_map = arg; +} + static void handle_arg_one_insn_per_tb(const char *arg) { opt_one_insn_per_tb = true; @@ -494,6 +501,9 @@ static const struct qemu_argument arg_table[] = { "address", "set guest_base address to 'address'"}, {"R", "QEMU_RESERVED_VA", true, handle_arg_reserved_va, "size", "reserve 'size' bytes for guest virtual address space"}, + {"t", "QEMU_RTSIG_MAP", true, handle_arg_rtsig_map, + "tsig hsig n[,...]", + "map target rt signals [tsig,tsig+n) to [hsig,hsig+n]"}, {"d", "QEMU_LOG", true, handle_arg_log, "item[,...]", "enable logging of specified items " "(use '-d help' for a list of items)"}, @@ -1002,7 +1012,7 @@ int main(int argc, char **argv, char **envp) target_set_brk(info->brk); syscall_init(); - signal_init(); + signal_init(rtsig_map); /* Now that we've loaded the binary, GUEST_BASE is fixed. Delay generating the prologue until now so that the prologue can take diff --git a/linux-user/signal-common.h b/linux-user/signal-common.h index f4cbe6185e..8584d9ecc2 100644 --- a/linux-user/signal-common.h +++ b/linux-user/signal-common.h @@ -56,7 +56,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, target_sigset_t *set, CPUArchState *env); void process_pending_signals(CPUArchState *cpu_env); -void signal_init(void); +void signal_init(const char *rtsig_map); void queue_signal(CPUArchState *env, int sig, int si_type, target_siginfo_t *info); void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info); diff --git a/linux-user/signal.c b/linux-user/signal.c index 63ac2df53b..9b6d772882 100644 --- a/linux-user/signal.c +++ b/linux-user/signal.c @@ -18,6 +18,7 @@ */ #include "qemu/osdep.h" #include "qemu/bitops.h" +#include "qemu/cutils.h" #include "gdbstub/user.h" #include "exec/page-protection.h" #include "hw/core/tcg-cpu-ops.h" @@ -513,20 +514,81 @@ static int core_dump_signal(int sig) } } -static void signal_table_init(void) +static void signal_table_init(const char *rtsig_map) { int hsig, tsig, count; + if (rtsig_map) { + /* + * Map host RT signals to target RT signals according to the + * user-provided specification. 
+ */ + const char *s = rtsig_map; + + while (true) { + int i; + + if (qemu_strtoi(s, &s, 10, &tsig) || *s++ != ' ') { + fprintf(stderr, "Malformed target signal in QEMU_RTSIG_MAP\n"); + exit(EXIT_FAILURE); + } + if (qemu_strtoi(s, &s, 10, &hsig) || *s++ != ' ') { + fprintf(stderr, "Malformed host signal in QEMU_RTSIG_MAP\n"); + exit(EXIT_FAILURE); + } + if (qemu_strtoi(s, &s, 10, &count) || (*s && *s != ',')) { + fprintf(stderr, "Malformed signal count in QEMU_RTSIG_MAP\n"); + exit(EXIT_FAILURE); + } + + for (i = 0; i < count; i++, tsig++, hsig++) { + if (tsig < TARGET_SIGRTMIN || tsig > TARGET_NSIG) { + fprintf(stderr, "%d is not a target rt signal\n", tsig); + exit(EXIT_FAILURE); + } + if (hsig < SIGRTMIN || hsig > SIGRTMAX) { + fprintf(stderr, "%d is not a host rt signal\n", hsig); + exit(EXIT_FAILURE); + } + if (host_to_target_signal_table[hsig]) { + fprintf(stderr, "%d already maps %d\n", + hsig, host_to_target_signal_table[hsig]); + exit(EXIT_FAILURE); + } + host_to_target_signal_table[hsig] = tsig; + } + + if (*s) { + s++; + } else { + break; + } + } + } else { + /* + * Default host-to-target RT signal mapping. + * + * Signals are supported starting from TARGET_SIGRTMIN and going up + * until we run out of host realtime signals. Glibc uses the lower 2 + * RT signals and (hopefully) nobody uses the upper ones. + * This is why SIGRTMIN (34) is generally greater than __SIGRTMIN (32). + * To fix this properly we would need to do manual signal delivery + * multiplexed over a single host signal. + * Attempts for configure "missing" signals via sigaction will be + * silently ignored. + * + * Reserve one signal for internal usage (see below). + */ + + hsig = SIGRTMIN + 1; + for (tsig = TARGET_SIGRTMIN; + hsig <= SIGRTMAX && tsig <= TARGET_NSIG; + hsig++, tsig++) { + host_to_target_signal_table[hsig] = tsig; + } + } + /* - * Signals are supported starting from TARGET_SIGRTMIN and going up - * until we run out of host realtime signals. Glibc uses the lower 2 - * RT signals and (hopefully) nobody uses the upper ones. - * This is why SIGRTMIN (34) is generally greater than __SIGRTMIN (32). - * To fix this properly we would need to do manual signal delivery - * multiplexed over a single host signal. - * Attempts for configure "missing" signals via sigaction will be - * silently ignored. - * * Remap the target SIGABRT, so that we can distinguish host abort * from guest abort. When the guest registers a signal handler or * calls raise(SIGABRT), the host will raise SIG_RTn. If the guest @@ -536,21 +598,27 @@ static void signal_table_init(void) * parent sees the correct mapping from wait status. */ - hsig = SIGRTMIN; host_to_target_signal_table[SIGABRT] = 0; - host_to_target_signal_table[hsig++] = TARGET_SIGABRT; - - for (tsig = TARGET_SIGRTMIN; - hsig <= SIGRTMAX && tsig <= TARGET_NSIG; - hsig++, tsig++) { - host_to_target_signal_table[hsig] = tsig; + for (hsig = SIGRTMIN; hsig <= SIGRTMAX; hsig++) { + if (!host_to_target_signal_table[hsig]) { + host_to_target_signal_table[hsig] = TARGET_SIGABRT; + break; + } + } + if (hsig > SIGRTMAX) { + fprintf(stderr, "No rt signals left for SIGABRT mapping\n"); + exit(EXIT_FAILURE); } /* Invert the mapping that has already been assigned. 
*/ for (hsig = 1; hsig < _NSIG; hsig++) { tsig = host_to_target_signal_table[hsig]; if (tsig) { - assert(target_to_host_signal_table[tsig] == 0); + if (target_to_host_signal_table[tsig]) { + fprintf(stderr, "%d is already mapped to %d\n", + tsig, target_to_host_signal_table[tsig]); + exit(EXIT_FAILURE); + } target_to_host_signal_table[tsig] = hsig; } } @@ -573,13 +641,13 @@ static void signal_table_init(void) trace_signal_table_init(count); } -void signal_init(void) +void signal_init(const char *rtsig_map) { TaskState *ts = get_task_state(thread_cpu); struct sigaction act, oact; /* initialize signal conversion tables */ - signal_table_init(); + signal_table_init(rtsig_map); /* Set the signal mask from the host mask. */ sigprocmask(0, 0, &ts->signal_mask); diff --git a/meson.build b/meson.build index dfa37e0caa..e0b880e4e1 100644 --- a/meson.build +++ b/meson.build @@ -1785,6 +1785,7 @@ gcrypt = not_found nettle = not_found hogweed = not_found crypto_sm4 = not_found +crypto_sm3 = not_found xts = 'none' if get_option('nettle').enabled() and get_option('gcrypt').enabled() @@ -1820,6 +1821,17 @@ if not gnutls_crypto.found() }''', dependencies: gcrypt) crypto_sm4 = not_found endif + crypto_sm3 = gcrypt + # SM3 ALG is available in libgcrypt >= 1.9 + if gcrypt.found() and not cc.links(''' + #include <gcrypt.h> + int main(void) { + gcry_md_hd_t handler; + gcry_md_open(&handler, GCRY_MD_SM3, 0); + return 0; + }''', dependencies: gcrypt) + crypto_sm3 = not_found + endif endif if (not get_option('nettle').auto() or have_system) and not gcrypt.found() nettle = dependency('nettle', version: '>=3.4', @@ -1840,6 +1852,31 @@ if not gnutls_crypto.found() }''', dependencies: nettle) crypto_sm4 = not_found endif + crypto_sm3 = nettle + # SM3 ALG is available in nettle >= 3.8 + if nettle.found() and not cc.links(''' + #include <nettle/sm3.h> + #include <nettle/hmac.h> + int main(void) { + struct sm3_ctx ctx; + struct hmac_sm3_ctx hmac_ctx; + unsigned char data[64] = {0}; + unsigned char output[32]; + + // SM3 hash function test + sm3_init(&ctx); + sm3_update(&ctx, 64, data); + sm3_digest(&ctx, 32, data); + + // HMAC-SM3 test + hmac_sm3_set_key(&hmac_ctx, 32, data); + hmac_sm3_update(&hmac_ctx, 64, data); + hmac_sm3_digest(&hmac_ctx, 32, output); + + return 0; + }''', dependencies: nettle) + crypto_sm3 = not_found + endif endif endif @@ -2487,6 +2524,7 @@ config_host_data.set('CONFIG_TASN1', tasn1.found()) config_host_data.set('CONFIG_GCRYPT', gcrypt.found()) config_host_data.set('CONFIG_NETTLE', nettle.found()) config_host_data.set('CONFIG_CRYPTO_SM4', crypto_sm4.found()) +config_host_data.set('CONFIG_CRYPTO_SM3', crypto_sm3.found()) config_host_data.set('CONFIG_HOGWEED', hogweed.found()) config_host_data.set('CONFIG_QEMU_PRIVATE_XTS', xts == 'private') config_host_data.set('CONFIG_MALLOC_TRIM', has_malloc_trim) @@ -3203,7 +3241,8 @@ foreach target : target_dirs config_target += { 'CONFIG_USER_ONLY': 'y', 'CONFIG_QEMU_INTERP_PREFIX': - get_option('interp_prefix').replace('%M', config_target['TARGET_NAME']) + get_option('interp_prefix').replace('%M', config_target['TARGET_NAME']), + 'CONFIG_QEMU_RTSIG_MAP': get_option('rtsig_map'), } endif @@ -3538,6 +3577,7 @@ if have_system 'hw/s390x', 'hw/scsi', 'hw/sd', + 'hw/sensor', 'hw/sh4', 'hw/sparc', 'hw/sparc64', @@ -3744,6 +3784,10 @@ subdir('accel') subdir('plugins') subdir('ebpf') +if 'CONFIG_TCG' in config_all_accel + subdir('contrib/plugins') +endif + common_user_inc = [] subdir('common-user') @@ -4651,6 +4695,7 @@ if nettle.found() summary_info += {' XTS': xts 
!= 'private'} endif summary_info += {'SM4 ALG support': crypto_sm4} +summary_info += {'SM3 ALG support': crypto_sm3} summary_info += {'AF_ALG support': have_afalg} summary_info += {'rng-none': get_option('rng_none')} summary_info += {'Linux keyring': have_keyring} diff --git a/meson_options.txt b/meson_options.txt index 502efe605b..5eeaf3eee5 100644 --- a/meson_options.txt +++ b/meson_options.txt @@ -27,6 +27,8 @@ option('block_drv_ro_whitelist', type : 'string', value : '', description: 'set block driver read-only whitelist (by default affects only QEMU, not tools like qemu-img)') option('interp_prefix', type : 'string', value : '/usr/gnemul/qemu-%M', description: 'where to find shared libraries etc., use %M for cpu name') +option('rtsig_map', type : 'string', value : 'NULL', + description: 'default value of QEMU_RTSIG_MAP') option('fuzzing_engine', type : 'string', value : '', description: 'fuzzing engine library for OSS-Fuzz') option('trace_file', type: 'string', value: 'trace', diff --git a/migration/savevm.c b/migration/savevm.c index e796436979..f4e4876f72 100644 --- a/migration/savevm.c +++ b/migration/savevm.c @@ -860,25 +860,6 @@ static void vmstate_check(const VMStateDescription *vmsd) } } -/* - * See comment in hw/intc/xics.c:icp_realize() - * - * This function can be removed when - * pre_2_10_vmstate_register_dummy_icp() is removed. - */ -int vmstate_replace_hack_for_ppc(VMStateIf *obj, int instance_id, - const VMStateDescription *vmsd, - void *opaque) -{ - SaveStateEntry *se = find_se(vmsd->name, instance_id); - - if (se) { - savevm_state_handler_remove(se); - g_free(se->compat); - g_free(se); - } - return vmstate_register(obj, instance_id, vmsd, opaque); -} int vmstate_register_with_alias_id(VMStateIf *obj, uint32_t instance_id, const VMStateDescription *vmsd, diff --git a/qapi/crypto.json b/qapi/crypto.json index 9431522768..c9d967d782 100644 --- a/qapi/crypto.json +++ b/qapi/crypto.json @@ -55,11 +55,12 @@ # @sha512: SHA-512. (since 2.7) # # @ripemd160: RIPEMD-160. (since 2.7) +# @sm3: SM3. (since 9.2.0) # # Since: 2.6 ## { 'enum': 'QCryptoHashAlgo', - 'data': ['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512', 'ripemd160']} + 'data': ['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512', 'ripemd160', 'sm3']} ## # @QCryptoCipherAlgo: @@ -419,11 +420,6 @@ # # Properties for objects of classes derived from secret-common. # -# @loaded: if true, the secret is loaded immediately when applying -# this option and will probably fail when processing the next -# option. Don't use; only provided for compatibility. -# (default: false) -# # @format: the data format that the secret is provided in # (default: raw) # @@ -436,16 +432,10 @@ # 16-byte IV. Mandatory if @keyid is given. Ignored if @keyid is # absent. # -# Features: -# -# @deprecated: Member @loaded is deprecated. Setting true doesn't -# make sense, and false is already the default. -# # Since: 2.6 ## { 'struct': 'SecretCommonProperties', - 'data': { '*loaded': { 'type': 'bool', 'features': ['deprecated'] }, - '*format': 'QCryptoSecretFormat', + 'data': { '*format': 'QCryptoSecretFormat', '*keyid': 'str', '*iv': 'str' } } @@ -512,58 +502,32 @@ # # Properties for tls-creds-anon objects. # -# @loaded: if true, the credentials are loaded immediately when -# applying this option and will ignore options that are processed -# later. Don't use; only provided for compatibility. -# (default: false) -# -# Features: -# -# @deprecated: Member @loaded is deprecated. Setting true doesn't -# make sense, and false is already the default. 
-# # Since: 2.5 ## { 'struct': 'TlsCredsAnonProperties', 'base': 'TlsCredsProperties', - 'data': { '*loaded': { 'type': 'bool', 'features': ['deprecated'] } } } + 'data': { } } ## # @TlsCredsPskProperties: # # Properties for tls-creds-psk objects. # -# @loaded: if true, the credentials are loaded immediately when -# applying this option and will ignore options that are processed -# later. Don't use; only provided for compatibility. -# (default: false) -# # @username: the username which will be sent to the server. For # clients only. If absent, "qemu" is sent and the property will # read back as an empty string. # -# Features: -# -# @deprecated: Member @loaded is deprecated. Setting true doesn't -# make sense, and false is already the default. -# # Since: 3.0 ## { 'struct': 'TlsCredsPskProperties', 'base': 'TlsCredsProperties', - 'data': { '*loaded': { 'type': 'bool', 'features': ['deprecated'] }, - '*username': 'str' } } + 'data': { '*username': 'str' } } ## # @TlsCredsX509Properties: # # Properties for tls-creds-x509 objects. # -# @loaded: if true, the credentials are loaded immediately when -# applying this option and will ignore options that are processed -# later. Don't use; only provided for compatibility. -# (default: false) -# # @sanity-check: if true, perform some sanity checks before using the # credentials (default: true) # @@ -573,17 +537,11 @@ # provides the ID of a previously created secret object containing # the password for decryption. # -# Features: -# -# @deprecated: Member @loaded is deprecated. Setting true doesn't -# make sense, and false is already the default. -# # Since: 2.5 ## { 'struct': 'TlsCredsX509Properties', 'base': 'TlsCredsProperties', - 'data': { '*loaded': { 'type': 'bool', 'features': ['deprecated'] }, - '*sanity-check': 'bool', + 'data': { '*sanity-check': 'bool', '*passwordid': 'str' } } ## # @QCryptoAkCipherAlgo: diff --git a/qapi/machine-common.json b/qapi/machine-common.json index b64e4895cf..298e51f373 100644 --- a/qapi/machine-common.json +++ b/qapi/machine-common.json @@ -5,7 +5,7 @@ # See the COPYING file in the top-level directory. ## -# = Machines S390 data types +# = Common machine types ## ## @@ -18,3 +18,95 @@ ## { 'enum': 'S390CpuEntitlement', 'data': [ 'auto', 'low', 'medium', 'high' ] } + +## +# @CpuTopologyLevel: +# +# An enumeration of CPU topology levels. +# +# @thread: thread level, which would also be called SMT level or +# logical processor level. The @threads option in +# SMPConfiguration is used to configure the topology of this +# level. +# +# @core: core level. The @cores option in SMPConfiguration is used +# to configure the topology of this level. +# +# @module: module level. The @modules option in SMPConfiguration is +# used to configure the topology of this level. +# +# @cluster: cluster level. The @clusters option in SMPConfiguration +# is used to configure the topology of this level. +# +# @die: die level. The @dies option in SMPConfiguration is used to +# configure the topology of this level. +# +# @socket: socket level, which would also be called package level. +# The @sockets option in SMPConfiguration is used to configure +# the topology of this level. +# +# @book: book level. The @books option in SMPConfiguration is used +# to configure the topology of this level. +# +# @drawer: drawer level. The @drawers option in SMPConfiguration is +# used to configure the topology of this level. +# +# @default: default level. 
Some architectures will have default +# topology settings (e.g., cache topology), and this special +# level means following the architecture-specific settings. +# +# Since: 9.2 +## +{ 'enum': 'CpuTopologyLevel', + 'data': [ 'thread', 'core', 'module', 'cluster', 'die', + 'socket', 'book', 'drawer', 'default' ] } + +## +# @CacheLevelAndType: +# +# Caches a system may have. The enumeration value here is the +# combination of cache level and cache type. +# +# @l1d: L1 data cache. +# +# @l1i: L1 instruction cache. +# +# @l2: L2 (unified) cache. +# +# @l3: L3 (unified) cache. +# +# Since: 9.2 +## +{ 'enum': 'CacheLevelAndType', + 'data': [ 'l1d', 'l1i', 'l2', 'l3' ] } + +## +# @SmpCacheProperties: +# +# Cache information for SMP system. +# +# @cache: Cache name, which is the combination of cache level +# and cache type. +# +# @topology: Cache topology level. It accepts the CPU topology +# enumeration as the parameter, i.e., CPUs in the same +# topology container share the same cache. +# +# Since: 9.2 +## +{ 'struct': 'SmpCacheProperties', + 'data': { + 'cache': 'CacheLevelAndType', + 'topology': 'CpuTopologyLevel' } } + +## +# @SmpCachePropertiesWrapper: +# +# List wrapper of SmpCacheProperties. +# +# @caches: the list of SmpCacheProperties. +# +# Since: 9.2 +## +{ 'struct': 'SmpCachePropertiesWrapper', + 'data': { 'caches': ['SmpCacheProperties'] } } diff --git a/qapi/qdev.json b/qapi/qdev.json index 53d147c7b4..2a581129c9 100644 --- a/qapi/qdev.json +++ b/qapi/qdev.json @@ -163,3 +163,27 @@ ## { 'event': 'DEVICE_UNPLUG_GUEST_ERROR', 'data': { '*device': 'str', 'path': 'str' } } + +## +# @device-sync-config: +# +# Synchronize device configuration from host to guest part. First, +# copy the configuration from the host part (backend) to the guest +# part (frontend). Then notify guest software that device +# configuration changed. +# +# The command may be used to notify the guest about block device +# capacity change. Currently only vhost-user-blk device supports +# this. +# +# @id: the device's ID or QOM path +# +# Features: +# +# @unstable: The command is experimental. +# +# Since: 9.1 +## +{ 'command': 'device-sync-config', + 'features': [ 'unstable' ], + 'data': {'id': 'str'} } diff --git a/qapi/qom.json b/qapi/qom.json index 321ccd708a..a8beeabf1f 100644 --- a/qapi/qom.json +++ b/qapi/qom.json @@ -845,6 +845,45 @@ 'node': 'uint32' } } ## +# @AcpiGenericPortProperties: +# +# Properties for acpi-generic-port objects. +# +# @pci-bus: QOM path of the PCI bus of the hostbridge associated with +# this SRAT Generic Port Affinity Structure. This is the same as +# the bus parameter for the root ports attached to this host +# bridge. The resulting SRAT Generic Port Affinity Structure will +# refer to the ACPI object in DSDT that represents the host bridge +# (e.g. ACPI0016 for CXL host bridges). See ACPI 6.5 Section +# 5.2.16.7 for more information. +# +# @node: Similar to a NUMA node ID, but instead of providing a +# reference point used for defining NUMA distances and access +# characteristics to memory or from an initiator (e.g. CPU), this +# node defines the boundary point between non-discoverable system +# buses which must be described by firmware, and a discoverable +# bus. NUMA distances and access characteristics are defined to +# and from that point. For system software to establish full +# initiator to target characteristics this information must be +# combined with information retrieved from the discoverable part +# of the path.
An example would use CDAT (see UEFI.org) +# information read from devices and switches in conjunction with +# link characteristics read from PCIe Configuration space. +# To get the full path latency from CPU to CXL attached DRAM +# CXL device: Add the latency from CPU to Generic Port (from +# HMAT indexed via the node ID in this SRAT structure) to +# that for CXL bus links, the latency across intermediate switches +# and from the EP port to the actual memory. Bandwidth is more +# complex as there may be interleaving across multiple devices +# and shared links in the path. +# +# Since: 9.1 +## +{ 'struct': 'AcpiGenericPortProperties', + 'data': { 'pci-bus': 'str', + 'node': 'uint32' } } + +## # @RngProperties: # # Properties for objects of classes derived from rng. @@ -1043,6 +1082,7 @@ { 'enum': 'ObjectType', 'data': [ 'acpi-generic-initiator', + 'acpi-generic-port', 'authz-list', 'authz-listfile', 'authz-pam', @@ -1118,6 +1158,7 @@ 'discriminator': 'qom-type', 'data': { 'acpi-generic-initiator': 'AcpiGenericInitiatorProperties', + 'acpi-generic-port': 'AcpiGenericPortProperties', 'authz-list': 'AuthZListProperties', 'authz-listfile': 'AuthZListFileProperties', 'authz-pam': 'AuthZPAMProperties', diff --git a/qga/commands-posix.c b/qga/commands-posix.c index 389c5eeb5d..636307bedf 100644 --- a/qga/commands-posix.c +++ b/qga/commands-posix.c @@ -84,7 +84,7 @@ static ssize_t ga_pipe_read_str(int fd[2], char **str) *str = g_realloc(*str, len + n + 1); memcpy(*str + len, buf, n); len += n; - *str[len] = '\0'; + (*str)[len] = '\0'; } close(fd[0]); fd[0] = -1; diff --git a/qga/commands-windows-ssh.c b/qga/commands-windows-ssh.c index 6a642e3ba8..df45c17b75 100644 --- a/qga/commands-windows-ssh.c +++ b/qga/commands-windows-ssh.c @@ -377,7 +377,7 @@ error: static bool set_file_permissions(PWindowsUserInfo userInfo, Error **errp) { PACL pACL = NULL; - PSID userPSID; + PSID userPSID = NULL; /* Creates the access control structure */ if (!create_acl(userInfo, &pACL, errp)) { diff --git a/qga/vss-win32/install.cpp b/qga/vss-win32/install.cpp index 84944133f7..5cea5bcf74 100644 --- a/qga/vss-win32/install.cpp +++ b/qga/vss-win32/install.cpp @@ -39,7 +39,7 @@ const GUID CLSID_WbemLocator = { 0x4590f811, 0x1d3a, 0x11d0, const GUID IID_IWbemLocator = { 0xdc12a687, 0x737f, 0x11cf, {0x88, 0x4d, 0x00, 0xaa, 0x00, 0x4b, 0x2e, 0x24} }; -void errmsg(DWORD err, const char *text) +static void errmsg(DWORD err, const char *text) { /* * `text' contains function call statement when errmsg is called via chk().
@@ -242,6 +242,7 @@ out: } /* Unregister this module from COM+ Applications Catalog */ +STDAPI COMUnregister(void); STDAPI COMUnregister(void) { qga_debug_begin; @@ -256,6 +257,7 @@ out: } /* Register this module to COM+ Applications Catalog */ +STDAPI COMRegister(void); STDAPI COMRegister(void) { qga_debug_begin; @@ -380,11 +382,13 @@ out: return hr; } +STDAPI_(void) CALLBACK DLLCOMRegister(HWND, HINSTANCE, LPSTR, int); STDAPI_(void) CALLBACK DLLCOMRegister(HWND, HINSTANCE, LPSTR, int) { COMRegister(); } +STDAPI_(void) CALLBACK DLLCOMUnregister(HWND, HINSTANCE, LPSTR, int); STDAPI_(void) CALLBACK DLLCOMUnregister(HWND, HINSTANCE, LPSTR, int) { COMUnregister(); diff --git a/qga/vss-win32/provider.cpp b/qga/vss-win32/provider.cpp index cc72e5ef1b..a102a23fbf 100644 --- a/qga/vss-win32/provider.cpp +++ b/qga/vss-win32/provider.cpp @@ -45,7 +45,7 @@ const IID IID_IVssEnumObject = { 0xAE1C7110, 0x2F60, 0x11d3, {0x8A, 0x39, 0x00, 0xC0, 0x4F, 0x72, 0xD8, 0xE3} }; -void LockModule(BOOL lock) +static void LockModule(BOOL lock) { if (lock) { InterlockedIncrement(&g_nComObjsInUse); @@ -528,6 +528,9 @@ STDAPI DllCanUnloadNow() } EXTERN_C +BOOL WINAPI DllMain(HINSTANCE hinstDll, DWORD dwReason, LPVOID lpReserved); + +EXTERN_C BOOL WINAPI DllMain(HINSTANCE hinstDll, DWORD dwReason, LPVOID lpReserved) { qga_debug("begin, reason = %lu", dwReason); diff --git a/qga/vss-win32/requester.cpp b/qga/vss-win32/requester.cpp index 9884c65e70..4401d55e3a 100644 --- a/qga/vss-win32/requester.cpp +++ b/qga/vss-win32/requester.cpp @@ -254,8 +254,8 @@ out: qga_debug_end; } -DWORD get_reg_dword_value(HKEY baseKey, LPCSTR subKey, LPCSTR valueName, - DWORD defaultData) +static DWORD get_reg_dword_value(HKEY baseKey, LPCSTR subKey, LPCSTR valueName, + DWORD defaultData) { qga_debug_begin; @@ -272,12 +272,12 @@ DWORD get_reg_dword_value(HKEY baseKey, LPCSTR subKey, LPCSTR valueName, return dwordData; } -bool is_valid_vss_backup_type(VSS_BACKUP_TYPE vssBT) +static bool is_valid_vss_backup_type(VSS_BACKUP_TYPE vssBT) { return (vssBT > VSS_BT_UNDEFINED && vssBT < VSS_BT_OTHER); } -VSS_BACKUP_TYPE get_vss_backup_type( +static VSS_BACKUP_TYPE get_vss_backup_type( VSS_BACKUP_TYPE defaultVssBT = DEFAULT_VSS_BACKUP_TYPE) { qga_debug_begin; diff --git a/scripts/meson-buildoptions.sh b/scripts/meson-buildoptions.sh index f9693d226b..a8066aab03 100644 --- a/scripts/meson-buildoptions.sh +++ b/scripts/meson-buildoptions.sh @@ -74,6 +74,7 @@ meson_options_help() { printf "%s\n" ' "manufacturer" name for qemu-ga registry entries' printf "%s\n" ' [QEMU]' printf "%s\n" ' --qemu-ga-version=VALUE version number for qemu-ga installer' + printf "%s\n" ' --rtsig-map=VALUE default value of QEMU_RTSIG_MAP [NULL]' printf "%s\n" ' --smbd=VALUE Path to smbd for slirp networking' printf "%s\n" ' --sysconfdir=VALUE Sysconf data directory [etc]' printf "%s\n" ' --tls-priority=VALUE Default TLS protocol/cipher priority string' @@ -462,6 +463,7 @@ _meson_option_parse() { --disable-replication) printf "%s" -Dreplication=disabled ;; --enable-rng-none) printf "%s" -Drng_none=true ;; --disable-rng-none) printf "%s" -Drng_none=false ;; + --rtsig-map=*) quote_sh "-Drtsig_map=$2" ;; --enable-rust) printf "%s" -Drust=enabled ;; --disable-rust) printf "%s" -Drust=disabled ;; --enable-rutabaga-gfx) printf "%s" -Drutabaga_gfx=enabled ;; diff --git a/system/qdev-monitor.c b/system/qdev-monitor.c index 21c6d16fb4..4c09b38ffb 100644 --- a/system/qdev-monitor.c +++ b/system/qdev-monitor.c @@ -23,6 +23,7 @@ #include "monitor/monitor.h" #include "monitor/qdev.h" 
#include "sysemu/arch_init.h" +#include "sysemu/runstate.h" #include "qapi/error.h" #include "qapi/qapi-commands-qdev.h" #include "qapi/qmp/dispatch.h" @@ -885,13 +886,20 @@ void qmp_device_add(QDict *qdict, QObject **ret_data, Error **errp) object_unref(OBJECT(dev)); } -static DeviceState *find_device_state(const char *id, Error **errp) +/* + * Note that creating new APIs using error classes other than GenericError is + * not recommended. Set use_generic_error=true for new interfaces. + */ +static DeviceState *find_device_state(const char *id, bool use_generic_error, + Error **errp) { Object *obj = object_resolve_path_at(qdev_get_peripheral(), id); DeviceState *dev; if (!obj) { - error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND, + error_set(errp, + (use_generic_error ? + ERROR_CLASS_GENERIC_ERROR : ERROR_CLASS_DEVICE_NOT_FOUND), "Device '%s' not found", id); return NULL; } @@ -956,7 +964,7 @@ void qdev_unplug(DeviceState *dev, Error **errp) void qmp_device_del(const char *id, Error **errp) { - DeviceState *dev = find_device_state(id, errp); + DeviceState *dev = find_device_state(id, false, errp); if (dev != NULL) { if (dev->pending_deleted_event && (dev->pending_deleted_expires_ms == 0 || @@ -970,6 +978,43 @@ void qmp_device_del(const char *id, Error **errp) } } +int qdev_sync_config(DeviceState *dev, Error **errp) +{ + DeviceClass *dc = DEVICE_GET_CLASS(dev); + + if (!dc->sync_config) { + error_setg(errp, "device-sync-config is not supported for '%s'", + object_get_typename(OBJECT(dev))); + return -ENOTSUP; + } + + return dc->sync_config(dev, errp); +} + +void qmp_device_sync_config(const char *id, Error **errp) +{ + DeviceState *dev; + + /* + * During migration there is a race between syncing`configuration + * and migrating it (if migrate first, that target would get + * outdated version), so let's just not allow it. + */ + + if (migration_is_running()) { + error_setg(errp, "Config synchronization is not allowed " + "during migration"); + return; + } + + dev = find_device_state(id, true, errp); + if (!dev) { + return; + } + + qdev_sync_config(dev, errp); +} + void hmp_device_add(Monitor *mon, const QDict *qdict) { Error *err = NULL; @@ -1076,7 +1121,7 @@ BlockBackend *blk_by_qdev_id(const char *id, Error **errp) GLOBAL_STATE_CODE(); - dev = find_device_state(id, errp); + dev = find_device_state(id, false, errp); if (dev == NULL) { return NULL; } diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c index 9db1dffc03..5d75c941f7 100644 --- a/target/alpha/cpu.c +++ b/target/alpha/cpu.c @@ -24,6 +24,7 @@ #include "qemu/qemu-print.h" #include "cpu.h" #include "exec/exec-all.h" +#include "fpu/softfloat.h" static void alpha_cpu_set_pc(CPUState *cs, vaddr value) @@ -187,7 +188,17 @@ static void alpha_cpu_initfn(Object *obj) { CPUAlphaState *env = cpu_env(CPU(obj)); + /* TODO all this should be done in reset, not init */ + env->lock_addr = -1; + + /* + * TODO: this is incorrect. The Alpha Architecture Handbook version 4 + * describes NaN propagation in section 4.7.10.4. We should prefer + * the operand in Fb (whether it is a QNaN or an SNaN), then the + * operand in Fa. That is float_2nan_prop_ba. 
+ */ + set_float_2nan_prop_rule(float_2nan_prop_x87, &env->fp_status); #if defined(CONFIG_USER_ONLY) env->flags = ENV_FLAG_PS_USER | ENV_FLAG_FEN; cpu_alpha_store_fpcr(env, (uint64_t)(FPCR_INVD | FPCR_DZED | FPCR_OVFD diff --git a/target/arm/cpu-features.h b/target/arm/cpu-features.h index 04ce281826..e806f138b8 100644 --- a/target/arm/cpu-features.h +++ b/target/arm/cpu-features.h @@ -802,6 +802,11 @@ static inline bool isar_feature_aa64_tidcp1(const ARMISARegisters *id) return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, TIDCP1) != 0; } +static inline bool isar_feature_aa64_cmow(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, CMOW) != 0; +} + static inline bool isar_feature_aa64_hafs(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, HAFDBS) != 0; diff --git a/target/arm/cpu.c b/target/arm/cpu.c index 5b751439bd..6938161b95 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -168,6 +168,18 @@ void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook, QLIST_INSERT_HEAD(&cpu->el_change_hooks, entry, node); } +/* + * Set the float_status behaviour to match the Arm defaults: + * * tininess-before-rounding + * * 2-input NaN propagation prefers SNaN over QNaN, and then + * operand A over operand B (see FPProcessNaNs() pseudocode) + */ +static void arm_set_default_fp_behaviours(float_status *s) +{ + set_float_detect_tininess(float_tininess_before_rounding, s); + set_float_2nan_prop_rule(float_2nan_prop_s_ab, s); +} + static void cp_reg_reset(gpointer key, gpointer value, gpointer opaque) { /* Reset a single ARMCPRegInfo register */ @@ -549,14 +561,11 @@ static void arm_cpu_reset_hold(Object *obj, ResetType type) set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status); set_default_nan_mode(1, &env->vfp.standard_fp_status); set_default_nan_mode(1, &env->vfp.standard_fp_status_f16); - set_float_detect_tininess(float_tininess_before_rounding, - &env->vfp.fp_status); - set_float_detect_tininess(float_tininess_before_rounding, - &env->vfp.standard_fp_status); - set_float_detect_tininess(float_tininess_before_rounding, - &env->vfp.fp_status_f16); - set_float_detect_tininess(float_tininess_before_rounding, - &env->vfp.standard_fp_status_f16); + arm_set_default_fp_behaviours(&env->vfp.fp_status); + arm_set_default_fp_behaviours(&env->vfp.standard_fp_status); + arm_set_default_fp_behaviours(&env->vfp.fp_status_f16); + arm_set_default_fp_behaviours(&env->vfp.standard_fp_status_f16); + #ifndef CONFIG_USER_ONLY if (kvm_enabled()) { kvm_arm_reset_vcpu(cpu); diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 8fc8b6398f..d86e641280 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -1367,6 +1367,7 @@ void pmu_init(ARMCPU *cpu); #define SCTLR_EnIB (1U << 30) /* v8.3, AArch64 only */ #define SCTLR_EnIA (1U << 31) /* v8.3, AArch64 only */ #define SCTLR_DSSBS_32 (1U << 31) /* v8.5, AArch32 only */ +#define SCTLR_CMOW (1ULL << 32) /* FEAT_CMOW */ #define SCTLR_MSCEN (1ULL << 33) /* FEAT_MOPS */ #define SCTLR_BT0 (1ULL << 35) /* v8.5-BTI */ #define SCTLR_BT1 (1ULL << 36) /* v8.5-BTI */ @@ -2805,38 +2806,38 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync); * The only use of stage 2 translations is either as part of an s1+2 * lookup or when loading the descriptors during a stage 1 page table walk, * and in both those cases we don't use the TLB. - * 4. we want to be able to use the TLB for accesses done as part of a + * 4. 
we can also safely fold together the "32 bit EL3" and "64 bit EL3" + * translation regimes, because they map reasonably well to each other + * and they can't both be active at the same time. + * 5. we want to be able to use the TLB for accesses done as part of a * stage1 page table walk, rather than having to walk the stage2 page * table over and over. - * 5. we need separate EL1/EL2 mmu_idx for handling the Privileged Access + * 6. we need separate EL1/EL2 mmu_idx for handling the Privileged Access * Never (PAN) bit within PSTATE. - * 6. we fold together most secure and non-secure regimes for A-profile, + * 7. we fold together most secure and non-secure regimes for A-profile, * because there are no banked system registers for aarch64, so the * process of switching between secure and non-secure is * already heavyweight. - * 7. we cannot fold together Stage 2 Secure and Stage 2 NonSecure, + * 8. we cannot fold together Stage 2 Secure and Stage 2 NonSecure, * because both are in use simultaneously for Secure EL2. * * This gives us the following list of cases: * - * EL0 EL1&0 stage 1+2 (or AArch32 PL0 PL1&0 stage 1+2) - * EL1 EL1&0 stage 1+2 (or AArch32 PL1 PL1&0 stage 1+2) - * EL1 EL1&0 stage 1+2 +PAN (or AArch32 PL1 PL1&0 stage 1+2 +PAN) + * EL0 EL1&0 stage 1+2 (aka NS PL0 PL1&0 stage 1+2) + * EL1 EL1&0 stage 1+2 (aka NS PL1 PL1&0 stage 1+2) + * EL1 EL1&0 stage 1+2 +PAN (aka NS PL1 P1&0 stage 1+2 +PAN) * EL0 EL2&0 * EL2 EL2&0 * EL2 EL2&0 +PAN * EL2 (aka NS PL2) - * EL3 (not used when EL3 is AArch32) + * EL3 (aka AArch32 S PL1 PL1&0) + * AArch32 S PL0 PL1&0 (we call this EL30_0) + * AArch32 S PL1 PL1&0 +PAN (we call this EL30_3_PAN) * Stage2 Secure * Stage2 NonSecure * plus one TLB per Physical address space: S, NS, Realm, Root * - * for a total of 14 different mmu_idx. - * - * Note that when EL3 is AArch32, the usage is potentially confusing - * because the MMU indexes are named for their AArch64 use, so code - * using the ARMMMUIdx_E10_1 might be at EL3, not EL1. This is because - * Secure PL1 is always at EL3. + * for a total of 16 different mmu_idx. * * R profile CPUs have an MPU, but can use the same set of MMU indexes * as A profile. They only need to distinguish EL0 and EL1 (and @@ -2900,6 +2901,8 @@ typedef enum ARMMMUIdx { ARMMMUIdx_E20_2_PAN = 5 | ARM_MMU_IDX_A, ARMMMUIdx_E2 = 6 | ARM_MMU_IDX_A, ARMMMUIdx_E3 = 7 | ARM_MMU_IDX_A, + ARMMMUIdx_E30_0 = 8 | ARM_MMU_IDX_A, + ARMMMUIdx_E30_3_PAN = 9 | ARM_MMU_IDX_A, /* * Used for second stage of an S12 page table walk, or for descriptor @@ -2907,14 +2910,14 @@ typedef enum ARMMMUIdx { * are in use simultaneously for SecureEL2: the security state for * the S2 ptw is selected by the NS bit from the S1 ptw. */ - ARMMMUIdx_Stage2_S = 8 | ARM_MMU_IDX_A, - ARMMMUIdx_Stage2 = 9 | ARM_MMU_IDX_A, + ARMMMUIdx_Stage2_S = 10 | ARM_MMU_IDX_A, + ARMMMUIdx_Stage2 = 11 | ARM_MMU_IDX_A, /* TLBs with 1-1 mapping to the physical address spaces. 
*/ - ARMMMUIdx_Phys_S = 10 | ARM_MMU_IDX_A, - ARMMMUIdx_Phys_NS = 11 | ARM_MMU_IDX_A, - ARMMMUIdx_Phys_Root = 12 | ARM_MMU_IDX_A, - ARMMMUIdx_Phys_Realm = 13 | ARM_MMU_IDX_A, + ARMMMUIdx_Phys_S = 12 | ARM_MMU_IDX_A, + ARMMMUIdx_Phys_NS = 13 | ARM_MMU_IDX_A, + ARMMMUIdx_Phys_Root = 14 | ARM_MMU_IDX_A, + ARMMMUIdx_Phys_Realm = 15 | ARM_MMU_IDX_A, /* * These are not allocated TLBs and are used only for AT system @@ -2953,6 +2956,8 @@ typedef enum ARMMMUIdxBit { TO_CORE_BIT(E20_2), TO_CORE_BIT(E20_2_PAN), TO_CORE_BIT(E3), + TO_CORE_BIT(E30_0), + TO_CORE_BIT(E30_3_PAN), TO_CORE_BIT(Stage2), TO_CORE_BIT(Stage2_S), @@ -3130,10 +3135,6 @@ FIELD(TBFLAG_A32, NS, 10, 1) * This requires an SME trap from AArch32 mode when using NEON. */ FIELD(TBFLAG_A32, SME_TRAP_NONSTREAMING, 11, 1) -/* - * Indicates whether we are in the Secure PL1&0 translation regime - */ -FIELD(TBFLAG_A32, S_PL1_0, 12, 1) /* * Bit usage when in AArch32 state, for M-profile only. diff --git a/target/arm/helper.c b/target/arm/helper.c index 0a731a38e8..f38eb054c0 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -444,6 +444,9 @@ static int alle1_tlbmask(CPUARMState *env) * Note that the 'ALL' scope must invalidate both stage 1 and * stage 2 translations, whereas most other scopes only invalidate * stage 1 translations. + * + * For AArch32 this is only used for TLBIALLNSNH and VTTBR + * writes, so only needs to apply to NS PL1&0, not S PL1&0. */ return (ARMMMUIdxBit_E10_1 | ARMMMUIdxBit_E10_1_PAN | @@ -3701,7 +3704,7 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value, */ format64 = arm_s1_regime_using_lpae_format(env, mmu_idx); - if (arm_feature(env, ARM_FEATURE_EL2) && !arm_aa32_secure_pl1_0(env)) { + if (arm_feature(env, ARM_FEATURE_EL2)) { if (mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_E10_1 || mmu_idx == ARMMMUIdx_E10_1_PAN) { @@ -3775,11 +3778,17 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) case 0: /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */ switch (el) { + case 3: + if (ri->crm == 9 && arm_pan_enabled(env)) { + mmu_idx = ARMMMUIdx_E30_3_PAN; + } else { + mmu_idx = ARMMMUIdx_E3; + } + break; case 2: g_assert(ss != ARMSS_Secure); /* ARMv8.4-SecEL2 is 64-bit only */ /* fall through */ case 1: - case 3: if (ri->crm == 9 && arm_pan_enabled(env)) { mmu_idx = ARMMMUIdx_Stage1_E1_PAN; } else { @@ -3794,7 +3803,7 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) /* stage 1 current state PL0: ATS1CUR, ATS1CUW */ switch (el) { case 3: - mmu_idx = ARMMMUIdx_E10_0; + mmu_idx = ARMMMUIdx_E30_0; break; case 2: g_assert(ss != ARMSS_Secure); /* ARMv8.4-SecEL2 is 64-bit only */ @@ -4904,11 +4913,14 @@ static int vae1_tlbmask(CPUARMState *env) uint64_t hcr = arm_hcr_el2_eff(env); uint16_t mask; + assert(arm_feature(env, ARM_FEATURE_AARCH64)); + if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { mask = ARMMMUIdxBit_E20_2 | ARMMMUIdxBit_E20_2_PAN | ARMMMUIdxBit_E20_0; } else { + /* This is AArch64 only, so we don't need to touch the EL30_x TLBs */ mask = ARMMMUIdxBit_E10_1 | ARMMMUIdxBit_E10_1_PAN | ARMMMUIdxBit_E10_0; @@ -4947,6 +4959,8 @@ static int vae1_tlbbits(CPUARMState *env, uint64_t addr) uint64_t hcr = arm_hcr_el2_eff(env); ARMMMUIdx mmu_idx; + assert(arm_feature(env, ARM_FEATURE_AARCH64)); + /* Only the regime of the mmu_idx below is significant. 
*/ if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { mmu_idx = ARMMMUIdx_E20_0; @@ -6215,6 +6229,11 @@ static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri, if (cpu_isar_feature(aa64_nmi, cpu)) { valid_mask |= HCRX_TALLINT | HCRX_VINMI | HCRX_VFNMI; } + /* FEAT_CMOW adds CMOW */ + + if (cpu_isar_feature(aa64_cmow, cpu)) { + valid_mask |= HCRX_CMOW; + } /* Clear RES0 bits. */ env->cp15.hcrx_el2 = value & valid_mask; @@ -11860,13 +11879,20 @@ void arm_cpu_do_interrupt(CPUState *cs) uint64_t arm_sctlr(CPUARMState *env, int el) { - if (arm_aa32_secure_pl1_0(env)) { - /* In Secure PL1&0 SCTLR_S is always controlling */ - el = 3; - } else if (el == 0) { - /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */ + /* Only EL0 needs to be adjusted for EL1&0 or EL2&0 or EL3&0 */ + if (el == 0) { ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0); - el = mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1; + switch (mmu_idx) { + case ARMMMUIdx_E20_0: + el = 2; + break; + case ARMMMUIdx_E30_0: + el = 3; + break; + default: + el = 1; + break; + } } return env->cp15.sctlr_el[el]; } @@ -12524,12 +12550,8 @@ int fp_exception_el(CPUARMState *env, int cur_el) return 0; } -/* - * Return the exception level we're running at if this is our mmu_idx. - * s_pl1_0 should be true if this is the AArch32 Secure PL1&0 translation - * regime. - */ -int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx, bool s_pl1_0) +/* Return the exception level we're running at if this is our mmu_idx */ +int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx) { if (mmu_idx & ARM_MMU_IDX_M) { return mmu_idx & ARM_MMU_IDX_M_PRIV; @@ -12538,15 +12560,17 @@ int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx, bool s_pl1_0) switch (mmu_idx) { case ARMMMUIdx_E10_0: case ARMMMUIdx_E20_0: + case ARMMMUIdx_E30_0: return 0; case ARMMMUIdx_E10_1: case ARMMMUIdx_E10_1_PAN: - return s_pl1_0 ? 3 : 1; + return 1; case ARMMMUIdx_E2: case ARMMMUIdx_E20_2: case ARMMMUIdx_E20_2_PAN: return 2; case ARMMMUIdx_E3: + case ARMMMUIdx_E30_3_PAN: return 3; default: g_assert_not_reached(); @@ -12575,19 +12599,13 @@ ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el) hcr = arm_hcr_el2_eff(env); if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { idx = ARMMMUIdx_E20_0; + } else if (arm_is_secure_below_el3(env) && + !arm_el_is_aa64(env, 3)) { + idx = ARMMMUIdx_E30_0; } else { idx = ARMMMUIdx_E10_0; } break; - case 3: - /* - * AArch64 EL3 has its own translation regime; AArch32 EL3 - * uses the Secure PL1&0 translation regime. - */ - if (arm_el_is_aa64(env, 3)) { - return ARMMMUIdx_E3; - } - /* fall through */ case 1: if (arm_pan_enabled(env)) { idx = ARMMMUIdx_E10_1_PAN; @@ -12607,6 +12625,11 @@ ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el) idx = ARMMMUIdx_E2; } break; + case 3: + if (!arm_el_is_aa64(env, 3) && arm_pan_enabled(env)) { + return ARMMMUIdx_E30_3_PAN; + } + return ARMMMUIdx_E3; default: g_assert_not_reached(); } diff --git a/target/arm/internals.h b/target/arm/internals.h index fd8f7c82aa..e37f459af3 100644 --- a/target/arm/internals.h +++ b/target/arm/internals.h @@ -276,20 +276,6 @@ FIELD(CNTHCTL, CNTPMASK, 19, 1) #define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */ /** - * arm_aa32_secure_pl1_0(): Return true if in Secure PL1&0 regime - * - * Return true if the CPU is in the Secure PL1&0 translation regime. - * This requires that EL3 exists and is AArch32 and we are currently - * Secure. If this is the case then the ARMMMUIdx_E10* apply and - * mean we are in EL3, not EL1. 
- */ -static inline bool arm_aa32_secure_pl1_0(CPUARMState *env) -{ - return arm_feature(env, ARM_FEATURE_EL3) && - !arm_el_is_aa64(env, 3) && arm_is_secure(env); -} - -/** * raise_exception: Raise the specified exception. * Raise a guest exception with the specified value, syndrome register * and target exception level. This should be called from helper functions, @@ -841,12 +827,7 @@ static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx) return mmu_idx | ARM_MMU_IDX_A; } -/** - * Return the exception level we're running at if our current MMU index - * is @mmu_idx. @s_pl1_0 should be true if this is the AArch32 - * Secure PL1&0 translation regime. - */ -int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx, bool s_pl1_0); +int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx); /* Return the MMU index for a v7M CPU in the specified security state */ ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate); @@ -890,7 +871,16 @@ static inline void arm_call_el_change_hook(ARMCPU *cpu) } } -/* Return true if this address translation regime has two ranges. */ +/* + * Return true if this address translation regime has two ranges. + * Note that this will not return the correct answer for AArch32 + * Secure PL1&0 (i.e. mmu indexes E3, E30_0, E30_3_PAN), but it is + * never called from a context where EL3 can be AArch32. (The + * correct return value for ARMMMUIdx_E3 would be different for + * that case, so we can't just make the function return the + * correct value anyway; we would need an extra "bool e3_is_aarch32" + * argument which all the current callsites would pass as 'false'.) + */ static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx) { switch (mmu_idx) { @@ -915,6 +905,7 @@ static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx) case ARMMMUIdx_Stage1_E1_PAN: case ARMMMUIdx_E10_1_PAN: case ARMMMUIdx_E20_2_PAN: + case ARMMMUIdx_E30_3_PAN: return true; default: return false; @@ -938,14 +929,15 @@ static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx) case ARMMMUIdx_E2: return 2; case ARMMMUIdx_E3: + case ARMMMUIdx_E30_0: + case ARMMMUIdx_E30_3_PAN: return 3; case ARMMMUIdx_E10_0: case ARMMMUIdx_Stage1_E0: - case ARMMMUIdx_E10_1: - case ARMMMUIdx_E10_1_PAN: case ARMMMUIdx_Stage1_E1: case ARMMMUIdx_Stage1_E1_PAN: - return arm_el_is_aa64(env, 3) || !arm_is_secure_below_el3(env) ? 
1 : 3; + case ARMMMUIdx_E10_1: + case ARMMMUIdx_E10_1_PAN: case ARMMMUIdx_MPrivNegPri: case ARMMMUIdx_MUserNegPri: case ARMMMUIdx_MPriv: @@ -965,6 +957,7 @@ static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx) switch (mmu_idx) { case ARMMMUIdx_E10_0: case ARMMMUIdx_E20_0: + case ARMMMUIdx_E30_0: case ARMMMUIdx_Stage1_E0: case ARMMMUIdx_MUser: case ARMMMUIdx_MSUser: diff --git a/target/arm/ptw.c b/target/arm/ptw.c index dd40268397..9849949508 100644 --- a/target/arm/ptw.c +++ b/target/arm/ptw.c @@ -280,6 +280,8 @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx, case ARMMMUIdx_E20_2_PAN: case ARMMMUIdx_E2: case ARMMMUIdx_E3: + case ARMMMUIdx_E30_0: + case ARMMMUIdx_E30_3_PAN: break; case ARMMMUIdx_Phys_S: @@ -3607,11 +3609,7 @@ bool get_phys_addr(CPUARMState *env, vaddr address, case ARMMMUIdx_Stage1_E1: case ARMMMUIdx_Stage1_E1_PAN: case ARMMMUIdx_E2: - if (arm_aa32_secure_pl1_0(env)) { - ss = ARMSS_Secure; - } else { - ss = arm_security_space_below_el3(env); - } + ss = arm_security_space_below_el3(env); break; case ARMMMUIdx_Stage2: /* @@ -3639,6 +3637,8 @@ bool get_phys_addr(CPUARMState *env, vaddr address, ss = ARMSS_Secure; break; case ARMMMUIdx_E3: + case ARMMMUIdx_E30_0: + case ARMMMUIdx_E30_3_PAN: if (arm_feature(env, ARM_FEATURE_AARCH64) && cpu_isar_feature(aa64_rme, env_archcpu(env))) { ss = ARMSS_Root; diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c index 0168920828..2963d7510f 100644 --- a/target/arm/tcg/cpu64.c +++ b/target/arm/tcg/cpu64.c @@ -1218,6 +1218,7 @@ void aarch64_max_tcg_initfn(Object *obj) t = FIELD_DP64(t, ID_AA64MMFR1, ETS, 2); /* FEAT_ETS2 */ t = FIELD_DP64(t, ID_AA64MMFR1, HCX, 1); /* FEAT_HCX */ t = FIELD_DP64(t, ID_AA64MMFR1, TIDCP1, 1); /* FEAT_TIDCP1 */ + t = FIELD_DP64(t, ID_AA64MMFR1, CMOW, 1); /* FEAT_CMOW */ cpu->isar.id_aa64mmfr1 = t; t = cpu->isar.id_aa64mmfr2; diff --git a/target/arm/tcg/hflags.c b/target/arm/tcg/hflags.c index bab7822ef6..f03977b4b0 100644 --- a/target/arm/tcg/hflags.c +++ b/target/arm/tcg/hflags.c @@ -198,10 +198,6 @@ static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el, DP_TBFLAG_A32(flags, SME_TRAP_NONSTREAMING, 1); } - if (arm_aa32_secure_pl1_0(env)) { - DP_TBFLAG_A32(flags, S_PL1_0, 1); - } - return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags); } diff --git a/target/arm/tcg/op_helper.c b/target/arm/tcg/op_helper.c index c083e5cfb8..1ecb465988 100644 --- a/target/arm/tcg/op_helper.c +++ b/target/arm/tcg/op_helper.c @@ -912,7 +912,19 @@ void HELPER(tidcp_el0)(CPUARMState *env, uint32_t syndrome) { /* See arm_sctlr(), but we also need the sctlr el. */ ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0); - int target_el = mmu_idx == ARMMMUIdx_E20_0 ? 
2 : 1; + int target_el; + + switch (mmu_idx) { + case ARMMMUIdx_E20_0: + target_el = 2; + break; + case ARMMMUIdx_E30_0: + target_el = 3; + break; + default: + target_el = 1; + break; + } /* * The bit is not valid unless the target el is aa64, but since the diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index ec0b1ee252..b2851ea503 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -11690,7 +11690,7 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, dc->tbii = EX_TBFLAG_A64(tb_flags, TBII); dc->tbid = EX_TBFLAG_A64(tb_flags, TBID); dc->tcma = EX_TBFLAG_A64(tb_flags, TCMA); - dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx, false); + dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx); #if !defined(CONFIG_USER_ONLY) dc->user = (dc->current_el == 0); #endif diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c index e2748ff2bb..9ee761fc64 100644 --- a/target/arm/tcg/translate.c +++ b/target/arm/tcg/translate.c @@ -228,6 +228,9 @@ static inline int get_a32_user_mem_index(DisasContext *s) */ switch (s->mmu_idx) { case ARMMMUIdx_E3: + case ARMMMUIdx_E30_0: + case ARMMMUIdx_E30_3_PAN: + return arm_to_core_mmu_idx(ARMMMUIdx_E30_0); case ARMMMUIdx_E2: /* this one is UNPREDICTABLE */ case ARMMMUIdx_E10_0: case ARMMMUIdx_E10_1: @@ -7546,6 +7549,10 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX); dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx); + dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx); +#if !defined(CONFIG_USER_ONLY) + dc->user = (dc->current_el == 0); +#endif dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL); dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM); dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL); @@ -7576,12 +7583,7 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) } dc->sme_trap_nonstreaming = EX_TBFLAG_A32(tb_flags, SME_TRAP_NONSTREAMING); - dc->s_pl1_0 = EX_TBFLAG_A32(tb_flags, S_PL1_0); } - dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx, dc->s_pl1_0); -#if !defined(CONFIG_USER_ONLY) - dc->user = (dc->current_el == 0); -#endif dc->lse2 = false; /* applies only to aarch64 */ dc->cp_regs = cpu->cp_regs; dc->features = env->features; diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h index 5a2e10d64d..20cd0e851c 100644 --- a/target/arm/tcg/translate.h +++ b/target/arm/tcg/translate.h @@ -165,8 +165,6 @@ typedef struct DisasContext { uint8_t gm_blocksize; /* True if the current insn_start has been updated. */ bool insn_start_updated; - /* True if this is the AArch32 Secure PL1&0 translation regime */ - bool s_pl1_0; /* Bottom two bits of XScale c15_cpar coprocessor access control reg */ int c15_cpar; /* Offset from VNCR_EL2 when FEAT_NV2 redirects this reg to memory */ diff --git a/target/arm/tcg/vec_helper.c b/target/arm/tcg/vec_helper.c index 22ddb96881..e825d501a2 100644 --- a/target/arm/tcg/vec_helper.c +++ b/target/arm/tcg/vec_helper.c @@ -836,6 +836,13 @@ void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \ { \ intptr_t i = 0, opr_sz = simd_oprsz(desc); \ intptr_t opr_sz_n = opr_sz / sizeof(TYPED); \ + /* \ + * Special case: opr_sz == 8 from AA64/AA32 advsimd means the \ + * first iteration might not be a full 16 byte segment. But \ + * for vector lengths beyond that this must be SVE and we know \ + * opr_sz is a multiple of 16, so we need not clamp segend \ + * to opr_sz_n when we advance it at the end of the loop. 
\ + */ \ intptr_t segend = MIN(16 / sizeof(TYPED), opr_sz_n); \ intptr_t index = simd_data(desc); \ TYPED *d = vd, *a = va; \ @@ -853,7 +860,7 @@ void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \ n[i * 4 + 2] * m2 + \ n[i * 4 + 3] * m3); \ } while (++i < segend); \ - segend = i + 4; \ + segend = i + (16 / sizeof(TYPED)); \ } while (i < opr_sz_n); \ clear_tail(d, opr_sz, simd_maxsz(desc)); \ } diff --git a/target/hppa/fpu_helper.c b/target/hppa/fpu_helper.c index deaed2b65d..0e44074ba8 100644 --- a/target/hppa/fpu_helper.c +++ b/target/hppa/fpu_helper.c @@ -49,6 +49,12 @@ void HELPER(loaded_fr0)(CPUHPPAState *env) d = FIELD_EX32(shadow, FPSR, D); set_flush_to_zero(d, &env->fp_status); set_flush_inputs_to_zero(d, &env->fp_status); + + /* + * TODO: we only need to do this at CPU reset, but currently + * HPPA does not implement a CPU reset method at all... + */ + set_float_2nan_prop_rule(float_2nan_prop_s_ab, &env->fp_status); } void cpu_hppa_loaded_fr0(CPUHPPAState *env) diff --git a/target/i386/cpu.c b/target/i386/cpu.c index 3baa95481f..58c96eafea 100644 --- a/target/i386/cpu.c +++ b/target/i386/cpu.c @@ -238,23 +238,23 @@ static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache) 0 /* Invalid value */) static uint32_t max_thread_ids_for_cache(X86CPUTopoInfo *topo_info, - enum CPUTopoLevel share_level) + enum CpuTopologyLevel share_level) { uint32_t num_ids = 0; switch (share_level) { - case CPU_TOPO_LEVEL_CORE: + case CPU_TOPOLOGY_LEVEL_CORE: num_ids = 1 << apicid_core_offset(topo_info); break; - case CPU_TOPO_LEVEL_DIE: + case CPU_TOPOLOGY_LEVEL_DIE: num_ids = 1 << apicid_die_offset(topo_info); break; - case CPU_TOPO_LEVEL_PACKAGE: + case CPU_TOPOLOGY_LEVEL_SOCKET: num_ids = 1 << apicid_pkg_offset(topo_info); break; default: /* - * Currently there is no use case for SMT and MODULE, so use + * Currently there is no use case for THREAD and MODULE, so use * assert directly to facilitate debugging.
*/ g_assert_not_reached(); @@ -303,19 +303,19 @@ static void encode_cache_cpuid4(CPUCacheInfo *cache, } static uint32_t num_threads_by_topo_level(X86CPUTopoInfo *topo_info, - enum CPUTopoLevel topo_level) + enum CpuTopologyLevel topo_level) { switch (topo_level) { - case CPU_TOPO_LEVEL_SMT: + case CPU_TOPOLOGY_LEVEL_THREAD: return 1; - case CPU_TOPO_LEVEL_CORE: + case CPU_TOPOLOGY_LEVEL_CORE: return topo_info->threads_per_core; - case CPU_TOPO_LEVEL_MODULE: + case CPU_TOPOLOGY_LEVEL_MODULE: return topo_info->threads_per_core * topo_info->cores_per_module; - case CPU_TOPO_LEVEL_DIE: + case CPU_TOPOLOGY_LEVEL_DIE: return topo_info->threads_per_core * topo_info->cores_per_module * topo_info->modules_per_die; - case CPU_TOPO_LEVEL_PACKAGE: + case CPU_TOPOLOGY_LEVEL_SOCKET: return topo_info->threads_per_core * topo_info->cores_per_module * topo_info->modules_per_die * topo_info->dies_per_pkg; default: @@ -325,18 +325,18 @@ static uint32_t num_threads_by_topo_level(X86CPUTopoInfo *topo_info, } static uint32_t apicid_offset_by_topo_level(X86CPUTopoInfo *topo_info, - enum CPUTopoLevel topo_level) + enum CpuTopologyLevel topo_level) { switch (topo_level) { - case CPU_TOPO_LEVEL_SMT: + case CPU_TOPOLOGY_LEVEL_THREAD: return 0; - case CPU_TOPO_LEVEL_CORE: + case CPU_TOPOLOGY_LEVEL_CORE: return apicid_core_offset(topo_info); - case CPU_TOPO_LEVEL_MODULE: + case CPU_TOPOLOGY_LEVEL_MODULE: return apicid_module_offset(topo_info); - case CPU_TOPO_LEVEL_DIE: + case CPU_TOPOLOGY_LEVEL_DIE: return apicid_die_offset(topo_info); - case CPU_TOPO_LEVEL_PACKAGE: + case CPU_TOPOLOGY_LEVEL_SOCKET: return apicid_pkg_offset(topo_info); default: g_assert_not_reached(); @@ -344,18 +344,18 @@ static uint32_t apicid_offset_by_topo_level(X86CPUTopoInfo *topo_info, return 0; } -static uint32_t cpuid1f_topo_type(enum CPUTopoLevel topo_level) +static uint32_t cpuid1f_topo_type(enum CpuTopologyLevel topo_level) { switch (topo_level) { - case CPU_TOPO_LEVEL_INVALID: + case CPU_TOPOLOGY_LEVEL_INVALID: return CPUID_1F_ECX_TOPO_LEVEL_INVALID; - case CPU_TOPO_LEVEL_SMT: + case CPU_TOPOLOGY_LEVEL_THREAD: return CPUID_1F_ECX_TOPO_LEVEL_SMT; - case CPU_TOPO_LEVEL_CORE: + case CPU_TOPOLOGY_LEVEL_CORE: return CPUID_1F_ECX_TOPO_LEVEL_CORE; - case CPU_TOPO_LEVEL_MODULE: + case CPU_TOPOLOGY_LEVEL_MODULE: return CPUID_1F_ECX_TOPO_LEVEL_MODULE; - case CPU_TOPO_LEVEL_DIE: + case CPU_TOPOLOGY_LEVEL_DIE: return CPUID_1F_ECX_TOPO_LEVEL_DIE; default: /* Other types are not supported in QEMU. */ @@ -370,38 +370,41 @@ static void encode_topo_cpuid1f(CPUX86State *env, uint32_t count, uint32_t *ecx, uint32_t *edx) { X86CPU *cpu = env_archcpu(env); - unsigned long level, next_level; + unsigned long level, base_level, next_level; uint32_t num_threads_next_level, offset_next_level; - assert(count + 1 < CPU_TOPO_LEVEL_MAX); + assert(count <= CPU_TOPOLOGY_LEVEL_SOCKET); /* * Find the No.(count + 1) topology level in avail_cpu_topo bitmap. - * The search starts from bit 1 (CPU_TOPO_LEVEL_INVALID + 1). + * The search starts from bit 0 (CPU_TOPOLOGY_LEVEL_THREAD). */ - level = CPU_TOPO_LEVEL_INVALID; + level = CPU_TOPOLOGY_LEVEL_THREAD; + base_level = level; for (int i = 0; i <= count; i++) { level = find_next_bit(env->avail_cpu_topo, - CPU_TOPO_LEVEL_PACKAGE, - level + 1); + CPU_TOPOLOGY_LEVEL_SOCKET, + base_level); /* * CPUID[0x1f] doesn't explicitly encode the package level, * and it just encodes the invalid level (all fields are 0) * into the last subleaf of 0x1f. 
*/ - if (level == CPU_TOPO_LEVEL_PACKAGE) { - level = CPU_TOPO_LEVEL_INVALID; + if (level == CPU_TOPOLOGY_LEVEL_SOCKET) { + level = CPU_TOPOLOGY_LEVEL_INVALID; break; } + /* Search the next level. */ + base_level = level + 1; } - if (level == CPU_TOPO_LEVEL_INVALID) { + if (level == CPU_TOPOLOGY_LEVEL_INVALID) { num_threads_next_level = 0; offset_next_level = 0; } else { next_level = find_next_bit(env->avail_cpu_topo, - CPU_TOPO_LEVEL_PACKAGE, + CPU_TOPOLOGY_LEVEL_SOCKET, level + 1); num_threads_next_level = num_threads_by_topo_level(topo_info, next_level); @@ -577,7 +580,7 @@ static CPUCacheInfo legacy_l1d_cache = { .sets = 64, .partitions = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }; /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */ @@ -592,7 +595,7 @@ static CPUCacheInfo legacy_l1d_cache_amd = { .partitions = 1, .lines_per_tag = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }; /* L1 instruction cache: */ @@ -606,7 +609,7 @@ static CPUCacheInfo legacy_l1i_cache = { .sets = 64, .partitions = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }; /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */ @@ -621,7 +624,7 @@ static CPUCacheInfo legacy_l1i_cache_amd = { .partitions = 1, .lines_per_tag = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }; /* Level 2 unified cache: */ @@ -635,7 +638,7 @@ static CPUCacheInfo legacy_l2_cache = { .sets = 4096, .partitions = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }; /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */ @@ -645,7 +648,7 @@ static CPUCacheInfo legacy_l2_cache_cpuid2 = { .size = 2 * MiB, .line_size = 64, .associativity = 8, - .share_level = CPU_TOPO_LEVEL_INVALID, + .share_level = CPU_TOPOLOGY_LEVEL_INVALID, }; @@ -659,7 +662,7 @@ static CPUCacheInfo legacy_l2_cache_amd = { .associativity = 16, .sets = 512, .partitions = 1, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }; /* Level 3 unified cache: */ @@ -675,7 +678,7 @@ static CPUCacheInfo legacy_l3_cache = { .self_init = true, .inclusive = true, .complex_indexing = true, - .share_level = CPU_TOPO_LEVEL_DIE, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, }; /* TLB definitions: */ @@ -2082,7 +2085,7 @@ static const CPUCaches epyc_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l1i_cache = &(CPUCacheInfo) { .type = INSTRUCTION_CACHE, @@ -2095,7 +2098,7 @@ static const CPUCaches epyc_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l2_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2106,7 +2109,7 @@ static const CPUCaches epyc_cache_info = { .partitions = 1, .sets = 1024, .lines_per_tag = 1, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l3_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2120,7 +2123,7 @@ static const CPUCaches epyc_cache_info = { .self_init = true, .inclusive = true, .complex_indexing = true, - .share_level = CPU_TOPO_LEVEL_DIE, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, }, }; @@ -2136,7 +2139,7 @@ static CPUCaches 
epyc_v4_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l1i_cache = &(CPUCacheInfo) { .type = INSTRUCTION_CACHE, @@ -2149,7 +2152,7 @@ static CPUCaches epyc_v4_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l2_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2160,7 +2163,7 @@ static CPUCaches epyc_v4_cache_info = { .partitions = 1, .sets = 1024, .lines_per_tag = 1, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l3_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2174,7 +2177,7 @@ static CPUCaches epyc_v4_cache_info = { .self_init = true, .inclusive = true, .complex_indexing = false, - .share_level = CPU_TOPO_LEVEL_DIE, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, }, }; @@ -2190,7 +2193,7 @@ static const CPUCaches epyc_rome_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l1i_cache = &(CPUCacheInfo) { .type = INSTRUCTION_CACHE, @@ -2203,7 +2206,7 @@ static const CPUCaches epyc_rome_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l2_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2214,7 +2217,7 @@ static const CPUCaches epyc_rome_cache_info = { .partitions = 1, .sets = 1024, .lines_per_tag = 1, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l3_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2228,7 +2231,7 @@ static const CPUCaches epyc_rome_cache_info = { .self_init = true, .inclusive = true, .complex_indexing = true, - .share_level = CPU_TOPO_LEVEL_DIE, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, }, }; @@ -2244,7 +2247,7 @@ static const CPUCaches epyc_rome_v3_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l1i_cache = &(CPUCacheInfo) { .type = INSTRUCTION_CACHE, @@ -2257,7 +2260,7 @@ static const CPUCaches epyc_rome_v3_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l2_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2268,7 +2271,7 @@ static const CPUCaches epyc_rome_v3_cache_info = { .partitions = 1, .sets = 1024, .lines_per_tag = 1, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l3_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2282,7 +2285,7 @@ static const CPUCaches epyc_rome_v3_cache_info = { .self_init = true, .inclusive = true, .complex_indexing = false, - .share_level = CPU_TOPO_LEVEL_DIE, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, }, }; @@ -2298,7 +2301,7 @@ static const CPUCaches epyc_milan_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l1i_cache = &(CPUCacheInfo) { .type = INSTRUCTION_CACHE, @@ -2311,7 +2314,7 @@ static const CPUCaches epyc_milan_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l2_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2322,7 +2325,7 @@ static 
const CPUCaches epyc_milan_cache_info = { .partitions = 1, .sets = 1024, .lines_per_tag = 1, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l3_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2336,7 +2339,7 @@ static const CPUCaches epyc_milan_cache_info = { .self_init = true, .inclusive = true, .complex_indexing = true, - .share_level = CPU_TOPO_LEVEL_DIE, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, }, }; @@ -2352,7 +2355,7 @@ static const CPUCaches epyc_milan_v2_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l1i_cache = &(CPUCacheInfo) { .type = INSTRUCTION_CACHE, @@ -2365,7 +2368,7 @@ static const CPUCaches epyc_milan_v2_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l2_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2376,7 +2379,7 @@ static const CPUCaches epyc_milan_v2_cache_info = { .partitions = 1, .sets = 1024, .lines_per_tag = 1, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l3_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2390,7 +2393,7 @@ static const CPUCaches epyc_milan_v2_cache_info = { .self_init = true, .inclusive = true, .complex_indexing = false, - .share_level = CPU_TOPO_LEVEL_DIE, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, }, }; @@ -2406,7 +2409,7 @@ static const CPUCaches epyc_genoa_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l1i_cache = &(CPUCacheInfo) { .type = INSTRUCTION_CACHE, @@ -2419,7 +2422,7 @@ static const CPUCaches epyc_genoa_cache_info = { .lines_per_tag = 1, .self_init = 1, .no_invd_sharing = true, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l2_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2430,7 +2433,7 @@ static const CPUCaches epyc_genoa_cache_info = { .partitions = 1, .sets = 2048, .lines_per_tag = 1, - .share_level = CPU_TOPO_LEVEL_CORE, + .share_level = CPU_TOPOLOGY_LEVEL_CORE, }, .l3_cache = &(CPUCacheInfo) { .type = UNIFIED_CACHE, @@ -2444,7 +2447,7 @@ static const CPUCaches epyc_genoa_cache_info = { .self_init = true, .inclusive = true, .complex_indexing = false, - .share_level = CPU_TOPO_LEVEL_DIE, + .share_level = CPU_TOPOLOGY_LEVEL_DIE, }, }; @@ -6588,7 +6591,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, /* Share the cache at package level. */ *eax |= max_thread_ids_for_cache(&topo_info, - CPU_TOPO_LEVEL_PACKAGE) << 14; + CPU_TOPOLOGY_LEVEL_SOCKET) << 14; } } } else if (cpu->vendor_cpuid_only && IS_AMD_CPU(env)) { @@ -7200,6 +7203,10 @@ static void x86_cpu_reset_hold(Object *obj, ResetType type) memset(env, 0, offsetof(CPUX86State, end_reset_fields)); + if (tcg_enabled()) { + cpu_init_fp_statuses(env); + } + env->old_exception = -1; /* init to reset state */ @@ -8166,10 +8173,10 @@ static void x86_cpu_init_default_topo(X86CPU *cpu) env->nr_modules = 1; env->nr_dies = 1; - /* SMT, core and package levels are set by default. */ - set_bit(CPU_TOPO_LEVEL_SMT, env->avail_cpu_topo); - set_bit(CPU_TOPO_LEVEL_CORE, env->avail_cpu_topo); - set_bit(CPU_TOPO_LEVEL_PACKAGE, env->avail_cpu_topo); + /* thread, core and socket levels are set by default. 
*/ + set_bit(CPU_TOPOLOGY_LEVEL_THREAD, env->avail_cpu_topo); + set_bit(CPU_TOPOLOGY_LEVEL_CORE, env->avail_cpu_topo); + set_bit(CPU_TOPOLOGY_LEVEL_SOCKET, env->avail_cpu_topo); } static void x86_cpu_initfn(Object *obj) diff --git a/target/i386/cpu.h b/target/i386/cpu.h index 59959b8b7a..b65eedb617 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -1716,7 +1716,7 @@ typedef struct CPUCacheInfo { * Used to encode CPUID[4].EAX[bits 25:14] or * CPUID[0x8000001D].EAX[bits 25:14]. */ - enum CPUTopoLevel share_level; + CpuTopologyLevel share_level; } CPUCacheInfo; @@ -2051,7 +2051,7 @@ typedef struct CPUArchState { unsigned nr_modules; /* Bitmap of available CPU topology levels for this CPU. */ - DECLARE_BITMAP(avail_cpu_topo, CPU_TOPO_LEVEL_MAX); + DECLARE_BITMAP(avail_cpu_topo, CPU_TOPOLOGY_LEVEL__MAX); } CPUX86State; struct kvm_msrs; @@ -2614,6 +2614,9 @@ static inline bool cpu_vmx_maybe_enabled(CPUX86State *env) int get_pg_mode(CPUX86State *env); /* fpu_helper.c */ + +/* Set all non-runtime-variable float_status fields to x86 handling */ +void cpu_init_fp_statuses(CPUX86State *env); void update_fp_status(CPUX86State *env); void update_mxcsr_status(CPUX86State *env); void update_mxcsr_from_sse_status(CPUX86State *env); diff --git a/target/i386/tcg/fpu_helper.c b/target/i386/tcg/fpu_helper.c index e1b850f3fc..53b49bb297 100644 --- a/target/i386/tcg/fpu_helper.c +++ b/target/i386/tcg/fpu_helper.c @@ -135,6 +135,46 @@ static void fpu_set_exception(CPUX86State *env, int mask) } } +void cpu_init_fp_statuses(CPUX86State *env) +{ + /* + * Initialise the non-runtime-varying fields of the various + * float_status words to x86 behaviour. This must be called at + * CPU reset because the float_status words are in the + * "zeroed on reset" portion of the CPU state struct. + * Fields in float_status that vary under guest control are set + * via the codepath for setting that register, eg cpu_set_fpuc(). + */ + /* + * Use x87 NaN propagation rules: + * SNaN + QNaN => return the QNaN + * two SNaNs => return the one with the larger significand, silenced + * two QNaNs => return the one with the larger significand + * SNaN and a non-NaN => return the SNaN, silenced + * QNaN and a non-NaN => return the QNaN + * + * If we get down to comparing significands and they are the same, + * return the NaN with the positive sign bit (if any). + */ + set_float_2nan_prop_rule(float_2nan_prop_x87, &env->fp_status); + /* + * TODO: These are incorrect: the x86 Software Developer's Manual vol 1 + * section 4.8.3.5 "Operating on SNaNs and QNaNs" says that the + * "larger significand" behaviour is only used for x87 FPU operations. + * For SSE the required behaviour is to always return the first NaN, + * which is float_2nan_prop_ab. + * + * mmx_status is used only for the AMD 3DNow! instructions, which + * are documented in the "3DNow! Technology Manual" as not supporting + * NaNs or infinities as inputs. The result of passing two NaNs is + * documented as "undefined", so we can do what we choose. + * (Strictly there is some behaviour we don't implement correctly + * for these "unsupported" NaN and Inf values, like "NaN * 0 == 0".) 
+ */ + set_float_2nan_prop_rule(float_2nan_prop_x87, &env->mmx_status); + set_float_2nan_prop_rule(float_2nan_prop_x87, &env->sse_status); +} + static inline uint8_t save_exception_flags(CPUX86State *env) { uint8_t old_flags = get_float_exception_flags(&env->fp_status); diff --git a/target/loongarch/tcg/fpu_helper.c b/target/loongarch/tcg/fpu_helper.c index f6753c5875..21bc3b04a9 100644 --- a/target/loongarch/tcg/fpu_helper.c +++ b/target/loongarch/tcg/fpu_helper.c @@ -31,6 +31,7 @@ void restore_fp_status(CPULoongArchState *env) set_float_rounding_mode(ieee_rm[(env->fcsr0 >> FCSR0_RM) & 0x3], &env->fp_status); set_flush_to_zero(0, &env->fp_status); + set_float_2nan_prop_rule(float_2nan_prop_s_ab, &env->fp_status); } int ieee_ex_to_loongarch(int xcpt) diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c index 1d49f4cb23..5fe335558a 100644 --- a/target/m68k/cpu.c +++ b/target/m68k/cpu.c @@ -93,6 +93,22 @@ static void m68k_cpu_reset_hold(Object *obj, ResetType type) env->fregs[i].d = nan; } cpu_m68k_set_fpcr(env, 0); + /* + * M68000 FAMILY PROGRAMMER'S REFERENCE MANUAL + * 3.4 FLOATING-POINT INSTRUCTION DETAILS + * If either operand, but not both operands, of an operation is a + * nonsignaling NaN, then that NaN is returned as the result. If both + * operands are nonsignaling NaNs, then the destination operand + * nonsignaling NaN is returned as the result. + * If either operand to an operation is a signaling NaN (SNaN), then the + * SNaN bit is set in the FPSR EXC byte. If the SNaN exception enable bit + * is set in the FPCR ENABLE byte, then the exception is taken and the + * destination is not modified. If the SNaN exception enable bit is not + * set, setting the SNaN bit in the operand to a one converts the SNaN to + * a nonsignaling NaN. The operation then continues as described in the + * preceding paragraph for nonsignaling NaNs. + */ + set_float_2nan_prop_rule(float_2nan_prop_ab, &env->fp_status); env->fpsr = 0; /* TODO: We should set PC from the interrupt vector. 
*/ diff --git a/target/m68k/fpu_helper.c b/target/m68k/fpu_helper.c index 8314791f50..a605162b71 100644 --- a/target/m68k/fpu_helper.c +++ b/target/m68k/fpu_helper.c @@ -620,6 +620,7 @@ void HELPER(frem)(CPUM68KState *env, FPReg *res, FPReg *val0, FPReg *val1) int sign; /* Calculate quotient directly using round to nearest mode */ + set_float_2nan_prop_rule(float_2nan_prop_ab, &fp_status); set_float_rounding_mode(float_round_nearest_even, &fp_status); set_floatx80_rounding_precision( get_floatx80_rounding_precision(&env->fp_status), &fp_status); diff --git a/target/m68k/helper.c b/target/m68k/helper.c index 9d3db8419d..9bfc6ae97c 100644 --- a/target/m68k/helper.c +++ b/target/m68k/helper.c @@ -36,7 +36,7 @@ static int cf_fpu_gdb_get_reg(CPUState *cs, GByteArray *mem_buf, int n) CPUM68KState *env = &cpu->env; if (n < 8) { - float_status s; + float_status s = {}; return gdb_get_reg64(mem_buf, floatx80_to_float64(env->fregs[n].d, &s)); } switch (n) { @@ -56,7 +56,7 @@ static int cf_fpu_gdb_set_reg(CPUState *cs, uint8_t *mem_buf, int n) CPUM68KState *env = &cpu->env; if (n < 8) { - float_status s; + float_status s = {}; env->fregs[n].d = float64_to_floatx80(ldq_be_p(mem_buf), &s); return 8; } diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c index 135947ee80..710eb1146c 100644 --- a/target/microblaze/cpu.c +++ b/target/microblaze/cpu.c @@ -201,6 +201,13 @@ static void mb_cpu_reset_hold(Object *obj, ResetType type) env->pc = cpu->cfg.base_vectors; + set_float_rounding_mode(float_round_nearest_even, &env->fp_status); + /* + * TODO: this is probably not the correct NaN propagation rule for + * this architecture. + */ + set_float_2nan_prop_rule(float_2nan_prop_x87, &env->fp_status); + #if defined(CONFIG_USER_ONLY) /* start in user mode with interrupts enabled. */ mb_cpu_write_msr(env, MSR_EE | MSR_IE | MSR_VM | MSR_UM); @@ -311,15 +318,12 @@ static void mb_cpu_realizefn(DeviceState *dev, Error **errp) static void mb_cpu_initfn(Object *obj) { MicroBlazeCPU *cpu = MICROBLAZE_CPU(obj); - CPUMBState *env = &cpu->env; gdb_register_coprocessor(CPU(cpu), mb_cpu_gdb_read_stack_protect, mb_cpu_gdb_write_stack_protect, gdb_find_static_feature("microblaze-stack-protect.xml"), 0); - set_float_rounding_mode(float_round_nearest_even, &env->fp_status); - #ifndef CONFIG_USER_ONLY /* Inbound IRQ and FIR lines */ qdev_init_gpio_in(DEVICE(cpu), microblaze_cpu_set_irq, 2); @@ -328,9 +332,16 @@ static void mb_cpu_initfn(Object *obj) qdev_init_gpio_in_named(DEVICE(cpu), mb_cpu_ns_axi_dc, "ns_axi_dc", 1); qdev_init_gpio_in_named(DEVICE(cpu), mb_cpu_ns_axi_ic, "ns_axi_ic", 1); #endif + + /* The restricted 'endianness' property is equivalent to 'little-endian' */ + object_property_add_alias(obj, "little-endian", obj, "endianness"); } static Property mb_properties[] = { + /* + * The following properties are used by the Xilinx DTS conversion tool; + * do not rename them. + */ DEFINE_PROP_UINT32("base-vectors", MicroBlazeCPU, cfg.base_vectors, 0), DEFINE_PROP_BOOL("use-stack-protection", MicroBlazeCPU, cfg.stackprot, false), @@ -387,6 +398,9 @@ static Property mb_properties[] = { DEFINE_PROP_UINT8("pvr", MicroBlazeCPU, cfg.pvr, C_PVR_FULL), DEFINE_PROP_UINT8("pvr-user1", MicroBlazeCPU, cfg.pvr_user1, 0), DEFINE_PROP_UINT32("pvr-user2", MicroBlazeCPU, cfg.pvr_user2, 0), + /* + * End of properties reserved by the Xilinx DTS conversion tool.
+ */ DEFINE_PROP_END_OF_LIST(), }; diff --git a/target/mips/cpu-defs.c.inc b/target/mips/cpu-defs.c.inc index fbf787d8ce..922fc39138 100644 --- a/target/mips/cpu-defs.c.inc +++ b/target/mips/cpu-defs.c.inc @@ -314,7 +314,7 @@ const mips_def_t mips_defs[] = (0x3fe << CP0SRSC4_SRS14) | (0x3fe << CP0SRSC4_SRS13), .SEGBITS = 32, .PABITS = 32, - .insn_flags = CPU_MIPS32R2 | ASE_MIPS16 | ASE_DSP | ASE_MT, + .insn_flags = CPU_MIPS32R2 | ASE_MIPS16 | ASE_DSP, .mmu_type = MMU_TYPE_R4000, }, { @@ -478,14 +478,15 @@ const mips_def_t mips_defs[] = (2 << CP0C1_DS) | (4 << CP0C1_DL) | (3 << CP0C1_DA) | (0 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP), .CP0_Config2 = MIPS_CONFIG2, - .CP0_Config3 = MIPS_CONFIG3 | (1 << CP0C3_BP) | (1 << CP0C3_BI) | + .CP0_Config3 = MIPS_CONFIG3 | (1 << CP0C3_MSAP) | + (1 << CP0C3_BP) | (1 << CP0C3_BI) | (2 << CP0C3_ISA) | (1 << CP0C3_ULRI) | (1 << CP0C3_RXI) | (1U << CP0C3_M), .CP0_Config4 = MIPS_CONFIG4 | (0xfc << CP0C4_KScrExist) | (3 << CP0C4_IE) | (1U << CP0C4_M), .CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_XNP) | (1 << CP0C5_LLB), - .CP0_Config5_rw_bitmask = (1 << CP0C5_SBRI) | (1 << CP0C5_FRE) | - (1 << CP0C5_UFE), + .CP0_Config5_rw_bitmask = (1 << CP0C5_MSAEn) | (1 << CP0C5_UFE) | + (1 << CP0C5_FRE) | (1 << CP0C5_SBRI), .CP0_LLAddr_rw_bitmask = 0, .CP0_LLAddr_shift = 0, .SYNCI_Step = 32, @@ -499,6 +500,7 @@ const mips_def_t mips_defs[] = (1 << FCR0_S) | (0x00 << FCR0_PRID) | (0x0 << FCR0_REV), .CP1_fcr31 = (1 << FCR31_ABS2008) | (1 << FCR31_NAN2008), .CP1_fcr31_rw_bitmask = 0x0103FFFF, + .MSAIR = 0x03 << MSAIR_ProcID, .SEGBITS = 32, .PABITS = 32, .insn_flags = CPU_MIPS32R6 | ASE_MICROMIPS, @@ -541,7 +543,7 @@ const mips_def_t mips_defs[] = .SEGBITS = 32, .PABITS = 32, .insn_flags = CPU_MIPS32R6 | ISA_NANOMIPS32 | - ASE_DSP | ASE_DSP_R2 | ASE_DSP_R3 | ASE_MT, + ASE_DSP | ASE_DSP_R2 | ASE_DSP_R3, .mmu_type = MMU_TYPE_R4000, }, #if defined(TARGET_MIPS64) @@ -661,7 +663,7 @@ const mips_def_t mips_defs[] = .CP1_fcr31_rw_bitmask = 0xFF83FFFF, .SEGBITS = 40, .PABITS = 36, - .insn_flags = CPU_MIPS64R1 | ASE_MIPS3D, + .insn_flags = CPU_MIPS64R1, .mmu_type = MMU_TYPE_R4000, }, { @@ -690,7 +692,7 @@ const mips_def_t mips_defs[] = .CP1_fcr31_rw_bitmask = 0xFF83FFFF, .SEGBITS = 42, .PABITS = 36, - .insn_flags = CPU_MIPS64R2 | ASE_MIPS3D, + .insn_flags = CPU_MIPS64R2, .mmu_type = MMU_TYPE_R4000, }, { diff --git a/target/mips/cpu.c b/target/mips/cpu.c index 9724e71a5e..d0a43b6d5c 100644 --- a/target/mips/cpu.c +++ b/target/mips/cpu.c @@ -407,9 +407,9 @@ static void mips_cpu_reset_hold(Object *obj, ResetType type) } msa_reset(env); + fp_reset(env); compute_hflags(env); - restore_fp_status(env); restore_pamask(env); cs->exception_index = EXCP_NONE; diff --git a/target/mips/cpu.h b/target/mips/cpu.h index a4a46ebbe9..f6877ece8b 100644 --- a/target/mips/cpu.h +++ b/target/mips/cpu.h @@ -530,7 +530,6 @@ typedef struct CPUArchState { CPUMIPSFPUContext active_fpu; uint32_t current_tc; - uint32_t current_fpu; uint32_t SEGBITS; uint32_t PABITS; @@ -1319,6 +1318,12 @@ bool cpu_type_supports_cps_smp(const char *cpu_type); bool cpu_supports_isa(const CPUMIPSState *env, uint64_t isa_mask); bool cpu_type_supports_isa(const char *cpu_type, uint64_t isa); +/* Check presence of MIPS-3D ASE */ +static inline bool ase_3d_available(const CPUMIPSState *env) +{ + return env->active_fpu.fcr0 & (1 << FCR0_3D); +} + /* Check presence of MSA implementation */ static inline bool ase_msa_available(CPUMIPSState *env) { diff --git a/target/mips/fpu_helper.h b/target/mips/fpu_helper.h index 
ad1116e8c1..7c3c7897b4 100644 --- a/target/mips/fpu_helper.h +++ b/target/mips/fpu_helper.h @@ -44,6 +44,28 @@ static inline void restore_fp_status(CPUMIPSState *env) restore_snan_bit_mode(env); } +static inline void fp_reset(CPUMIPSState *env) +{ + restore_fp_status(env); + + /* + * According to MIPS specifications, if one of the two operands is + * a sNaN, a new qNaN has to be generated. This is done in + * floatXX_silence_nan(). For qNaN inputs the specifications + * say: "When possible, this QNaN result is one of the operand QNaN + * values." In practice it seems that most implementations choose + * the first operand if both operands are qNaN. In short this gives + * the following rules: + * 1. A if it is signaling + * 2. B if it is signaling + * 3. A (quiet) + * 4. B (quiet) + * A signaling NaN is always silenced before returning it. + */ + set_float_2nan_prop_rule(float_2nan_prop_s_ab, + &env->active_fpu.fp_status); +} + /* MSA */ enum CPUMIPSMSADataFormat { diff --git a/target/mips/mips-defs.h b/target/mips/mips-defs.h index a6cebe0265..9d4d292586 100644 --- a/target/mips/mips-defs.h +++ b/target/mips/mips-defs.h @@ -26,12 +26,10 @@ * bits 24-39: MIPS ASEs */ #define ASE_MIPS16 0x0000000001000000ULL -#define ASE_MIPS3D 0x0000000002000000ULL #define ASE_MDMX 0x0000000004000000ULL #define ASE_DSP 0x0000000008000000ULL #define ASE_DSP_R2 0x0000000010000000ULL #define ASE_DSP_R3 0x0000000020000000ULL -#define ASE_MT 0x0000000040000000ULL #define ASE_SMARTMIPS 0x0000000080000000ULL #define ASE_MICROMIPS 0x0000000100000000ULL /* diff --git a/target/mips/msa.c b/target/mips/msa.c index 61f1a9a593..9dffc428f5 100644 --- a/target/mips/msa.c +++ b/target/mips/msa.c @@ -49,6 +49,23 @@ void msa_reset(CPUMIPSState *env) set_float_detect_tininess(float_tininess_after_rounding, &env->active_tc.msa_fp_status); + /* + * According to MIPS specifications, if one of the two operands is + * a sNaN, a new qNaN has to be generated. This is done in + * floatXX_silence_nan(). For qNaN inputs the specifications + * say: "When possible, this QNaN result is one of the operand QNaN + * values." In practice it seems that most implementations choose + * the first operand if both operands are qNaN. In short this gives + * the following rules: + * 1. A if it is signaling + * 2. B if it is signaling + * 3. A (quiet) + * 4. B (quiet) + * A signaling NaN is always silenced before returning it.
+ */ + set_float_2nan_prop_rule(float_2nan_prop_s_ab, + &env->active_tc.msa_fp_status); + /* clear float_status exception flags */ set_float_exception_flags(0, &env->active_tc.msa_fp_status); diff --git a/target/mips/sysemu/machine.c b/target/mips/sysemu/machine.c index 213fd637fc..8af11fd896 100644 --- a/target/mips/sysemu/machine.c +++ b/target/mips/sysemu/machine.c @@ -142,6 +142,7 @@ static int get_tlb(QEMUFile *f, void *pv, size_t size, qemu_get_betls(f, &v->VPN); qemu_get_be32s(f, &v->PageMask); qemu_get_be16s(f, &v->ASID); + qemu_get_be32s(f, &v->MMID); qemu_get_be16s(f, &flags); v->G = (flags >> 10) & 1; v->C0 = (flags >> 7) & 3; @@ -167,6 +168,7 @@ static int put_tlb(QEMUFile *f, void *pv, size_t size, r4k_tlb_t *v = pv; uint16_t asid = v->ASID; + uint32_t mmid = v->MMID; uint16_t flags = ((v->EHINV << 15) | (v->RI1 << 14) | (v->RI0 << 13) | @@ -183,6 +185,7 @@ static int put_tlb(QEMUFile *f, void *pv, size_t size, qemu_put_betls(f, &v->VPN); qemu_put_be32s(f, &v->PageMask); qemu_put_be16s(f, &asid); + qemu_put_be32s(f, &mmid); qemu_put_be16s(f, &flags); qemu_put_be64s(f, &v->PFN[0]); qemu_put_be64s(f, &v->PFN[1]); @@ -204,8 +207,8 @@ static const VMStateInfo vmstate_info_tlb = { static const VMStateDescription vmstate_tlb = { .name = "cpu/tlb", - .version_id = 2, - .minimum_version_id = 2, + .version_id = 3, + .minimum_version_id = 3, .fields = (const VMStateField[]) { VMSTATE_UINT32(nb_tlb, CPUMIPSTLBContext), VMSTATE_UINT32(tlb_in_use, CPUMIPSTLBContext), @@ -239,7 +242,7 @@ const VMStateDescription vmstate_mips_cpu = { /* CPU metastate */ VMSTATE_UINT32(env.current_tc, MIPSCPU), - VMSTATE_UINT32(env.current_fpu, MIPSCPU), + VMSTATE_UNUSED(sizeof(uint32_t)), /* was current_fpu */ VMSTATE_INT32(env.error_code, MIPSCPU), VMSTATE_UINTTL(env.btarget, MIPSCPU), VMSTATE_UINTTL(env.bcond, MIPSCPU), diff --git a/target/mips/tcg/godson2.decode b/target/mips/tcg/godson2.decode new file mode 100644 index 0000000000..25b396b682 --- /dev/null +++ b/target/mips/tcg/godson2.decode @@ -0,0 +1,27 @@ +# Godson2 64-bit Integer instructions +# +# Copyright (C) 2021 Philippe Mathieu-Daudé +# +# SPDX-License-Identifier: LGPL-2.1-or-later +# +# Reference: +# Godson-2E Software Manual +# (Document Number: godson2e-user-manual-V0.6) +# + +&muldiv rs rt rd + +@rs_rt_rd ...... rs:5 rt:5 rd:5 ..... ...... &muldiv + +MULTu_G 011111 ..... ..... ..... 00000 01100- @rs_rt_rd +DMULTu_G 011111 ..... ..... ..... 00000 01110- @rs_rt_rd + +DIV_G 011111 ..... ..... ..... 00000 011010 @rs_rt_rd +DIVU_G 011111 ..... ..... ..... 00000 011011 @rs_rt_rd +DDIV_G 011111 ..... ..... ..... 00000 011110 @rs_rt_rd +DDIVU_G 011111 ..... ..... ..... 00000 011111 @rs_rt_rd + +MOD_G 011111 ..... ..... ..... 00000 100010 @rs_rt_rd +MODU_G 011111 ..... ..... ..... 00000 100011 @rs_rt_rd +DMOD_G 011111 ..... ..... ..... 00000 100110 @rs_rt_rd +DMODU_G 011111 ..... ..... ..... 00000 100111 @rs_rt_rd diff --git a/target/mips/tcg/loong-ext.decode b/target/mips/tcg/loong-ext.decode new file mode 100644 index 0000000000..b43979d0ef --- /dev/null +++ b/target/mips/tcg/loong-ext.decode @@ -0,0 +1,28 @@ +# Loongson 64-bit Extension instructions +# +# Copyright (C) 2021 Philippe Mathieu-Daudé +# +# SPDX-License-Identifier: LGPL-2.1-or-later +# +# Reference: +# STLS2F01 User Manual +# Appendix A: new integer instructions +# (Document Number: UM0447) +# + +&muldiv rs rt rd !extern + +@rs_rt_rd ...... rs:5 rt:5 rd:5 ..... ...... &muldiv + +MULTu_G 011100 ..... ..... ..... 00000 0100-0 @rs_rt_rd +DMULTu_G 011100 ..... ..... ..... 
00000 0100-1 @rs_rt_rd + +DIV_G 011100 ..... ..... ..... 00000 010100 @rs_rt_rd +DDIV_G 011100 ..... ..... ..... 00000 010101 @rs_rt_rd +DIVU_G 011100 ..... ..... ..... 00000 010110 @rs_rt_rd +DDIVU_G 011100 ..... ..... ..... 00000 010111 @rs_rt_rd + +MOD_G 011100 ..... ..... ..... 00000 011100 @rs_rt_rd +DMOD_G 011100 ..... ..... ..... 00000 011101 @rs_rt_rd +MODU_G 011100 ..... ..... ..... 00000 011110 @rs_rt_rd +DMODU_G 011100 ..... ..... ..... 00000 011111 @rs_rt_rd diff --git a/target/mips/tcg/loong_translate.c b/target/mips/tcg/loong_translate.c new file mode 100644 index 0000000000..7d74cc34f8 --- /dev/null +++ b/target/mips/tcg/loong_translate.c @@ -0,0 +1,271 @@ +/* + * MIPS Loongson 64-bit translation routines + * + * Copyright (c) 2004-2005 Jocelyn Mayer + * Copyright (c) 2006 Marius Groeger (FPU operations) + * Copyright (c) 2006 Thiemo Seufer (MIPS32R2 support) + * Copyright (c) 2011 Richard Henderson <rth@twiddle.net> + * Copyright (c) 2021 Philippe Mathieu-Daudé + * + * This code is licensed under the GNU GPLv2 and later. + */ + +#include "qemu/osdep.h" +#include "translate.h" + +/* Include the auto-generated decoder. */ +#include "decode-godson2.c.inc" +#include "decode-loong-ext.c.inc" + +/* + * Word or double-word Fixed-point instructions. + * --------------------------------------------- + * + * Fixed-point multiplies and divisions write only + * one result into general-purpose registers. + */ + +static bool gen_lext_DIV_G(DisasContext *s, int rd, int rs, int rt, + bool is_double) +{ + TCGv t0, t1; + TCGLabel *l1, *l2, *l3; + + if (rd == 0) { + /* Treat as NOP. */ + return true; + } + + t0 = tcg_temp_new(); + t1 = tcg_temp_new(); + l1 = gen_new_label(); + l2 = gen_new_label(); + l3 = gen_new_label(); + + gen_load_gpr(t0, rs); + gen_load_gpr(t1, rt); + + if (!is_double) { + tcg_gen_ext32s_tl(t0, t0); + tcg_gen_ext32s_tl(t1, t1); + } + tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1); + tcg_gen_movi_tl(cpu_gpr[rd], 0); + tcg_gen_br(l3); + gen_set_label(l1); + + tcg_gen_brcondi_tl(TCG_COND_NE, t0, is_double ? LLONG_MIN : INT_MIN, l2); + tcg_gen_brcondi_tl(TCG_COND_NE, t1, -1LL, l2); + tcg_gen_mov_tl(cpu_gpr[rd], t0); + + tcg_gen_br(l3); + gen_set_label(l2); + tcg_gen_div_tl(cpu_gpr[rd], t0, t1); + if (!is_double) { + tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); + } + gen_set_label(l3); + + return true; +} + +static bool trans_DIV_G(DisasContext *s, arg_muldiv *a) +{ + return gen_lext_DIV_G(s, a->rd, a->rs, a->rt, false); +} + +static bool trans_DDIV_G(DisasContext *s, arg_muldiv *a) +{ + return gen_lext_DIV_G(s, a->rd, a->rs, a->rt, true); +} + +static bool gen_lext_DIVU_G(DisasContext *s, int rd, int rs, int rt, + bool is_double) +{ + TCGv t0, t1; + TCGLabel *l1, *l2; + + if (rd == 0) { + /* Treat as NOP. 
*/ + return true; + } + + t0 = tcg_temp_new(); + t1 = tcg_temp_new(); + l1 = gen_new_label(); + l2 = gen_new_label(); + + gen_load_gpr(t0, rs); + gen_load_gpr(t1, rt); + + if (!is_double) { + tcg_gen_ext32u_tl(t0, t0); + tcg_gen_ext32u_tl(t1, t1); + } + tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1); + tcg_gen_movi_tl(cpu_gpr[rd], 0); + + tcg_gen_br(l2); + gen_set_label(l1); + tcg_gen_divu_tl(cpu_gpr[rd], t0, t1); + if (!is_double) { + tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); + } + gen_set_label(l2); + + return true; +} + +static bool trans_DIVU_G(DisasContext *s, arg_muldiv *a) +{ + return gen_lext_DIVU_G(s, a->rd, a->rs, a->rt, false); +} + +static bool trans_DDIVU_G(DisasContext *s, arg_muldiv *a) +{ + return gen_lext_DIVU_G(s, a->rd, a->rs, a->rt, true); +} + +static bool gen_lext_MOD_G(DisasContext *s, int rd, int rs, int rt, + bool is_double) +{ + TCGv t0, t1; + TCGLabel *l1, *l2, *l3; + + if (rd == 0) { + /* Treat as NOP. */ + return true; + } + + t0 = tcg_temp_new(); + t1 = tcg_temp_new(); + l1 = gen_new_label(); + l2 = gen_new_label(); + l3 = gen_new_label(); + + gen_load_gpr(t0, rs); + gen_load_gpr(t1, rt); + + if (!is_double) { + tcg_gen_ext32u_tl(t0, t0); + tcg_gen_ext32u_tl(t1, t1); + } + tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1); + tcg_gen_brcondi_tl(TCG_COND_NE, t0, is_double ? LLONG_MIN : INT_MIN, l2); + tcg_gen_brcondi_tl(TCG_COND_NE, t1, -1LL, l2); + gen_set_label(l1); + tcg_gen_movi_tl(cpu_gpr[rd], 0); + tcg_gen_br(l3); + gen_set_label(l2); + tcg_gen_rem_tl(cpu_gpr[rd], t0, t1); + if (!is_double) { + tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); + } + gen_set_label(l3); + + return true; +} + +static bool trans_MOD_G(DisasContext *s, arg_muldiv *a) +{ + return gen_lext_MOD_G(s, a->rd, a->rs, a->rt, false); +} + +static bool trans_DMOD_G(DisasContext *s, arg_muldiv *a) +{ + return gen_lext_MOD_G(s, a->rd, a->rs, a->rt, true); +} + +static bool gen_lext_MODU_G(DisasContext *s, int rd, int rs, int rt, + bool is_double) +{ + TCGv t0, t1; + TCGLabel *l1, *l2; + + if (rd == 0) { + /* Treat as NOP. */ + return true; + } + + t0 = tcg_temp_new(); + t1 = tcg_temp_new(); + l1 = gen_new_label(); + l2 = gen_new_label(); + + gen_load_gpr(t0, rs); + gen_load_gpr(t1, rt); + + if (!is_double) { + tcg_gen_ext32u_tl(t0, t0); + tcg_gen_ext32u_tl(t1, t1); + } + tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1); + tcg_gen_movi_tl(cpu_gpr[rd], 0); + tcg_gen_br(l2); + gen_set_label(l1); + tcg_gen_remu_tl(cpu_gpr[rd], t0, t1); + if (!is_double) { + tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); + } + gen_set_label(l2); + + return true; +} + +static bool trans_MODU_G(DisasContext *s, arg_muldiv *a) +{ + return gen_lext_MODU_G(s, a->rd, a->rs, a->rt, false); +} + +static bool trans_DMODU_G(DisasContext *s, arg_muldiv *a) +{ + return gen_lext_MODU_G(s, a->rd, a->rs, a->rt, true); +} + +static bool gen_lext_MULT_G(DisasContext *s, int rd, int rs, int rt, + bool is_double) +{ + TCGv t0, t1; + + if (rd == 0) { + /* Treat as NOP. 
*/ + return true; + } + + t0 = tcg_temp_new(); + t1 = tcg_temp_new(); + + gen_load_gpr(t0, rs); + gen_load_gpr(t1, rt); + + tcg_gen_mul_tl(cpu_gpr[rd], t0, t1); + if (!is_double) { + tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); + } + + return true; +} + +static bool trans_MULTu_G(DisasContext *s, arg_muldiv *a) +{ + return gen_lext_MULT_G(s, a->rd, a->rs, a->rt, false); +} + +static bool trans_DMULTu_G(DisasContext *s, arg_muldiv *a) +{ + return gen_lext_MULT_G(s, a->rd, a->rs, a->rt, true); +} + +bool decode_ext_loongson(DisasContext *ctx, uint32_t insn) +{ + if (!decode_64bit_enabled(ctx)) { + return false; + } + if ((ctx->insn_flags & INSN_LOONGSON2E) && decode_godson2(ctx, ctx->opcode)) { + return true; + } + if ((ctx->insn_flags & ASE_LEXT) && decode_loong_ext(ctx, ctx->opcode)) { + return true; + } + return false; +} diff --git a/target/mips/tcg/meson.build b/target/mips/tcg/meson.build index ea7fb582f2..7b18e6c4c8 100644 --- a/target/mips/tcg/meson.build +++ b/target/mips/tcg/meson.build @@ -5,6 +5,8 @@ gen = [ decodetree.process('vr54xx.decode', extra_args: '--decode=decode_ext_vr54xx'), decodetree.process('octeon.decode', extra_args: '--decode=decode_ext_octeon'), decodetree.process('lcsr.decode', extra_args: '--decode=decode_ase_lcsr'), + decodetree.process('godson2.decode', extra_args: ['--static-decode=decode_godson2']), + decodetree.process('loong-ext.decode', extra_args: ['--static-decode=decode_loong_ext']), ] mips_ss.add(gen) @@ -28,6 +30,7 @@ mips_ss.add(when: 'TARGET_MIPS64', if_true: files( 'tx79_translate.c', 'octeon_translate.c', 'lcsr_translate.c', + 'loong_translate.c', ), if_false: files( 'mxu_translate.c', )) diff --git a/target/mips/tcg/micromips_translate.c.inc b/target/mips/tcg/micromips_translate.c.inc index 3cbf53bf2b..c479bec108 100644 --- a/target/mips/tcg/micromips_translate.c.inc +++ b/target/mips/tcg/micromips_translate.c.inc @@ -2484,7 +2484,10 @@ static void decode_micromips32_opc(CPUMIPSState *env, DisasContext *ctx) mips32_op = OPC_BC1TANY4; do_cp1mips3d: check_cop1x(ctx); - check_insn(ctx, ASE_MIPS3D); + if (!ase_3d_available(env)) { + gen_reserved_instruction(ctx); + break; + } /* Fall through */ do_cp1branch: if (env->CP0_Config1 & (1 << CP0C1_FP)) { diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c index d92fc418ed..de7045874d 100644 --- a/target/mips/tcg/translate.c +++ b/target/mips/tcg/translate.c @@ -327,19 +327,6 @@ enum { OPC_MUL = 0x02 | OPC_SPECIAL2, OPC_MSUB = 0x04 | OPC_SPECIAL2, OPC_MSUBU = 0x05 | OPC_SPECIAL2, - /* Loongson 2F */ - OPC_MULT_G_2F = 0x10 | OPC_SPECIAL2, - OPC_DMULT_G_2F = 0x11 | OPC_SPECIAL2, - OPC_MULTU_G_2F = 0x12 | OPC_SPECIAL2, - OPC_DMULTU_G_2F = 0x13 | OPC_SPECIAL2, - OPC_DIV_G_2F = 0x14 | OPC_SPECIAL2, - OPC_DDIV_G_2F = 0x15 | OPC_SPECIAL2, - OPC_DIVU_G_2F = 0x16 | OPC_SPECIAL2, - OPC_DDIVU_G_2F = 0x17 | OPC_SPECIAL2, - OPC_MOD_G_2F = 0x1c | OPC_SPECIAL2, - OPC_DMOD_G_2F = 0x1d | OPC_SPECIAL2, - OPC_MODU_G_2F = 0x1e | OPC_SPECIAL2, - OPC_DMODU_G_2F = 0x1f | OPC_SPECIAL2, /* Misc */ OPC_CLZ = 0x20 | OPC_SPECIAL2, OPC_CLO = 0x21 | OPC_SPECIAL2, @@ -368,20 +355,6 @@ enum { OPC_RDHWR = 0x3B | OPC_SPECIAL3, OPC_GINV = 0x3D | OPC_SPECIAL3, - /* Loongson 2E */ - OPC_MULT_G_2E = 0x18 | OPC_SPECIAL3, - OPC_MULTU_G_2E = 0x19 | OPC_SPECIAL3, - OPC_DIV_G_2E = 0x1A | OPC_SPECIAL3, - OPC_DIVU_G_2E = 0x1B | OPC_SPECIAL3, - OPC_DMULT_G_2E = 0x1C | OPC_SPECIAL3, - OPC_DMULTU_G_2E = 0x1D | OPC_SPECIAL3, - OPC_DDIV_G_2E = 0x1E | OPC_SPECIAL3, - OPC_DDIVU_G_2E = 0x1F | OPC_SPECIAL3, - OPC_MOD_G_2E = 0x22 | 
OPC_SPECIAL3, - OPC_MODU_G_2E = 0x23 | OPC_SPECIAL3, - OPC_DMOD_G_2E = 0x26 | OPC_SPECIAL3, - OPC_DMODU_G_2E = 0x27 | OPC_SPECIAL3, - /* MIPS DSP Load */ OPC_LX_DSP = 0x0A | OPC_SPECIAL3, /* MIPS DSP Arithmetic */ @@ -389,16 +362,14 @@ enum { OPC_ADDU_OB_DSP = 0x14 | OPC_SPECIAL3, OPC_ABSQ_S_PH_DSP = 0x12 | OPC_SPECIAL3, OPC_ABSQ_S_QH_DSP = 0x16 | OPC_SPECIAL3, - /* OPC_ADDUH_QB_DSP is same as OPC_MULT_G_2E. */ - /* OPC_ADDUH_QB_DSP = 0x18 | OPC_SPECIAL3, */ + OPC_ADDUH_QB_DSP = 0x18 | OPC_SPECIAL3, OPC_CMPU_EQ_QB_DSP = 0x11 | OPC_SPECIAL3, OPC_CMPU_EQ_OB_DSP = 0x15 | OPC_SPECIAL3, /* MIPS DSP GPR-Based Shift Sub-class */ OPC_SHLL_QB_DSP = 0x13 | OPC_SPECIAL3, OPC_SHLL_OB_DSP = 0x17 | OPC_SPECIAL3, /* MIPS DSP Multiply Sub-class insns */ - /* OPC_MUL_PH_DSP is same as OPC_ADDUH_QB_DSP. */ - /* OPC_MUL_PH_DSP = 0x18 | OPC_SPECIAL3, */ + OPC_MUL_PH_DSP = 0x18 | OPC_SPECIAL3, OPC_DPA_W_PH_DSP = 0x30 | OPC_SPECIAL3, OPC_DPAQ_W_QH_DSP = 0x34 | OPC_SPECIAL3, /* DSP Bit/Manipulation Sub-class */ @@ -556,7 +527,6 @@ enum { OPC_MULQ_S_PH = (0x1E << 6) | OPC_ADDU_QB_DSP, }; -#define OPC_ADDUH_QB_DSP OPC_MULT_G_2E #define MASK_ADDUH_QB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) enum { /* MIPS DSP Arithmetic Sub-class */ @@ -1645,13 +1615,18 @@ static inline void check_ps(DisasContext *ctx) check_cp1_64bitmode(ctx); } +bool decode_64bit_enabled(DisasContext *ctx) +{ + return ctx->hflags & MIPS_HFLAG_64; +} + /* * This code generates a "reserved instruction" exception if cpu is not * 64-bit or 64-bit instructions are not enabled. */ void check_mips_64(DisasContext *ctx) { - if (unlikely((TARGET_LONG_BITS != 64) || !(ctx->hflags & MIPS_HFLAG_64))) { + if (unlikely((TARGET_LONG_BITS != 64) || !decode_64bit_enabled(ctx))) { gen_reserved_instruction(ctx); } } @@ -3586,184 +3561,6 @@ static void gen_cl(DisasContext *ctx, uint32_t opc, } } -/* Godson integer instructions */ -static void gen_loongson_integer(DisasContext *ctx, uint32_t opc, - int rd, int rs, int rt) -{ - TCGv t0, t1; - - if (rd == 0) { - /* Treat as NOP. 
*/ - return; - } - - t0 = tcg_temp_new(); - t1 = tcg_temp_new(); - gen_load_gpr(t0, rs); - gen_load_gpr(t1, rt); - - switch (opc) { - case OPC_MULT_G_2E: - case OPC_MULT_G_2F: - tcg_gen_mul_tl(cpu_gpr[rd], t0, t1); - tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); - break; - case OPC_MULTU_G_2E: - case OPC_MULTU_G_2F: - tcg_gen_ext32u_tl(t0, t0); - tcg_gen_ext32u_tl(t1, t1); - tcg_gen_mul_tl(cpu_gpr[rd], t0, t1); - tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); - break; - case OPC_DIV_G_2E: - case OPC_DIV_G_2F: - { - TCGLabel *l1 = gen_new_label(); - TCGLabel *l2 = gen_new_label(); - TCGLabel *l3 = gen_new_label(); - tcg_gen_ext32s_tl(t0, t0); - tcg_gen_ext32s_tl(t1, t1); - tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1); - tcg_gen_movi_tl(cpu_gpr[rd], 0); - tcg_gen_br(l3); - gen_set_label(l1); - tcg_gen_brcondi_tl(TCG_COND_NE, t0, INT_MIN, l2); - tcg_gen_brcondi_tl(TCG_COND_NE, t1, -1, l2); - tcg_gen_mov_tl(cpu_gpr[rd], t0); - tcg_gen_br(l3); - gen_set_label(l2); - tcg_gen_div_tl(cpu_gpr[rd], t0, t1); - tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); - gen_set_label(l3); - } - break; - case OPC_DIVU_G_2E: - case OPC_DIVU_G_2F: - { - TCGLabel *l1 = gen_new_label(); - TCGLabel *l2 = gen_new_label(); - tcg_gen_ext32u_tl(t0, t0); - tcg_gen_ext32u_tl(t1, t1); - tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1); - tcg_gen_movi_tl(cpu_gpr[rd], 0); - tcg_gen_br(l2); - gen_set_label(l1); - tcg_gen_divu_tl(cpu_gpr[rd], t0, t1); - tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); - gen_set_label(l2); - } - break; - case OPC_MOD_G_2E: - case OPC_MOD_G_2F: - { - TCGLabel *l1 = gen_new_label(); - TCGLabel *l2 = gen_new_label(); - TCGLabel *l3 = gen_new_label(); - tcg_gen_ext32u_tl(t0, t0); - tcg_gen_ext32u_tl(t1, t1); - tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1); - tcg_gen_brcondi_tl(TCG_COND_NE, t0, INT_MIN, l2); - tcg_gen_brcondi_tl(TCG_COND_NE, t1, -1, l2); - gen_set_label(l1); - tcg_gen_movi_tl(cpu_gpr[rd], 0); - tcg_gen_br(l3); - gen_set_label(l2); - tcg_gen_rem_tl(cpu_gpr[rd], t0, t1); - tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); - gen_set_label(l3); - } - break; - case OPC_MODU_G_2E: - case OPC_MODU_G_2F: - { - TCGLabel *l1 = gen_new_label(); - TCGLabel *l2 = gen_new_label(); - tcg_gen_ext32u_tl(t0, t0); - tcg_gen_ext32u_tl(t1, t1); - tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1); - tcg_gen_movi_tl(cpu_gpr[rd], 0); - tcg_gen_br(l2); - gen_set_label(l1); - tcg_gen_remu_tl(cpu_gpr[rd], t0, t1); - tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); - gen_set_label(l2); - } - break; -#if defined(TARGET_MIPS64) - case OPC_DMULT_G_2E: - case OPC_DMULT_G_2F: - tcg_gen_mul_tl(cpu_gpr[rd], t0, t1); - break; - case OPC_DMULTU_G_2E: - case OPC_DMULTU_G_2F: - tcg_gen_mul_tl(cpu_gpr[rd], t0, t1); - break; - case OPC_DDIV_G_2E: - case OPC_DDIV_G_2F: - { - TCGLabel *l1 = gen_new_label(); - TCGLabel *l2 = gen_new_label(); - TCGLabel *l3 = gen_new_label(); - tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1); - tcg_gen_movi_tl(cpu_gpr[rd], 0); - tcg_gen_br(l3); - gen_set_label(l1); - tcg_gen_brcondi_tl(TCG_COND_NE, t0, -1LL << 63, l2); - tcg_gen_brcondi_tl(TCG_COND_NE, t1, -1LL, l2); - tcg_gen_mov_tl(cpu_gpr[rd], t0); - tcg_gen_br(l3); - gen_set_label(l2); - tcg_gen_div_tl(cpu_gpr[rd], t0, t1); - gen_set_label(l3); - } - break; - case OPC_DDIVU_G_2E: - case OPC_DDIVU_G_2F: - { - TCGLabel *l1 = gen_new_label(); - TCGLabel *l2 = gen_new_label(); - tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1); - tcg_gen_movi_tl(cpu_gpr[rd], 0); - tcg_gen_br(l2); - gen_set_label(l1); - tcg_gen_divu_tl(cpu_gpr[rd], t0, t1); - gen_set_label(l2); - } - break; - case 
OPC_DMOD_G_2E: - case OPC_DMOD_G_2F: - { - TCGLabel *l1 = gen_new_label(); - TCGLabel *l2 = gen_new_label(); - TCGLabel *l3 = gen_new_label(); - tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1); - tcg_gen_brcondi_tl(TCG_COND_NE, t0, -1LL << 63, l2); - tcg_gen_brcondi_tl(TCG_COND_NE, t1, -1LL, l2); - gen_set_label(l1); - tcg_gen_movi_tl(cpu_gpr[rd], 0); - tcg_gen_br(l3); - gen_set_label(l2); - tcg_gen_rem_tl(cpu_gpr[rd], t0, t1); - gen_set_label(l3); - } - break; - case OPC_DMODU_G_2E: - case OPC_DMODU_G_2F: - { - TCGLabel *l1 = gen_new_label(); - TCGLabel *l2 = gen_new_label(); - tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1); - tcg_gen_movi_tl(cpu_gpr[rd], 0); - tcg_gen_br(l2); - gen_set_label(l1); - tcg_gen_remu_tl(cpu_gpr[rd], t0, t1); - gen_set_label(l2); - } - break; -#endif - } -} - /* Loongson multimedia instructions */ static void gen_loongson_multimedia(DisasContext *ctx, int rd, int rs, int rt) { @@ -5315,17 +5112,17 @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel) register_name = "Index"; break; case CP0_REG00__MVPCONTROL: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mfc0_mvpcontrol(arg, tcg_env); register_name = "MVPControl"; break; case CP0_REG00__MVPCONF0: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mfc0_mvpconf0(arg, tcg_env); register_name = "MVPConf0"; break; case CP0_REG00__MVPCONF1: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mfc0_mvpconf1(arg, tcg_env); register_name = "MVPConf1"; break; @@ -5346,37 +5143,37 @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel) register_name = "Random"; break; case CP0_REG01__VPECONTROL: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEControl)); register_name = "VPEControl"; break; case CP0_REG01__VPECONF0: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEConf0)); register_name = "VPEConf0"; break; case CP0_REG01__VPECONF1: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEConf1)); register_name = "VPEConf1"; break; case CP0_REG01__YQMASK: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_mfc0_load64(arg, offsetof(CPUMIPSState, CP0_YQMask)); register_name = "YQMask"; break; case CP0_REG01__VPESCHEDULE: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_mfc0_load64(arg, offsetof(CPUMIPSState, CP0_VPESchedule)); register_name = "VPESchedule"; break; case CP0_REG01__VPESCHEFBACK: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_mfc0_load64(arg, offsetof(CPUMIPSState, CP0_VPEScheFBack)); register_name = "VPEScheFBack"; break; case CP0_REG01__VPEOPT: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEOpt)); register_name = "VPEOpt"; break; @@ -5403,37 +5200,37 @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel) register_name = "EntryLo0"; break; case CP0_REG02__TCSTATUS: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mfc0_tcstatus(arg, tcg_env); register_name = "TCStatus"; break; case CP0_REG02__TCBIND: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mfc0_tcbind(arg, tcg_env); 
register_name = "TCBind"; break; case CP0_REG02__TCRESTART: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mfc0_tcrestart(arg, tcg_env); register_name = "TCRestart"; break; case CP0_REG02__TCHALT: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mfc0_tchalt(arg, tcg_env); register_name = "TCHalt"; break; case CP0_REG02__TCCONTEXT: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mfc0_tccontext(arg, tcg_env); register_name = "TCContext"; break; case CP0_REG02__TCSCHEDULE: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mfc0_tcschedule(arg, tcg_env); register_name = "TCSchedule"; break; case CP0_REG02__TCSCHEFBACK: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mfc0_tcschefback(arg, tcg_env); register_name = "TCScheFBack"; break; @@ -6072,17 +5869,17 @@ static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel) register_name = "Index"; break; case CP0_REG00__MVPCONTROL: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_mvpcontrol(tcg_env, arg); register_name = "MVPControl"; break; case CP0_REG00__MVPCONF0: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); /* ignored */ register_name = "MVPConf0"; break; case CP0_REG00__MVPCONF1: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); /* ignored */ register_name = "MVPConf1"; break; @@ -6102,39 +5899,39 @@ static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel) register_name = "Random"; break; case CP0_REG01__VPECONTROL: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_vpecontrol(tcg_env, arg); register_name = "VPEControl"; break; case CP0_REG01__VPECONF0: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_vpeconf0(tcg_env, arg); register_name = "VPEConf0"; break; case CP0_REG01__VPECONF1: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_vpeconf1(tcg_env, arg); register_name = "VPEConf1"; break; case CP0_REG01__YQMASK: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_yqmask(tcg_env, arg); register_name = "YQMask"; break; case CP0_REG01__VPESCHEDULE: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); tcg_gen_st_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_VPESchedule)); register_name = "VPESchedule"; break; case CP0_REG01__VPESCHEFBACK: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); tcg_gen_st_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_VPEScheFBack)); register_name = "VPEScheFBack"; break; case CP0_REG01__VPEOPT: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_vpeopt(tcg_env, arg); register_name = "VPEOpt"; break; @@ -6149,37 +5946,37 @@ static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel) register_name = "EntryLo0"; break; case CP0_REG02__TCSTATUS: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_tcstatus(tcg_env, arg); register_name = "TCStatus"; break; case CP0_REG02__TCBIND: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_tcbind(tcg_env, arg); register_name = "TCBind"; break; case CP0_REG02__TCRESTART: - CP0_CHECK(ctx->insn_flags & ASE_MT); + 
CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_tcrestart(tcg_env, arg); register_name = "TCRestart"; break; case CP0_REG02__TCHALT: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_tchalt(tcg_env, arg); register_name = "TCHalt"; break; case CP0_REG02__TCCONTEXT: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_tccontext(tcg_env, arg); register_name = "TCContext"; break; case CP0_REG02__TCSCHEDULE: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_tcschedule(tcg_env, arg); register_name = "TCSchedule"; break; case CP0_REG02__TCSCHEFBACK: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_tcschefback(tcg_env, arg); register_name = "TCScheFBack"; break; @@ -6822,17 +6619,17 @@ static void gen_dmfc0(DisasContext *ctx, TCGv arg, int reg, int sel) register_name = "Index"; break; case CP0_REG00__MVPCONTROL: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mfc0_mvpcontrol(arg, tcg_env); register_name = "MVPControl"; break; case CP0_REG00__MVPCONF0: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mfc0_mvpconf0(arg, tcg_env); register_name = "MVPConf0"; break; case CP0_REG00__MVPCONF1: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mfc0_mvpconf1(arg, tcg_env); register_name = "MVPConf1"; break; @@ -6853,40 +6650,40 @@ static void gen_dmfc0(DisasContext *ctx, TCGv arg, int reg, int sel) register_name = "Random"; break; case CP0_REG01__VPECONTROL: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEControl)); register_name = "VPEControl"; break; case CP0_REG01__VPECONF0: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEConf0)); register_name = "VPEConf0"; break; case CP0_REG01__VPECONF1: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEConf1)); register_name = "VPEConf1"; break; case CP0_REG01__YQMASK: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); tcg_gen_ld_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_YQMask)); register_name = "YQMask"; break; case CP0_REG01__VPESCHEDULE: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); tcg_gen_ld_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_VPESchedule)); register_name = "VPESchedule"; break; case CP0_REG01__VPESCHEFBACK: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); tcg_gen_ld_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_VPEScheFBack)); register_name = "VPEScheFBack"; break; case CP0_REG01__VPEOPT: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEOpt)); register_name = "VPEOpt"; break; @@ -6902,37 +6699,37 @@ static void gen_dmfc0(DisasContext *ctx, TCGv arg, int reg, int sel) register_name = "EntryLo0"; break; case CP0_REG02__TCSTATUS: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mfc0_tcstatus(arg, tcg_env); register_name = "TCStatus"; break; case CP0_REG02__TCBIND: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mfc0_tcbind(arg, tcg_env); register_name = "TCBind"; break; case CP0_REG02__TCRESTART: 
- CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_dmfc0_tcrestart(arg, tcg_env); register_name = "TCRestart"; break; case CP0_REG02__TCHALT: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_dmfc0_tchalt(arg, tcg_env); register_name = "TCHalt"; break; case CP0_REG02__TCCONTEXT: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_dmfc0_tccontext(arg, tcg_env); register_name = "TCContext"; break; case CP0_REG02__TCSCHEDULE: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_dmfc0_tcschedule(arg, tcg_env); register_name = "TCSchedule"; break; case CP0_REG02__TCSCHEFBACK: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_dmfc0_tcschefback(arg, tcg_env); register_name = "TCScheFBack"; break; @@ -7539,17 +7336,17 @@ static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel) register_name = "Index"; break; case CP0_REG00__MVPCONTROL: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_mvpcontrol(tcg_env, arg); register_name = "MVPControl"; break; case CP0_REG00__MVPCONF0: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); /* ignored */ register_name = "MVPConf0"; break; case CP0_REG00__MVPCONF1: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); /* ignored */ register_name = "MVPConf1"; break; @@ -7569,39 +7366,39 @@ static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel) register_name = "Random"; break; case CP0_REG01__VPECONTROL: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_vpecontrol(tcg_env, arg); register_name = "VPEControl"; break; case CP0_REG01__VPECONF0: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_vpeconf0(tcg_env, arg); register_name = "VPEConf0"; break; case CP0_REG01__VPECONF1: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_vpeconf1(tcg_env, arg); register_name = "VPEConf1"; break; case CP0_REG01__YQMASK: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_yqmask(tcg_env, arg); register_name = "YQMask"; break; case CP0_REG01__VPESCHEDULE: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); tcg_gen_st_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_VPESchedule)); register_name = "VPESchedule"; break; case CP0_REG01__VPESCHEFBACK: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); tcg_gen_st_tl(arg, tcg_env, offsetof(CPUMIPSState, CP0_VPEScheFBack)); register_name = "VPEScheFBack"; break; case CP0_REG01__VPEOPT: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_vpeopt(tcg_env, arg); register_name = "VPEOpt"; break; @@ -7616,37 +7413,37 @@ static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel) register_name = "EntryLo0"; break; case CP0_REG02__TCSTATUS: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_tcstatus(tcg_env, arg); register_name = "TCStatus"; break; case CP0_REG02__TCBIND: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_tcbind(tcg_env, arg); register_name = "TCBind"; break; case CP0_REG02__TCRESTART: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_tcrestart(tcg_env, 
arg); register_name = "TCRestart"; break; case CP0_REG02__TCHALT: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_tchalt(tcg_env, arg); register_name = "TCHalt"; break; case CP0_REG02__TCCONTEXT: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_tccontext(tcg_env, arg); register_name = "TCContext"; break; case CP0_REG02__TCSCHEDULE: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_tcschedule(tcg_env, arg); register_name = "TCSchedule"; break; case CP0_REG02__TCSCHEFBACK: - CP0_CHECK(ctx->insn_flags & ASE_MT); + CP0_CHECK(disas_mt_available(ctx)); gen_helper_mtc0_tcschefback(tcg_env, arg); register_name = "TCScheFBack"; break; @@ -11584,8 +11381,7 @@ static void gen_mipsdsp_arith(DisasContext *ctx, uint32_t op1, uint32_t op2, gen_load_gpr(v2_t, v2); switch (op1) { - /* OPC_MULT_G_2E is equal OPC_ADDUH_QB_DSP */ - case OPC_MULT_G_2E: + case OPC_ADDUH_QB_DSP: check_dsp_r2(ctx); switch (op2) { case OPC_ADDUH_QB: @@ -12268,11 +12064,7 @@ static void gen_mipsdsp_multiply(DisasContext *ctx, uint32_t op1, uint32_t op2, gen_load_gpr(v2_t, v2); switch (op1) { - /* - * OPC_MULT_G_2E, OPC_ADDUH_QB_DSP, OPC_MUL_PH_DSP have - * the same mask and op1. - */ - case OPC_MULT_G_2E: + case OPC_MUL_PH_DSP: check_dsp_r2(ctx); switch (op2) { case OPC_MUL_PH: @@ -13624,15 +13416,6 @@ static void decode_opc_special2_legacy(CPUMIPSState *env, DisasContext *ctx) case OPC_MUL: gen_arith(ctx, op1, rd, rs, rt); break; - case OPC_DIV_G_2F: - case OPC_DIVU_G_2F: - case OPC_MULT_G_2F: - case OPC_MULTU_G_2F: - case OPC_MOD_G_2F: - case OPC_MODU_G_2F: - check_insn(ctx, INSN_LOONGSON2F | ASE_LEXT); - gen_loongson_integer(ctx, op1, rd, rs, rt); - break; case OPC_CLO: case OPC_CLZ: check_insn(ctx, ISA_MIPS_R1); @@ -13657,15 +13440,6 @@ static void decode_opc_special2_legacy(CPUMIPSState *env, DisasContext *ctx) check_mips_64(ctx); gen_cl(ctx, op1, rd, rs); break; - case OPC_DMULT_G_2F: - case OPC_DMULTU_G_2F: - case OPC_DDIV_G_2F: - case OPC_DDIVU_G_2F: - case OPC_DMOD_G_2F: - case OPC_DMODU_G_2F: - check_insn(ctx, INSN_LOONGSON2F | ASE_LEXT); - gen_loongson_integer(ctx, op1, rd, rs, rt); - break; #endif default: /* Invalid */ MIPS_INVAL("special2_legacy"); @@ -13798,17 +13572,12 @@ static void decode_opc_special3_legacy(CPUMIPSState *env, DisasContext *ctx) op1 = MASK_SPECIAL3(ctx->opcode); switch (op1) { - case OPC_DIV_G_2E: - case OPC_DIVU_G_2E: - case OPC_MOD_G_2E: - case OPC_MODU_G_2E: - case OPC_MULT_G_2E: - case OPC_MULTU_G_2E: + case OPC_MUL_PH_DSP: /* - * OPC_MULT_G_2E, OPC_ADDUH_QB_DSP, OPC_MUL_PH_DSP have + * OPC_ADDUH_QB_DSP, OPC_MUL_PH_DSP have * the same mask and op1. 
*/ - if ((ctx->insn_flags & ASE_DSP_R2) && (op1 == OPC_MULT_G_2E)) { + if ((ctx->insn_flags & ASE_DSP_R2) && (op1 == OPC_MUL_PH_DSP)) { op2 = MASK_ADDUH_QB(ctx->opcode); switch (op2) { case OPC_ADDUH_QB: @@ -13836,8 +13605,6 @@ static void decode_opc_special3_legacy(CPUMIPSState *env, DisasContext *ctx) gen_reserved_instruction(ctx); break; } - } else if (ctx->insn_flags & INSN_LOONGSON2E) { - gen_loongson_integer(ctx, op1, rd, rs, rt); } else { gen_reserved_instruction(ctx); } @@ -14066,15 +13833,6 @@ static void decode_opc_special3_legacy(CPUMIPSState *env, DisasContext *ctx) } break; #if defined(TARGET_MIPS64) - case OPC_DDIV_G_2E: - case OPC_DDIVU_G_2E: - case OPC_DMULT_G_2E: - case OPC_DMULTU_G_2E: - case OPC_DMOD_G_2E: - case OPC_DMODU_G_2E: - check_insn(ctx, INSN_LOONGSON2E); - gen_loongson_integer(ctx, op1, rd, rs, rt); - break; case OPC_ABSQ_S_QH_DSP: op2 = MASK_ABSQ_S_QH(ctx->opcode); switch (op2) { @@ -14952,7 +14710,9 @@ static bool decode_opc_legacy(CPUMIPSState *env, DisasContext *ctx) } else { /* OPC_BC1ANY2 */ check_cop1x(ctx); - check_insn(ctx, ASE_MIPS3D); + if (!ase_3d_available(env)) { + return false; + } gen_compute_branch1(ctx, MASK_BC1(ctx->opcode), (rt >> 2) & 0x7, imm << 2); } @@ -14967,7 +14727,9 @@ static bool decode_opc_legacy(CPUMIPSState *env, DisasContext *ctx) check_cp1_enabled(ctx); check_insn_opc_removed(ctx, ISA_MIPS_R6); check_cop1x(ctx); - check_insn(ctx, ASE_MIPS3D); + if (!ase_3d_available(env)) { + return false; + } /* fall through */ case OPC_BC1: check_cp1_enabled(ctx); @@ -15267,6 +15029,9 @@ static void decode_opc(CPUMIPSState *env, DisasContext *ctx) if (cpu_supports_isa(env, INSN_VR54XX) && decode_ext_vr54xx(ctx, ctx->opcode)) { return; } + if (TARGET_LONG_BITS == 64 && decode_ext_loongson(ctx, ctx->opcode)) { + return; + } #if defined(TARGET_MIPS64) if (ase_lcsr_available(env) && decode_ase_lcsr(ctx, ctx->opcode)) { return; diff --git a/target/mips/tcg/translate.h b/target/mips/tcg/translate.h index 5d196e69ac..1bf153d183 100644 --- a/target/mips/tcg/translate.h +++ b/target/mips/tcg/translate.h @@ -217,10 +217,13 @@ void msa_translate_init(void); void mxu_translate_init(void); bool decode_ase_mxu(DisasContext *ctx, uint32_t insn); +bool decode_64bit_enabled(DisasContext *ctx); + /* decodetree generated */ bool decode_isa_rel6(DisasContext *ctx, uint32_t insn); bool decode_ase_msa(DisasContext *ctx, uint32_t insn); bool decode_ext_txx9(DisasContext *ctx, uint32_t insn); +bool decode_ext_loongson(DisasContext *ctx, uint32_t insn); #if defined(TARGET_MIPS64) bool decode_ase_lcsr(DisasContext *ctx, uint32_t insn); bool decode_ext_tx79(DisasContext *ctx, uint32_t insn); @@ -228,6 +231,11 @@ bool decode_ext_octeon(DisasContext *ctx, uint32_t insn); #endif bool decode_ext_vr54xx(DisasContext *ctx, uint32_t insn); +static inline bool disas_mt_available(DisasContext *ctx) +{ + return ctx->CP0_Config3 & (1 << CP0C3_MT); +} + /* * Helpers for implementing sets of trans_* functions. * Defer the implementation of NAME to FUNC, with optional extra arguments. diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c index 6ec54ad7a6..b96561d1f2 100644 --- a/target/openrisc/cpu.c +++ b/target/openrisc/cpu.c @@ -105,6 +105,12 @@ static void openrisc_cpu_reset_hold(Object *obj, ResetType type) set_float_detect_tininess(float_tininess_before_rounding, &cpu->env.fp_status); + /* + * TODO: this is probably not the correct NaN propagation rule for + * this architecture. 
+ */ + set_float_2nan_prop_rule(float_2nan_prop_x87, &cpu->env.fp_status); + #ifndef CONFIG_USER_ONLY cpu->env.picmr = 0x00000000; diff --git a/target/ppc/compat.c b/target/ppc/compat.c index ebef2cccec..0cec1bde91 100644 --- a/target/ppc/compat.c +++ b/target/ppc/compat.c @@ -100,6 +100,13 @@ static const CompatInfo compat_table[] = { .pcr_level = PCR_COMPAT_3_10, .max_vthreads = 8, }, + { /* POWER11, ISA3.10 */ + .name = "power11", + .pvr = CPU_POWERPC_LOGICAL_3_10_P11, + .pcr = PCR_COMPAT_3_10, + .pcr_level = PCR_COMPAT_3_10, + .max_vthreads = 8, + }, }; static const CompatInfo *compat_by_pvr(uint32_t pvr) @@ -132,6 +139,10 @@ static bool pcc_compat(PowerPCCPUClass *pcc, uint32_t compat_pvr, /* Outside specified range */ return false; } + if (compat->pvr > pcc->spapr_logical_pvr) { + /* Older CPU cannot support a newer processor's compat mode */ + return false; + } if (!(pcc->pcr_supported & compat->pcr_level)) { /* Not supported by this CPU */ return false; diff --git a/target/ppc/cpu-models.c b/target/ppc/cpu-models.c index f2301b43f7..ece3481781 100644 --- a/target/ppc/cpu-models.c +++ b/target/ppc/cpu-models.c @@ -734,6 +734,8 @@ "POWER9 v2.2") POWERPC_DEF("power10_v2.0", CPU_POWERPC_POWER10_DD20, POWER10, "POWER10 v2.0") + POWERPC_DEF("power11_v2.0", CPU_POWERPC_POWER11_DD20, POWER11, + "POWER11_v2.0") #endif /* defined (TARGET_PPC64) */ /***************************************************************************/ @@ -909,6 +911,7 @@ PowerPCCPUAlias ppc_cpu_aliases[] = { { "power8nvl", "power8nvl_v1.0" }, { "power9", "power9_v2.2" }, { "power10", "power10_v2.0" }, + { "power11", "power11_v2.0" }, #endif /* Generic PowerPCs */ diff --git a/target/ppc/cpu-models.h b/target/ppc/cpu-models.h index 0229ef3a9a..72ad31ba50 100644 --- a/target/ppc/cpu-models.h +++ b/target/ppc/cpu-models.h @@ -354,6 +354,8 @@ enum { CPU_POWERPC_POWER10_BASE = 0x00800000, CPU_POWERPC_POWER10_DD1 = 0x00801100, CPU_POWERPC_POWER10_DD20 = 0x00801200, + CPU_POWERPC_POWER11_BASE = 0x00820000, + CPU_POWERPC_POWER11_DD20 = 0x00821200, CPU_POWERPC_970_v22 = 0x00390202, CPU_POWERPC_970FX_v10 = 0x00391100, CPU_POWERPC_970FX_v20 = 0x003C0200, @@ -391,6 +393,7 @@ enum { CPU_POWERPC_LOGICAL_2_07 = 0x0F000004, CPU_POWERPC_LOGICAL_3_00 = 0x0F000005, CPU_POWERPC_LOGICAL_3_10 = 0x0F000006, + CPU_POWERPC_LOGICAL_3_10_P11 = 0x0F000007, }; /* System version register (used on MPC 8xxx) */ diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h index 321ed2da75..945af07a64 100644 --- a/target/ppc/cpu.h +++ b/target/ppc/cpu.h @@ -40,6 +40,7 @@ #define PPC_BIT_NR(bit) (63 - (bit)) #define PPC_BIT(bit) (0x8000000000000000ULL >> (bit)) +#define PPC_BIT32_NR(bit) (31 - (bit)) #define PPC_BIT32(bit) (0x80000000 >> (bit)) #define PPC_BIT8(bit) (0x80 >> (bit)) #define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs)) @@ -215,6 +216,8 @@ typedef enum powerpc_excp_t { POWERPC_EXCP_POWER9, /* POWER10 exception model */ POWERPC_EXCP_POWER10, + /* POWER11 exception model */ + POWERPC_EXCP_POWER11, } powerpc_excp_t; /*****************************************************************************/ @@ -634,8 +637,8 @@ FIELD(MSR, LE, MSR_LE, 1) #define PSSCR_EC PPC_BIT(43) /* Exit Criterion */ /* HFSCR bits */ -#define HFSCR_MSGP PPC_BIT(53) /* Privileged Message Send Facilities */ -#define HFSCR_BHRB PPC_BIT(59) /* BHRB Instructions */ +#define HFSCR_MSGP PPC_BIT_NR(53) /* Privileged Message Send Facilities */ +#define HFSCR_BHRB PPC_BIT_NR(59) /* BHRB Instructions */ #define HFSCR_IC_MSGP 0xA #define DBCR0_ICMP (1 << 27) @@ -1454,16 
+1457,6 @@ struct ArchCPU { /* Those resources are used only during code translation */ /* opcode handlers */ opc_handler_t *opcodes[PPC_CPU_OPCODES_LEN]; - - /* Fields related to migration compatibility hacks */ - bool pre_2_8_migration; - target_ulong mig_msr_mask; - uint64_t mig_insns_flags; - uint64_t mig_insns_flags2; - uint32_t mig_nb_BATs; - bool pre_2_10_migration; - bool pre_3_0_migration; - int32_t mig_slb_nr; }; /** @@ -1482,6 +1475,7 @@ struct PowerPCCPUClass { void (*parent_parse_features)(const char *type, char *str, Error **errp); uint32_t pvr; + uint32_t spapr_logical_pvr; /* * If @best is false, match if pcc is in the family of pvr * Else match only if pcc is the best match for pvr in this family. diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c index 23881d09e9..efcb80d1c2 100644 --- a/target/ppc/cpu_init.c +++ b/target/ppc/cpu_init.c @@ -52,6 +52,7 @@ #include "kvm_ppc.h" #endif +#include "cpu_init.h" /* #define PPC_DEBUG_SPR */ /* #define USE_APPLE_GDB */ @@ -6153,6 +6154,7 @@ POWERPC_FAMILY(POWER7)(ObjectClass *oc, void *data) dc->fw_name = "PowerPC,POWER7"; dc->desc = "POWER7"; + pcc->spapr_logical_pvr = CPU_POWERPC_LOGICAL_2_06_PLUS; pcc->pvr_match = ppc_pvr_match_power7; pcc->pcr_mask = PCR_VEC_DIS | PCR_VSX_DIS | PCR_COMPAT_2_05; pcc->pcr_supported = PCR_COMPAT_2_06 | PCR_COMPAT_2_05; @@ -6316,6 +6318,7 @@ POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data) dc->fw_name = "PowerPC,POWER8"; dc->desc = "POWER8"; + pcc->spapr_logical_pvr = CPU_POWERPC_LOGICAL_2_07; pcc->pvr_match = ppc_pvr_match_power8; pcc->pcr_mask = PCR_TM_DIS | PCR_COMPAT_2_06 | PCR_COMPAT_2_05; pcc->pcr_supported = PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_COMPAT_2_05; @@ -6407,7 +6410,7 @@ static struct ppc_radix_page_info POWER9_radix_page_info = { #endif /* CONFIG_USER_ONLY */ #define POWER9_BHRB_ENTRIES_LOG2 5 -static void init_proc_POWER9(CPUPPCState *env) +static void register_power9_common_sprs(CPUPPCState *env) { /* Common Registers */ init_proc_book3s_common(env); @@ -6426,7 +6429,6 @@ static void init_proc_POWER9(CPUPPCState *env) register_power5p_ear_sprs(env); register_power5p_tb_sprs(env); register_power6_common_sprs(env); - register_HEIR32_spr(env); register_power6_dbg_sprs(env); register_power7_common_sprs(env); register_power8_tce_address_control_sprs(env); @@ -6444,16 +6446,21 @@ static void init_proc_POWER9(CPUPPCState *env) register_power8_rpr_sprs(env); register_power9_mmu_sprs(env); - /* POWER9 Specific registers */ - spr_register_kvm(env, SPR_TIDR, "TIDR", NULL, NULL, - spr_read_generic, spr_write_generic, - KVM_REG_PPC_TIDR, 0); - /* FIXME: Filter fields properly based on privilege level */ spr_register_kvm_hv(env, SPR_PSSCR, "PSSCR", NULL, NULL, NULL, NULL, spr_read_generic, spr_write_generic, KVM_REG_PPC_PSSCR, 0); +} + +static void init_proc_POWER9(CPUPPCState *env) +{ + register_power9_common_sprs(env); + register_HEIR32_spr(env); + /* POWER9 Specific registers */ + spr_register_kvm(env, SPR_TIDR, "TIDR", NULL, NULL, + spr_read_generic, spr_write_generic, + KVM_REG_PPC_TIDR, 0); /* env variables */ env->dcache_line_size = 128; env->icache_line_size = 128; @@ -6509,59 +6516,17 @@ POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data) dc->fw_name = "PowerPC,POWER9"; dc->desc = "POWER9"; + pcc->spapr_logical_pvr = CPU_POWERPC_LOGICAL_3_00; pcc->pvr_match = ppc_pvr_match_power9; - pcc->pcr_mask = PCR_COMPAT_2_05 | PCR_COMPAT_2_06 | PCR_COMPAT_2_07; - pcc->pcr_supported = PCR_COMPAT_3_00 | PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | - PCR_COMPAT_2_05; + pcc->pcr_mask = 
PPC_PCR_MASK_POWER9; + pcc->pcr_supported = PPC_PCR_SUPPORTED_POWER9; pcc->init_proc = init_proc_POWER9; pcc->check_pow = check_pow_nocheck; pcc->check_attn = check_attn_hid0_power9; - pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB | - PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | - PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | - PPC_FLOAT_FRSQRTES | - PPC_FLOAT_STFIWX | - PPC_FLOAT_EXT | - PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | - PPC_MEM_SYNC | PPC_MEM_EIEIO | - PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | - PPC_64B | PPC_64H | PPC_64BX | PPC_ALTIVEC | - PPC_SEGMENT_64B | PPC_SLBI | - PPC_POPCNTB | PPC_POPCNTWD | - PPC_CILDST; - pcc->insns_flags2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX | - PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | - PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 | - PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 | - PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 | - PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | - PPC2_TM | PPC2_ISA300 | PPC2_PRCNTL | PPC2_MEM_LWSYNC | - PPC2_BCDA_ISA206; - pcc->msr_mask = (1ull << MSR_SF) | - (1ull << MSR_HV) | - (1ull << MSR_TM) | - (1ull << MSR_VR) | - (1ull << MSR_VSX) | - (1ull << MSR_EE) | - (1ull << MSR_PR) | - (1ull << MSR_FP) | - (1ull << MSR_ME) | - (1ull << MSR_FE0) | - (1ull << MSR_SE) | - (1ull << MSR_DE) | - (1ull << MSR_FE1) | - (1ull << MSR_IR) | - (1ull << MSR_DR) | - (1ull << MSR_PMM) | - (1ull << MSR_RI) | - (1ull << MSR_LE); - pcc->lpcr_mask = LPCR_VPM1 | LPCR_ISL | LPCR_KBV | LPCR_DPFD | - (LPCR_PECE_U_MASK & LPCR_HVEE) | LPCR_ILE | LPCR_AIL | - LPCR_UPRT | LPCR_EVIRT | LPCR_ONL | LPCR_HR | LPCR_LD | - (LPCR_PECE_L_MASK & (LPCR_PDEE | LPCR_HDEE | LPCR_EEE | - LPCR_DEE | LPCR_OEE)) - | LPCR_MER | LPCR_GTSE | LPCR_TC | - LPCR_HEIC | LPCR_LPES0 | LPCR_HVICE | LPCR_HDICE; + pcc->insns_flags = PPC_INSNS_FLAGS_POWER9; + pcc->insns_flags2 = PPC_INSNS_FLAGS2_POWER9; + pcc->msr_mask = PPC_MSR_MASK_POWER9; + pcc->lpcr_mask = PPC_LPCR_MASK_POWER9; pcc->lpcr_pm = LPCR_PDEE | LPCR_HDEE | LPCR_EEE | LPCR_DEE | LPCR_OEE; pcc->mmu_model = POWERPC_MMU_3_00; #if !defined(CONFIG_USER_ONLY) @@ -6574,10 +6539,7 @@ POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data) pcc->excp_model = POWERPC_EXCP_POWER9; pcc->bus_model = PPC_FLAGS_INPUT_POWER9; pcc->bfd_mach = bfd_mach_ppc64; - pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | - POWERPC_FLAG_BE | POWERPC_FLAG_PMM | - POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR | - POWERPC_FLAG_VSX | POWERPC_FLAG_TM | POWERPC_FLAG_SCV; + pcc->flags = POWERPC_FLAGS_POWER9; pcc->l1_dcache_size = 0x8000; pcc->l1_icache_size = 0x8000; } @@ -6604,50 +6566,12 @@ static struct ppc_radix_page_info POWER10_radix_page_info = { #define POWER10_BHRB_ENTRIES_LOG2 5 static void init_proc_POWER10(CPUPPCState *env) { - /* Common Registers */ - init_proc_book3s_common(env); - register_book3s_207_dbg_sprs(env); - - /* Common TCG PMU */ - init_tcg_pmu_power8(env); - - /* POWER8 Specific Registers */ - register_book3s_ids_sprs(env); - register_amr_sprs(env); - register_iamr_sprs(env); - register_book3s_purr_sprs(env); - register_power5p_common_sprs(env); - register_power5p_lpar_sprs(env); - register_power5p_ear_sprs(env); - register_power5p_tb_sprs(env); - register_power6_common_sprs(env); + register_power9_common_sprs(env); register_HEIR64_spr(env); - register_power6_dbg_sprs(env); - register_power7_common_sprs(env); - register_power8_tce_address_control_sprs(env); - register_power8_ids_sprs(env); - register_power8_ebb_sprs(env); - register_power8_fscr_sprs(env); - register_power8_pmu_sup_sprs(env); - register_power8_pmu_user_sprs(env); - 
register_power8_tm_sprs(env); - register_power8_pspb_sprs(env); - register_power8_dpdes_sprs(env); - register_vtb_sprs(env); - register_power8_ic_sprs(env); - register_power9_book4_sprs(env); - register_power8_rpr_sprs(env); - register_power9_mmu_sprs(env); register_power10_hash_sprs(env); register_power10_dexcr_sprs(env); register_power10_pmu_sup_sprs(env); register_power10_pmu_user_sprs(env); - - /* FIXME: Filter fields properly based on privilege level */ - spr_register_kvm_hv(env, SPR_PSSCR, "PSSCR", NULL, NULL, NULL, NULL, - spr_read_generic, spr_write_generic, - KVM_REG_PPC_PSSCR, 0); - /* env variables */ env->dcache_line_size = 128; env->icache_line_size = 128; @@ -6689,61 +6613,17 @@ POWERPC_FAMILY(POWER10)(ObjectClass *oc, void *data) dc->fw_name = "PowerPC,POWER10"; dc->desc = "POWER10"; + pcc->spapr_logical_pvr = CPU_POWERPC_LOGICAL_3_10; pcc->pvr_match = ppc_pvr_match_power10; - pcc->pcr_mask = PCR_COMPAT_2_05 | PCR_COMPAT_2_06 | PCR_COMPAT_2_07 | - PCR_COMPAT_3_00; - pcc->pcr_supported = PCR_COMPAT_3_10 | PCR_COMPAT_3_00 | PCR_COMPAT_2_07 | - PCR_COMPAT_2_06 | PCR_COMPAT_2_05; + pcc->pcr_mask = PPC_PCR_MASK_POWER10; + pcc->pcr_supported = PPC_PCR_SUPPORTED_POWER10; pcc->init_proc = init_proc_POWER10; pcc->check_pow = check_pow_nocheck; pcc->check_attn = check_attn_hid0_power9; - pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB | - PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | - PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | - PPC_FLOAT_FRSQRTES | - PPC_FLOAT_STFIWX | - PPC_FLOAT_EXT | - PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | - PPC_MEM_SYNC | PPC_MEM_EIEIO | - PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | - PPC_64B | PPC_64H | PPC_64BX | PPC_ALTIVEC | - PPC_SEGMENT_64B | PPC_SLBI | - PPC_POPCNTB | PPC_POPCNTWD | - PPC_CILDST; - pcc->insns_flags2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX | - PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | - PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 | - PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 | - PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 | - PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | - PPC2_ISA300 | PPC2_PRCNTL | PPC2_ISA310 | - PPC2_MEM_LWSYNC | PPC2_BCDA_ISA206; - pcc->msr_mask = (1ull << MSR_SF) | - (1ull << MSR_HV) | - (1ull << MSR_VR) | - (1ull << MSR_VSX) | - (1ull << MSR_EE) | - (1ull << MSR_PR) | - (1ull << MSR_FP) | - (1ull << MSR_ME) | - (1ull << MSR_FE0) | - (1ull << MSR_SE) | - (1ull << MSR_DE) | - (1ull << MSR_FE1) | - (1ull << MSR_IR) | - (1ull << MSR_DR) | - (1ull << MSR_PMM) | - (1ull << MSR_RI) | - (1ull << MSR_LE); - pcc->lpcr_mask = LPCR_VPM1 | LPCR_ISL | LPCR_KBV | LPCR_DPFD | - (LPCR_PECE_U_MASK & LPCR_HVEE) | LPCR_ILE | LPCR_AIL | - LPCR_UPRT | LPCR_EVIRT | LPCR_ONL | LPCR_HR | LPCR_LD | - (LPCR_PECE_L_MASK & (LPCR_PDEE | LPCR_HDEE | LPCR_EEE | - LPCR_DEE | LPCR_OEE)) - | LPCR_MER | LPCR_GTSE | LPCR_TC | - LPCR_HEIC | LPCR_LPES0 | LPCR_HVICE | LPCR_HDICE; - /* DD2 adds an extra HAIL bit */ - pcc->lpcr_mask |= LPCR_HAIL; + pcc->insns_flags = PPC_INSNS_FLAGS_POWER10; + pcc->insns_flags2 = PPC_INSNS_FLAGS2_POWER10; + pcc->msr_mask = PPC_MSR_MASK_POWER10; + pcc->lpcr_mask = PPC_LPCR_MASK_POWER10; pcc->lpcr_pm = LPCR_PDEE | LPCR_HDEE | LPCR_EEE | LPCR_DEE | LPCR_OEE; pcc->mmu_model = POWERPC_MMU_3_00; @@ -6756,11 +6636,67 @@ POWERPC_FAMILY(POWER10)(ObjectClass *oc, void *data) pcc->excp_model = POWERPC_EXCP_POWER10; pcc->bus_model = PPC_FLAGS_INPUT_POWER9; pcc->bfd_mach = bfd_mach_ppc64; - pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | - POWERPC_FLAG_BE | POWERPC_FLAG_PMM | - POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR | - 
POWERPC_FLAG_VSX | POWERPC_FLAG_SCV | - POWERPC_FLAG_BHRB; + pcc->flags = POWERPC_FLAGS_POWER10; + pcc->l1_dcache_size = 0x8000; + pcc->l1_icache_size = 0x8000; +} + +static void init_proc_POWER11(CPUPPCState *env) +{ + init_proc_POWER10(env); +} + +static bool ppc_pvr_match_power11(PowerPCCPUClass *pcc, uint32_t pvr, bool best) +{ + uint32_t base = pvr & CPU_POWERPC_POWER_SERVER_MASK; + uint32_t pcc_base = pcc->pvr & CPU_POWERPC_POWER_SERVER_MASK; + + if (!best && (base == CPU_POWERPC_POWER11_BASE)) { + return true; + } + + if (base != pcc_base) { + return false; + } + + if ((pvr & 0x0f00) == (pcc->pvr & 0x0f00)) { + return true; + } + + return false; +} + +POWERPC_FAMILY(POWER11)(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + + dc->fw_name = "PowerPC,POWER11"; + dc->desc = "POWER11"; + pcc->spapr_logical_pvr = CPU_POWERPC_LOGICAL_3_10_P11; + pcc->pvr_match = ppc_pvr_match_power11; + pcc->pcr_mask = PPC_PCR_MASK_POWER11; + pcc->pcr_supported = PPC_PCR_SUPPORTED_POWER11; + pcc->init_proc = init_proc_POWER11; + pcc->check_pow = check_pow_nocheck; + pcc->check_attn = check_attn_hid0_power9; + pcc->insns_flags = PPC_INSNS_FLAGS_POWER11; + pcc->insns_flags2 = PPC_INSNS_FLAGS2_POWER11; + pcc->msr_mask = PPC_MSR_MASK_POWER11; + pcc->lpcr_mask = PPC_LPCR_MASK_POWER11; + + pcc->lpcr_pm = LPCR_PDEE | LPCR_HDEE | LPCR_EEE | LPCR_DEE | LPCR_OEE; + pcc->mmu_model = POWERPC_MMU_3_00; +#if !defined(CONFIG_USER_ONLY) + /* segment page size remains the same */ + pcc->hash64_opts = &ppc_hash64_opts_POWER7; + pcc->radix_page_info = &POWER10_radix_page_info; + pcc->lrg_decr_bits = 56; +#endif + pcc->excp_model = POWERPC_EXCP_POWER11; + pcc->bus_model = PPC_FLAGS_INPUT_POWER9; + pcc->bfd_mach = bfd_mach_ppc64; + pcc->flags = POWERPC_FLAGS_POWER11; pcc->l1_dcache_size = 0x8000; pcc->l1_icache_size = 0x8000; } @@ -7326,6 +7262,14 @@ static void ppc_cpu_reset_hold(Object *obj, ResetType type) /* tininess for underflow is detected before rounding */ set_float_detect_tininess(float_tininess_before_rounding, &env->fp_status); + /* + * PowerPC propagation rules: + * 1. A if it is an sNaN or a qNaN + * 2. B if it is an sNaN or a qNaN + * A signaling NaN is always silenced before returning it. 
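+ * + * For example, with A = qNaN and B = sNaN the result is A, and the sNaN + * input additionally raises the Invalid Operation exception (VXSNAN); + * with A = sNaN the result is A converted to its quiet form.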
+ */ + set_float_2nan_prop_rule(float_2nan_prop_ab, &env->fp_status); + set_float_2nan_prop_rule(float_2nan_prop_ab, &env->vec_status); for (i = 0; i < ARRAY_SIZE(env->spr_cb); i++) { ppc_spr_t *spr = &env->spr_cb[i]; @@ -7452,11 +7396,7 @@ static void ppc_disas_set_info(CPUState *cs, disassemble_info *info) } static Property ppc_cpu_properties[] = { - DEFINE_PROP_BOOL("pre-2.8-migration", PowerPCCPU, pre_2_8_migration, false), - DEFINE_PROP_BOOL("pre-2.10-migration", PowerPCCPU, pre_2_10_migration, - false), - DEFINE_PROP_BOOL("pre-3.0-migration", PowerPCCPU, pre_3_0_migration, - false), + /* add default property here */ DEFINE_PROP_END_OF_LIST(), }; diff --git a/target/ppc/cpu_init.h b/target/ppc/cpu_init.h new file mode 100644 index 0000000000..f8fd6ff5cd --- /dev/null +++ b/target/ppc/cpu_init.h @@ -0,0 +1,91 @@ +#ifndef TARGET_PPC_CPU_INIT_H +#define TARGET_PPC_CPU_INIT_H + +#define PPC_INSNS_FLAGS_POWER9 \ + (PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB | \ + PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | \ + PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES | \ + PPC_FLOAT_STFIWX | PPC_FLOAT_EXT | PPC_CACHE | PPC_CACHE_ICBI | \ + PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | \ + PPC_MEM_TLBSYNC | PPC_64B | PPC_64H | PPC_64BX | PPC_ALTIVEC | \ + PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD | \ + PPC_CILDST) + +#define PPC_INSNS_FLAGS_POWER10 PPC_INSNS_FLAGS_POWER9 +#define PPC_INSNS_FLAGS_POWER11 PPC_INSNS_FLAGS_POWER10 + +#define PPC_INSNS_FLAGS2_POWER_COMMON \ + (PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX | \ + PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | PPC2_ATOMIC_ISA206 | \ + PPC2_FP_CVT_ISA206 | PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 | \ + PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 | PPC2_ISA205 | \ + PPC2_ISA207S | PPC2_FP_CVT_S64 | PPC2_ISA300 | PPC2_PRCNTL | \ + PPC2_MEM_LWSYNC | PPC2_BCDA_ISA206) + +#define PPC_INSNS_FLAGS2_POWER9 \ + (PPC_INSNS_FLAGS2_POWER_COMMON | PPC2_TM) +#define PPC_INSNS_FLAGS2_POWER10 \ + (PPC_INSNS_FLAGS2_POWER_COMMON | PPC2_ISA310) +#define PPC_INSNS_FLAGS2_POWER11 PPC_INSNS_FLAGS2_POWER10 + +#define PPC_MSR_MASK_POWER_COMMON \ + ((1ull << MSR_SF) | \ + (1ull << MSR_HV) | \ + (1ull << MSR_VR) | \ + (1ull << MSR_VSX) | \ + (1ull << MSR_EE) | \ + (1ull << MSR_PR) | \ + (1ull << MSR_FP) | \ + (1ull << MSR_ME) | \ + (1ull << MSR_FE0) | \ + (1ull << MSR_SE) | \ + (1ull << MSR_DE) | \ + (1ull << MSR_FE1) | \ + (1ull << MSR_IR) | \ + (1ull << MSR_DR) | \ + (1ull << MSR_PMM) | \ + (1ull << MSR_RI) | \ + (1ull << MSR_LE)) + +#define PPC_MSR_MASK_POWER9 \ + (PPC_MSR_MASK_POWER_COMMON | (1ull << MSR_TM)) +#define PPC_MSR_MASK_POWER10 \ + PPC_MSR_MASK_POWER_COMMON +#define PPC_MSR_MASK_POWER11 PPC_MSR_MASK_POWER10 + +#define PPC_PCR_MASK_POWER9 \ + (PCR_COMPAT_2_05 | PCR_COMPAT_2_06 | PCR_COMPAT_2_07) +#define PPC_PCR_MASK_POWER10 \ + (PPC_PCR_MASK_POWER9 | PCR_COMPAT_3_00) +#define PPC_PCR_MASK_POWER11 PPC_PCR_MASK_POWER10 + +#define PPC_PCR_SUPPORTED_POWER9 \ + (PCR_COMPAT_3_00 | PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_COMPAT_2_05) +#define PPC_PCR_SUPPORTED_POWER10 \ + (PPC_PCR_SUPPORTED_POWER9 | PCR_COMPAT_3_10) +#define PPC_PCR_SUPPORTED_POWER11 PPC_PCR_SUPPORTED_POWER10 + +#define PPC_LPCR_MASK_POWER9 \ + (LPCR_VPM1 | LPCR_ISL | LPCR_KBV | LPCR_DPFD | \ + (LPCR_PECE_U_MASK & LPCR_HVEE) | LPCR_ILE | LPCR_AIL | \ + LPCR_UPRT | LPCR_EVIRT | LPCR_ONL | LPCR_HR | LPCR_LD | \ + (LPCR_PECE_L_MASK & (LPCR_PDEE | LPCR_HDEE | LPCR_EEE | LPCR_DEE | \ + LPCR_OEE)) | LPCR_MER | LPCR_GTSE | LPCR_TC | \ + LPCR_HEIC | LPCR_LPES0 | 
LPCR_HVICE | LPCR_HDICE) +/* DD2 adds an extra HAIL bit */ +#define PPC_LPCR_MASK_POWER10 \ + (PPC_LPCR_MASK_POWER9 | LPCR_HAIL) +#define PPC_LPCR_MASK_POWER11 PPC_LPCR_MASK_POWER10 + +#define POWERPC_FLAGS_POWER_COMMON \ + (POWERPC_FLAG_VRE | POWERPC_FLAG_SE | POWERPC_FLAG_BE | \ + POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR | \ + POWERPC_FLAG_VSX | POWERPC_FLAG_SCV) + +#define POWERPC_FLAGS_POWER9 \ + (POWERPC_FLAGS_POWER_COMMON | POWERPC_FLAG_TM) +#define POWERPC_FLAGS_POWER10 \ + (POWERPC_FLAGS_POWER_COMMON | POWERPC_FLAG_BHRB) +#define POWERPC_FLAGS_POWER11 POWERPC_FLAGS_POWER10 + +#endif /* TARGET_PPC_CPU_INIT_H */ diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c index f33fc36db2..70daa5076a 100644 --- a/target/ppc/excp_helper.c +++ b/target/ppc/excp_helper.c @@ -324,10 +324,7 @@ static void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp, target_ulong msr, } ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT; - if (ail == 0) { - return; - } - if (ail == 1) { + if (ail == 0 || ail == 1) { /* AIL=1 is reserved, treat it like AIL=0 */ return; } @@ -351,10 +348,7 @@ static void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp, target_ulong msr, } else { ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT; } - if (ail == 0) { - return; - } - if (ail == 1 || ail == 2) { + if (ail == 0 || ail == 1 || ail == 2) { /* AIL=1 and AIL=2 are reserved, treat them like AIL=0 */ return; } @@ -1661,6 +1655,7 @@ static void powerpc_excp(PowerPCCPU *cpu, int excp) case POWERPC_EXCP_POWER8: case POWERPC_EXCP_POWER9: case POWERPC_EXCP_POWER10: + case POWERPC_EXCP_POWER11: powerpc_excp_books(cpu, excp); break; default: @@ -1682,51 +1677,54 @@ void ppc_cpu_do_interrupt(CPUState *cs) PPC_INTERRUPT_PIT | PPC_INTERRUPT_DOORBELL | PPC_INTERRUPT_HDOORBELL | \ PPC_INTERRUPT_THERM | PPC_INTERRUPT_EBB) -static int p7_interrupt_powersave(CPUPPCState *env) +static int p7_interrupt_powersave(uint32_t pending_interrupts, + target_ulong lpcr) { - if ((env->pending_interrupts & PPC_INTERRUPT_EXT) && - (env->spr[SPR_LPCR] & LPCR_P7_PECE0)) { + if ((pending_interrupts & PPC_INTERRUPT_EXT) && + (lpcr & LPCR_P7_PECE0)) { return PPC_INTERRUPT_EXT; } - if ((env->pending_interrupts & PPC_INTERRUPT_DECR) && - (env->spr[SPR_LPCR] & LPCR_P7_PECE1)) { + if ((pending_interrupts & PPC_INTERRUPT_DECR) && + (lpcr & LPCR_P7_PECE1)) { return PPC_INTERRUPT_DECR; } - if ((env->pending_interrupts & PPC_INTERRUPT_MCK) && - (env->spr[SPR_LPCR] & LPCR_P7_PECE2)) { + if ((pending_interrupts & PPC_INTERRUPT_MCK) && + (lpcr & LPCR_P7_PECE2)) { return PPC_INTERRUPT_MCK; } - if ((env->pending_interrupts & PPC_INTERRUPT_HMI) && - (env->spr[SPR_LPCR] & LPCR_P7_PECE2)) { + if ((pending_interrupts & PPC_INTERRUPT_HMI) && + (lpcr & LPCR_P7_PECE2)) { return PPC_INTERRUPT_HMI; } - if (env->pending_interrupts & PPC_INTERRUPT_RESET) { + if (pending_interrupts & PPC_INTERRUPT_RESET) { return PPC_INTERRUPT_RESET; } return 0; } -static int p7_next_unmasked_interrupt(CPUPPCState *env) +static int p7_next_unmasked_interrupt(CPUPPCState *env, + uint32_t pending_interrupts, + target_ulong lpcr) { CPUState *cs = env_cpu(env); /* Ignore MSR[EE] when coming out of some power management states */ bool msr_ee = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset; - assert((env->pending_interrupts & P7_UNUSED_INTERRUPTS) == 0); + assert((pending_interrupts & P7_UNUSED_INTERRUPTS) == 0); if (cs->halted) { /* LPCR[PECE] controls which interrupts can exit power-saving mode */ - return p7_interrupt_powersave(env); + return 
p7_interrupt_powersave(pending_interrupts, lpcr); } /* Machine check exception */ - if (env->pending_interrupts & PPC_INTERRUPT_MCK) { + if (pending_interrupts & PPC_INTERRUPT_MCK) { return PPC_INTERRUPT_MCK; } /* Hypervisor decrementer exception */ - if (env->pending_interrupts & PPC_INTERRUPT_HDECR) { + if (pending_interrupts & PPC_INTERRUPT_HDECR) { /* LPCR will be clear when not supported so this will work */ bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE); if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hdice) { @@ -1736,9 +1734,9 @@ static int p7_next_unmasked_interrupt(CPUPPCState *env) } /* External interrupt can ignore MSR:EE under some circumstances */ - if (env->pending_interrupts & PPC_INTERRUPT_EXT) { - bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0); - bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); + if (pending_interrupts & PPC_INTERRUPT_EXT) { + bool lpes0 = !!(lpcr & LPCR_LPES0); + bool heic = !!(lpcr & LPCR_HEIC); /* HEIC blocks delivery to the hypervisor */ if ((msr_ee && !(heic && FIELD_EX64_HV(env->msr) && !FIELD_EX64(env->msr, MSR, PR))) || @@ -1748,10 +1746,10 @@ static int p7_next_unmasked_interrupt(CPUPPCState *env) } if (msr_ee != 0) { /* Decrementer exception */ - if (env->pending_interrupts & PPC_INTERRUPT_DECR) { + if (pending_interrupts & PPC_INTERRUPT_DECR) { return PPC_INTERRUPT_DECR; } - if (env->pending_interrupts & PPC_INTERRUPT_PERFM) { + if (pending_interrupts & PPC_INTERRUPT_PERFM) { return PPC_INTERRUPT_PERFM; } } @@ -1764,39 +1762,42 @@ static int p7_next_unmasked_interrupt(CPUPPCState *env) PPC_INTERRUPT_CEXT | PPC_INTERRUPT_WDT | PPC_INTERRUPT_CDOORBELL | \ PPC_INTERRUPT_FIT | PPC_INTERRUPT_PIT | PPC_INTERRUPT_THERM) -static int p8_interrupt_powersave(CPUPPCState *env) +static int p8_interrupt_powersave(uint32_t pending_interrupts, + target_ulong lpcr) { - if ((env->pending_interrupts & PPC_INTERRUPT_EXT) && - (env->spr[SPR_LPCR] & LPCR_P8_PECE2)) { + if ((pending_interrupts & PPC_INTERRUPT_EXT) && + (lpcr & LPCR_P8_PECE2)) { return PPC_INTERRUPT_EXT; } - if ((env->pending_interrupts & PPC_INTERRUPT_DECR) && - (env->spr[SPR_LPCR] & LPCR_P8_PECE3)) { + if ((pending_interrupts & PPC_INTERRUPT_DECR) && + (lpcr & LPCR_P8_PECE3)) { return PPC_INTERRUPT_DECR; } - if ((env->pending_interrupts & PPC_INTERRUPT_MCK) && - (env->spr[SPR_LPCR] & LPCR_P8_PECE4)) { + if ((pending_interrupts & PPC_INTERRUPT_MCK) && + (lpcr & LPCR_P8_PECE4)) { return PPC_INTERRUPT_MCK; } - if ((env->pending_interrupts & PPC_INTERRUPT_HMI) && - (env->spr[SPR_LPCR] & LPCR_P8_PECE4)) { + if ((pending_interrupts & PPC_INTERRUPT_HMI) && + (lpcr & LPCR_P8_PECE4)) { return PPC_INTERRUPT_HMI; } - if ((env->pending_interrupts & PPC_INTERRUPT_DOORBELL) && - (env->spr[SPR_LPCR] & LPCR_P8_PECE0)) { + if ((pending_interrupts & PPC_INTERRUPT_DOORBELL) && + (lpcr & LPCR_P8_PECE0)) { return PPC_INTERRUPT_DOORBELL; } - if ((env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) && - (env->spr[SPR_LPCR] & LPCR_P8_PECE1)) { + if ((pending_interrupts & PPC_INTERRUPT_HDOORBELL) && + (lpcr & LPCR_P8_PECE1)) { return PPC_INTERRUPT_HDOORBELL; } - if (env->pending_interrupts & PPC_INTERRUPT_RESET) { + if (pending_interrupts & PPC_INTERRUPT_RESET) { return PPC_INTERRUPT_RESET; } return 0; } -static int p8_next_unmasked_interrupt(CPUPPCState *env) +static int p8_next_unmasked_interrupt(CPUPPCState *env, + uint32_t pending_interrupts, + target_ulong lpcr) { CPUState *cs = env_cpu(env); @@ -1807,18 +1808,18 @@ static int p8_next_unmasked_interrupt(CPUPPCState *env) if (cs->halted) { /* LPCR[PECE] controls which 
interrupts can exit power-saving mode */ - return p8_interrupt_powersave(env); + return p8_interrupt_powersave(pending_interrupts, lpcr); } /* Machine check exception */ - if (env->pending_interrupts & PPC_INTERRUPT_MCK) { + if (pending_interrupts & PPC_INTERRUPT_MCK) { return PPC_INTERRUPT_MCK; } /* Hypervisor decrementer exception */ - if (env->pending_interrupts & PPC_INTERRUPT_HDECR) { + if (pending_interrupts & PPC_INTERRUPT_HDECR) { /* LPCR will be clear when not supported so this will work */ - bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE); + bool hdice = !!(lpcr & LPCR_HDICE); if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hdice) { /* HDEC clears on delivery */ return PPC_INTERRUPT_HDECR; @@ -1826,9 +1827,9 @@ static int p8_next_unmasked_interrupt(CPUPPCState *env) } /* External interrupt can ignore MSR:EE under some circumstances */ - if (env->pending_interrupts & PPC_INTERRUPT_EXT) { - bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0); - bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); + if (pending_interrupts & PPC_INTERRUPT_EXT) { + bool lpes0 = !!(lpcr & LPCR_LPES0); + bool heic = !!(lpcr & LPCR_HEIC); /* HEIC blocks delivery to the hypervisor */ if ((msr_ee && !(heic && FIELD_EX64_HV(env->msr) && !FIELD_EX64(env->msr, MSR, PR))) || @@ -1838,20 +1839,20 @@ static int p8_next_unmasked_interrupt(CPUPPCState *env) } if (msr_ee != 0) { /* Decrementer exception */ - if (env->pending_interrupts & PPC_INTERRUPT_DECR) { + if (pending_interrupts & PPC_INTERRUPT_DECR) { return PPC_INTERRUPT_DECR; } - if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) { + if (pending_interrupts & PPC_INTERRUPT_DOORBELL) { return PPC_INTERRUPT_DOORBELL; } - if (env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) { + if (pending_interrupts & PPC_INTERRUPT_HDOORBELL) { return PPC_INTERRUPT_HDOORBELL; } - if (env->pending_interrupts & PPC_INTERRUPT_PERFM) { + if (pending_interrupts & PPC_INTERRUPT_PERFM) { return PPC_INTERRUPT_PERFM; } /* EBB exception */ - if (env->pending_interrupts & PPC_INTERRUPT_EBB) { + if (pending_interrupts & PPC_INTERRUPT_EBB) { /* * EBB exception must be taken in problem state and * with BESCR_GE set. 
@@ -1871,60 +1872,65 @@ static int p8_next_unmasked_interrupt(CPUPPCState *env) PPC_INTERRUPT_WDT | PPC_INTERRUPT_CDOORBELL | PPC_INTERRUPT_FIT | \ PPC_INTERRUPT_PIT | PPC_INTERRUPT_THERM) -static int p9_interrupt_powersave(CPUPPCState *env) +static int p9_interrupt_powersave(CPUPPCState *env, + uint32_t pending_interrupts, + target_ulong lpcr) { + /* External Exception */ - if ((env->pending_interrupts & PPC_INTERRUPT_EXT) && - (env->spr[SPR_LPCR] & LPCR_EEE)) { - bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); + if ((pending_interrupts & PPC_INTERRUPT_EXT) && + (lpcr & LPCR_EEE)) { + bool heic = !!(lpcr & LPCR_HEIC); if (!heic || !FIELD_EX64_HV(env->msr) || FIELD_EX64(env->msr, MSR, PR)) { return PPC_INTERRUPT_EXT; } } /* Decrementer Exception */ - if ((env->pending_interrupts & PPC_INTERRUPT_DECR) && - (env->spr[SPR_LPCR] & LPCR_DEE)) { + if ((pending_interrupts & PPC_INTERRUPT_DECR) && + (lpcr & LPCR_DEE)) { return PPC_INTERRUPT_DECR; } /* Machine Check or Hypervisor Maintenance Exception */ - if (env->spr[SPR_LPCR] & LPCR_OEE) { - if (env->pending_interrupts & PPC_INTERRUPT_MCK) { + if (lpcr & LPCR_OEE) { + if (pending_interrupts & PPC_INTERRUPT_MCK) { return PPC_INTERRUPT_MCK; } - if (env->pending_interrupts & PPC_INTERRUPT_HMI) { + if (pending_interrupts & PPC_INTERRUPT_HMI) { return PPC_INTERRUPT_HMI; } } /* Privileged Doorbell Exception */ - if ((env->pending_interrupts & PPC_INTERRUPT_DOORBELL) && - (env->spr[SPR_LPCR] & LPCR_PDEE)) { + if ((pending_interrupts & PPC_INTERRUPT_DOORBELL) && + (lpcr & LPCR_PDEE)) { return PPC_INTERRUPT_DOORBELL; } /* Hypervisor Doorbell Exception */ - if ((env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) && - (env->spr[SPR_LPCR] & LPCR_HDEE)) { + if ((pending_interrupts & PPC_INTERRUPT_HDOORBELL) && + (lpcr & LPCR_HDEE)) { return PPC_INTERRUPT_HDOORBELL; } /* Hypervisor virtualization exception */ - if ((env->pending_interrupts & PPC_INTERRUPT_HVIRT) && - (env->spr[SPR_LPCR] & LPCR_HVEE)) { + if ((pending_interrupts & PPC_INTERRUPT_HVIRT) && + (lpcr & LPCR_HVEE)) { return PPC_INTERRUPT_HVIRT; } - if (env->pending_interrupts & PPC_INTERRUPT_RESET) { + if (pending_interrupts & PPC_INTERRUPT_RESET) { return PPC_INTERRUPT_RESET; } return 0; } -static int p9_next_unmasked_interrupt(CPUPPCState *env) +static int p9_next_unmasked_interrupt(CPUPPCState *env, + uint32_t pending_interrupts, + target_ulong lpcr) { CPUState *cs = env_cpu(env); /* Ignore MSR[EE] when coming out of some power management states */ bool msr_ee = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset; - assert((env->pending_interrupts & P9_UNUSED_INTERRUPTS) == 0); + assert((pending_interrupts & P9_UNUSED_INTERRUPTS) == 0); if (cs->halted) { if (env->spr[SPR_PSSCR] & PSSCR_EC) { @@ -1932,7 +1938,7 @@ static int p9_next_unmasked_interrupt(CPUPPCState *env) * When PSSCR[EC] is set, LPCR[PECE] controls which interrupts can * wakeup the processor */ - return p9_interrupt_powersave(env); + return p9_interrupt_powersave(env, pending_interrupts, lpcr); } else { /* * When it's clear, any system-caused exception exits power-saving @@ -1943,14 +1949,14 @@ static int p9_next_unmasked_interrupt(CPUPPCState *env) } /* Machine check exception */ - if (env->pending_interrupts & PPC_INTERRUPT_MCK) { + if (pending_interrupts & PPC_INTERRUPT_MCK) { return PPC_INTERRUPT_MCK; } /* Hypervisor decrementer exception */ - if (env->pending_interrupts & PPC_INTERRUPT_HDECR) { + if (pending_interrupts & PPC_INTERRUPT_HDECR) { /* LPCR will be clear when not supported so this will work */ - bool hdice = 
!!(env->spr[SPR_LPCR] & LPCR_HDICE); + bool hdice = !!(lpcr & LPCR_HDICE); if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hdice) { /* HDEC clears on delivery */ return PPC_INTERRUPT_HDECR; @@ -1958,18 +1964,18 @@ static int p9_next_unmasked_interrupt(CPUPPCState *env) } /* Hypervisor virtualization interrupt */ - if (env->pending_interrupts & PPC_INTERRUPT_HVIRT) { + if (pending_interrupts & PPC_INTERRUPT_HVIRT) { /* LPCR will be clear when not supported so this will work */ - bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE); + bool hvice = !!(lpcr & LPCR_HVICE); if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hvice) { return PPC_INTERRUPT_HVIRT; } } /* External interrupt can ignore MSR:EE under some circumstances */ - if (env->pending_interrupts & PPC_INTERRUPT_EXT) { - bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0); - bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); + if (pending_interrupts & PPC_INTERRUPT_EXT) { + bool lpes0 = !!(lpcr & LPCR_LPES0); + bool heic = !!(lpcr & LPCR_HEIC); /* HEIC blocks delivery to the hypervisor */ if ((msr_ee && !(heic && FIELD_EX64_HV(env->msr) && !FIELD_EX64(env->msr, MSR, PR))) || @@ -1979,20 +1985,20 @@ static int p9_next_unmasked_interrupt(CPUPPCState *env) } if (msr_ee != 0) { /* Decrementer exception */ - if (env->pending_interrupts & PPC_INTERRUPT_DECR) { + if (pending_interrupts & PPC_INTERRUPT_DECR) { return PPC_INTERRUPT_DECR; } - if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) { + if (pending_interrupts & PPC_INTERRUPT_DOORBELL) { return PPC_INTERRUPT_DOORBELL; } - if (env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) { + if (pending_interrupts & PPC_INTERRUPT_HDOORBELL) { return PPC_INTERRUPT_HDOORBELL; } - if (env->pending_interrupts & PPC_INTERRUPT_PERFM) { + if (pending_interrupts & PPC_INTERRUPT_PERFM) { return PPC_INTERRUPT_PERFM; } /* EBB exception */ - if (env->pending_interrupts & PPC_INTERRUPT_EBB) { + if (pending_interrupts & PPC_INTERRUPT_EBB) { /* * EBB exception must be taken in problem state and * with BESCR_GE set. 
@@ -2010,27 +2016,31 @@ static int p9_next_unmasked_interrupt(CPUPPCState *env) static int ppc_next_unmasked_interrupt(CPUPPCState *env) { + uint32_t pending_interrupts = env->pending_interrupts; + target_ulong lpcr = env->spr[SPR_LPCR]; + bool async_deliver; + #ifdef TARGET_PPC64 switch (env->excp_model) { case POWERPC_EXCP_POWER7: - return p7_next_unmasked_interrupt(env); + return p7_next_unmasked_interrupt(env, pending_interrupts, lpcr); case POWERPC_EXCP_POWER8: - return p8_next_unmasked_interrupt(env); + return p8_next_unmasked_interrupt(env, pending_interrupts, lpcr); case POWERPC_EXCP_POWER9: case POWERPC_EXCP_POWER10: - return p9_next_unmasked_interrupt(env); + case POWERPC_EXCP_POWER11: + return p9_next_unmasked_interrupt(env, pending_interrupts, lpcr); default: break; } #endif - bool async_deliver; /* External reset */ - if (env->pending_interrupts & PPC_INTERRUPT_RESET) { + if (pending_interrupts & PPC_INTERRUPT_RESET) { return PPC_INTERRUPT_RESET; } /* Machine check exception */ - if (env->pending_interrupts & PPC_INTERRUPT_MCK) { + if (pending_interrupts & PPC_INTERRUPT_MCK) { return PPC_INTERRUPT_MCK; } #if 0 /* TODO */ @@ -2049,9 +2059,9 @@ static int ppc_next_unmasked_interrupt(CPUPPCState *env) async_deliver = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset; /* Hypervisor decrementer exception */ - if (env->pending_interrupts & PPC_INTERRUPT_HDECR) { + if (pending_interrupts & PPC_INTERRUPT_HDECR) { /* LPCR will be clear when not supported so this will work */ - bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE); + bool hdice = !!(lpcr & LPCR_HDICE); if ((async_deliver || !FIELD_EX64_HV(env->msr)) && hdice) { /* HDEC clears on delivery */ return PPC_INTERRUPT_HDECR; @@ -2059,18 +2069,18 @@ static int ppc_next_unmasked_interrupt(CPUPPCState *env) } /* Hypervisor virtualization interrupt */ - if (env->pending_interrupts & PPC_INTERRUPT_HVIRT) { + if (pending_interrupts & PPC_INTERRUPT_HVIRT) { /* LPCR will be clear when not supported so this will work */ - bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE); + bool hvice = !!(lpcr & LPCR_HVICE); if ((async_deliver || !FIELD_EX64_HV(env->msr)) && hvice) { return PPC_INTERRUPT_HVIRT; } } /* External interrupt can ignore MSR:EE under some circumstances */ - if (env->pending_interrupts & PPC_INTERRUPT_EXT) { - bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0); - bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); + if (pending_interrupts & PPC_INTERRUPT_EXT) { + bool lpes0 = !!(lpcr & LPCR_LPES0); + bool heic = !!(lpcr & LPCR_HEIC); /* HEIC blocks delivery to the hypervisor */ if ((async_deliver && !(heic && FIELD_EX64_HV(env->msr) && !FIELD_EX64(env->msr, MSR, PR))) || @@ -2080,45 +2090,45 @@ static int ppc_next_unmasked_interrupt(CPUPPCState *env) } if (FIELD_EX64(env->msr, MSR, CE)) { /* External critical interrupt */ - if (env->pending_interrupts & PPC_INTERRUPT_CEXT) { + if (pending_interrupts & PPC_INTERRUPT_CEXT) { return PPC_INTERRUPT_CEXT; } } if (async_deliver != 0) { /* Watchdog timer on embedded PowerPC */ - if (env->pending_interrupts & PPC_INTERRUPT_WDT) { + if (pending_interrupts & PPC_INTERRUPT_WDT) { return PPC_INTERRUPT_WDT; } - if (env->pending_interrupts & PPC_INTERRUPT_CDOORBELL) { + if (pending_interrupts & PPC_INTERRUPT_CDOORBELL) { return PPC_INTERRUPT_CDOORBELL; } /* Fixed interval timer on embedded PowerPC */ - if (env->pending_interrupts & PPC_INTERRUPT_FIT) { + if (pending_interrupts & PPC_INTERRUPT_FIT) { return PPC_INTERRUPT_FIT; } /* Programmable interval timer on embedded PowerPC */ - if 
(env->pending_interrupts & PPC_INTERRUPT_PIT) { + if (pending_interrupts & PPC_INTERRUPT_PIT) { return PPC_INTERRUPT_PIT; } /* Decrementer exception */ - if (env->pending_interrupts & PPC_INTERRUPT_DECR) { + if (pending_interrupts & PPC_INTERRUPT_DECR) { return PPC_INTERRUPT_DECR; } - if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) { + if (pending_interrupts & PPC_INTERRUPT_DOORBELL) { return PPC_INTERRUPT_DOORBELL; } - if (env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) { + if (pending_interrupts & PPC_INTERRUPT_HDOORBELL) { return PPC_INTERRUPT_HDOORBELL; } - if (env->pending_interrupts & PPC_INTERRUPT_PERFM) { + if (pending_interrupts & PPC_INTERRUPT_PERFM) { return PPC_INTERRUPT_PERFM; } /* Thermal interrupt */ - if (env->pending_interrupts & PPC_INTERRUPT_THERM) { + if (pending_interrupts & PPC_INTERRUPT_THERM) { return PPC_INTERRUPT_THERM; } /* EBB exception */ - if (env->pending_interrupts & PPC_INTERRUPT_EBB) { + if (pending_interrupts & PPC_INTERRUPT_EBB) { /* * EBB exception must be taken in problem state and * with BESCR_GE set. @@ -2187,7 +2197,6 @@ static void p7_deliver_interrupt(CPUPPCState *env, int interrupt) powerpc_excp(cpu, POWERPC_EXCP_DECR); break; case PPC_INTERRUPT_PERFM: - env->pending_interrupts &= ~PPC_INTERRUPT_PERFM; powerpc_excp(cpu, POWERPC_EXCP_PERFM); break; case 0: @@ -2238,7 +2247,9 @@ static void p8_deliver_interrupt(CPUPPCState *env, int interrupt) powerpc_excp(cpu, POWERPC_EXCP_DECR); break; case PPC_INTERRUPT_DOORBELL: - env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL; + if (!env->resume_as_sreset) { + env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL; + } if (is_book3s_arch2x(env)) { powerpc_excp(cpu, POWERPC_EXCP_SDOOR); } else { @@ -2246,11 +2257,12 @@ static void p8_deliver_interrupt(CPUPPCState *env, int interrupt) } break; case PPC_INTERRUPT_HDOORBELL: - env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL; + if (!env->resume_as_sreset) { + env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL; + } powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV); break; case PPC_INTERRUPT_PERFM: - env->pending_interrupts &= ~PPC_INTERRUPT_PERFM; powerpc_excp(cpu, POWERPC_EXCP_PERFM); break; case PPC_INTERRUPT_EBB: /* EBB exception */ @@ -2303,6 +2315,7 @@ static void p9_deliver_interrupt(CPUPPCState *env, int interrupt) case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */ /* HDEC clears on delivery */ + /* XXX: should not see an HDEC if resume_as_sreset. assert? 
*/ env->pending_interrupts &= ~PPC_INTERRUPT_HDECR; powerpc_excp(cpu, POWERPC_EXCP_HDECR); break; @@ -2322,15 +2335,18 @@ static void p9_deliver_interrupt(CPUPPCState *env, int interrupt) powerpc_excp(cpu, POWERPC_EXCP_DECR); break; case PPC_INTERRUPT_DOORBELL: - env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL; + if (!env->resume_as_sreset) { + env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL; + } powerpc_excp(cpu, POWERPC_EXCP_SDOOR); break; case PPC_INTERRUPT_HDOORBELL: - env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL; + if (!env->resume_as_sreset) { + env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL; + } powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV); break; case PPC_INTERRUPT_PERFM: - env->pending_interrupts &= ~PPC_INTERRUPT_PERFM; powerpc_excp(cpu, POWERPC_EXCP_PERFM); break; case PPC_INTERRUPT_EBB: /* EBB exception */ @@ -2372,6 +2388,7 @@ static void ppc_deliver_interrupt(CPUPPCState *env, int interrupt) return p8_deliver_interrupt(env, interrupt); case POWERPC_EXCP_POWER9: case POWERPC_EXCP_POWER10: + case POWERPC_EXCP_POWER11: return p9_deliver_interrupt(env, interrupt); default: break; @@ -2444,7 +2461,6 @@ static void ppc_deliver_interrupt(CPUPPCState *env, int interrupt) powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV); break; case PPC_INTERRUPT_PERFM: - env->pending_interrupts &= ~PPC_INTERRUPT_PERFM; powerpc_excp(cpu, POWERPC_EXCP_PERFM); break; case PPC_INTERRUPT_THERM: /* Thermal interrupt */ @@ -3163,6 +3179,7 @@ void ppc_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, case POWERPC_EXCP_POWER8: case POWERPC_EXCP_POWER9: case POWERPC_EXCP_POWER10: + case POWERPC_EXCP_POWER11: /* * Machine check codes can be found in processor User Manual or * Linux or skiboot source. diff --git a/target/ppc/helper_regs.c b/target/ppc/helper_regs.c index 02076e96fb..42c681ca4a 100644 --- a/target/ppc/helper_regs.c +++ b/target/ppc/helper_regs.c @@ -83,15 +83,16 @@ static bool hreg_check_bhrb_enable(CPUPPCState *env) static uint32_t hreg_compute_pmu_hflags_value(CPUPPCState *env) { uint32_t hflags = 0; - #if defined(TARGET_PPC64) - if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCC0) { + target_ulong mmcr0 = env->spr[SPR_POWER_MMCR0]; + + if (mmcr0 & MMCR0_PMCC0) { hflags |= 1 << HFLAGS_PMCC0; } - if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCC1) { + if (mmcr0 & MMCR0_PMCC1) { hflags |= 1 << HFLAGS_PMCC1; } - if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCjCE) { + if (mmcr0 & MMCR0_PMCjCE) { hflags |= 1 << HFLAGS_PMCJCE; } if (hreg_check_bhrb_enable(env)) { @@ -101,9 +102,9 @@ static uint32_t hreg_compute_pmu_hflags_value(CPUPPCState *env) #ifndef CONFIG_USER_ONLY if (env->pmc_ins_cnt) { hflags |= 1 << HFLAGS_INSN_CNT; - } - if (env->pmc_ins_cnt & 0x1e) { - hflags |= 1 << HFLAGS_PMC_OTHER; + if (env->pmc_ins_cnt & 0x1e) { + hflags |= 1 << HFLAGS_PMC_OTHER; + } } #endif #endif @@ -143,10 +144,10 @@ static uint32_t hreg_compute_hflags_value(CPUPPCState *env) if (ppc_flags & POWERPC_FLAG_DE) { target_ulong dbcr0 = env->spr[SPR_BOOKE_DBCR0]; - if ((dbcr0 & DBCR0_ICMP) && FIELD_EX64(env->msr, MSR, DE)) { + if ((dbcr0 & DBCR0_ICMP) && FIELD_EX64(msr, MSR, DE)) { hflags |= 1 << HFLAGS_SE; } - if ((dbcr0 & DBCR0_BRT) && FIELD_EX64(env->msr, MSR, DE)) { + if ((dbcr0 & DBCR0_BRT) && FIELD_EX64(msr, MSR, DE)) { hflags |= 1 << HFLAGS_BE; } } else { diff --git a/target/ppc/machine.c b/target/ppc/machine.c index d433fd45fc..717bf93e88 100644 --- a/target/ppc/machine.c +++ b/target/ppc/machine.c @@ -118,43 +118,11 @@ static const VMStateInfo vmstate_info_vsr = { #define VMSTATE_VSR_ARRAY(_f, _s, _n) \ 
VMSTATE_VSR_ARRAY_V(_f, _s, _n, 0) -static bool cpu_pre_2_8_migration(void *opaque, int version_id) -{ - PowerPCCPU *cpu = opaque; - - return cpu->pre_2_8_migration; -} - -#if defined(TARGET_PPC64) -static bool cpu_pre_3_0_migration(void *opaque, int version_id) -{ - PowerPCCPU *cpu = opaque; - - return cpu->pre_3_0_migration; -} -#endif - static int cpu_pre_save(void *opaque) { PowerPCCPU *cpu = opaque; CPUPPCState *env = &cpu->env; int i; - uint64_t insns_compat_mask = - PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB - | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES - | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES - | PPC_FLOAT_STFIWX | PPC_FLOAT_EXT - | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ - | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC - | PPC_64B | PPC_64BX | PPC_ALTIVEC - | PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD; - uint64_t insns_compat_mask2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX - | PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 - | PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 - | PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 - | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 - | PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | PPC2_TM - | PPC2_MEM_LWSYNC; env->spr[SPR_LR] = env->lr; env->spr[SPR_CTR] = env->ctr; @@ -177,35 +145,6 @@ static int cpu_pre_save(void *opaque) env->spr[SPR_IBAT4U + 2 * i + 1] = env->IBAT[1][i + 4]; } - /* Hacks for migration compatibility between 2.6, 2.7 & 2.8 */ - if (cpu->pre_2_8_migration) { - /* - * Mask out bits that got added to msr_mask since the versions - * which stupidly included it in the migration stream. - */ - target_ulong metamask = 0 -#if defined(TARGET_PPC64) - | (1ULL << MSR_TS0) - | (1ULL << MSR_TS1) -#endif - ; - cpu->mig_msr_mask = env->msr_mask & ~metamask; - cpu->mig_insns_flags = env->insns_flags & insns_compat_mask; - /* - * CPU models supported by old machines all have - * PPC_MEM_TLBIE, so we set it unconditionally to allow - * backward migration from a POWER9 host to a POWER8 host. - */ - cpu->mig_insns_flags |= PPC_MEM_TLBIE; - cpu->mig_insns_flags2 = env->insns_flags2 & insns_compat_mask2; - cpu->mig_nb_BATs = env->nb_BATs; - } - if (cpu->pre_3_0_migration) { - if (cpu->hash64_opts) { - cpu->mig_slb_nr = cpu->hash64_opts->slb_size; - } - } - /* Used to retain migration compatibility for pre 6.0 for 601 machines. 
*/ env->hflags_compat_nmsr = 0; @@ -549,12 +488,11 @@ static int slb_post_load(void *opaque, int version_id) static const VMStateDescription vmstate_slb = { .name = "cpu/slb", - .version_id = 1, + .version_id = 2, .minimum_version_id = 1, .needed = slb_needed, .post_load = slb_post_load, .fields = (const VMStateField[]) { - VMSTATE_INT32_TEST(mig_slb_nr, PowerPCCPU, cpu_pre_3_0_migration), VMSTATE_SLB_ARRAY(env.slb, PowerPCCPU, MAX_SLB_ENTRIES), VMSTATE_END_OF_LIST() } @@ -676,7 +614,7 @@ static bool compat_needed(void *opaque) PowerPCCPU *cpu = opaque; assert(!(cpu->compat_pvr && !cpu->vhyp)); - return !cpu->pre_2_10_migration && cpu->compat_pvr != 0; + return cpu->compat_pvr != 0; } static const VMStateDescription vmstate_compat = { @@ -760,12 +698,6 @@ const VMStateDescription vmstate_ppc_cpu = { /* Backward compatible internal state */ VMSTATE_UINTTL(env.hflags_compat_nmsr, PowerPCCPU), - /* Sanity checking */ - VMSTATE_UINTTL_TEST(mig_msr_mask, PowerPCCPU, cpu_pre_2_8_migration), - VMSTATE_UINT64_TEST(mig_insns_flags, PowerPCCPU, cpu_pre_2_8_migration), - VMSTATE_UINT64_TEST(mig_insns_flags2, PowerPCCPU, - cpu_pre_2_8_migration), - VMSTATE_UINT32_TEST(mig_nb_BATs, PowerPCCPU, cpu_pre_2_8_migration), VMSTATE_END_OF_LIST() }, .subsections = (const VMStateDescription * const []) { diff --git a/target/ppc/misc_helper.c b/target/ppc/misc_helper.c index 1b83971375..f0ca80153b 100644 --- a/target/ppc/misc_helper.c +++ b/target/ppc/misc_helper.c @@ -288,7 +288,7 @@ void helper_store_dpdes(CPUPPCState *env, target_ulong val) PowerPCCPU *ccpu = POWERPC_CPU(ccs); uint32_t thread_id = ppc_cpu_tir(ccpu); - ppc_set_irq(cpu, PPC_INTERRUPT_DOORBELL, val & (0x1 << thread_id)); + ppc_set_irq(ccpu, PPC_INTERRUPT_DOORBELL, val & (0x1 << thread_id)); } bql_unlock(); } diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c index 5e1983e334..c8c2f8910a 100644 --- a/target/ppc/mmu-hash64.c +++ b/target/ppc/mmu-hash64.c @@ -993,6 +993,7 @@ bool ppc_hash64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type, int exec_prot, pp_prot, amr_prot, prot; int need_prot; hwaddr raddr; + bool vrma = false; /* * Note on LPCR usage: 970 uses HID4, but our special variant of @@ -1022,6 +1023,7 @@ bool ppc_hash64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type, } } else if (ppc_hash64_use_vrma(env)) { /* Emulated VRMA mode */ + vrma = true; slb = &vrma_slbe; if (build_vrma_slbe(cpu, slb) != 0) { /* Invalid VRMA setup, machine check */ @@ -1136,7 +1138,12 @@ bool ppc_hash64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type, exec_prot = ppc_hash64_pte_noexec_guard(cpu, pte); pp_prot = ppc_hash64_pte_prot(mmu_idx, slb, pte); - amr_prot = ppc_hash64_amr_prot(cpu, pte); + if (vrma) { + /* VRMA does not check keys */ + amr_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + } else { + amr_prot = ppc_hash64_amr_prot(cpu, pte); + } prot = exec_prot & pp_prot & amr_prot; need_prot = check_prot_access_type(PAGE_RWX, access_type); diff --git a/target/ppc/translate.c b/target/ppc/translate.c index 7689b2ac2e..47ca50a064 100644 --- a/target/ppc/translate.c +++ b/target/ppc/translate.c @@ -1820,7 +1820,7 @@ static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov); } - if (unlikely(Rc(ctx->opcode) != 0)) { + if (unlikely(compute_rc0)) { gen_set_Rc0(ctx, ret); } } @@ -6423,8 +6423,6 @@ static bool decode_legacy(PowerPCCPU *cpu, DisasContext *ctx, uint32_t insn) opc_handler_t **table, *handler; uint32_t inval; - ctx->opcode = insn; - LOG_DISAS("translate 
opcode %08x (%02x %02x %02x %02x) (%s)\n", insn, opc1(insn), opc2(insn), opc3(insn), opc4(insn), ctx->le_mode ? "little" : "big"); @@ -6558,6 +6556,7 @@ static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) ctx->base.pc_next = pc += 4; if (!is_prefix_insn(ctx, insn)) { + ctx->opcode = insn; ok = (decode_insn32(ctx, insn) || decode_legacy(cpu, ctx, insn)); } else if ((pc & 63) == 0) { diff --git a/target/rx/cpu.c b/target/rx/cpu.c index 36d2a6f189..65a74ce720 100644 --- a/target/rx/cpu.c +++ b/target/rx/cpu.c @@ -93,6 +93,13 @@ static void rx_cpu_reset_hold(Object *obj, ResetType type) env->fpsw = 0; set_flush_to_zero(1, &env->fp_status); set_flush_inputs_to_zero(1, &env->fp_status); + /* + * TODO: this is not the correct NaN propagation rule for this + * architecture. The "RX Family User's Manual: Software" table 1.6 + * defines the propagation rules as "prefer SNaN over QNaN; + * then prefer dest over source", which is float_2nan_prop_s_ab. + */ + set_float_2nan_prop_rule(float_2nan_prop_x87, &env->fp_status); } static ObjectClass *rx_cpu_class_by_name(const char *cpu_model) diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c index 4e41a3dff5..514c70f301 100644 --- a/target/s390x/cpu.c +++ b/target/s390x/cpu.c @@ -205,6 +205,7 @@ static void s390_cpu_reset_hold(Object *obj, ResetType type) /* tininess for underflow is detected before rounding */ set_float_detect_tininess(float_tininess_before_rounding, &env->fpu_status); + set_float_2nan_prop_rule(float_2nan_prop_s_ab, &env->fpu_status); /* fall through */ case RESET_TYPE_S390_CPU_NORMAL: env->psw.mask &= ~PSW_MASK_RI; diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c index 54cb269e0a..dd7af86de7 100644 --- a/target/sparc/cpu.c +++ b/target/sparc/cpu.c @@ -26,6 +26,7 @@ #include "hw/qdev-properties.h" #include "qapi/visitor.h" #include "tcg/tcg.h" +#include "fpu/softfloat.h" //#define DEBUG_FEATURES @@ -76,6 +77,7 @@ static void sparc_cpu_reset_hold(Object *obj, ResetType type) env->npc = env->pc + 4; #endif env->cache_control = 0; + cpu_put_fsr(env, 0); } #ifndef CONFIG_USER_ONLY @@ -805,7 +807,13 @@ static void sparc_cpu_realizefn(DeviceState *dev, Error **errp) env->version |= env->def.maxtl << 8; env->version |= env->def.nwindows - 1; #endif - cpu_put_fsr(env, 0); + + /* + * Prefer SNaN over QNaN, order B then A. It's OK to do this in realize + * rather than reset, because fp_status is after 'end_reset_fields' in + * the CPU state struct so it won't get zeroed on reset. + */ + set_float_2nan_prop_rule(float_2nan_prop_s_ba, &env->fp_status); cpu_exec_realizefn(cs, &local_err); if (local_err != NULL) { diff --git a/target/sparc/fop_helper.c b/target/sparc/fop_helper.c index b6692382b3..6f9ccc008a 100644 --- a/target/sparc/fop_helper.c +++ b/target/sparc/fop_helper.c @@ -497,7 +497,10 @@ uint32_t helper_flcmps(float32 src1, float32 src2) * Perform the comparison with a dummy fp environment. 
*/ float_status discard = { }; - FloatRelation r = float32_compare_quiet(src1, src2, &discard); + FloatRelation r; + + set_float_2nan_prop_rule(float_2nan_prop_s_ba, &discard); + r = float32_compare_quiet(src1, src2, &discard); switch (r) { case float_relation_equal: @@ -518,7 +521,10 @@ uint32_t helper_flcmps(float32 src1, float32 src2) uint32_t helper_flcmpd(float64 src1, float64 src2) { float_status discard = { }; - FloatRelation r = float64_compare_quiet(src1, src2, &discard); + FloatRelation r; + + set_float_2nan_prop_rule(float_2nan_prop_s_ba, &discard); + r = float64_compare_quiet(src1, src2, &discard); switch (r) { case float_relation_equal: diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c index a08c7a0b1f..6f9039abae 100644 --- a/target/xtensa/cpu.c +++ b/target/xtensa/cpu.c @@ -134,7 +134,7 @@ static void xtensa_cpu_reset_hold(Object *obj, ResetType type) cs->halted = env->runstall; #endif set_no_signaling_nans(!dfpu, &env->fp_status); - set_use_first_nan(!dfpu, &env->fp_status); + xtensa_use_first_nan(env, !dfpu); } static ObjectClass *xtensa_cpu_class_by_name(const char *cpu_model) diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h index 9f2341d856..77e48eef19 100644 --- a/target/xtensa/cpu.h +++ b/target/xtensa/cpu.h @@ -802,4 +802,10 @@ static inline void cpu_get_tb_cpu_state(CPUXtensaState *env, vaddr *pc, XtensaCPU *xtensa_cpu_create_with_clock(const char *cpu_type, Clock *cpu_refclk); +/* + * Set the NaN propagation rule for future FPU operations: + * use_first is true to pick the first NaN as the result if both + * inputs are NaNs, false to pick the second. + */ +void xtensa_use_first_nan(CPUXtensaState *env, bool use_first); #endif diff --git a/target/xtensa/fpu_helper.c b/target/xtensa/fpu_helper.c index 381e83ded8..f2d212d05d 100644 --- a/target/xtensa/fpu_helper.c +++ b/target/xtensa/fpu_helper.c @@ -57,6 +57,13 @@ static const struct { { XTENSA_FP_V, float_flag_invalid, }, }; +void xtensa_use_first_nan(CPUXtensaState *env, bool use_first) +{ + set_use_first_nan(use_first, &env->fp_status); + set_float_2nan_prop_rule(use_first ? 
float_2nan_prop_ab : float_2nan_prop_ba, + &env->fp_status); +} + void HELPER(wur_fpu2k_fcr)(CPUXtensaState *env, uint32_t v) { static const int rounding_mode[] = { @@ -171,87 +178,87 @@ float32 HELPER(fpu2k_msub_s)(CPUXtensaState *env, float64 HELPER(add_d)(CPUXtensaState *env, float64 a, float64 b) { - set_use_first_nan(true, &env->fp_status); + xtensa_use_first_nan(env, true); return float64_add(a, b, &env->fp_status); } float32 HELPER(add_s)(CPUXtensaState *env, float32 a, float32 b) { - set_use_first_nan(env->config->use_first_nan, &env->fp_status); + xtensa_use_first_nan(env, env->config->use_first_nan); return float32_add(a, b, &env->fp_status); } float64 HELPER(sub_d)(CPUXtensaState *env, float64 a, float64 b) { - set_use_first_nan(true, &env->fp_status); + xtensa_use_first_nan(env, true); return float64_sub(a, b, &env->fp_status); } float32 HELPER(sub_s)(CPUXtensaState *env, float32 a, float32 b) { - set_use_first_nan(env->config->use_first_nan, &env->fp_status); + xtensa_use_first_nan(env, env->config->use_first_nan); return float32_sub(a, b, &env->fp_status); } float64 HELPER(mul_d)(CPUXtensaState *env, float64 a, float64 b) { - set_use_first_nan(true, &env->fp_status); + xtensa_use_first_nan(env, true); return float64_mul(a, b, &env->fp_status); } float32 HELPER(mul_s)(CPUXtensaState *env, float32 a, float32 b) { - set_use_first_nan(env->config->use_first_nan, &env->fp_status); + xtensa_use_first_nan(env, env->config->use_first_nan); return float32_mul(a, b, &env->fp_status); } float64 HELPER(madd_d)(CPUXtensaState *env, float64 a, float64 b, float64 c) { - set_use_first_nan(env->config->use_first_nan, &env->fp_status); + xtensa_use_first_nan(env, env->config->use_first_nan); return float64_muladd(b, c, a, 0, &env->fp_status); } float32 HELPER(madd_s)(CPUXtensaState *env, float32 a, float32 b, float32 c) { - set_use_first_nan(env->config->use_first_nan, &env->fp_status); + xtensa_use_first_nan(env, env->config->use_first_nan); return float32_muladd(b, c, a, 0, &env->fp_status); } float64 HELPER(msub_d)(CPUXtensaState *env, float64 a, float64 b, float64 c) { - set_use_first_nan(env->config->use_first_nan, &env->fp_status); + xtensa_use_first_nan(env, env->config->use_first_nan); return float64_muladd(b, c, a, float_muladd_negate_product, &env->fp_status); } float32 HELPER(msub_s)(CPUXtensaState *env, float32 a, float32 b, float32 c) { - set_use_first_nan(env->config->use_first_nan, &env->fp_status); + xtensa_use_first_nan(env, env->config->use_first_nan); return float32_muladd(b, c, a, float_muladd_negate_product, &env->fp_status); } float64 HELPER(mkdadj_d)(CPUXtensaState *env, float64 a, float64 b) { - set_use_first_nan(true, &env->fp_status); + xtensa_use_first_nan(env, true); return float64_div(b, a, &env->fp_status); } float32 HELPER(mkdadj_s)(CPUXtensaState *env, float32 a, float32 b) { - set_use_first_nan(env->config->use_first_nan, &env->fp_status); + xtensa_use_first_nan(env, env->config->use_first_nan); return float32_div(b, a, &env->fp_status); } float64 HELPER(mksadj_d)(CPUXtensaState *env, float64 v) { - set_use_first_nan(true, &env->fp_status); + xtensa_use_first_nan(env, true); return float64_sqrt(v, &env->fp_status); } float32 HELPER(mksadj_s)(CPUXtensaState *env, float32 v) { - set_use_first_nan(env->config->use_first_nan, &env->fp_status); + xtensa_use_first_nan(env, env->config->use_first_nan); return float32_sqrt(v, &env->fp_status); } diff --git a/tests/data/acpi/disassemle-aml.sh b/tests/data/acpi/disassemle-aml.sh index 253b7620a0..89561d233d 100755 --- 
a/tests/data/acpi/disassemle-aml.sh +++ b/tests/data/acpi/disassemle-aml.sh @@ -14,7 +14,7 @@ while getopts "o:" arg; do esac done -for machine in tests/data/acpi/* +for machine in tests/data/acpi/*/* do if [[ ! -d "$machine" ]]; then diff --git a/tests/data/acpi/x86/pc/DSDT b/tests/data/acpi/x86/pc/DSDT index 92225236e7..f68a32e606 100644 --- a/tests/data/acpi/x86/pc/DSDT +++ b/tests/data/acpi/x86/pc/DSDT Binary files differdiff --git a/tests/data/acpi/x86/pc/DSDT.acpierst b/tests/data/acpi/x86/pc/DSDT.acpierst index 25b3995505..0fd79699eb 100644 --- a/tests/data/acpi/x86/pc/DSDT.acpierst +++ b/tests/data/acpi/x86/pc/DSDT.acpierst Binary files differdiff --git a/tests/data/acpi/x86/pc/DSDT.acpihmat b/tests/data/acpi/x86/pc/DSDT.acpihmat index 73a9ce59e9..a4dd09e5ef 100644 --- a/tests/data/acpi/x86/pc/DSDT.acpihmat +++ b/tests/data/acpi/x86/pc/DSDT.acpihmat Binary files differdiff --git a/tests/data/acpi/x86/pc/DSDT.bridge b/tests/data/acpi/x86/pc/DSDT.bridge index 4cef454e37..7ef58152d2 100644 --- a/tests/data/acpi/x86/pc/DSDT.bridge +++ b/tests/data/acpi/x86/pc/DSDT.bridge Binary files differdiff --git a/tests/data/acpi/x86/pc/DSDT.cphp b/tests/data/acpi/x86/pc/DSDT.cphp index 1dc928333d..1079ff81c1 100644 --- a/tests/data/acpi/x86/pc/DSDT.cphp +++ b/tests/data/acpi/x86/pc/DSDT.cphp Binary files differdiff --git a/tests/data/acpi/x86/pc/DSDT.dimmpxm b/tests/data/acpi/x86/pc/DSDT.dimmpxm index 9f71d2e58b..34fe3fcad9 100644 --- a/tests/data/acpi/x86/pc/DSDT.dimmpxm +++ b/tests/data/acpi/x86/pc/DSDT.dimmpxm Binary files differdiff --git a/tests/data/acpi/x86/pc/DSDT.hpbridge b/tests/data/acpi/x86/pc/DSDT.hpbridge index db420593a3..33c7529f5c 100644 --- a/tests/data/acpi/x86/pc/DSDT.hpbridge +++ b/tests/data/acpi/x86/pc/DSDT.hpbridge Binary files differdiff --git a/tests/data/acpi/x86/pc/DSDT.hpbrroot b/tests/data/acpi/x86/pc/DSDT.hpbrroot index 31b6adb4eb..2661170c83 100644 --- a/tests/data/acpi/x86/pc/DSDT.hpbrroot +++ b/tests/data/acpi/x86/pc/DSDT.hpbrroot Binary files differdiff --git a/tests/data/acpi/x86/pc/DSDT.ipmikcs b/tests/data/acpi/x86/pc/DSDT.ipmikcs index c2a0330d97..688faf83cb 100644 --- a/tests/data/acpi/x86/pc/DSDT.ipmikcs +++ b/tests/data/acpi/x86/pc/DSDT.ipmikcs Binary files differdiff --git a/tests/data/acpi/x86/pc/DSDT.memhp b/tests/data/acpi/x86/pc/DSDT.memhp index c15a9fae94..6ede4361f4 100644 --- a/tests/data/acpi/x86/pc/DSDT.memhp +++ b/tests/data/acpi/x86/pc/DSDT.memhp Binary files differdiff --git a/tests/data/acpi/x86/pc/DSDT.nohpet b/tests/data/acpi/x86/pc/DSDT.nohpet index dd29f5cb62..9d6040733f 100644 --- a/tests/data/acpi/x86/pc/DSDT.nohpet +++ b/tests/data/acpi/x86/pc/DSDT.nohpet Binary files differdiff --git a/tests/data/acpi/x86/pc/DSDT.numamem b/tests/data/acpi/x86/pc/DSDT.numamem index 8a6b56fe7d..aa9986f74b 100644 --- a/tests/data/acpi/x86/pc/DSDT.numamem +++ b/tests/data/acpi/x86/pc/DSDT.numamem Binary files differdiff --git a/tests/data/acpi/x86/pc/DSDT.roothp b/tests/data/acpi/x86/pc/DSDT.roothp index a16b0d9d4b..86c2ae11dc 100644 --- a/tests/data/acpi/x86/pc/DSDT.roothp +++ b/tests/data/acpi/x86/pc/DSDT.roothp Binary files differdiff --git a/tests/data/acpi/x86/q35/DSDT b/tests/data/acpi/x86/q35/DSDT index fb89ae0ac6..b0bbff7686 100644 --- a/tests/data/acpi/x86/q35/DSDT +++ b/tests/data/acpi/x86/q35/DSDT Binary files differdiff --git a/tests/data/acpi/x86/q35/DSDT.acpierst b/tests/data/acpi/x86/q35/DSDT.acpierst index 46fd25400b..f91cbe55fc 100644 --- a/tests/data/acpi/x86/q35/DSDT.acpierst +++ b/tests/data/acpi/x86/q35/DSDT.acpierst Binary files 
differdiff --git a/tests/data/acpi/x86/q35/DSDT.acpihmat b/tests/data/acpi/x86/q35/DSDT.acpihmat index 61c5bd52a4..0949fb9d67 100644 --- a/tests/data/acpi/x86/q35/DSDT.acpihmat +++ b/tests/data/acpi/x86/q35/DSDT.acpihmat Binary files differdiff --git a/tests/data/acpi/x86/q35/DSDT.acpihmat-noinitiator b/tests/data/acpi/x86/q35/DSDT.acpihmat-noinitiator index 3aaa2bbdf5..0fa4daa35c 100644 --- a/tests/data/acpi/x86/q35/DSDT.acpihmat-noinitiator +++ b/tests/data/acpi/x86/q35/DSDT.acpihmat-noinitiator Binary files differdiff --git a/tests/data/acpi/x86/q35/DSDT.applesmc b/tests/data/acpi/x86/q35/DSDT.applesmc index 944209adea..a5d032b7d9 100644 --- a/tests/data/acpi/x86/q35/DSDT.applesmc +++ b/tests/data/acpi/x86/q35/DSDT.applesmc Binary files differdiff --git a/tests/data/acpi/x86/q35/DSDT.bridge b/tests/data/acpi/x86/q35/DSDT.bridge index d9938dba8f..3464f55297 100644 --- a/tests/data/acpi/x86/q35/DSDT.bridge +++ b/tests/data/acpi/x86/q35/DSDT.bridge Binary files differdiff --git a/tests/data/acpi/x86/q35/DSDT.core-count b/tests/data/acpi/x86/q35/DSDT.core-count index a24b04cbdb..08f5d5f54b 100644 --- a/tests/data/acpi/x86/q35/DSDT.core-count +++ b/tests/data/acpi/x86/q35/DSDT.core-count Binary files differdiff --git a/tests/data/acpi/x86/q35/DSDT.core-count2 b/tests/data/acpi/x86/q35/DSDT.core-count2 index 3a0cb8c581..d29a7108f8 100644 --- a/tests/data/acpi/x86/q35/DSDT.core-count2 +++ b/tests/data/acpi/x86/q35/DSDT.core-count2 Binary files differdiff --git a/tests/data/acpi/x86/q35/DSDT.cphp b/tests/data/acpi/x86/q35/DSDT.cphp index 20955d0aa3..7fd59bf670 100644 --- a/tests/data/acpi/x86/q35/DSDT.cphp +++ b/tests/data/acpi/x86/q35/DSDT.cphp Binary files differdiff --git a/tests/data/acpi/x86/q35/DSDT.cxl b/tests/data/acpi/x86/q35/DSDT.cxl index f561750cab..613a40b957 100644 --- a/tests/data/acpi/x86/q35/DSDT.cxl +++ b/tests/data/acpi/x86/q35/DSDT.cxl Binary files differdiff --git a/tests/data/acpi/x86/q35/DSDT.dimmpxm b/tests/data/acpi/x86/q35/DSDT.dimmpxm index 228374b55b..1db0bf454a 100644 --- a/tests/data/acpi/x86/q35/DSDT.dimmpxm +++ b/tests/data/acpi/x86/q35/DSDT.dimmpxm Binary files differdiff --git a/tests/data/acpi/x86/q35/DSDT.ipmibt b/tests/data/acpi/x86/q35/DSDT.ipmibt index 45f911ada5..25f43ae8ef 100644 --- a/tests/data/acpi/x86/q35/DSDT.ipmibt +++ b/tests/data/acpi/x86/q35/DSDT.ipmibt Binary files differdiff --git a/tests/data/acpi/x86/q35/DSDT.ipmismbus b/tests/data/acpi/x86/q35/DSDT.ipmismbus index e5d6811bee..32bcd25bda 100644 --- a/tests/data/acpi/x86/q35/DSDT.ipmismbus +++ b/tests/data/acpi/x86/q35/DSDT.ipmismbus Binary files differdiff --git a/tests/data/acpi/x86/q35/DSDT.ivrs b/tests/data/acpi/x86/q35/DSDT.ivrs index 46fd25400b..f91cbe55fc 100644 --- a/tests/data/acpi/x86/q35/DSDT.ivrs +++ b/tests/data/acpi/x86/q35/DSDT.ivrs Binary files differdiff --git a/tests/data/acpi/x86/q35/DSDT.memhp b/tests/data/acpi/x86/q35/DSDT.memhp index 5ce081187a..be90eb71d8 100644 --- a/tests/data/acpi/x86/q35/DSDT.memhp +++ b/tests/data/acpi/x86/q35/DSDT.memhp Binary files differdiff --git a/tests/data/acpi/x86/q35/DSDT.mmio64 b/tests/data/acpi/x86/q35/DSDT.mmio64 index bdf36c4d57..01f276a6af 100644 --- a/tests/data/acpi/x86/q35/DSDT.mmio64 +++ b/tests/data/acpi/x86/q35/DSDT.mmio64 Binary files differdiff --git a/tests/data/acpi/x86/q35/DSDT.multi-bridge b/tests/data/acpi/x86/q35/DSDT.multi-bridge index 1db43a69e4..1bd2ee8d2e 100644 --- a/tests/data/acpi/x86/q35/DSDT.multi-bridge +++ b/tests/data/acpi/x86/q35/DSDT.multi-bridge Binary files differdiff --git 
a/tests/data/acpi/x86/q35/DSDT.noacpihp b/tests/data/acpi/x86/q35/DSDT.noacpihp index 8bc16887e1..45cc2bcffa 100644 --- a/tests/data/acpi/x86/q35/DSDT.noacpihp +++ b/tests/data/acpi/x86/q35/DSDT.noacpihp Binary files differdiff --git a/tests/data/acpi/x86/q35/DSDT.nohpet b/tests/data/acpi/x86/q35/DSDT.nohpet index c13e45e361..f110504b9c 100644 --- a/tests/data/acpi/x86/q35/DSDT.nohpet +++ b/tests/data/acpi/x86/q35/DSDT.nohpet Binary files differdiff --git a/tests/data/acpi/x86/q35/DSDT.numamem b/tests/data/acpi/x86/q35/DSDT.numamem index ba6669437e..6090958f39 100644 --- a/tests/data/acpi/x86/q35/DSDT.numamem +++ b/tests/data/acpi/x86/q35/DSDT.numamem Binary files differdiff --git a/tests/data/acpi/x86/q35/DSDT.pvpanic-isa b/tests/data/acpi/x86/q35/DSDT.pvpanic-isa index 6ad42873e9..7a8e568315 100644 --- a/tests/data/acpi/x86/q35/DSDT.pvpanic-isa +++ b/tests/data/acpi/x86/q35/DSDT.pvpanic-isa Binary files differdiff --git a/tests/data/acpi/x86/q35/DSDT.thread-count b/tests/data/acpi/x86/q35/DSDT.thread-count index a24b04cbdb..08f5d5f54b 100644 --- a/tests/data/acpi/x86/q35/DSDT.thread-count +++ b/tests/data/acpi/x86/q35/DSDT.thread-count Binary files differdiff --git a/tests/data/acpi/x86/q35/DSDT.thread-count2 b/tests/data/acpi/x86/q35/DSDT.thread-count2 index 3a0cb8c581..d29a7108f8 100644 --- a/tests/data/acpi/x86/q35/DSDT.thread-count2 +++ b/tests/data/acpi/x86/q35/DSDT.thread-count2 Binary files differdiff --git a/tests/data/acpi/x86/q35/DSDT.tis.tpm12 b/tests/data/acpi/x86/q35/DSDT.tis.tpm12 index e381ce4cbf..29a416f050 100644 --- a/tests/data/acpi/x86/q35/DSDT.tis.tpm12 +++ b/tests/data/acpi/x86/q35/DSDT.tis.tpm12 Binary files differdiff --git a/tests/data/acpi/x86/q35/DSDT.tis.tpm2 b/tests/data/acpi/x86/q35/DSDT.tis.tpm2 index a09253042c..59288f02c4 100644 --- a/tests/data/acpi/x86/q35/DSDT.tis.tpm2 +++ b/tests/data/acpi/x86/q35/DSDT.tis.tpm2 Binary files differdiff --git a/tests/data/acpi/x86/q35/DSDT.type4-count b/tests/data/acpi/x86/q35/DSDT.type4-count index edc23198cd..eaca76e8e6 100644 --- a/tests/data/acpi/x86/q35/DSDT.type4-count +++ b/tests/data/acpi/x86/q35/DSDT.type4-count Binary files differdiff --git a/tests/data/acpi/x86/q35/DSDT.viot b/tests/data/acpi/x86/q35/DSDT.viot index 8d98dd8845..de0942a13d 100644 --- a/tests/data/acpi/x86/q35/DSDT.viot +++ b/tests/data/acpi/x86/q35/DSDT.viot Binary files differdiff --git a/tests/data/acpi/x86/q35/DSDT.xapic b/tests/data/acpi/x86/q35/DSDT.xapic index d4acd851c6..9059812b58 100644 --- a/tests/data/acpi/x86/q35/DSDT.xapic +++ b/tests/data/acpi/x86/q35/DSDT.xapic Binary files differdiff --git a/tests/fp/fp-bench.c b/tests/fp/fp-bench.c index 8ce0ca1545..75c07d5d1f 100644 --- a/tests/fp/fp-bench.c +++ b/tests/fp/fp-bench.c @@ -488,6 +488,8 @@ static void run_bench(void) { bench_func_t f; + set_float_2nan_prop_rule(float_2nan_prop_s_ab, &soft_status); + f = bench_funcs[operation][precision]; g_assert(f); f(); diff --git a/tests/fp/fp-test-log2.c b/tests/fp/fp-test-log2.c index 4eae93eb7c..de702c4c80 100644 --- a/tests/fp/fp-test-log2.c +++ b/tests/fp/fp-test-log2.c @@ -70,6 +70,7 @@ int main(int ac, char **av) float_status qsf = {0}; int i; + set_float_2nan_prop_rule(float_2nan_prop_s_ab, &qsf); set_float_rounding_mode(float_round_nearest_even, &qsf); test.d = 0.0; diff --git a/tests/fp/fp-test.c b/tests/fp/fp-test.c index 36b5712cda..5f6f25c882 100644 --- a/tests/fp/fp-test.c +++ b/tests/fp/fp-test.c @@ -935,6 +935,8 @@ void run_test(void) { unsigned int i; + set_float_2nan_prop_rule(float_2nan_prop_s_ab, &qsf); + 
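The three floating-point harness hunks above (fp-bench.c, fp-test-log2.c, fp-test.c) all add the same one-line call: softfloat now expects every float_status to carry an explicit two-NaN propagation rule, so a standalone user of the library has to pick one before running two-operand operations. Below is a minimal sketch of the same initialisation pattern, not part of the patch, assuming the fpu/softfloat.h declarations of float64_default_nan() and float64_add(); the rule and rounding-mode setters are the ones used in the hunks above.

    #include "qemu/osdep.h"
    #include "fpu/softfloat.h"

    /* Illustrative sketch only, not part of the patch above. */
    static float64 add_two_nans(void)
    {
        float_status st = { 0 };

        /* Same rule the harnesses pick: prefer a signalling NaN,
         * otherwise operand 'a' over operand 'b'. */
        set_float_2nan_prop_rule(float_2nan_prop_s_ab, &st);
        set_float_rounding_mode(float_round_nearest_even, &st);

        /* With two NaN inputs, the propagation rule set above decides
         * which one is returned; leaving it unset would trip an
         * assertion in current softfloat. */
        float64 nan = float64_default_nan(&st);
        return float64_add(nan, nan, &st);
    }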
genCases_setLevel(test_level); verCases_maxErrorCount = n_max_errors; diff --git a/tests/qtest/fuzz-virtio-balloon-test.c b/tests/qtest/fuzz-virtio-balloon-test.c new file mode 100644 index 0000000000..ecb597fbee --- /dev/null +++ b/tests/qtest/fuzz-virtio-balloon-test.c @@ -0,0 +1,37 @@ +/* + * QTest fuzzer-generated testcase for virtio balloon device + * + * Copyright (c) 2024 Gao Shiyuan <gaoshiyuan@baidu.com> + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "libqtest.h" + +/* + * https://gitlab.com/qemu-project/qemu/-/issues/2576 + * Used to trigger: + * virtio_address_space_lookup: Assertion `mrs.mr' failed. + */ +static void oss_fuzz_71649(void) +{ + QTestState *s = qtest_init("-device virtio-balloon -machine q35" + " -nodefaults"); + + qtest_outl(s, 0xcf8, 0x80000890); + qtest_outl(s, 0xcfc, 0x2); + qtest_outl(s, 0xcf8, 0x80000891); + qtest_inl(s, 0xcfc); + qtest_quit(s); +} + +int main(int argc, char **argv) +{ + g_test_init(&argc, &argv, NULL); + + qtest_add_func("fuzz/virtio/oss_fuzz_71649", oss_fuzz_71649); + + return g_test_run(); +} + diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build index 8ea126c2d8..aa93e98418 100644 --- a/tests/qtest/meson.build +++ b/tests/qtest/meson.build @@ -88,6 +88,7 @@ qtests_i386 = \ (config_all_devices.has_key('CONFIG_MEGASAS_SCSI_PCI') ? ['fuzz-megasas-test'] : []) + \ (config_all_devices.has_key('CONFIG_LSI_SCSI_PCI') ? ['fuzz-lsi53c895a-test'] : []) + \ (config_all_devices.has_key('CONFIG_VIRTIO_SCSI') ? ['fuzz-virtio-scsi-test'] : []) + \ + (config_all_devices.has_key('CONFIG_VIRTIO_BALLOON') ? ['fuzz-virtio-balloon-test'] : []) + \ (config_all_devices.has_key('CONFIG_Q35') ? ['q35-test'] : []) + \ (config_all_devices.has_key('CONFIG_SB16') ? ['fuzz-sb16-test'] : []) + \ (config_all_devices.has_key('CONFIG_SDHCI_PCI') ? ['fuzz-sdcard-test'] : []) + \ @@ -176,6 +177,7 @@ qtests_ppc64 = \ qtests_ppc + \ (config_all_devices.has_key('CONFIG_PSERIES') ? ['device-plug-test'] : []) + \ (config_all_devices.has_key('CONFIG_POWERNV') ? ['pnv-xscom-test'] : []) + \ + (config_all_devices.has_key('CONFIG_POWERNV') ? ['pnv-xive2-test'] : []) + \ (config_all_devices.has_key('CONFIG_POWERNV') ? ['pnv-spi-seeprom-test'] : []) + \ (config_all_devices.has_key('CONFIG_POWERNV') ? ['pnv-host-i2c-test'] : []) + \ (config_all_devices.has_key('CONFIG_PSERIES') ? ['numa-test'] : []) + \ @@ -345,6 +347,7 @@ qtests = { 'ivshmem-test': [rt, '../../contrib/ivshmem-server/ivshmem-server.c'], 'migration-test': migration_files, 'pxe-test': files('boot-sector.c'), + 'pnv-xive2-test': files('pnv-xive2-common.c', 'pnv-xive2-flush-sync.c'), 'qos-test': [chardev, io, qos_test_ss.apply({}).sources()], 'tpm-crb-swtpm-test': [io, tpmemu_files], 'tpm-crb-test': [io, tpmemu_files], diff --git a/tests/qtest/pnv-xive2-common.c b/tests/qtest/pnv-xive2-common.c new file mode 100644 index 0000000000..bf2bce0056 --- /dev/null +++ b/tests/qtest/pnv-xive2-common.c @@ -0,0 +1,190 @@ +/* + * QTest testcase for PowerNV 10 interrupt controller (xive2) + * - Common functions for XIVE2 tests + * + * Copyright (c) 2024, IBM Corporation. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include "qemu/osdep.h" +#include "libqtest.h" + +#include "pnv-xive2-common.h" + + +static uint64_t pnv_xscom_addr(uint32_t pcba) +{ + return P10_XSCOM_BASE | ((uint64_t) pcba << 3); +} + +static uint64_t pnv_xive_xscom_addr(uint32_t reg) +{ + return pnv_xscom_addr(XIVE_XSCOM + reg); +} + +uint64_t pnv_xive_xscom_read(QTestState *qts, uint32_t reg) +{ + return qtest_readq(qts, pnv_xive_xscom_addr(reg)); +} + +void pnv_xive_xscom_write(QTestState *qts, uint32_t reg, uint64_t val) +{ + qtest_writeq(qts, pnv_xive_xscom_addr(reg), val); +} + +static void xive_get_struct(QTestState *qts, uint64_t src, void *dest, + size_t size) +{ + uint8_t *destination = (uint8_t *)dest; + size_t i; + + for (i = 0; i < size; i++) { + *(destination + i) = qtest_readb(qts, src + i); + } +} + +static void xive_copy_struct(QTestState *qts, void *src, uint64_t dest, + size_t size) +{ + uint8_t *source = (uint8_t *)src; + size_t i; + + for (i = 0; i < size; i++) { + qtest_writeb(qts, dest + i, *(source + i)); + } +} + +uint64_t xive_get_queue_addr(uint32_t end_index) +{ + return XIVE_QUEUE_MEM + (uint64_t)end_index * XIVE_QUEUE_SIZE; +} + +uint8_t get_esb(QTestState *qts, uint32_t index, uint8_t page, + uint32_t offset) +{ + uint64_t addr; + + addr = XIVE_ESB_ADDR + ((uint64_t)index << (XIVE_PAGE_SHIFT + 1)); + if (page == 1) { + addr += 1 << XIVE_PAGE_SHIFT; + } + return qtest_readb(qts, addr + offset); +} + +void set_esb(QTestState *qts, uint32_t index, uint8_t page, + uint32_t offset, uint32_t val) +{ + uint64_t addr; + + addr = XIVE_ESB_ADDR + ((uint64_t)index << (XIVE_PAGE_SHIFT + 1)); + if (page == 1) { + addr += 1 << XIVE_PAGE_SHIFT; + } + return qtest_writel(qts, addr + offset, cpu_to_be32(val)); +} + +void get_nvp(QTestState *qts, uint32_t index, Xive2Nvp* nvp) +{ + uint64_t addr = XIVE_NVP_MEM + (uint64_t)index * sizeof(Xive2Nvp); + xive_get_struct(qts, addr, nvp, sizeof(Xive2Nvp)); +} + +void set_nvp(QTestState *qts, uint32_t index, uint8_t first) +{ + uint64_t nvp_addr; + Xive2Nvp nvp; + uint64_t report_addr; + + nvp_addr = XIVE_NVP_MEM + (uint64_t)index * sizeof(Xive2Nvp); + report_addr = (XIVE_REPORT_MEM + (uint64_t)index * XIVE_REPORT_SIZE) >> 8; + + memset(&nvp, 0, sizeof(nvp)); + nvp.w0 = xive_set_field32(NVP2_W0_VALID, 0, 1); + nvp.w0 = xive_set_field32(NVP2_W0_PGOFIRST, nvp.w0, first); + nvp.w6 = xive_set_field32(NVP2_W6_REPORTING_LINE, nvp.w6, + (report_addr >> 24) & 0xfffffff); + nvp.w7 = xive_set_field32(NVP2_W7_REPORTING_LINE, nvp.w7, + report_addr & 0xffffff); + xive_copy_struct(qts, &nvp, nvp_addr, sizeof(nvp)); +} + +static uint64_t get_cl_pair_addr(Xive2Nvp *nvp) +{ + uint64_t upper = xive_get_field32(0x0fffffff, nvp->w6); + uint64_t lower = xive_get_field32(0xffffff00, nvp->w7); + return (upper << 32) | (lower << 8); +} + +void get_cl_pair(QTestState *qts, Xive2Nvp *nvp, uint8_t *cl_pair) +{ + uint64_t addr = get_cl_pair_addr(nvp); + xive_get_struct(qts, addr, cl_pair, XIVE_REPORT_SIZE); +} + +void set_cl_pair(QTestState *qts, Xive2Nvp *nvp, uint8_t *cl_pair) +{ + uint64_t addr = get_cl_pair_addr(nvp); + xive_copy_struct(qts, cl_pair, addr, XIVE_REPORT_SIZE); +} + +void set_nvg(QTestState *qts, uint32_t index, uint8_t next) +{ + uint64_t nvg_addr; + Xive2Nvgc nvg; + + nvg_addr = XIVE_NVG_MEM + (uint64_t)index * sizeof(Xive2Nvgc); + + memset(&nvg, 0, sizeof(nvg)); + nvg.w0 = xive_set_field32(NVGC2_W0_VALID, 0, 1); + nvg.w0 = xive_set_field32(NVGC2_W0_PGONEXT, nvg.w0, next); + xive_copy_struct(qts, &nvg, nvg_addr, sizeof(nvg)); +} + 
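As a worked example of the packing done by set_nvp() and get_cl_pair_addr() above: the reporting-line address is stored right-shifted by 8, with its upper 28 bits in NVP word 6 and the lower 24 bits in word 7, so recombining the two fields and shifting the low part back by 8 recovers the original 256-byte-aligned address. A small self-check in the same style, reusing the helpers and constants already defined by these files (illustrative only, not part of the patch):

    /* Round-trip the reporting-line address packing for NVP index 5. */
    static void check_report_addr_packing(void)
    {
        uint64_t report = XIVE_REPORT_MEM + 5 * XIVE_REPORT_SIZE;
        uint64_t shifted = report >> 8;
        Xive2Nvp nvp = { 0 };

        /* pack, as set_nvp() does */
        nvp.w6 = xive_set_field32(NVP2_W6_REPORTING_LINE, 0,
                                  (shifted >> 24) & 0xfffffff);
        nvp.w7 = xive_set_field32(NVP2_W7_REPORTING_LINE, 0,
                                  shifted & 0xffffff);

        /* unpack, as get_cl_pair_addr() does */
        uint64_t upper = xive_get_field32(0x0fffffff, nvp.w6);
        uint64_t lower = xive_get_field32(0xffffff00, nvp.w7);

        g_assert_cmphex(((upper << 32) | (lower << 8)), ==, report);
    }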
+void set_eas(QTestState *qts, uint32_t index, uint32_t end_index, + uint32_t data) +{ + uint64_t eas_addr; + Xive2Eas eas; + + eas_addr = XIVE_EAS_MEM + (uint64_t)index * sizeof(Xive2Eas); + + memset(&eas, 0, sizeof(eas)); + eas.w = xive_set_field64(EAS2_VALID, 0, 1); + eas.w = xive_set_field64(EAS2_END_INDEX, eas.w, end_index); + eas.w = xive_set_field64(EAS2_END_DATA, eas.w, data); + xive_copy_struct(qts, &eas, eas_addr, sizeof(eas)); +} + +void set_end(QTestState *qts, uint32_t index, uint32_t nvp_index, + uint8_t priority, bool i) +{ + uint64_t end_addr, queue_addr, queue_hi, queue_lo; + uint8_t queue_size; + Xive2End end; + + end_addr = XIVE_END_MEM + (uint64_t)index * sizeof(Xive2End); + queue_addr = xive_get_queue_addr(index); + queue_hi = (queue_addr >> 32) & END2_W2_EQ_ADDR_HI; + queue_lo = queue_addr & END2_W3_EQ_ADDR_LO; + queue_size = ctz16(XIVE_QUEUE_SIZE) - 12; + + memset(&end, 0, sizeof(end)); + end.w0 = xive_set_field32(END2_W0_VALID, 0, 1); + end.w0 = xive_set_field32(END2_W0_ENQUEUE, end.w0, 1); + end.w0 = xive_set_field32(END2_W0_UCOND_NOTIFY, end.w0, 1); + end.w0 = xive_set_field32(END2_W0_BACKLOG, end.w0, 1); + + end.w1 = xive_set_field32(END2_W1_GENERATION, 0, 1); + + end.w2 = cpu_to_be32(queue_hi); + + end.w3 = cpu_to_be32(queue_lo); + end.w3 = xive_set_field32(END2_W3_QSIZE, end.w3, queue_size); + + end.w6 = xive_set_field32(END2_W6_IGNORE, 0, i); + end.w6 = xive_set_field32(END2_W6_VP_OFFSET, end.w6, nvp_index); + + end.w7 = xive_set_field32(END2_W7_F0_PRIORITY, 0, priority); + xive_copy_struct(qts, &end, end_addr, sizeof(end)); +} + diff --git a/tests/qtest/pnv-xive2-common.h b/tests/qtest/pnv-xive2-common.h new file mode 100644 index 0000000000..9ae34771aa --- /dev/null +++ b/tests/qtest/pnv-xive2-common.h @@ -0,0 +1,111 @@ +/* + * QTest testcase for PowerNV 10 interrupt controller (xive2) + * + * Copyright (c) 2024, IBM Corporation. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef TEST_PNV_XIVE2_COMMON_H +#define TEST_PNV_XIVE2_COMMON_H + +#define PPC_BIT(bit) (0x8000000000000000ULL >> (bit)) +#define PPC_BIT32(bit) (0x80000000 >> (bit)) +#define PPC_BIT8(bit) (0x80 >> (bit)) +#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs)) +#define PPC_BITMASK32(bs, be) ((PPC_BIT32(bs) - PPC_BIT32(be)) | \ + PPC_BIT32(bs)) +#include "qemu/bswap.h" +#include "hw/intc/pnv_xive2_regs.h" +#include "hw/ppc/xive_regs.h" +#include "hw/ppc/xive2_regs.h" + +/* + * sizing: + * 128 interrupts + * => ESB BAR range: 16M + * 256 ENDs + * => END BAR range: 16M + * 256 VPs + * => NVPG,NVC BAR range: 32M + */ +#define MAX_IRQS 128 +#define MAX_ENDS 256 +#define MAX_VPS 256 + +#define XIVE_PAGE_SHIFT 16 + +#define XIVE_TRIGGER_PAGE 0 +#define XIVE_EOI_PAGE 1 + +#define XIVE_IC_ADDR 0x0006030200000000ull +#define XIVE_IC_TM_INDIRECT (XIVE_IC_ADDR + (256 << XIVE_PAGE_SHIFT)) +#define XIVE_IC_BAR ((0x3ull << 62) | XIVE_IC_ADDR) +#define XIVE_TM_BAR 0xc006030203180000ull +#define XIVE_ESB_ADDR 0x0006050000000000ull +#define XIVE_ESB_BAR ((0x3ull << 62) | XIVE_ESB_ADDR) +#define XIVE_END_BAR 0xc006060000000000ull +#define XIVE_NVPG_ADDR 0x0006040000000000ull +#define XIVE_NVPG_BAR ((0x3ull << 62) | XIVE_NVPG_ADDR) +#define XIVE_NVC_ADDR 0x0006030208000000ull +#define XIVE_NVC_BAR ((0x3ull << 62) | XIVE_NVC_ADDR) + +/* + * Memory layout + * A check is done when a table is configured to ensure that the max + * size of the resource fits in the table. 
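For orientation, the memory map declared just below is a row of 64 KiB (XIVE_VST_SIZE) slots starting at 0x10000000, one per virtual structure table, followed by the END queues and the NVP reporting lines. Working the defines through (my arithmetic, not figures stated by the patch), the layout comes out as in this sketch:

    /* One 64 KiB slot per table, then 256 x 4 KiB queues and
     * 256 x 256 B reporting lines; derived from the defines below. */
    QEMU_BUILD_BUG_ON(XIVE_EAS_MEM    != 0x10010000ull);
    QEMU_BUILD_BUG_ON(XIVE_SYNC_MEM   != 0x10060000ull);
    QEMU_BUILD_BUG_ON(XIVE_QUEUE_MEM  != 0x10070000ull);
    QEMU_BUILD_BUG_ON(XIVE_REPORT_MEM != 0x10170000ull);
    QEMU_BUILD_BUG_ON(XIVE_MEM_END    != 0x10180000ull);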
+ */ +#define XIVE_VST_SIZE 0x10000ull /* must be at least 4k */ + +#define XIVE_MEM_START 0x10000000ull +#define XIVE_ESB_MEM XIVE_MEM_START +#define XIVE_EAS_MEM (XIVE_ESB_MEM + XIVE_VST_SIZE) +#define XIVE_END_MEM (XIVE_EAS_MEM + XIVE_VST_SIZE) +#define XIVE_NVP_MEM (XIVE_END_MEM + XIVE_VST_SIZE) +#define XIVE_NVG_MEM (XIVE_NVP_MEM + XIVE_VST_SIZE) +#define XIVE_NVC_MEM (XIVE_NVG_MEM + XIVE_VST_SIZE) +#define XIVE_SYNC_MEM (XIVE_NVC_MEM + XIVE_VST_SIZE) +#define XIVE_QUEUE_MEM (XIVE_SYNC_MEM + XIVE_VST_SIZE) +#define XIVE_QUEUE_SIZE 4096 /* per End */ +#define XIVE_REPORT_MEM (XIVE_QUEUE_MEM + XIVE_QUEUE_SIZE * MAX_VPS) +#define XIVE_REPORT_SIZE 256 /* two cache lines per NVP */ +#define XIVE_MEM_END (XIVE_REPORT_MEM + XIVE_REPORT_SIZE * MAX_VPS) + +#define P10_XSCOM_BASE 0x000603fc00000000ull +#define XIVE_XSCOM 0x2010800ull + +#define XIVE_ESB_RESET 0b00 +#define XIVE_ESB_OFF 0b01 +#define XIVE_ESB_PENDING 0b10 +#define XIVE_ESB_QUEUED 0b11 + +#define XIVE_ESB_GET 0x800 +#define XIVE_ESB_SET_PQ_00 0xc00 /* Load */ +#define XIVE_ESB_SET_PQ_01 0xd00 /* Load */ +#define XIVE_ESB_SET_PQ_10 0xe00 /* Load */ +#define XIVE_ESB_SET_PQ_11 0xf00 /* Load */ + +#define XIVE_ESB_STORE_EOI 0x400 /* Store */ + + +extern uint64_t pnv_xive_xscom_read(QTestState *qts, uint32_t reg); +extern void pnv_xive_xscom_write(QTestState *qts, uint32_t reg, uint64_t val); +extern uint64_t xive_get_queue_addr(uint32_t end_index); +extern uint8_t get_esb(QTestState *qts, uint32_t index, uint8_t page, + uint32_t offset); +extern void set_esb(QTestState *qts, uint32_t index, uint8_t page, + uint32_t offset, uint32_t val); +extern void get_nvp(QTestState *qts, uint32_t index, Xive2Nvp* nvp); +extern void set_nvp(QTestState *qts, uint32_t index, uint8_t first); +extern void get_cl_pair(QTestState *qts, Xive2Nvp *nvp, uint8_t *cl_pair); +extern void set_cl_pair(QTestState *qts, Xive2Nvp *nvp, uint8_t *cl_pair); +extern void set_nvg(QTestState *qts, uint32_t index, uint8_t next); +extern void set_eas(QTestState *qts, uint32_t index, uint32_t end_index, + uint32_t data); +extern void set_end(QTestState *qts, uint32_t index, uint32_t nvp_index, + uint8_t priority, bool i); + + +void test_flush_sync_inject(QTestState *qts); + +#endif /* TEST_PNV_XIVE2_COMMON_H */ diff --git a/tests/qtest/pnv-xive2-flush-sync.c b/tests/qtest/pnv-xive2-flush-sync.c new file mode 100644 index 0000000000..3b32446adb --- /dev/null +++ b/tests/qtest/pnv-xive2-flush-sync.c @@ -0,0 +1,205 @@ +/* + * QTest testcase for PowerNV 10 interrupt controller (xive2) + * - Test cache flush/queue sync injection + * + * Copyright (c) 2024, IBM Corporation. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include "qemu/osdep.h" +#include "libqtest.h" + +#include "pnv-xive2-common.h" +#include "hw/intc/pnv_xive2_regs.h" +#include "hw/ppc/xive_regs.h" +#include "hw/ppc/xive2_regs.h" + +#define PNV_XIVE2_QUEUE_IPI 0x00 +#define PNV_XIVE2_QUEUE_HW 0x01 +#define PNV_XIVE2_QUEUE_NXC 0x02 +#define PNV_XIVE2_QUEUE_INT 0x03 +#define PNV_XIVE2_QUEUE_OS 0x04 +#define PNV_XIVE2_QUEUE_POOL 0x05 +#define PNV_XIVE2_QUEUE_HARD 0x06 +#define PNV_XIVE2_CACHE_ENDC 0x08 +#define PNV_XIVE2_CACHE_ESBC 0x09 +#define PNV_XIVE2_CACHE_EASC 0x0a +#define PNV_XIVE2_QUEUE_NXC_LD_LCL_NCO 0x10 +#define PNV_XIVE2_QUEUE_NXC_LD_LCL_CO 0x11 +#define PNV_XIVE2_QUEUE_NXC_ST_LCL_NCI 0x12 +#define PNV_XIVE2_QUEUE_NXC_ST_LCL_CI 0x13 +#define PNV_XIVE2_QUEUE_NXC_ST_RMT_NCI 0x14 +#define PNV_XIVE2_QUEUE_NXC_ST_RMT_CI 0x15 +#define PNV_XIVE2_CACHE_NXC 0x18 + +#define PNV_XIVE2_SYNC_IPI 0x000 +#define PNV_XIVE2_SYNC_HW 0x080 +#define PNV_XIVE2_SYNC_NxC 0x100 +#define PNV_XIVE2_SYNC_INT 0x180 +#define PNV_XIVE2_SYNC_OS_ESC 0x200 +#define PNV_XIVE2_SYNC_POOL_ESC 0x280 +#define PNV_XIVE2_SYNC_HARD_ESC 0x300 +#define PNV_XIVE2_SYNC_NXC_LD_LCL_NCO 0x800 +#define PNV_XIVE2_SYNC_NXC_LD_LCL_CO 0x880 +#define PNV_XIVE2_SYNC_NXC_ST_LCL_NCI 0x900 +#define PNV_XIVE2_SYNC_NXC_ST_LCL_CI 0x980 +#define PNV_XIVE2_SYNC_NXC_ST_RMT_NCI 0xA00 +#define PNV_XIVE2_SYNC_NXC_ST_RMT_CI 0xA80 + + +static uint64_t get_sync_addr(uint32_t src_pir, int ic_topo_id, int type) +{ + int thread_nr = src_pir & 0x7f; + uint64_t addr = XIVE_SYNC_MEM + thread_nr * 512 + ic_topo_id * 32 + type; + return addr; +} + +static uint8_t get_sync(QTestState *qts, uint32_t src_pir, int ic_topo_id, + int type) +{ + uint64_t addr = get_sync_addr(src_pir, ic_topo_id, type); + return qtest_readb(qts, addr); +} + +static void clr_sync(QTestState *qts, uint32_t src_pir, int ic_topo_id, + int type) +{ + uint64_t addr = get_sync_addr(src_pir, ic_topo_id, type); + qtest_writeb(qts, addr, 0x0); +} + +static void inject_cache_flush(QTestState *qts, int ic_topo_id, + uint64_t scom_addr) +{ + (void)ic_topo_id; + pnv_xive_xscom_write(qts, scom_addr, 0); +} + +static void inject_queue_sync(QTestState *qts, int ic_topo_id, uint64_t offset) +{ + (void)ic_topo_id; + uint64_t addr = XIVE_IC_ADDR + (VST_SYNC << XIVE_PAGE_SHIFT) + offset; + qtest_writeq(qts, addr, 0); +} + +static void inject_op(QTestState *qts, int ic_topo_id, int type) +{ + switch (type) { + case PNV_XIVE2_QUEUE_IPI: + inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_IPI); + break; + case PNV_XIVE2_QUEUE_HW: + inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_HW); + break; + case PNV_XIVE2_QUEUE_NXC: + inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_NxC); + break; + case PNV_XIVE2_QUEUE_INT: + inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_INT); + break; + case PNV_XIVE2_QUEUE_OS: + inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_OS_ESC); + break; + case PNV_XIVE2_QUEUE_POOL: + inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_POOL_ESC); + break; + case PNV_XIVE2_QUEUE_HARD: + inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_HARD_ESC); + break; + case PNV_XIVE2_CACHE_ENDC: + inject_cache_flush(qts, ic_topo_id, X_VC_ENDC_FLUSH_INJECT); + break; + case PNV_XIVE2_CACHE_ESBC: + inject_cache_flush(qts, ic_topo_id, X_VC_ESBC_FLUSH_INJECT); + break; + case PNV_XIVE2_CACHE_EASC: + inject_cache_flush(qts, ic_topo_id, X_VC_EASC_FLUSH_INJECT); + break; + case PNV_XIVE2_QUEUE_NXC_LD_LCL_NCO: + inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_NXC_LD_LCL_NCO); + break; + case 
PNV_XIVE2_QUEUE_NXC_LD_LCL_CO: + inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_NXC_LD_LCL_CO); + break; + case PNV_XIVE2_QUEUE_NXC_ST_LCL_NCI: + inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_NXC_ST_LCL_NCI); + break; + case PNV_XIVE2_QUEUE_NXC_ST_LCL_CI: + inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_NXC_ST_LCL_CI); + break; + case PNV_XIVE2_QUEUE_NXC_ST_RMT_NCI: + inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_NXC_ST_RMT_NCI); + break; + case PNV_XIVE2_QUEUE_NXC_ST_RMT_CI: + inject_queue_sync(qts, ic_topo_id, PNV_XIVE2_SYNC_NXC_ST_RMT_CI); + break; + case PNV_XIVE2_CACHE_NXC: + inject_cache_flush(qts, ic_topo_id, X_PC_NXC_FLUSH_INJECT); + break; + default: + g_assert_not_reached(); + break; + } +} + +const uint8_t xive_inject_tests[] = { + PNV_XIVE2_QUEUE_IPI, + PNV_XIVE2_QUEUE_HW, + PNV_XIVE2_QUEUE_NXC, + PNV_XIVE2_QUEUE_INT, + PNV_XIVE2_QUEUE_OS, + PNV_XIVE2_QUEUE_POOL, + PNV_XIVE2_QUEUE_HARD, + PNV_XIVE2_CACHE_ENDC, + PNV_XIVE2_CACHE_ESBC, + PNV_XIVE2_CACHE_EASC, + PNV_XIVE2_QUEUE_NXC_LD_LCL_NCO, + PNV_XIVE2_QUEUE_NXC_LD_LCL_CO, + PNV_XIVE2_QUEUE_NXC_ST_LCL_NCI, + PNV_XIVE2_QUEUE_NXC_ST_LCL_CI, + PNV_XIVE2_QUEUE_NXC_ST_RMT_NCI, + PNV_XIVE2_QUEUE_NXC_ST_RMT_CI, + PNV_XIVE2_CACHE_NXC, +}; + +void test_flush_sync_inject(QTestState *qts) +{ + int ic_topo_id = 0; + + /* + * Writes performed by qtest are not done in the context of a thread. + * This means that QEMU XIVE code doesn't have a way to determine what + * thread is originating the write. In order to allow for some testing, + * QEMU XIVE code will assume a PIR of 0 when unable to determine the + * source thread for cache flush and queue sync inject operations. + * See hw/intc/pnv_xive2.c: pnv_xive2_inject_notify() for details. + */ + int src_pir = 0; + int test_nr; + uint8_t byte; + + printf("# ============================================================\n"); + printf("# Starting cache flush/queue sync injection tests...\n"); + + for (test_nr = 0; test_nr < sizeof(xive_inject_tests); + test_nr++) { + int op_type = xive_inject_tests[test_nr]; + + printf("# Running test %d\n", test_nr); + + /* start with status byte set to 0 */ + clr_sync(qts, src_pir, ic_topo_id, op_type); + byte = get_sync(qts, src_pir, ic_topo_id, op_type); + g_assert_cmphex(byte, ==, 0); + + /* request cache flush or queue sync operation */ + inject_op(qts, ic_topo_id, op_type); + + /* verify that status byte was written to 0xff */ + byte = get_sync(qts, src_pir, ic_topo_id, op_type); + g_assert_cmphex(byte, ==, 0xff); + + clr_sync(qts, src_pir, ic_topo_id, op_type); + } +} + diff --git a/tests/qtest/pnv-xive2-test.c b/tests/qtest/pnv-xive2-test.c new file mode 100644 index 0000000000..dd19e88861 --- /dev/null +++ b/tests/qtest/pnv-xive2-test.c @@ -0,0 +1,344 @@ +/* + * QTest testcase for PowerNV 10 interrupt controller (xive2) + * - Test irq to hardware thread + * - Test 'Pull Thread Context to Odd Thread Reporting Line' + * + * Copyright (c) 2024, IBM Corporation. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include "qemu/osdep.h" +#include "libqtest.h" + +#include "pnv-xive2-common.h" +#include "hw/intc/pnv_xive2_regs.h" +#include "hw/ppc/xive_regs.h" +#include "hw/ppc/xive2_regs.h" + +#define SMT 4 /* some tests will break if less than 4 */ + + +static void set_table(QTestState *qts, uint64_t type, uint64_t addr) +{ + uint64_t vsd, size, log_size; + + /* + * First, let's make sure that all the resources used fit in the + * given table. 
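The sizes computed by the switch below are small compared with the 64 KiB XIVE_VST_SIZE slots: 128/4 = 32 bytes of PQ bits for the ESBs, 128 x 8 = 1 KiB of EAS entries, 256 x 32 = 8 KiB each for the END and NVP/NVG/NVC tables, and a full 64 KiB for the sync page, so the g_assert_cmpuint() that follows always passes; log_size then evaluates to ctzl(0x10000) - 12 = 4, i.e. the VSD advertises a 2^(12+4) = 64 KiB table. The same arithmetic as compile-time checks (my figures, not values stated by the patch):

    QEMU_BUILD_BUG_ON(MAX_IRQS / 4  != 32);       /* 2 PQ bits per interrupt */
    QEMU_BUILD_BUG_ON(MAX_IRQS * 8  != 1024);     /* 8-byte EAS entries      */
    QEMU_BUILD_BUG_ON(MAX_ENDS * 32 != 8 * 1024); /* 32-byte END entries     */
    QEMU_BUILD_BUG_ON(MAX_VPS * 32  != 8 * 1024); /* 32-byte NVP/NVG/NVC     */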
+ */ + switch (type) { + case VST_ESB: + size = MAX_IRQS / 4; + break; + case VST_EAS: + size = MAX_IRQS * 8; + break; + case VST_END: + size = MAX_ENDS * 32; + break; + case VST_NVP: + case VST_NVG: + case VST_NVC: + size = MAX_VPS * 32; + break; + case VST_SYNC: + size = 64 * 1024; + break; + default: + g_assert_not_reached(); + } + + g_assert_cmpuint(size, <=, XIVE_VST_SIZE); + log_size = ctzl(XIVE_VST_SIZE) - 12; + + vsd = ((uint64_t) VSD_MODE_EXCLUSIVE) << 62 | addr | log_size; + pnv_xive_xscom_write(qts, X_VC_VSD_TABLE_ADDR, type << 48); + pnv_xive_xscom_write(qts, X_VC_VSD_TABLE_DATA, vsd); + + if (type != VST_EAS && type != VST_IC && type != VST_ERQ) { + pnv_xive_xscom_write(qts, X_PC_VSD_TABLE_ADDR, type << 48); + pnv_xive_xscom_write(qts, X_PC_VSD_TABLE_DATA, vsd); + } +} + +static void set_tima8(QTestState *qts, uint32_t pir, uint32_t offset, + uint8_t b) +{ + uint64_t ic_addr; + + ic_addr = XIVE_IC_TM_INDIRECT + (pir << XIVE_PAGE_SHIFT); + qtest_writeb(qts, ic_addr + offset, b); +} + +static void set_tima32(QTestState *qts, uint32_t pir, uint32_t offset, + uint32_t l) +{ + uint64_t ic_addr; + + ic_addr = XIVE_IC_TM_INDIRECT + (pir << XIVE_PAGE_SHIFT); + qtest_writel(qts, ic_addr + offset, l); +} + +static uint8_t get_tima8(QTestState *qts, uint32_t pir, uint32_t offset) +{ + uint64_t ic_addr; + + ic_addr = XIVE_IC_TM_INDIRECT + (pir << XIVE_PAGE_SHIFT); + return qtest_readb(qts, ic_addr + offset); +} + +static uint16_t get_tima16(QTestState *qts, uint32_t pir, uint32_t offset) +{ + uint64_t ic_addr; + + ic_addr = XIVE_IC_TM_INDIRECT + (pir << XIVE_PAGE_SHIFT); + return qtest_readw(qts, ic_addr + offset); +} + +static uint32_t get_tima32(QTestState *qts, uint32_t pir, uint32_t offset) +{ + uint64_t ic_addr; + + ic_addr = XIVE_IC_TM_INDIRECT + (pir << XIVE_PAGE_SHIFT); + return qtest_readl(qts, ic_addr + offset); +} + +static void reset_pool_threads(QTestState *qts) +{ + uint8_t first_group = 0; + int i; + + for (i = 0; i < SMT; i++) { + uint32_t nvp_idx = 0x100 + i; + set_nvp(qts, nvp_idx, first_group); + set_tima32(qts, i, TM_QW2_HV_POOL + TM_WORD0, 0x000000ff); + set_tima32(qts, i, TM_QW2_HV_POOL + TM_WORD1, 0); + set_tima32(qts, i, TM_QW2_HV_POOL + TM_WORD2, TM_QW2W2_VP | nvp_idx); + } +} + +static void reset_hw_threads(QTestState *qts) +{ + uint8_t first_group = 0; + uint32_t w1 = 0x000000ff; + int i; + + if (SMT >= 4) { + /* define 2 groups of 2, part of a bigger group of size 4 */ + set_nvg(qts, 0x80, 0x02); + set_nvg(qts, 0x82, 0x02); + set_nvg(qts, 0x81, 0); + first_group = 0x01; + w1 = 0x000300ff; + } + + for (i = 0; i < SMT; i++) { + set_nvp(qts, 0x80 + i, first_group); + set_tima32(qts, i, TM_QW3_HV_PHYS + TM_WORD0, 0x00ff00ff); + set_tima32(qts, i, TM_QW3_HV_PHYS + TM_WORD1, w1); + set_tima32(qts, i, TM_QW3_HV_PHYS + TM_WORD2, 0x80000000); + } +} + +static void reset_state(QTestState *qts) +{ + size_t mem_used = XIVE_MEM_END - XIVE_MEM_START; + + qtest_memset(qts, XIVE_MEM_START, 0, mem_used); + reset_hw_threads(qts); + reset_pool_threads(qts); +} + +static void init_xive(QTestState *qts) +{ + uint64_t val1, val2, range; + + /* + * We can take a few shortcuts here, as we know the default values + * used for xive initialization + */ + + /* + * Set the BARs. + * We reuse the same values used by firmware to ease debug. + */ + pnv_xive_xscom_write(qts, X_CQ_IC_BAR, XIVE_IC_BAR); + pnv_xive_xscom_write(qts, X_CQ_TM_BAR, XIVE_TM_BAR); + + /* ESB and NVPG use 2 pages per resource. 
The others only one page */ + range = (MAX_IRQS << 17) >> 25; + val1 = XIVE_ESB_BAR | range; + pnv_xive_xscom_write(qts, X_CQ_ESB_BAR, val1); + + range = (MAX_ENDS << 16) >> 25; + val1 = XIVE_END_BAR | range; + pnv_xive_xscom_write(qts, X_CQ_END_BAR, val1); + + range = (MAX_VPS << 17) >> 25; + val1 = XIVE_NVPG_BAR | range; + pnv_xive_xscom_write(qts, X_CQ_NVPG_BAR, val1); + + range = (MAX_VPS << 16) >> 25; + val1 = XIVE_NVC_BAR | range; + pnv_xive_xscom_write(qts, X_CQ_NVC_BAR, val1); + + /* + * Enable hw threads. + * We check the value written. Useless with current + * implementation, but it validates the xscom read path and it's + * what the hardware procedure says + */ + val1 = 0xF000000000000000ull; /* core 0, 4 threads */ + pnv_xive_xscom_write(qts, X_TCTXT_EN0, val1); + val2 = pnv_xive_xscom_read(qts, X_TCTXT_EN0); + g_assert_cmphex(val1, ==, val2); + + /* Memory tables */ + set_table(qts, VST_ESB, XIVE_ESB_MEM); + set_table(qts, VST_EAS, XIVE_EAS_MEM); + set_table(qts, VST_END, XIVE_END_MEM); + set_table(qts, VST_NVP, XIVE_NVP_MEM); + set_table(qts, VST_NVG, XIVE_NVG_MEM); + set_table(qts, VST_NVC, XIVE_NVC_MEM); + set_table(qts, VST_SYNC, XIVE_SYNC_MEM); + + reset_hw_threads(qts); + reset_pool_threads(qts); +} + +static void test_hw_irq(QTestState *qts) +{ + uint32_t irq = 2; + uint32_t irq_data = 0x600df00d; + uint32_t end_index = 5; + uint32_t target_pir = 1; + uint32_t target_nvp = 0x80 + target_pir; + uint8_t priority = 5; + uint32_t reg32; + uint16_t reg16; + uint8_t pq, nsr, cppr; + + printf("# ============================================================\n"); + printf("# Testing irq %d to hardware thread %d\n", irq, target_pir); + + /* irq config */ + set_eas(qts, irq, end_index, irq_data); + set_end(qts, end_index, target_nvp, priority, false /* group */); + + /* enable and trigger irq */ + get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_SET_PQ_00); + set_esb(qts, irq, XIVE_TRIGGER_PAGE, 0, 0); + + /* check irq is raised on cpu */ + pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET); + g_assert_cmpuint(pq, ==, XIVE_ESB_PENDING); + + reg32 = get_tima32(qts, target_pir, TM_QW3_HV_PHYS + TM_WORD0); + nsr = reg32 >> 24; + cppr = (reg32 >> 16) & 0xFF; + g_assert_cmphex(nsr, ==, 0x80); + g_assert_cmphex(cppr, ==, 0xFF); + + /* ack the irq */ + reg16 = get_tima16(qts, target_pir, TM_SPC_ACK_HV_REG); + nsr = reg16 >> 8; + cppr = reg16 & 0xFF; + g_assert_cmphex(nsr, ==, 0x80); + g_assert_cmphex(cppr, ==, priority); + + /* check irq data is what was configured */ + reg32 = qtest_readl(qts, xive_get_queue_addr(end_index)); + g_assert_cmphex((reg32 & 0x7fffffff), ==, (irq_data & 0x7fffffff)); + + /* End Of Interrupt */ + set_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_STORE_EOI, 0); + pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET); + g_assert_cmpuint(pq, ==, XIVE_ESB_RESET); + + /* reset CPPR */ + set_tima8(qts, target_pir, TM_QW3_HV_PHYS + TM_CPPR, 0xFF); + reg32 = get_tima32(qts, target_pir, TM_QW3_HV_PHYS + TM_WORD0); + nsr = reg32 >> 24; + cppr = (reg32 >> 16) & 0xFF; + g_assert_cmphex(nsr, ==, 0x00); + g_assert_cmphex(cppr, ==, 0xFF); +} + +#define XIVE_ODD_CL 0x80 +static void test_pull_thread_ctx_to_odd_thread_cl(QTestState *qts) +{ + uint32_t target_pir = 1; + uint32_t target_nvp = 0x80 + target_pir; + Xive2Nvp nvp; + uint8_t cl_pair[XIVE_REPORT_SIZE]; + uint32_t qw1w0, qw3w0, qw1w2, qw2w2; + uint8_t qw3b8; + uint32_t cl_word; + uint32_t word2; + + printf("# ============================================================\n"); + printf("# Testing 'Pull Thread Context to Odd Thread 
Reporting Line'\n"); + + /* clear odd cache line prior to pull operation */ + memset(cl_pair, 0, sizeof(cl_pair)); + get_nvp(qts, target_nvp, &nvp); + set_cl_pair(qts, &nvp, cl_pair); + + /* Read some values from TIMA that we expect to see in cacheline */ + qw1w0 = get_tima32(qts, target_pir, TM_QW1_OS + TM_WORD0); + qw3w0 = get_tima32(qts, target_pir, TM_QW3_HV_PHYS + TM_WORD0); + qw1w2 = get_tima32(qts, target_pir, TM_QW1_OS + TM_WORD2); + qw2w2 = get_tima32(qts, target_pir, TM_QW2_HV_POOL + TM_WORD2); + qw3b8 = get_tima8(qts, target_pir, TM_QW3_HV_PHYS + TM_WORD2); + + /* Execute the pull operation */ + set_tima8(qts, target_pir, TM_SPC_PULL_PHYS_CTX_OL, 0); + + /* Verify odd cache line values match TIMA after pull operation */ + get_cl_pair(qts, &nvp, cl_pair); + memcpy(&cl_word, &cl_pair[XIVE_ODD_CL + TM_QW1_OS + TM_WORD0], 4); + g_assert_cmphex(qw1w0, ==, be32_to_cpu(cl_word)); + memcpy(&cl_word, &cl_pair[XIVE_ODD_CL + TM_QW3_HV_PHYS + TM_WORD0], 4); + g_assert_cmphex(qw3w0, ==, be32_to_cpu(cl_word)); + memcpy(&cl_word, &cl_pair[XIVE_ODD_CL + TM_QW1_OS + TM_WORD2], 4); + g_assert_cmphex(qw1w2, ==, be32_to_cpu(cl_word)); + memcpy(&cl_word, &cl_pair[XIVE_ODD_CL + TM_QW2_HV_POOL + TM_WORD2], 4); + g_assert_cmphex(qw2w2, ==, be32_to_cpu(cl_word)); + g_assert_cmphex(qw3b8, ==, + cl_pair[XIVE_ODD_CL + TM_QW3_HV_PHYS + TM_WORD2]); + + /* Verify that all TIMA valid bits for target thread are cleared */ + word2 = get_tima32(qts, target_pir, TM_QW1_OS + TM_WORD2); + g_assert_cmphex(xive_get_field32(TM_QW1W2_VO, word2), ==, 0); + word2 = get_tima32(qts, target_pir, TM_QW2_HV_POOL + TM_WORD2); + g_assert_cmphex(xive_get_field32(TM_QW2W2_VP, word2), ==, 0); + word2 = get_tima32(qts, target_pir, TM_QW3_HV_PHYS + TM_WORD2); + g_assert_cmphex(xive_get_field32(TM_QW3W2_VT, word2), ==, 0); +} +static void test_xive(void) +{ + QTestState *qts; + + qts = qtest_initf("-M powernv10 -smp %d,cores=1,threads=%d -nographic " + "-nodefaults -serial mon:stdio -S " + "-d guest_errors -trace '*xive*'", + SMT, SMT); + init_xive(qts); + + test_hw_irq(qts); + + /* omit reset_state here and use settings from test_hw_irq */ + test_pull_thread_ctx_to_odd_thread_cl(qts); + + reset_state(qts); + test_flush_sync_inject(qts); + + qtest_quit(qts); +} + +int main(int argc, char **argv) +{ + g_test_init(&argc, &argv, NULL); + qtest_add_func("xive2", test_xive); + return g_test_run(); +} diff --git a/tests/tcg/Makefile.target b/tests/tcg/Makefile.target index 9722145b97..95ff76ea44 100644 --- a/tests/tcg/Makefile.target +++ b/tests/tcg/Makefile.target @@ -179,10 +179,10 @@ run-plugin-%-with-libmem.so: PLUGIN_ARGS=$(COMMA)inline=true ifeq ($(filter %-softmmu, $(TARGET)),) run-%: % - $(call run-test, $<, $(QEMU) $(QEMU_OPTS) $<) + $(call run-test, $<, env QEMU=$(QEMU) $(QEMU) $(QEMU_OPTS) $<) run-plugin-%: - $(call run-test, $@, $(QEMU) $(QEMU_OPTS) \ + $(call run-test, $@, env QEMU=$(QEMU) $(QEMU) $(QEMU_OPTS) \ -plugin $(PLUGIN_LIB)/$(call extract-plugin,$@)$(PLUGIN_ARGS) \ -d plugin -D $*.pout \ $(call strip-plugin,$<)) diff --git a/tests/tcg/multiarch/linux/linux-sigrtminmax.c b/tests/tcg/multiarch/linux/linux-sigrtminmax.c new file mode 100644 index 0000000000..a7059aacd9 --- /dev/null +++ b/tests/tcg/multiarch/linux/linux-sigrtminmax.c @@ -0,0 +1,74 @@ +/* + * Test the lowest and the highest real-time signals. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include <assert.h> +#include <signal.h> +#include <stdbool.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> + +/* For hexagon and microblaze. */ +#ifndef __SIGRTMIN +#define __SIGRTMIN 32 +#endif + +extern char **environ; + +static bool seen_sigrtmin, seen_sigrtmax; + +static void handle_signal(int sig) +{ + if (sig == SIGRTMIN) { + seen_sigrtmin = true; + } else if (sig == SIGRTMAX) { + seen_sigrtmax = true; + } else { + _exit(1); + } +} + +int main(int argc, char **argv) +{ + char *qemu = getenv("QEMU"); + struct sigaction act; + + assert(qemu); + + if (!getenv("QEMU_RTSIG_MAP")) { + char **new_argv = malloc((argc + 2) * sizeof(char *)); + int tsig1, hsig1, count1, tsig2, hsig2, count2; + char rt_sigmap[64]; + + /* Re-exec with a mapping that includes SIGRTMIN and SIGRTMAX. */ + new_argv[0] = qemu; + memcpy(&new_argv[1], argv, (argc + 1) * sizeof(char *)); + tsig1 = __SIGRTMIN; + /* The host must have a few signals starting from this one. */ + hsig1 = 36; + count1 = SIGRTMIN - __SIGRTMIN + 1; + tsig2 = SIGRTMAX; + hsig2 = hsig1 + count1; + count2 = 1; + snprintf(rt_sigmap, sizeof(rt_sigmap), "%d %d %d,%d %d %d", + tsig1, hsig1, count1, tsig2, hsig2, count2); + setenv("QEMU_RTSIG_MAP", rt_sigmap, 0); + assert(execve(new_argv[0], new_argv, environ) == 0); + return EXIT_FAILURE; + } + + memset(&act, 0, sizeof(act)); + act.sa_handler = handle_signal; + assert(sigaction(SIGRTMIN, &act, NULL) == 0); + assert(sigaction(SIGRTMAX, &act, NULL) == 0); + + assert(kill(getpid(), SIGRTMIN) == 0); + assert(seen_sigrtmin); + assert(kill(getpid(), SIGRTMAX) == 0); + assert(seen_sigrtmax); + + return EXIT_SUCCESS; +} diff --git a/tests/tcg/ppc64/Makefile.target b/tests/tcg/ppc64/Makefile.target index 1940886c73..0d058b2600 100644 --- a/tests/tcg/ppc64/Makefile.target +++ b/tests/tcg/ppc64/Makefile.target @@ -6,7 +6,7 @@ VPATH += $(SRC_PATH)/tests/tcg/ppc64 config-cc.mak: Makefile $(quiet-@)( \ - $(call cc-option,-mpower8-vector, CROSS_CC_HAS_POWER8_VECTOR); \ + $(call cc-option,-mcpu=power8, CROSS_CC_HAS_CPU_POWER8); \ $(call cc-option,-mpower10, CROSS_CC_HAS_POWER10)) 3> config-cc.mak -include config-cc.mak @@ -23,15 +23,15 @@ run-threadcount: threadcount run-plugin-threadcount-with-%: $(call skip-test, $<, "BROKEN (flaky with clang) ") -ifneq ($(CROSS_CC_HAS_POWER8_VECTOR),) +ifneq ($(CROSS_CC_HAS_CPU_POWER8),) PPC64_TESTS=bcdsub non_signalling_xscv endif -$(PPC64_TESTS): CFLAGS += -mpower8-vector +$(PPC64_TESTS): CFLAGS += -mcpu=power8 -ifneq ($(CROSS_CC_HAS_POWER8_VECTOR),) +ifneq ($(CROSS_CC_HAS_CPU_POWER8),) PPC64_TESTS += vsx_f2i_nan endif -vsx_f2i_nan: CFLAGS += -mpower8-vector -I$(SRC_PATH)/include +vsx_f2i_nan: CFLAGS += -mcpu=power8 -I$(SRC_PATH)/include PPC64_TESTS += mtfsf PPC64_TESTS += mffsce diff --git a/tests/unit/test-crypto-hash.c b/tests/unit/test-crypto-hash.c index 76c4699c15..8fee1593f9 100644 --- a/tests/unit/test-crypto-hash.c +++ b/tests/unit/test-crypto-hash.c @@ -43,6 +43,9 @@ "63b54e4cb2d2032b393994aa263c0dbb" \ "e00a9f2fe9ef6037352232a1eec55ee7" #define OUTPUT_RIPEMD160 "f3d658fad3fdfb2b52c9369cf0d441249ddfa8a0" +#ifdef CONFIG_CRYPTO_SM3 +#define OUTPUT_SM3 "d4a97db105b477b84c4f20ec9c31a6c814e2705a0b83a5a89748d75f0ef456a1" +#endif #define OUTPUT_MD5_B64 "Yo0gY3FWMDWrjvYvSSveyQ==" #define OUTPUT_SHA1_B64 "sudPJnWKOkIeUJzuBFJEt4dTzAI=" @@ -55,6 +58,10 @@ "7sVe5w==" #define OUTPUT_RIPEMD160_B64 "89ZY+tP9+ytSyTac8NRBJJ3fqKA=" +#ifdef CONFIG_CRYPTO_SM3 +#define OUTPUT_SM3_B64
"1Kl9sQW0d7hMTyDsnDGmyBTicFoLg6Wol0jXXw70VqE=" +#endif + static const char *expected_outputs[] = { [QCRYPTO_HASH_ALGO_MD5] = OUTPUT_MD5, [QCRYPTO_HASH_ALGO_SHA1] = OUTPUT_SHA1, @@ -63,6 +70,9 @@ static const char *expected_outputs[] = { [QCRYPTO_HASH_ALGO_SHA384] = OUTPUT_SHA384, [QCRYPTO_HASH_ALGO_SHA512] = OUTPUT_SHA512, [QCRYPTO_HASH_ALGO_RIPEMD160] = OUTPUT_RIPEMD160, +#ifdef CONFIG_CRYPTO_SM3 + [QCRYPTO_HASH_ALGO_SM3] = OUTPUT_SM3, +#endif }; static const char *expected_outputs_b64[] = { [QCRYPTO_HASH_ALGO_MD5] = OUTPUT_MD5_B64, @@ -72,6 +82,9 @@ static const char *expected_outputs_b64[] = { [QCRYPTO_HASH_ALGO_SHA384] = OUTPUT_SHA384_B64, [QCRYPTO_HASH_ALGO_SHA512] = OUTPUT_SHA512_B64, [QCRYPTO_HASH_ALGO_RIPEMD160] = OUTPUT_RIPEMD160_B64, +#ifdef CONFIG_CRYPTO_SM3 + [QCRYPTO_HASH_ALGO_SM3] = OUTPUT_SM3_B64, +#endif }; static const int expected_lens[] = { [QCRYPTO_HASH_ALGO_MD5] = 16, @@ -81,6 +94,9 @@ static const int expected_lens[] = { [QCRYPTO_HASH_ALGO_SHA384] = 48, [QCRYPTO_HASH_ALGO_SHA512] = 64, [QCRYPTO_HASH_ALGO_RIPEMD160] = 20, +#ifdef CONFIG_CRYPTO_SM3 + [QCRYPTO_HASH_ALGO_SM3] = 32, +#endif }; static const char hex[] = "0123456789abcdef"; diff --git a/tests/unit/test-crypto-hmac.c b/tests/unit/test-crypto-hmac.c index cdb8774443..20c60eb9d8 100644 --- a/tests/unit/test-crypto-hmac.c +++ b/tests/unit/test-crypto-hmac.c @@ -76,6 +76,14 @@ static QCryptoHmacTestData test_data[] = { "94964ed4c1155b62b668c241d67279e5" "8a711676", }, +#ifdef CONFIG_CRYPTO_SM3 + { + .alg = QCRYPTO_HASH_ALGO_SM3, + .hex_digest = + "760e3799332bc913819b930085360ddb" + "c05529261313d5b15b75bab4fd7ae91e", + }, +#endif }; static const char hex[] = "0123456789abcdef"; diff --git a/tests/unit/test-crypto-pbkdf.c b/tests/unit/test-crypto-pbkdf.c index 12ee808fbc..ddb7244e21 100644 --- a/tests/unit/test-crypto-pbkdf.c +++ b/tests/unit/test-crypto-pbkdf.c @@ -325,6 +325,22 @@ static QCryptoPbkdfTestData test_data[] = { "\xce\xbf\x91\x14\x8b\x5c\x48\x41", .nout = 32 }, +#ifdef CONFIG_CRYPTO_SM3 + { + .path = "/crypto/pbkdf/nonrfc/sm3/iter2", + .hash = QCRYPTO_HASH_ALGO_SM3, + .iterations = 2, + .key = "password", + .nkey = 8, + .salt = "ATHENA.MIT.EDUraeburn", + .nsalt = 21, + .out = "\x48\x71\x1b\x58\xa3\xcb\xce\x06" + "\xba\xad\x77\xa8\xb5\xb9\xd8\x07" + "\x6a\xe2\xb3\x5b\x95\xce\xc8\xce" + "\xe7\xb1\xcb\xee\x61\xdf\x04\xea", + .nout = 32 + }, +#endif #if 0 { .path = "/crypto/pbkdf/nonrfc/whirlpool/iter1200", |