148 files changed, 9181 insertions, 563 deletions
diff --git a/MAINTAINERS b/MAINTAINERS index 6a3df66778..a428cb2e23 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -187,6 +187,14 @@ F: disas/moxie.c F: hw/moxie/ F: default-configs/moxie-softmmu.mak +NiosII +M: Chris Wulff <crwulff@gmail.com> +M: Marek Vasut <marex@denx.de> +S: Maintained +F: target/nios2/ +F: hw/nios2/ +F: disas/nios2.c + OpenRISC M: Jia Liu <proljc@gmail.com> S: Maintained @@ -1420,6 +1428,7 @@ F: scripts/checkpatch.pl Migration M: Juan Quintela <quintela@redhat.com> M: Amit Shah <amit.shah@redhat.com> +M: Dr. David Alan Gilbert <dgilbert@redhat.com> S: Maintained F: include/migration/ F: migration/ diff --git a/README b/README index bd8060a3ee..cb60d05bee 100644 --- a/README +++ b/README @@ -45,6 +45,7 @@ of other UNIX targets. The simple steps to build QEMU are: Additional information can also be found online via the QEMU website: http://qemu-project.org/Hosts/Linux + http://qemu-project.org/Hosts/Mac http://qemu-project.org/Hosts/W32 diff --git a/arch_init.c b/arch_init.c index c316ae1023..0810116144 100644 --- a/arch_init.c +++ b/arch_init.c @@ -63,6 +63,8 @@ int graphic_depth = 32; #define QEMU_ARCH QEMU_ARCH_MIPS #elif defined(TARGET_MOXIE) #define QEMU_ARCH QEMU_ARCH_MOXIE +#elif defined(TARGET_NIOS2) +#define QEMU_ARCH QEMU_ARCH_NIOS2 #elif defined(TARGET_OPENRISC) #define QEMU_ARCH QEMU_ARCH_OPENRISC #elif defined(TARGET_PPC) diff --git a/block.c b/block.c index 39ddea3411..a0346c80c6 100644 --- a/block.c +++ b/block.c @@ -1851,7 +1851,7 @@ static BlockDriverState *bdrv_open_inherit(const char *filename, bdrv_refresh_filename(bs); /* Check if any unknown options were used */ - if (options && (qdict_size(options) != 0)) { + if (qdict_size(options) != 0) { const QDictEntry *entry = qdict_first(options); if (flags & BDRV_O_PROTOCOL) { error_setg(errp, "Block protocol '%s' doesn't support the option " diff --git a/block/qcow.c b/block/qcow.c index 7540f43f46..fb738fc507 100644 --- a/block/qcow.c +++ b/block/qcow.c @@ -104,6 +104,7 @@ static int qcow_open(BlockDriverState *bs, QDict *options, int flags, unsigned int len, i, shift; int ret; QCowHeader header; + Error *local_err = NULL; ret = bdrv_pread(bs->file, 0, &header, sizeof(header)); if (ret < 0) { @@ -252,7 +253,12 @@ static int qcow_open(BlockDriverState *bs, QDict *options, int flags, error_setg(&s->migration_blocker, "The qcow format used by node '%s' " "does not support live migration", bdrv_get_device_or_node_name(bs)); - migrate_add_blocker(s->migration_blocker); + ret = migrate_add_blocker(s->migration_blocker, &local_err); + if (local_err) { + error_propagate(errp, local_err); + error_free(s->migration_blocker); + goto fail; + } qemu_co_mutex_init(&s->lock); return 0; diff --git a/block/vdi.c b/block/vdi.c index 96b78d5a43..0aeb940aa8 100644 --- a/block/vdi.c +++ b/block/vdi.c @@ -361,6 +361,7 @@ static int vdi_open(BlockDriverState *bs, QDict *options, int flags, VdiHeader header; size_t bmap_size; int ret; + Error *local_err = NULL; logout("\n"); @@ -471,7 +472,12 @@ static int vdi_open(BlockDriverState *bs, QDict *options, int flags, error_setg(&s->migration_blocker, "The vdi format used by node '%s' " "does not support live migration", bdrv_get_device_or_node_name(bs)); - migrate_add_blocker(s->migration_blocker); + ret = migrate_add_blocker(s->migration_blocker, &local_err); + if (local_err) { + error_propagate(errp, local_err); + error_free(s->migration_blocker); + goto fail_free_bmap; + } qemu_co_mutex_init(&s->write_lock); diff --git a/block/vhdx.c b/block/vhdx.c index 0ba2f0a2f9..68db9e074e 
100644 --- a/block/vhdx.c +++ b/block/vhdx.c @@ -991,6 +991,17 @@ static int vhdx_open(BlockDriverState *bs, QDict *options, int flags, } } + /* Disable migration when VHDX images are used */ + error_setg(&s->migration_blocker, "The vhdx format used by node '%s' " + "does not support live migration", + bdrv_get_device_or_node_name(bs)); + ret = migrate_add_blocker(s->migration_blocker, &local_err); + if (local_err) { + error_propagate(errp, local_err); + error_free(s->migration_blocker); + goto fail; + } + if (flags & BDRV_O_RDWR) { ret = vhdx_update_headers(bs, s, false, NULL); if (ret < 0) { @@ -1000,12 +1011,6 @@ static int vhdx_open(BlockDriverState *bs, QDict *options, int flags, /* TODO: differencing files */ - /* Disable migration when VHDX images are used */ - error_setg(&s->migration_blocker, "The vhdx format used by node '%s' " - "does not support live migration", - bdrv_get_device_or_node_name(bs)); - migrate_add_blocker(s->migration_blocker); - return 0; fail: vhdx_close(bs); diff --git a/block/vmdk.c b/block/vmdk.c index a11c27a1c4..7750212969 100644 --- a/block/vmdk.c +++ b/block/vmdk.c @@ -941,6 +941,7 @@ static int vmdk_open(BlockDriverState *bs, QDict *options, int flags, int ret; BDRVVmdkState *s = bs->opaque; uint32_t magic; + Error *local_err = NULL; buf = vmdk_read_desc(bs->file, 0, errp); if (!buf) { @@ -976,7 +977,13 @@ static int vmdk_open(BlockDriverState *bs, QDict *options, int flags, error_setg(&s->migration_blocker, "The vmdk format used by node '%s' " "does not support live migration", bdrv_get_device_or_node_name(bs)); - migrate_add_blocker(s->migration_blocker); + ret = migrate_add_blocker(s->migration_blocker, &local_err); + if (local_err) { + error_propagate(errp, local_err); + error_free(s->migration_blocker); + goto fail; + } + g_free(buf); return 0; diff --git a/block/vpc.c b/block/vpc.c index 8d5886f003..ed6353dbd4 100644 --- a/block/vpc.c +++ b/block/vpc.c @@ -422,13 +422,18 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags, #endif } - qemu_co_mutex_init(&s->lock); - /* Disable migration when VHD images are used */ error_setg(&s->migration_blocker, "The vpc format used by node '%s' " "does not support live migration", bdrv_get_device_or_node_name(bs)); - migrate_add_blocker(s->migration_blocker); + ret = migrate_add_blocker(s->migration_blocker, &local_err); + if (local_err) { + error_propagate(errp, local_err); + error_free(s->migration_blocker); + goto fail; + } + + qemu_co_mutex_init(&s->lock); return 0; diff --git a/block/vvfat.c b/block/vvfat.c index ded21092ee..c6bf67e8f3 100644 --- a/block/vvfat.c +++ b/block/vvfat.c @@ -1185,22 +1185,26 @@ static int vvfat_open(BlockDriverState *bs, QDict *options, int flags, s->sector_count = s->faked_sectors + s->sectors_per_cluster*s->cluster_count; - if (s->first_sectors_number == 0x40) { - init_mbr(s, cyls, heads, secs); - } - - // assert(is_consistent(s)); - qemu_co_mutex_init(&s->lock); - /* Disable migration when vvfat is used rw */ if (s->qcow) { error_setg(&s->migration_blocker, "The vvfat (rw) format used by node '%s' " "does not support live migration", bdrv_get_device_or_node_name(bs)); - migrate_add_blocker(s->migration_blocker); + ret = migrate_add_blocker(s->migration_blocker, &local_err); + if (local_err) { + error_propagate(errp, local_err); + error_free(s->migration_blocker); + goto fail; + } } + if (s->first_sectors_number == 0x40) { + init_mbr(s, cyls, heads, secs); + } + + qemu_co_mutex_init(&s->lock); + ret = 0; fail: qemu_opts_del(opts); diff --git a/configure 
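The qcow, vdi, vhdx, vmdk, vpc and vvfat hunks above all make the same change: migrate_add_blocker() now takes an Error ** and can fail, so each .bdrv_open implementation must check the result, propagate the error, free the unregistered blocker, and leave through the driver's existing failure path instead of continuing silently. Condensed into one place, the shared idiom looks like the sketch below; the helper name and the 'fmt' parameter are illustrative stand-ins, not part of the patch, since each driver open-codes this with its own format name and cleanup label.

static int block_migration_for_format(BlockDriverState *bs, Error **blocker,
                                      const char *fmt, Error **errp)
{
    Error *local_err = NULL;
    int ret;

    error_setg(blocker, "The %s format used by node '%s' "
               "does not support live migration",
               fmt, bdrv_get_device_or_node_name(bs));
    ret = migrate_add_blocker(*blocker, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);  /* tell the caller why blocking failed */
        error_free(*blocker);              /* the blocker was never registered */
        return ret;                        /* drivers 'goto fail' at this point */
    }
    return 0;
}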
b/configure index 1004ef5573..86fd833feb 100755 --- a/configure +++ b/configure @@ -5935,6 +5935,8 @@ case "$target_name" in ;; moxie) ;; + nios2) + ;; or32) TARGET_ARCH=openrisc TARGET_BASE_ARCH=openrisc @@ -6140,6 +6142,9 @@ for i in $ARCH $TARGET_BASE_ARCH ; do moxie*) disas_config "MOXIE" ;; + nios2) + disas_config "NIOS2" + ;; or32) disas_config "OPENRISC" ;; diff --git a/default-configs/nios2-linux-user.mak b/default-configs/nios2-linux-user.mak new file mode 100644 index 0000000000..5be3eb795d --- /dev/null +++ b/default-configs/nios2-linux-user.mak @@ -0,0 +1 @@ +# Default configuration for nios2-linux-user diff --git a/default-configs/nios2-softmmu.mak b/default-configs/nios2-softmmu.mak new file mode 100644 index 0000000000..74dc70caae --- /dev/null +++ b/default-configs/nios2-softmmu.mak @@ -0,0 +1,6 @@ +# Default configuration for nios2-softmmu + +CONFIG_NIOS2=y +CONFIG_SERIAL=y +CONFIG_PTIMER=y +CONFIG_ALTERA_TIMER=y diff --git a/disas/Makefile.objs b/disas/Makefile.objs index abeba84661..62632ef0dd 100644 --- a/disas/Makefile.objs +++ b/disas/Makefile.objs @@ -15,6 +15,7 @@ common-obj-$(CONFIG_IA64_DIS) += ia64.o common-obj-$(CONFIG_M68K_DIS) += m68k.o common-obj-$(CONFIG_MICROBLAZE_DIS) += microblaze.o common-obj-$(CONFIG_MIPS_DIS) += mips.o +common-obj-$(CONFIG_NIOS2_DIS) += nios2.o common-obj-$(CONFIG_MOXIE_DIS) += moxie.o common-obj-$(CONFIG_PPC_DIS) += ppc.o common-obj-$(CONFIG_S390_DIS) += s390.o diff --git a/disas/cris.c b/disas/cris.c index 08161d1f21..8a1daf936c 100644 --- a/disas/cris.c +++ b/disas/cris.c @@ -2490,7 +2490,7 @@ print_with_operands (const struct cris_opcode *opcodep, const struct cris_spec_reg *sregp = spec_reg_info ((insn >> 12) & 15, disdata->distype); - if (sregp->name == NULL) + if (sregp == NULL || sregp->name == NULL) /* Should have been caught as a non-match earlier. */ *tp++ = '?'; else diff --git a/disas/nios2.c b/disas/nios2.c new file mode 100644 index 0000000000..b342936d21 --- /dev/null +++ b/disas/nios2.c @@ -0,0 +1,3534 @@ +/* Nios II opcode library for QEMU. + Copyright (C) 2012-2016 Free Software Foundation, Inc. + Contributed by Nigel Gray (ngray@altera.com). + Contributed by Mentor Graphics, Inc. + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License + as published by the Free Software Foundation; either version 2 + of the License, or (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, + Boston, MA 02110-1301, USA. */ + +/* This file resembles a concatenation of the following files from + binutils: + + include/opcode/nios2.h + include/opcode/nios2r1.h + include/opcode/nios2r2.h + opcodes/nios2-opc.c + opcodes/nios2-dis.c + + It has been derived from the original patches which have been + relicensed by the contributors as GPL version 2 for inclusion + in QEMU. 
*/ + +#ifndef _NIOS2_H_ +#define _NIOS2_H_ + +/*#include "bfd.h"*/ +#include "qemu/osdep.h" +#include "disas/bfd.h" + + +/**************************************************************************** + * This file contains structures, bit masks and shift counts used + * by the GNU toolchain to define the Nios II instruction set and + * access various opcode fields. + ****************************************************************************/ + +/* Instruction encoding formats. */ +enum iw_format_type { + /* R1 formats. */ + iw_i_type, + iw_r_type, + iw_j_type, + iw_custom_type, + + /* 32-bit R2 formats. */ + iw_L26_type, + iw_F2I16_type, + iw_F2X4I12_type, + iw_F1X4I12_type, + iw_F1X4L17_type, + iw_F3X6L5_type, + iw_F2X6L10_type, + iw_F3X6_type, + iw_F3X8_type, + + /* 16-bit R2 formats. */ + iw_I10_type, + iw_T1I7_type, + iw_T2I4_type, + iw_T1X1I6_type, + iw_X1I7_type, + iw_L5I4X1_type, + iw_T2X1L3_type, + iw_T2X1I3_type, + iw_T3X1_type, + iw_T2X3_type, + iw_F1X1_type, + iw_X2L5_type, + iw_F1I5_type, + iw_F2_type +}; + +/* Identify different overflow situations for error messages. */ +enum overflow_type +{ + call_target_overflow = 0, + branch_target_overflow, + address_offset_overflow, + signed_immed16_overflow, + unsigned_immed16_overflow, + unsigned_immed5_overflow, + signed_immed12_overflow, + custom_opcode_overflow, + enumeration_overflow, + no_overflow +}; + +/* This structure holds information for a particular instruction. + + The args field is a string describing the operands. The following + letters can appear in the args: + c - a 5-bit control register index + d - a 5-bit destination register index + s - a 5-bit left source register index + t - a 5-bit right source register index + D - a 3-bit encoded destination register + S - a 3-bit encoded left source register + T - a 3-bit encoded right source register + i - a 16-bit signed immediate + j - a 5-bit unsigned immediate + k - a (second) 5-bit unsigned immediate + l - a 8-bit custom instruction constant + m - a 26-bit unsigned immediate + o - a 16-bit signed pc-relative offset + u - a 16-bit unsigned immediate + I - a 12-bit signed immediate + M - a 6-bit unsigned immediate + N - a 6-bit unsigned immediate with 2-bit shift + O - a 10-bit signed pc-relative offset with 1-bit shift + P - a 7-bit signed pc-relative offset with 1-bit shift + U - a 7-bit unsigned immediate with 2-bit shift + V - a 5-bit unsigned immediate with 2-bit shift + W - a 4-bit unsigned immediate with 2-bit shift + X - a 4-bit unsigned immediate with 1-bit shift + Y - a 4-bit unsigned immediate + e - an immediate coded as an enumeration for addi.n/subi.n + f - an immediate coded as an enumeration for slli.n/srli.n + g - an immediate coded as an enumeration for andi.n + h - an immediate coded as an enumeration for movi.n + R - a reglist for ldwm/stwm or push.n/pop.n + B - a base register specifier and option list for ldwm/stwm + Literal ',', '(', and ')' characters may also appear in the args as + delimiters. + + Note that the args describe the semantics and assembly-language syntax + of the operands, not their encoding into the instruction word. + + The pinfo field is INSN_MACRO for a macro. Otherwise, it is a collection + of bits describing the instruction, notably any relevant hazard + information. + + When assembling, the match field contains the opcode template, which + is modified by the arguments to produce the actual opcode + that is emitted. If pinfo is INSN_MACRO, then this is 0. + + If pinfo is INSN_MACRO, the mask field stores the macro identifier. 
+ Otherwise this is a bit mask for the relevant portions of the opcode + when disassembling. If the actual opcode anded with the match field + equals the opcode field, then we have found the correct instruction. */ + +struct nios2_opcode +{ + const char *name; /* The name of the instruction. */ + const char *args; /* A string describing the arguments for this + instruction. */ + const char *args_test; /* Like args, but with an extra argument for + the expected opcode. */ + unsigned long num_args; /* The number of arguments the instruction + takes. */ + unsigned size; /* Size in bytes of the instruction. */ + enum iw_format_type format; /* Instruction format. */ + unsigned long match; /* The basic opcode for the instruction. */ + unsigned long mask; /* Mask for the opcode field of the + instruction. */ + unsigned long pinfo; /* Is this a real instruction or instruction + macro? */ + enum overflow_type overflow_msg; /* Used to generate informative + message when fixup overflows. */ +}; + +/* This value is used in the nios2_opcode.pinfo field to indicate that the + instruction is a macro or pseudo-op. This requires special treatment by + the assembler, and is used by the disassembler to determine whether to + check for a nop. */ +#define NIOS2_INSN_MACRO 0x80000000 +#define NIOS2_INSN_MACRO_MOV 0x80000001 +#define NIOS2_INSN_MACRO_MOVI 0x80000002 +#define NIOS2_INSN_MACRO_MOVIA 0x80000004 + +#define NIOS2_INSN_RELAXABLE 0x40000000 +#define NIOS2_INSN_UBRANCH 0x00000010 +#define NIOS2_INSN_CBRANCH 0x00000020 +#define NIOS2_INSN_CALL 0x00000040 + +#define NIOS2_INSN_OPTARG 0x00000080 + +/* Register attributes. */ +#define REG_NORMAL (1<<0) /* Normal registers. */ +#define REG_CONTROL (1<<1) /* Control registers. */ +#define REG_COPROCESSOR (1<<2) /* For custom instructions. */ +#define REG_3BIT (1<<3) /* For R2 CDX instructions. */ +#define REG_LDWM (1<<4) /* For R2 ldwm/stwm. */ +#define REG_POP (1<<5) /* For R2 pop.n/push.n. */ + +struct nios2_reg +{ + const char *name; + const int index; + unsigned long regtype; +}; + +/* Pull in the instruction field accessors, opcodes, and masks. */ +/*#include "nios2r1.h"*/ + +#ifndef _NIOS2R1_H_ +#define _NIOS2R1_H_ + +/* R1 fields. 
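Each entry in the opcode table described above pairs a fixed bit pattern (match) with a mask saying which bits of the word are significant, while the args string drives operand printing. Purely as an illustration of how that pairing identifies an instruction word, a linear scan could look like the hypothetical sketch below; it is not code from the patch, and the real disassembler's lookup is not shown in this excerpt.

static const struct nios2_opcode *
nios2_match_insn(const struct nios2_opcode *table, size_t n, unsigned long insn)
{
    size_t i;

    for (i = 0; i < n; i++) {
        const struct nios2_opcode *op = &table[i];

        if (op->pinfo & NIOS2_INSN_MACRO) {
            continue;                      /* macros have no fixed encoding to match */
        }
        if ((insn & op->mask) == op->match) {
            return op;                     /* mask picks the fixed bits, match gives their value */
        }
    }
    return NULL;
}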
*/ +#define IW_R1_OP_LSB 0 +#define IW_R1_OP_SIZE 6 +#define IW_R1_OP_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_R1_OP_SIZE)) +#define IW_R1_OP_SHIFTED_MASK (IW_R1_OP_UNSHIFTED_MASK << IW_R1_OP_LSB) +#define GET_IW_R1_OP(W) (((W) >> IW_R1_OP_LSB) & IW_R1_OP_UNSHIFTED_MASK) +#define SET_IW_R1_OP(V) (((V) & IW_R1_OP_UNSHIFTED_MASK) << IW_R1_OP_LSB) + +#define IW_I_A_LSB 27 +#define IW_I_A_SIZE 5 +#define IW_I_A_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_I_A_SIZE)) +#define IW_I_A_SHIFTED_MASK (IW_I_A_UNSHIFTED_MASK << IW_I_A_LSB) +#define GET_IW_I_A(W) (((W) >> IW_I_A_LSB) & IW_I_A_UNSHIFTED_MASK) +#define SET_IW_I_A(V) (((V) & IW_I_A_UNSHIFTED_MASK) << IW_I_A_LSB) + +#define IW_I_B_LSB 22 +#define IW_I_B_SIZE 5 +#define IW_I_B_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_I_B_SIZE)) +#define IW_I_B_SHIFTED_MASK (IW_I_B_UNSHIFTED_MASK << IW_I_B_LSB) +#define GET_IW_I_B(W) (((W) >> IW_I_B_LSB) & IW_I_B_UNSHIFTED_MASK) +#define SET_IW_I_B(V) (((V) & IW_I_B_UNSHIFTED_MASK) << IW_I_B_LSB) + +#define IW_I_IMM16_LSB 6 +#define IW_I_IMM16_SIZE 16 +#define IW_I_IMM16_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_I_IMM16_SIZE)) +#define IW_I_IMM16_SHIFTED_MASK (IW_I_IMM16_UNSHIFTED_MASK << IW_I_IMM16_LSB) +#define GET_IW_I_IMM16(W) (((W) >> IW_I_IMM16_LSB) & IW_I_IMM16_UNSHIFTED_MASK) +#define SET_IW_I_IMM16(V) (((V) & IW_I_IMM16_UNSHIFTED_MASK) << IW_I_IMM16_LSB) + +#define IW_R_A_LSB 27 +#define IW_R_A_SIZE 5 +#define IW_R_A_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_R_A_SIZE)) +#define IW_R_A_SHIFTED_MASK (IW_R_A_UNSHIFTED_MASK << IW_R_A_LSB) +#define GET_IW_R_A(W) (((W) >> IW_R_A_LSB) & IW_R_A_UNSHIFTED_MASK) +#define SET_IW_R_A(V) (((V) & IW_R_A_UNSHIFTED_MASK) << IW_R_A_LSB) + +#define IW_R_B_LSB 22 +#define IW_R_B_SIZE 5 +#define IW_R_B_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_R_B_SIZE)) +#define IW_R_B_SHIFTED_MASK (IW_R_B_UNSHIFTED_MASK << IW_R_B_LSB) +#define GET_IW_R_B(W) (((W) >> IW_R_B_LSB) & IW_R_B_UNSHIFTED_MASK) +#define SET_IW_R_B(V) (((V) & IW_R_B_UNSHIFTED_MASK) << IW_R_B_LSB) + +#define IW_R_C_LSB 17 +#define IW_R_C_SIZE 5 +#define IW_R_C_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_R_C_SIZE)) +#define IW_R_C_SHIFTED_MASK (IW_R_C_UNSHIFTED_MASK << IW_R_C_LSB) +#define GET_IW_R_C(W) (((W) >> IW_R_C_LSB) & IW_R_C_UNSHIFTED_MASK) +#define SET_IW_R_C(V) (((V) & IW_R_C_UNSHIFTED_MASK) << IW_R_C_LSB) + +#define IW_R_OPX_LSB 11 +#define IW_R_OPX_SIZE 6 +#define IW_R_OPX_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_R_OPX_SIZE)) +#define IW_R_OPX_SHIFTED_MASK (IW_R_OPX_UNSHIFTED_MASK << IW_R_OPX_LSB) +#define GET_IW_R_OPX(W) (((W) >> IW_R_OPX_LSB) & IW_R_OPX_UNSHIFTED_MASK) +#define SET_IW_R_OPX(V) (((V) & IW_R_OPX_UNSHIFTED_MASK) << IW_R_OPX_LSB) + +#define IW_R_IMM5_LSB 6 +#define IW_R_IMM5_SIZE 5 +#define IW_R_IMM5_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_R_IMM5_SIZE)) +#define IW_R_IMM5_SHIFTED_MASK (IW_R_IMM5_UNSHIFTED_MASK << IW_R_IMM5_LSB) +#define GET_IW_R_IMM5(W) (((W) >> IW_R_IMM5_LSB) & IW_R_IMM5_UNSHIFTED_MASK) +#define SET_IW_R_IMM5(V) (((V) & IW_R_IMM5_UNSHIFTED_MASK) << IW_R_IMM5_LSB) + +#define IW_J_IMM26_LSB 6 +#define IW_J_IMM26_SIZE 26 +#define IW_J_IMM26_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_J_IMM26_SIZE)) +#define IW_J_IMM26_SHIFTED_MASK (IW_J_IMM26_UNSHIFTED_MASK << IW_J_IMM26_LSB) +#define GET_IW_J_IMM26(W) (((W) >> IW_J_IMM26_LSB) & IW_J_IMM26_UNSHIFTED_MASK) +#define SET_IW_J_IMM26(V) (((V) & IW_J_IMM26_UNSHIFTED_MASK) << IW_J_IMM26_LSB) + +#define IW_CUSTOM_A_LSB 27 +#define IW_CUSTOM_A_SIZE 5 +#define IW_CUSTOM_A_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_CUSTOM_A_SIZE)) +#define 
IW_CUSTOM_A_SHIFTED_MASK (IW_CUSTOM_A_UNSHIFTED_MASK << IW_CUSTOM_A_LSB) +#define GET_IW_CUSTOM_A(W) (((W) >> IW_CUSTOM_A_LSB) & IW_CUSTOM_A_UNSHIFTED_MASK) +#define SET_IW_CUSTOM_A(V) (((V) & IW_CUSTOM_A_UNSHIFTED_MASK) << IW_CUSTOM_A_LSB) + +#define IW_CUSTOM_B_LSB 22 +#define IW_CUSTOM_B_SIZE 5 +#define IW_CUSTOM_B_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_CUSTOM_B_SIZE)) +#define IW_CUSTOM_B_SHIFTED_MASK (IW_CUSTOM_B_UNSHIFTED_MASK << IW_CUSTOM_B_LSB) +#define GET_IW_CUSTOM_B(W) (((W) >> IW_CUSTOM_B_LSB) & IW_CUSTOM_B_UNSHIFTED_MASK) +#define SET_IW_CUSTOM_B(V) (((V) & IW_CUSTOM_B_UNSHIFTED_MASK) << IW_CUSTOM_B_LSB) + +#define IW_CUSTOM_C_LSB 17 +#define IW_CUSTOM_C_SIZE 5 +#define IW_CUSTOM_C_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_CUSTOM_C_SIZE)) +#define IW_CUSTOM_C_SHIFTED_MASK (IW_CUSTOM_C_UNSHIFTED_MASK << IW_CUSTOM_C_LSB) +#define GET_IW_CUSTOM_C(W) (((W) >> IW_CUSTOM_C_LSB) & IW_CUSTOM_C_UNSHIFTED_MASK) +#define SET_IW_CUSTOM_C(V) (((V) & IW_CUSTOM_C_UNSHIFTED_MASK) << IW_CUSTOM_C_LSB) + +#define IW_CUSTOM_READA_LSB 16 +#define IW_CUSTOM_READA_SIZE 1 +#define IW_CUSTOM_READA_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_CUSTOM_READA_SIZE)) +#define IW_CUSTOM_READA_SHIFTED_MASK (IW_CUSTOM_READA_UNSHIFTED_MASK << IW_CUSTOM_READA_LSB) +#define GET_IW_CUSTOM_READA(W) (((W) >> IW_CUSTOM_READA_LSB) & IW_CUSTOM_READA_UNSHIFTED_MASK) +#define SET_IW_CUSTOM_READA(V) (((V) & IW_CUSTOM_READA_UNSHIFTED_MASK) << IW_CUSTOM_READA_LSB) + +#define IW_CUSTOM_READB_LSB 15 +#define IW_CUSTOM_READB_SIZE 1 +#define IW_CUSTOM_READB_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_CUSTOM_READB_SIZE)) +#define IW_CUSTOM_READB_SHIFTED_MASK (IW_CUSTOM_READB_UNSHIFTED_MASK << IW_CUSTOM_READB_LSB) +#define GET_IW_CUSTOM_READB(W) (((W) >> IW_CUSTOM_READB_LSB) & IW_CUSTOM_READB_UNSHIFTED_MASK) +#define SET_IW_CUSTOM_READB(V) (((V) & IW_CUSTOM_READB_UNSHIFTED_MASK) << IW_CUSTOM_READB_LSB) + +#define IW_CUSTOM_READC_LSB 14 +#define IW_CUSTOM_READC_SIZE 1 +#define IW_CUSTOM_READC_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_CUSTOM_READC_SIZE)) +#define IW_CUSTOM_READC_SHIFTED_MASK (IW_CUSTOM_READC_UNSHIFTED_MASK << IW_CUSTOM_READC_LSB) +#define GET_IW_CUSTOM_READC(W) (((W) >> IW_CUSTOM_READC_LSB) & IW_CUSTOM_READC_UNSHIFTED_MASK) +#define SET_IW_CUSTOM_READC(V) (((V) & IW_CUSTOM_READC_UNSHIFTED_MASK) << IW_CUSTOM_READC_LSB) + +#define IW_CUSTOM_N_LSB 6 +#define IW_CUSTOM_N_SIZE 8 +#define IW_CUSTOM_N_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_CUSTOM_N_SIZE)) +#define IW_CUSTOM_N_SHIFTED_MASK (IW_CUSTOM_N_UNSHIFTED_MASK << IW_CUSTOM_N_LSB) +#define GET_IW_CUSTOM_N(W) (((W) >> IW_CUSTOM_N_LSB) & IW_CUSTOM_N_UNSHIFTED_MASK) +#define SET_IW_CUSTOM_N(V) (((V) & IW_CUSTOM_N_UNSHIFTED_MASK) << IW_CUSTOM_N_LSB) + +/* R1 opcodes. 
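All of the field definitions above follow one pattern: an LSB position, a field size, unshifted and shifted masks derived from them, and GET/SET accessors that extract or place the field. Taking an R-type word apart then looks like the hypothetical helper below (illustration only, not part of the patch; printf stands in for whatever the caller does with the fields).

static void dump_r_type_fields(unsigned long insn)
{
    unsigned a    = GET_IW_R_A (insn);     /* rA, bits 31..27 */
    unsigned b    = GET_IW_R_B (insn);     /* rB, bits 26..22 */
    unsigned c    = GET_IW_R_C (insn);     /* rC, bits 21..17 */
    unsigned opx  = GET_IW_R_OPX (insn);   /* extended opcode, bits 16..11 */
    unsigned imm5 = GET_IW_R_IMM5 (insn);  /* small immediate, bits 10..6 */

    printf("rA=%u rB=%u rC=%u opx=%u imm5=%u\n", a, b, c, opx, imm5);
}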
*/ +#define R1_OP_CALL 0 +#define R1_OP_JMPI 1 +#define R1_OP_LDBU 3 +#define R1_OP_ADDI 4 +#define R1_OP_STB 5 +#define R1_OP_BR 6 +#define R1_OP_LDB 7 +#define R1_OP_CMPGEI 8 +#define R1_OP_LDHU 11 +#define R1_OP_ANDI 12 +#define R1_OP_STH 13 +#define R1_OP_BGE 14 +#define R1_OP_LDH 15 +#define R1_OP_CMPLTI 16 +#define R1_OP_INITDA 19 +#define R1_OP_ORI 20 +#define R1_OP_STW 21 +#define R1_OP_BLT 22 +#define R1_OP_LDW 23 +#define R1_OP_CMPNEI 24 +#define R1_OP_FLUSHDA 27 +#define R1_OP_XORI 28 +#define R1_OP_BNE 30 +#define R1_OP_CMPEQI 32 +#define R1_OP_LDBUIO 35 +#define R1_OP_MULI 36 +#define R1_OP_STBIO 37 +#define R1_OP_BEQ 38 +#define R1_OP_LDBIO 39 +#define R1_OP_CMPGEUI 40 +#define R1_OP_LDHUIO 43 +#define R1_OP_ANDHI 44 +#define R1_OP_STHIO 45 +#define R1_OP_BGEU 46 +#define R1_OP_LDHIO 47 +#define R1_OP_CMPLTUI 48 +#define R1_OP_CUSTOM 50 +#define R1_OP_INITD 51 +#define R1_OP_ORHI 52 +#define R1_OP_STWIO 53 +#define R1_OP_BLTU 54 +#define R1_OP_LDWIO 55 +#define R1_OP_RDPRS 56 +#define R1_OP_OPX 58 +#define R1_OP_FLUSHD 59 +#define R1_OP_XORHI 60 + +#define R1_OPX_ERET 1 +#define R1_OPX_ROLI 2 +#define R1_OPX_ROL 3 +#define R1_OPX_FLUSHP 4 +#define R1_OPX_RET 5 +#define R1_OPX_NOR 6 +#define R1_OPX_MULXUU 7 +#define R1_OPX_CMPGE 8 +#define R1_OPX_BRET 9 +#define R1_OPX_ROR 11 +#define R1_OPX_FLUSHI 12 +#define R1_OPX_JMP 13 +#define R1_OPX_AND 14 +#define R1_OPX_CMPLT 16 +#define R1_OPX_SLLI 18 +#define R1_OPX_SLL 19 +#define R1_OPX_WRPRS 20 +#define R1_OPX_OR 22 +#define R1_OPX_MULXSU 23 +#define R1_OPX_CMPNE 24 +#define R1_OPX_SRLI 26 +#define R1_OPX_SRL 27 +#define R1_OPX_NEXTPC 28 +#define R1_OPX_CALLR 29 +#define R1_OPX_XOR 30 +#define R1_OPX_MULXSS 31 +#define R1_OPX_CMPEQ 32 +#define R1_OPX_DIVU 36 +#define R1_OPX_DIV 37 +#define R1_OPX_RDCTL 38 +#define R1_OPX_MUL 39 +#define R1_OPX_CMPGEU 40 +#define R1_OPX_INITI 41 +#define R1_OPX_TRAP 45 +#define R1_OPX_WRCTL 46 +#define R1_OPX_CMPLTU 48 +#define R1_OPX_ADD 49 +#define R1_OPX_BREAK 52 +#define R1_OPX_SYNC 54 +#define R1_OPX_SUB 57 +#define R1_OPX_SRAI 58 +#define R1_OPX_SRA 59 + +/* Some convenience macros for R1 encodings, for use in instruction tables. + MATCH_R1_OPX0(NAME) and MASK_R1_OPX0 are used for R-type instructions + with 3 register operands and constant 0 in the immediate field. + The general forms are MATCH_R1_OPX(NAME, A, B, C) where the arguments specify + constant values and MASK_R1_OPX(A, B, C, N) where the arguments are booleans + that are true if the field should be included in the mask. + */ +#define MATCH_R1_OP(NAME) \ + (SET_IW_R1_OP (R1_OP_##NAME)) +#define MASK_R1_OP \ + IW_R1_OP_SHIFTED_MASK + +#define MATCH_R1_OPX0(NAME) \ + (SET_IW_R1_OP (R1_OP_OPX) | SET_IW_R_OPX (R1_OPX_##NAME)) +#define MASK_R1_OPX0 \ + (IW_R1_OP_SHIFTED_MASK | IW_R_OPX_SHIFTED_MASK | IW_R_IMM5_SHIFTED_MASK) + +#define MATCH_R1_OPX(NAME, A, B, C) \ + (MATCH_R1_OPX0 (NAME) | SET_IW_R_A (A) | SET_IW_R_B (B) | SET_IW_R_C (C)) +#define MASK_R1_OPX(A, B, C, N) \ + (IW_R1_OP_SHIFTED_MASK | IW_R_OPX_SHIFTED_MASK \ + | (A ? IW_R_A_SHIFTED_MASK : 0) \ + | (B ? IW_R_B_SHIFTED_MASK : 0) \ + | (C ? IW_R_C_SHIFTED_MASK : 0) \ + | (N ? IW_R_IMM5_SHIFTED_MASK : 0)) + +/* And here's the match/mask macros for the R1 instruction set. 
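The convenience macros just defined compose those fields into complete match/mask pairs, and the booleans passed to MASK_R1_OPX say which operand fields are fixed by the encoding rather than left free. Two hypothetical helpers (illustration only, not code from the patch) show both directions, recognising and building a three-register add:

static bool insn_is_r1_add(unsigned long insn)
{
    /* OP must be OPX, OPX must be ADD, IMM5 must be zero; rA/rB/rC are free. */
    return (insn & MASK_R1_OPX0) == MATCH_R1_OPX0 (ADD);
}

static unsigned long encode_r1_add(unsigned ra, unsigned rb, unsigned rc)
{
    /* add rC, rA, rB */
    return MATCH_R1_OPX0 (ADD)
           | SET_IW_R_A (ra) | SET_IW_R_B (rb) | SET_IW_R_C (rc);
}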
*/ +#define MATCH_R1_ADD MATCH_R1_OPX0 (ADD) +#define MASK_R1_ADD MASK_R1_OPX0 +#define MATCH_R1_ADDI MATCH_R1_OP (ADDI) +#define MASK_R1_ADDI MASK_R1_OP +#define MATCH_R1_AND MATCH_R1_OPX0 (AND) +#define MASK_R1_AND MASK_R1_OPX0 +#define MATCH_R1_ANDHI MATCH_R1_OP (ANDHI) +#define MASK_R1_ANDHI MASK_R1_OP +#define MATCH_R1_ANDI MATCH_R1_OP (ANDI) +#define MASK_R1_ANDI MASK_R1_OP +#define MATCH_R1_BEQ MATCH_R1_OP (BEQ) +#define MASK_R1_BEQ MASK_R1_OP +#define MATCH_R1_BGE MATCH_R1_OP (BGE) +#define MASK_R1_BGE MASK_R1_OP +#define MATCH_R1_BGEU MATCH_R1_OP (BGEU) +#define MASK_R1_BGEU MASK_R1_OP +#define MATCH_R1_BGT MATCH_R1_OP (BLT) +#define MASK_R1_BGT MASK_R1_OP +#define MATCH_R1_BGTU MATCH_R1_OP (BLTU) +#define MASK_R1_BGTU MASK_R1_OP +#define MATCH_R1_BLE MATCH_R1_OP (BGE) +#define MASK_R1_BLE MASK_R1_OP +#define MATCH_R1_BLEU MATCH_R1_OP (BGEU) +#define MASK_R1_BLEU MASK_R1_OP +#define MATCH_R1_BLT MATCH_R1_OP (BLT) +#define MASK_R1_BLT MASK_R1_OP +#define MATCH_R1_BLTU MATCH_R1_OP (BLTU) +#define MASK_R1_BLTU MASK_R1_OP +#define MATCH_R1_BNE MATCH_R1_OP (BNE) +#define MASK_R1_BNE MASK_R1_OP +#define MATCH_R1_BR MATCH_R1_OP (BR) +#define MASK_R1_BR MASK_R1_OP | IW_I_A_SHIFTED_MASK | IW_I_B_SHIFTED_MASK +#define MATCH_R1_BREAK MATCH_R1_OPX (BREAK, 0, 0, 0x1e) +#define MASK_R1_BREAK MASK_R1_OPX (1, 1, 1, 0) +#define MATCH_R1_BRET MATCH_R1_OPX (BRET, 0x1e, 0, 0) +#define MASK_R1_BRET MASK_R1_OPX (1, 1, 1, 1) +#define MATCH_R1_CALL MATCH_R1_OP (CALL) +#define MASK_R1_CALL MASK_R1_OP +#define MATCH_R1_CALLR MATCH_R1_OPX (CALLR, 0, 0, 0x1f) +#define MASK_R1_CALLR MASK_R1_OPX (0, 1, 1, 1) +#define MATCH_R1_CMPEQ MATCH_R1_OPX0 (CMPEQ) +#define MASK_R1_CMPEQ MASK_R1_OPX0 +#define MATCH_R1_CMPEQI MATCH_R1_OP (CMPEQI) +#define MASK_R1_CMPEQI MASK_R1_OP +#define MATCH_R1_CMPGE MATCH_R1_OPX0 (CMPGE) +#define MASK_R1_CMPGE MASK_R1_OPX0 +#define MATCH_R1_CMPGEI MATCH_R1_OP (CMPGEI) +#define MASK_R1_CMPGEI MASK_R1_OP +#define MATCH_R1_CMPGEU MATCH_R1_OPX0 (CMPGEU) +#define MASK_R1_CMPGEU MASK_R1_OPX0 +#define MATCH_R1_CMPGEUI MATCH_R1_OP (CMPGEUI) +#define MASK_R1_CMPGEUI MASK_R1_OP +#define MATCH_R1_CMPGT MATCH_R1_OPX0 (CMPLT) +#define MASK_R1_CMPGT MASK_R1_OPX0 +#define MATCH_R1_CMPGTI MATCH_R1_OP (CMPGEI) +#define MASK_R1_CMPGTI MASK_R1_OP +#define MATCH_R1_CMPGTU MATCH_R1_OPX0 (CMPLTU) +#define MASK_R1_CMPGTU MASK_R1_OPX0 +#define MATCH_R1_CMPGTUI MATCH_R1_OP (CMPGEUI) +#define MASK_R1_CMPGTUI MASK_R1_OP +#define MATCH_R1_CMPLE MATCH_R1_OPX0 (CMPGE) +#define MASK_R1_CMPLE MASK_R1_OPX0 +#define MATCH_R1_CMPLEI MATCH_R1_OP (CMPLTI) +#define MASK_R1_CMPLEI MASK_R1_OP +#define MATCH_R1_CMPLEU MATCH_R1_OPX0 (CMPGEU) +#define MASK_R1_CMPLEU MASK_R1_OPX0 +#define MATCH_R1_CMPLEUI MATCH_R1_OP (CMPLTUI) +#define MASK_R1_CMPLEUI MASK_R1_OP +#define MATCH_R1_CMPLT MATCH_R1_OPX0 (CMPLT) +#define MASK_R1_CMPLT MASK_R1_OPX0 +#define MATCH_R1_CMPLTI MATCH_R1_OP (CMPLTI) +#define MASK_R1_CMPLTI MASK_R1_OP +#define MATCH_R1_CMPLTU MATCH_R1_OPX0 (CMPLTU) +#define MASK_R1_CMPLTU MASK_R1_OPX0 +#define MATCH_R1_CMPLTUI MATCH_R1_OP (CMPLTUI) +#define MASK_R1_CMPLTUI MASK_R1_OP +#define MATCH_R1_CMPNE MATCH_R1_OPX0 (CMPNE) +#define MASK_R1_CMPNE MASK_R1_OPX0 +#define MATCH_R1_CMPNEI MATCH_R1_OP (CMPNEI) +#define MASK_R1_CMPNEI MASK_R1_OP +#define MATCH_R1_CUSTOM MATCH_R1_OP (CUSTOM) +#define MASK_R1_CUSTOM MASK_R1_OP +#define MATCH_R1_DIV MATCH_R1_OPX0 (DIV) +#define MASK_R1_DIV MASK_R1_OPX0 +#define MATCH_R1_DIVU MATCH_R1_OPX0 (DIVU) +#define MASK_R1_DIVU MASK_R1_OPX0 +#define MATCH_R1_ERET MATCH_R1_OPX (ERET, 0x1d, 
0x1e, 0) +#define MASK_R1_ERET MASK_R1_OPX (1, 1, 1, 1) +#define MATCH_R1_FLUSHD MATCH_R1_OP (FLUSHD) | SET_IW_I_B (0) +#define MASK_R1_FLUSHD MASK_R1_OP | IW_I_B_SHIFTED_MASK +#define MATCH_R1_FLUSHDA MATCH_R1_OP (FLUSHDA) | SET_IW_I_B (0) +#define MASK_R1_FLUSHDA MASK_R1_OP | IW_I_B_SHIFTED_MASK +#define MATCH_R1_FLUSHI MATCH_R1_OPX (FLUSHI, 0, 0, 0) +#define MASK_R1_FLUSHI MASK_R1_OPX (0, 1, 1, 1) +#define MATCH_R1_FLUSHP MATCH_R1_OPX (FLUSHP, 0, 0, 0) +#define MASK_R1_FLUSHP MASK_R1_OPX (1, 1, 1, 1) +#define MATCH_R1_INITD MATCH_R1_OP (INITD) | SET_IW_I_B (0) +#define MASK_R1_INITD MASK_R1_OP | IW_I_B_SHIFTED_MASK +#define MATCH_R1_INITDA MATCH_R1_OP (INITDA) | SET_IW_I_B (0) +#define MASK_R1_INITDA MASK_R1_OP | IW_I_B_SHIFTED_MASK +#define MATCH_R1_INITI MATCH_R1_OPX (INITI, 0, 0, 0) +#define MASK_R1_INITI MASK_R1_OPX (0, 1, 1, 1) +#define MATCH_R1_JMP MATCH_R1_OPX (JMP, 0, 0, 0) +#define MASK_R1_JMP MASK_R1_OPX (0, 1, 1, 1) +#define MATCH_R1_JMPI MATCH_R1_OP (JMPI) +#define MASK_R1_JMPI MASK_R1_OP +#define MATCH_R1_LDB MATCH_R1_OP (LDB) +#define MASK_R1_LDB MASK_R1_OP +#define MATCH_R1_LDBIO MATCH_R1_OP (LDBIO) +#define MASK_R1_LDBIO MASK_R1_OP +#define MATCH_R1_LDBU MATCH_R1_OP (LDBU) +#define MASK_R1_LDBU MASK_R1_OP +#define MATCH_R1_LDBUIO MATCH_R1_OP (LDBUIO) +#define MASK_R1_LDBUIO MASK_R1_OP +#define MATCH_R1_LDH MATCH_R1_OP (LDH) +#define MASK_R1_LDH MASK_R1_OP +#define MATCH_R1_LDHIO MATCH_R1_OP (LDHIO) +#define MASK_R1_LDHIO MASK_R1_OP +#define MATCH_R1_LDHU MATCH_R1_OP (LDHU) +#define MASK_R1_LDHU MASK_R1_OP +#define MATCH_R1_LDHUIO MATCH_R1_OP (LDHUIO) +#define MASK_R1_LDHUIO MASK_R1_OP +#define MATCH_R1_LDW MATCH_R1_OP (LDW) +#define MASK_R1_LDW MASK_R1_OP +#define MATCH_R1_LDWIO MATCH_R1_OP (LDWIO) +#define MASK_R1_LDWIO MASK_R1_OP +#define MATCH_R1_MOV MATCH_R1_OPX (ADD, 0, 0, 0) +#define MASK_R1_MOV MASK_R1_OPX (0, 1, 0, 1) +#define MATCH_R1_MOVHI MATCH_R1_OP (ORHI) | SET_IW_I_A (0) +#define MASK_R1_MOVHI MASK_R1_OP | IW_I_A_SHIFTED_MASK +#define MATCH_R1_MOVI MATCH_R1_OP (ADDI) | SET_IW_I_A (0) +#define MASK_R1_MOVI MASK_R1_OP | IW_I_A_SHIFTED_MASK +#define MATCH_R1_MOVUI MATCH_R1_OP (ORI) | SET_IW_I_A (0) +#define MASK_R1_MOVUI MASK_R1_OP | IW_I_A_SHIFTED_MASK +#define MATCH_R1_MUL MATCH_R1_OPX0 (MUL) +#define MASK_R1_MUL MASK_R1_OPX0 +#define MATCH_R1_MULI MATCH_R1_OP (MULI) +#define MASK_R1_MULI MASK_R1_OP +#define MATCH_R1_MULXSS MATCH_R1_OPX0 (MULXSS) +#define MASK_R1_MULXSS MASK_R1_OPX0 +#define MATCH_R1_MULXSU MATCH_R1_OPX0 (MULXSU) +#define MASK_R1_MULXSU MASK_R1_OPX0 +#define MATCH_R1_MULXUU MATCH_R1_OPX0 (MULXUU) +#define MASK_R1_MULXUU MASK_R1_OPX0 +#define MATCH_R1_NEXTPC MATCH_R1_OPX (NEXTPC, 0, 0, 0) +#define MASK_R1_NEXTPC MASK_R1_OPX (1, 1, 0, 1) +#define MATCH_R1_NOP MATCH_R1_OPX (ADD, 0, 0, 0) +#define MASK_R1_NOP MASK_R1_OPX (1, 1, 1, 1) +#define MATCH_R1_NOR MATCH_R1_OPX0 (NOR) +#define MASK_R1_NOR MASK_R1_OPX0 +#define MATCH_R1_OR MATCH_R1_OPX0 (OR) +#define MASK_R1_OR MASK_R1_OPX0 +#define MATCH_R1_ORHI MATCH_R1_OP (ORHI) +#define MASK_R1_ORHI MASK_R1_OP +#define MATCH_R1_ORI MATCH_R1_OP (ORI) +#define MASK_R1_ORI MASK_R1_OP +#define MATCH_R1_RDCTL MATCH_R1_OPX (RDCTL, 0, 0, 0) +#define MASK_R1_RDCTL MASK_R1_OPX (1, 1, 0, 0) +#define MATCH_R1_RDPRS MATCH_R1_OP (RDPRS) +#define MASK_R1_RDPRS MASK_R1_OP +#define MATCH_R1_RET MATCH_R1_OPX (RET, 0x1f, 0, 0) +#define MASK_R1_RET MASK_R1_OPX (1, 1, 1, 1) +#define MATCH_R1_ROL MATCH_R1_OPX0 (ROL) +#define MASK_R1_ROL MASK_R1_OPX0 +#define MATCH_R1_ROLI MATCH_R1_OPX (ROLI, 0, 0, 0) +#define MASK_R1_ROLI 
MASK_R1_OPX (0, 1, 0, 0) +#define MATCH_R1_ROR MATCH_R1_OPX0 (ROR) +#define MASK_R1_ROR MASK_R1_OPX0 +#define MATCH_R1_SLL MATCH_R1_OPX0 (SLL) +#define MASK_R1_SLL MASK_R1_OPX0 +#define MATCH_R1_SLLI MATCH_R1_OPX (SLLI, 0, 0, 0) +#define MASK_R1_SLLI MASK_R1_OPX (0, 1, 0, 0) +#define MATCH_R1_SRA MATCH_R1_OPX0 (SRA) +#define MASK_R1_SRA MASK_R1_OPX0 +#define MATCH_R1_SRAI MATCH_R1_OPX (SRAI, 0, 0, 0) +#define MASK_R1_SRAI MASK_R1_OPX (0, 1, 0, 0) +#define MATCH_R1_SRL MATCH_R1_OPX0 (SRL) +#define MASK_R1_SRL MASK_R1_OPX0 +#define MATCH_R1_SRLI MATCH_R1_OPX (SRLI, 0, 0, 0) +#define MASK_R1_SRLI MASK_R1_OPX (0, 1, 0, 0) +#define MATCH_R1_STB MATCH_R1_OP (STB) +#define MASK_R1_STB MASK_R1_OP +#define MATCH_R1_STBIO MATCH_R1_OP (STBIO) +#define MASK_R1_STBIO MASK_R1_OP +#define MATCH_R1_STH MATCH_R1_OP (STH) +#define MASK_R1_STH MASK_R1_OP +#define MATCH_R1_STHIO MATCH_R1_OP (STHIO) +#define MASK_R1_STHIO MASK_R1_OP +#define MATCH_R1_STW MATCH_R1_OP (STW) +#define MASK_R1_STW MASK_R1_OP +#define MATCH_R1_STWIO MATCH_R1_OP (STWIO) +#define MASK_R1_STWIO MASK_R1_OP +#define MATCH_R1_SUB MATCH_R1_OPX0 (SUB) +#define MASK_R1_SUB MASK_R1_OPX0 +#define MATCH_R1_SUBI MATCH_R1_OP (ADDI) +#define MASK_R1_SUBI MASK_R1_OP +#define MATCH_R1_SYNC MATCH_R1_OPX (SYNC, 0, 0, 0) +#define MASK_R1_SYNC MASK_R1_OPX (1, 1, 1, 1) +#define MATCH_R1_TRAP MATCH_R1_OPX (TRAP, 0, 0, 0x1d) +#define MASK_R1_TRAP MASK_R1_OPX (1, 1, 1, 0) +#define MATCH_R1_WRCTL MATCH_R1_OPX (WRCTL, 0, 0, 0) +#define MASK_R1_WRCTL MASK_R1_OPX (0, 1, 1, 0) +#define MATCH_R1_WRPRS MATCH_R1_OPX (WRPRS, 0, 0, 0) +#define MASK_R1_WRPRS MASK_R1_OPX (0, 1, 0, 1) +#define MATCH_R1_XOR MATCH_R1_OPX0 (XOR) +#define MASK_R1_XOR MASK_R1_OPX0 +#define MATCH_R1_XORHI MATCH_R1_OP (XORHI) +#define MASK_R1_XORHI MASK_R1_OP +#define MATCH_R1_XORI MATCH_R1_OP (XORI) +#define MASK_R1_XORI MASK_R1_OP + +#endif /* _NIOS2R1_H */ + +/*#include "nios2r2.h"*/ + +#ifndef _NIOS2R2_H_ +#define _NIOS2R2_H_ + +/* Fields for 32-bit R2 instructions. 
*/ + +#define IW_R2_OP_LSB 0 +#define IW_R2_OP_SIZE 6 +#define IW_R2_OP_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_R2_OP_SIZE)) +#define IW_R2_OP_SHIFTED_MASK (IW_R2_OP_UNSHIFTED_MASK << IW_R2_OP_LSB) +#define GET_IW_R2_OP(W) (((W) >> IW_R2_OP_LSB) & IW_R2_OP_UNSHIFTED_MASK) +#define SET_IW_R2_OP(V) (((V) & IW_R2_OP_UNSHIFTED_MASK) << IW_R2_OP_LSB) + +#define IW_L26_IMM26_LSB 6 +#define IW_L26_IMM26_SIZE 26 +#define IW_L26_IMM26_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_L26_IMM26_SIZE)) +#define IW_L26_IMM26_SHIFTED_MASK (IW_L26_IMM26_UNSHIFTED_MASK << IW_L26_IMM26_LSB) +#define GET_IW_L26_IMM26(W) (((W) >> IW_L26_IMM26_LSB) & IW_L26_IMM26_UNSHIFTED_MASK) +#define SET_IW_L26_IMM26(V) (((V) & IW_L26_IMM26_UNSHIFTED_MASK) << IW_L26_IMM26_LSB) + +#define IW_F2I16_A_LSB 6 +#define IW_F2I16_A_SIZE 5 +#define IW_F2I16_A_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F2I16_A_SIZE)) +#define IW_F2I16_A_SHIFTED_MASK (IW_F2I16_A_UNSHIFTED_MASK << IW_F2I16_A_LSB) +#define GET_IW_F2I16_A(W) (((W) >> IW_F2I16_A_LSB) & IW_F2I16_A_UNSHIFTED_MASK) +#define SET_IW_F2I16_A(V) (((V) & IW_F2I16_A_UNSHIFTED_MASK) << IW_F2I16_A_LSB) + +#define IW_F2I16_B_LSB 11 +#define IW_F2I16_B_SIZE 5 +#define IW_F2I16_B_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F2I16_B_SIZE)) +#define IW_F2I16_B_SHIFTED_MASK (IW_F2I16_B_UNSHIFTED_MASK << IW_F2I16_B_LSB) +#define GET_IW_F2I16_B(W) (((W) >> IW_F2I16_B_LSB) & IW_F2I16_B_UNSHIFTED_MASK) +#define SET_IW_F2I16_B(V) (((V) & IW_F2I16_B_UNSHIFTED_MASK) << IW_F2I16_B_LSB) + +#define IW_F2I16_IMM16_LSB 16 +#define IW_F2I16_IMM16_SIZE 16 +#define IW_F2I16_IMM16_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F2I16_IMM16_SIZE)) +#define IW_F2I16_IMM16_SHIFTED_MASK (IW_F2I16_IMM16_UNSHIFTED_MASK << IW_F2I16_IMM16_LSB) +#define GET_IW_F2I16_IMM16(W) (((W) >> IW_F2I16_IMM16_LSB) & IW_F2I16_IMM16_UNSHIFTED_MASK) +#define SET_IW_F2I16_IMM16(V) (((V) & IW_F2I16_IMM16_UNSHIFTED_MASK) << IW_F2I16_IMM16_LSB) + +/* Common to all three I12-group formats F2X4I12, F1X4I12, F1X4L17. 
*/ +#define IW_I12_X_LSB 28 +#define IW_I12_X_SIZE 4 +#define IW_I12_X_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_I12_X_SIZE)) +#define IW_I12_X_SHIFTED_MASK (IW_I12_X_UNSHIFTED_MASK << IW_I12_X_LSB) +#define GET_IW_I12_X(W) (((W) >> IW_I12_X_LSB) & IW_I12_X_UNSHIFTED_MASK) +#define SET_IW_I12_X(V) (((V) & IW_I12_X_UNSHIFTED_MASK) << IW_I12_X_LSB) + +#define IW_F2X4I12_A_LSB 6 +#define IW_F2X4I12_A_SIZE 5 +#define IW_F2X4I12_A_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F2X4I12_A_SIZE)) +#define IW_F2X4I12_A_SHIFTED_MASK (IW_F2X4I12_A_UNSHIFTED_MASK << IW_F2X4I12_A_LSB) +#define GET_IW_F2X4I12_A(W) (((W) >> IW_F2X4I12_A_LSB) & IW_F2X4I12_A_UNSHIFTED_MASK) +#define SET_IW_F2X4I12_A(V) (((V) & IW_F2X4I12_A_UNSHIFTED_MASK) << IW_F2X4I12_A_LSB) + +#define IW_F2X4I12_B_LSB 11 +#define IW_F2X4I12_B_SIZE 5 +#define IW_F2X4I12_B_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F2X4I12_B_SIZE)) +#define IW_F2X4I12_B_SHIFTED_MASK (IW_F2X4I12_B_UNSHIFTED_MASK << IW_F2X4I12_B_LSB) +#define GET_IW_F2X4I12_B(W) (((W) >> IW_F2X4I12_B_LSB) & IW_F2X4I12_B_UNSHIFTED_MASK) +#define SET_IW_F2X4I12_B(V) (((V) & IW_F2X4I12_B_UNSHIFTED_MASK) << IW_F2X4I12_B_LSB) + +#define IW_F2X4I12_IMM12_LSB 16 +#define IW_F2X4I12_IMM12_SIZE 12 +#define IW_F2X4I12_IMM12_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F2X4I12_IMM12_SIZE)) +#define IW_F2X4I12_IMM12_SHIFTED_MASK (IW_F2X4I12_IMM12_UNSHIFTED_MASK << IW_F2X4I12_IMM12_LSB) +#define GET_IW_F2X4I12_IMM12(W) (((W) >> IW_F2X4I12_IMM12_LSB) & IW_F2X4I12_IMM12_UNSHIFTED_MASK) +#define SET_IW_F2X4I12_IMM12(V) (((V) & IW_F2X4I12_IMM12_UNSHIFTED_MASK) << IW_F2X4I12_IMM12_LSB) + +#define IW_F1X4I12_A_LSB 6 +#define IW_F1X4I12_A_SIZE 5 +#define IW_F1X4I12_A_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F1X4I12_A_SIZE)) +#define IW_F1X4I12_A_SHIFTED_MASK (IW_F1X4I12_A_UNSHIFTED_MASK << IW_F1X4I12_A_LSB) +#define GET_IW_F1X4I12_A(W) (((W) >> IW_F1X4I12_A_LSB) & IW_F1X4I12_A_UNSHIFTED_MASK) +#define SET_IW_F1X4I12_A(V) (((V) & IW_F1X4I12_A_UNSHIFTED_MASK) << IW_F1X4I12_A_LSB) + +#define IW_F1X4I12_X_LSB 11 +#define IW_F1X4I12_X_SIZE 5 +#define IW_F1X4I12_X_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F1X4I12_X_SIZE)) +#define IW_F1X4I12_X_SHIFTED_MASK (IW_F1X4I12_X_UNSHIFTED_MASK << IW_F1X4I12_X_LSB) +#define GET_IW_F1X4I12_X(W) (((W) >> IW_F1X4I12_X_LSB) & IW_F1X4I12_X_UNSHIFTED_MASK) +#define SET_IW_F1X4I12_X(V) (((V) & IW_F1X4I12_X_UNSHIFTED_MASK) << IW_F1X4I12_X_LSB) + +#define IW_F1X4I12_IMM12_LSB 16 +#define IW_F1X4I12_IMM12_SIZE 12 +#define IW_F1X4I12_IMM12_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F1X4I12_IMM12_SIZE)) +#define IW_F1X4I12_IMM12_SHIFTED_MASK (IW_F1X4I12_IMM12_UNSHIFTED_MASK << IW_F1X4I12_IMM12_LSB) +#define GET_IW_F1X4I12_IMM12(W) (((W) >> IW_F1X4I12_IMM12_LSB) & IW_F1X4I12_IMM12_UNSHIFTED_MASK) +#define SET_IW_F1X4I12_IMM12(V) (((V) & IW_F1X4I12_IMM12_UNSHIFTED_MASK) << IW_F1X4I12_IMM12_LSB) + +#define IW_F1X4L17_A_LSB 6 +#define IW_F1X4L17_A_SIZE 5 +#define IW_F1X4L17_A_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F1X4L17_A_SIZE)) +#define IW_F1X4L17_A_SHIFTED_MASK (IW_F1X4L17_A_UNSHIFTED_MASK << IW_F1X4L17_A_LSB) +#define GET_IW_F1X4L17_A(W) (((W) >> IW_F1X4L17_A_LSB) & IW_F1X4L17_A_UNSHIFTED_MASK) +#define SET_IW_F1X4L17_A(V) (((V) & IW_F1X4L17_A_UNSHIFTED_MASK) << IW_F1X4L17_A_LSB) + +#define IW_F1X4L17_ID_LSB 11 +#define IW_F1X4L17_ID_SIZE 1 +#define IW_F1X4L17_ID_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F1X4L17_ID_SIZE)) +#define IW_F1X4L17_ID_SHIFTED_MASK (IW_F1X4L17_ID_UNSHIFTED_MASK << IW_F1X4L17_ID_LSB) +#define GET_IW_F1X4L17_ID(W) (((W) >> IW_F1X4L17_ID_LSB) & 
IW_F1X4L17_ID_UNSHIFTED_MASK) +#define SET_IW_F1X4L17_ID(V) (((V) & IW_F1X4L17_ID_UNSHIFTED_MASK) << IW_F1X4L17_ID_LSB) + +#define IW_F1X4L17_WB_LSB 12 +#define IW_F1X4L17_WB_SIZE 1 +#define IW_F1X4L17_WB_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F1X4L17_WB_SIZE)) +#define IW_F1X4L17_WB_SHIFTED_MASK (IW_F1X4L17_WB_UNSHIFTED_MASK << IW_F1X4L17_WB_LSB) +#define GET_IW_F1X4L17_WB(W) (((W) >> IW_F1X4L17_WB_LSB) & IW_F1X4L17_WB_UNSHIFTED_MASK) +#define SET_IW_F1X4L17_WB(V) (((V) & IW_F1X4L17_WB_UNSHIFTED_MASK) << IW_F1X4L17_WB_LSB) + +#define IW_F1X4L17_RS_LSB 13 +#define IW_F1X4L17_RS_SIZE 1 +#define IW_F1X4L17_RS_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F1X4L17_RS_SIZE)) +#define IW_F1X4L17_RS_SHIFTED_MASK (IW_F1X4L17_RS_UNSHIFTED_MASK << IW_F1X4L17_RS_LSB) +#define GET_IW_F1X4L17_RS(W) (((W) >> IW_F1X4L17_RS_LSB) & IW_F1X4L17_RS_UNSHIFTED_MASK) +#define SET_IW_F1X4L17_RS(V) (((V) & IW_F1X4L17_RS_UNSHIFTED_MASK) << IW_F1X4L17_RS_LSB) + +#define IW_F1X4L17_PC_LSB 14 +#define IW_F1X4L17_PC_SIZE 1 +#define IW_F1X4L17_PC_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F1X4L17_PC_SIZE)) +#define IW_F1X4L17_PC_SHIFTED_MASK (IW_F1X4L17_PC_UNSHIFTED_MASK << IW_F1X4L17_PC_LSB) +#define GET_IW_F1X4L17_PC(W) (((W) >> IW_F1X4L17_PC_LSB) & IW_F1X4L17_PC_UNSHIFTED_MASK) +#define SET_IW_F1X4L17_PC(V) (((V) & IW_F1X4L17_PC_UNSHIFTED_MASK) << IW_F1X4L17_PC_LSB) + +#define IW_F1X4L17_RSV_LSB 15 +#define IW_F1X4L17_RSV_SIZE 1 +#define IW_F1X4L17_RSV_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F1X4L17_RSV_SIZE)) +#define IW_F1X4L17_RSV_SHIFTED_MASK (IW_F1X4L17_RSV_UNSHIFTED_MASK << IW_F1X4L17_RSV_LSB) +#define GET_IW_F1X4L17_RSV(W) (((W) >> IW_F1X4L17_RSV_LSB) & IW_F1X4L17_RSV_UNSHIFTED_MASK) +#define SET_IW_F1X4L17_RSV(V) (((V) & IW_F1X4L17_RSV_UNSHIFTED_MASK) << IW_F1X4L17_RSV_LSB) + +#define IW_F1X4L17_REGMASK_LSB 16 +#define IW_F1X4L17_REGMASK_SIZE 12 +#define IW_F1X4L17_REGMASK_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F1X4L17_REGMASK_SIZE)) +#define IW_F1X4L17_REGMASK_SHIFTED_MASK (IW_F1X4L17_REGMASK_UNSHIFTED_MASK << IW_F1X4L17_REGMASK_LSB) +#define GET_IW_F1X4L17_REGMASK(W) (((W) >> IW_F1X4L17_REGMASK_LSB) & IW_F1X4L17_REGMASK_UNSHIFTED_MASK) +#define SET_IW_F1X4L17_REGMASK(V) (((V) & IW_F1X4L17_REGMASK_UNSHIFTED_MASK) << IW_F1X4L17_REGMASK_LSB) + +/* Shared by OPX-group formats F3X6L5, F2X6L10, F3X6. */ +#define IW_OPX_X_LSB 26 +#define IW_OPX_X_SIZE 6 +#define IW_OPX_X_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_OPX_X_SIZE)) +#define IW_OPX_X_SHIFTED_MASK (IW_OPX_X_UNSHIFTED_MASK << IW_OPX_X_LSB) +#define GET_IW_OPX_X(W) (((W) >> IW_OPX_X_LSB) & IW_OPX_X_UNSHIFTED_MASK) +#define SET_IW_OPX_X(V) (((V) & IW_OPX_X_UNSHIFTED_MASK) << IW_OPX_X_LSB) + +/* F3X6L5 accessors are also used for F3X6 formats. 
*/ +#define IW_F3X6L5_A_LSB 6 +#define IW_F3X6L5_A_SIZE 5 +#define IW_F3X6L5_A_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F3X6L5_A_SIZE)) +#define IW_F3X6L5_A_SHIFTED_MASK (IW_F3X6L5_A_UNSHIFTED_MASK << IW_F3X6L5_A_LSB) +#define GET_IW_F3X6L5_A(W) (((W) >> IW_F3X6L5_A_LSB) & IW_F3X6L5_A_UNSHIFTED_MASK) +#define SET_IW_F3X6L5_A(V) (((V) & IW_F3X6L5_A_UNSHIFTED_MASK) << IW_F3X6L5_A_LSB) + +#define IW_F3X6L5_B_LSB 11 +#define IW_F3X6L5_B_SIZE 5 +#define IW_F3X6L5_B_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F3X6L5_B_SIZE)) +#define IW_F3X6L5_B_SHIFTED_MASK (IW_F3X6L5_B_UNSHIFTED_MASK << IW_F3X6L5_B_LSB) +#define GET_IW_F3X6L5_B(W) (((W) >> IW_F3X6L5_B_LSB) & IW_F3X6L5_B_UNSHIFTED_MASK) +#define SET_IW_F3X6L5_B(V) (((V) & IW_F3X6L5_B_UNSHIFTED_MASK) << IW_F3X6L5_B_LSB) + +#define IW_F3X6L5_C_LSB 16 +#define IW_F3X6L5_C_SIZE 5 +#define IW_F3X6L5_C_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F3X6L5_C_SIZE)) +#define IW_F3X6L5_C_SHIFTED_MASK (IW_F3X6L5_C_UNSHIFTED_MASK << IW_F3X6L5_C_LSB) +#define GET_IW_F3X6L5_C(W) (((W) >> IW_F3X6L5_C_LSB) & IW_F3X6L5_C_UNSHIFTED_MASK) +#define SET_IW_F3X6L5_C(V) (((V) & IW_F3X6L5_C_UNSHIFTED_MASK) << IW_F3X6L5_C_LSB) + +#define IW_F3X6L5_IMM5_LSB 21 +#define IW_F3X6L5_IMM5_SIZE 5 +#define IW_F3X6L5_IMM5_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F3X6L5_IMM5_SIZE)) +#define IW_F3X6L5_IMM5_SHIFTED_MASK (IW_F3X6L5_IMM5_UNSHIFTED_MASK << IW_F3X6L5_IMM5_LSB) +#define GET_IW_F3X6L5_IMM5(W) (((W) >> IW_F3X6L5_IMM5_LSB) & IW_F3X6L5_IMM5_UNSHIFTED_MASK) +#define SET_IW_F3X6L5_IMM5(V) (((V) & IW_F3X6L5_IMM5_UNSHIFTED_MASK) << IW_F3X6L5_IMM5_LSB) + +#define IW_F2X6L10_A_LSB 6 +#define IW_F2X6L10_A_SIZE 5 +#define IW_F2X6L10_A_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F2X6L10_A_SIZE)) +#define IW_F2X6L10_A_SHIFTED_MASK (IW_F2X6L10_A_UNSHIFTED_MASK << IW_F2X6L10_A_LSB) +#define GET_IW_F2X6L10_A(W) (((W) >> IW_F2X6L10_A_LSB) & IW_F2X6L10_A_UNSHIFTED_MASK) +#define SET_IW_F2X6L10_A(V) (((V) & IW_F2X6L10_A_UNSHIFTED_MASK) << IW_F2X6L10_A_LSB) + +#define IW_F2X6L10_B_LSB 11 +#define IW_F2X6L10_B_SIZE 5 +#define IW_F2X6L10_B_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F2X6L10_B_SIZE)) +#define IW_F2X6L10_B_SHIFTED_MASK (IW_F2X6L10_B_UNSHIFTED_MASK << IW_F2X6L10_B_LSB) +#define GET_IW_F2X6L10_B(W) (((W) >> IW_F2X6L10_B_LSB) & IW_F2X6L10_B_UNSHIFTED_MASK) +#define SET_IW_F2X6L10_B(V) (((V) & IW_F2X6L10_B_UNSHIFTED_MASK) << IW_F2X6L10_B_LSB) + +#define IW_F2X6L10_LSB_LSB 16 +#define IW_F2X6L10_LSB_SIZE 5 +#define IW_F2X6L10_LSB_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F2X6L10_LSB_SIZE)) +#define IW_F2X6L10_LSB_SHIFTED_MASK (IW_F2X6L10_LSB_UNSHIFTED_MASK << IW_F2X6L10_LSB_LSB) +#define GET_IW_F2X6L10_LSB(W) (((W) >> IW_F2X6L10_LSB_LSB) & IW_F2X6L10_LSB_UNSHIFTED_MASK) +#define SET_IW_F2X6L10_LSB(V) (((V) & IW_F2X6L10_LSB_UNSHIFTED_MASK) << IW_F2X6L10_LSB_LSB) + +#define IW_F2X6L10_MSB_LSB 21 +#define IW_F2X6L10_MSB_SIZE 5 +#define IW_F2X6L10_MSB_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F2X6L10_MSB_SIZE)) +#define IW_F2X6L10_MSB_SHIFTED_MASK (IW_F2X6L10_MSB_UNSHIFTED_MASK << IW_F2X6L10_MSB_LSB) +#define GET_IW_F2X6L10_MSB(W) (((W) >> IW_F2X6L10_MSB_LSB) & IW_F2X6L10_MSB_UNSHIFTED_MASK) +#define SET_IW_F2X6L10_MSB(V) (((V) & IW_F2X6L10_MSB_UNSHIFTED_MASK) << IW_F2X6L10_MSB_LSB) + +#define IW_F3X8_A_LSB 6 +#define IW_F3X8_A_SIZE 5 +#define IW_F3X8_A_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F3X8_A_SIZE)) +#define IW_F3X8_A_SHIFTED_MASK (IW_F3X8_A_UNSHIFTED_MASK << IW_F3X8_A_LSB) +#define GET_IW_F3X8_A(W) (((W) >> IW_F3X8_A_LSB) & IW_F3X8_A_UNSHIFTED_MASK) +#define SET_IW_F3X8_A(V) (((V) & 
IW_F3X8_A_UNSHIFTED_MASK) << IW_F3X8_A_LSB) + +#define IW_F3X8_B_LSB 11 +#define IW_F3X8_B_SIZE 5 +#define IW_F3X8_B_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F3X8_B_SIZE)) +#define IW_F3X8_B_SHIFTED_MASK (IW_F3X8_B_UNSHIFTED_MASK << IW_F3X8_B_LSB) +#define GET_IW_F3X8_B(W) (((W) >> IW_F3X8_B_LSB) & IW_F3X8_B_UNSHIFTED_MASK) +#define SET_IW_F3X8_B(V) (((V) & IW_F3X8_B_UNSHIFTED_MASK) << IW_F3X8_B_LSB) + +#define IW_F3X8_C_LSB 16 +#define IW_F3X8_C_SIZE 5 +#define IW_F3X8_C_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F3X8_C_SIZE)) +#define IW_F3X8_C_SHIFTED_MASK (IW_F3X8_C_UNSHIFTED_MASK << IW_F3X8_C_LSB) +#define GET_IW_F3X8_C(W) (((W) >> IW_F3X8_C_LSB) & IW_F3X8_C_UNSHIFTED_MASK) +#define SET_IW_F3X8_C(V) (((V) & IW_F3X8_C_UNSHIFTED_MASK) << IW_F3X8_C_LSB) + +#define IW_F3X8_READA_LSB 21 +#define IW_F3X8_READA_SIZE 1 +#define IW_F3X8_READA_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F3X8_READA_SIZE)) +#define IW_F3X8_READA_SHIFTED_MASK (IW_F3X8_READA_UNSHIFTED_MASK << IW_F3X8_READA_LSB) +#define GET_IW_F3X8_READA(W) (((W) >> IW_F3X8_READA_LSB) & IW_F3X8_READA_UNSHIFTED_MASK) +#define SET_IW_F3X8_READA(V) (((V) & IW_F3X8_READA_UNSHIFTED_MASK) << IW_F3X8_READA_LSB) + +#define IW_F3X8_READB_LSB 22 +#define IW_F3X8_READB_SIZE 1 +#define IW_F3X8_READB_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F3X8_READB_SIZE)) +#define IW_F3X8_READB_SHIFTED_MASK (IW_F3X8_READB_UNSHIFTED_MASK << IW_F3X8_READB_LSB) +#define GET_IW_F3X8_READB(W) (((W) >> IW_F3X8_READB_LSB) & IW_F3X8_READB_UNSHIFTED_MASK) +#define SET_IW_F3X8_READB(V) (((V) & IW_F3X8_READB_UNSHIFTED_MASK) << IW_F3X8_READB_LSB) + +#define IW_F3X8_READC_LSB 23 +#define IW_F3X8_READC_SIZE 1 +#define IW_F3X8_READC_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F3X8_READC_SIZE)) +#define IW_F3X8_READC_SHIFTED_MASK (IW_F3X8_READC_UNSHIFTED_MASK << IW_F3X8_READC_LSB) +#define GET_IW_F3X8_READC(W) (((W) >> IW_F3X8_READC_LSB) & IW_F3X8_READC_UNSHIFTED_MASK) +#define SET_IW_F3X8_READC(V) (((V) & IW_F3X8_READC_UNSHIFTED_MASK) << IW_F3X8_READC_LSB) + +#define IW_F3X8_N_LSB 24 +#define IW_F3X8_N_SIZE 8 +#define IW_F3X8_N_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F3X8_N_SIZE)) +#define IW_F3X8_N_SHIFTED_MASK (IW_F3X8_N_UNSHIFTED_MASK << IW_F3X8_N_LSB) +#define GET_IW_F3X8_N(W) (((W) >> IW_F3X8_N_LSB) & IW_F3X8_N_UNSHIFTED_MASK) +#define SET_IW_F3X8_N(V) (((V) & IW_F3X8_N_UNSHIFTED_MASK) << IW_F3X8_N_LSB) + +/* 16-bit R2 fields. 
*/ + +#define IW_I10_IMM10_LSB 6 +#define IW_I10_IMM10_SIZE 10 +#define IW_I10_IMM10_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_I10_IMM10_SIZE)) +#define IW_I10_IMM10_SHIFTED_MASK (IW_I10_IMM10_UNSHIFTED_MASK << IW_I10_IMM10_LSB) +#define GET_IW_I10_IMM10(W) (((W) >> IW_I10_IMM10_LSB) & IW_I10_IMM10_UNSHIFTED_MASK) +#define SET_IW_I10_IMM10(V) (((V) & IW_I10_IMM10_UNSHIFTED_MASK) << IW_I10_IMM10_LSB) + +#define IW_T1I7_A3_LSB 6 +#define IW_T1I7_A3_SIZE 3 +#define IW_T1I7_A3_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T1I7_A3_SIZE)) +#define IW_T1I7_A3_SHIFTED_MASK (IW_T1I7_A3_UNSHIFTED_MASK << IW_T1I7_A3_LSB) +#define GET_IW_T1I7_A3(W) (((W) >> IW_T1I7_A3_LSB) & IW_T1I7_A3_UNSHIFTED_MASK) +#define SET_IW_T1I7_A3(V) (((V) & IW_T1I7_A3_UNSHIFTED_MASK) << IW_T1I7_A3_LSB) + +#define IW_T1I7_IMM7_LSB 9 +#define IW_T1I7_IMM7_SIZE 7 +#define IW_T1I7_IMM7_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T1I7_IMM7_SIZE)) +#define IW_T1I7_IMM7_SHIFTED_MASK (IW_T1I7_IMM7_UNSHIFTED_MASK << IW_T1I7_IMM7_LSB) +#define GET_IW_T1I7_IMM7(W) (((W) >> IW_T1I7_IMM7_LSB) & IW_T1I7_IMM7_UNSHIFTED_MASK) +#define SET_IW_T1I7_IMM7(V) (((V) & IW_T1I7_IMM7_UNSHIFTED_MASK) << IW_T1I7_IMM7_LSB) + +#define IW_T2I4_A3_LSB 6 +#define IW_T2I4_A3_SIZE 3 +#define IW_T2I4_A3_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T2I4_A3_SIZE)) +#define IW_T2I4_A3_SHIFTED_MASK (IW_T2I4_A3_UNSHIFTED_MASK << IW_T2I4_A3_LSB) +#define GET_IW_T2I4_A3(W) (((W) >> IW_T2I4_A3_LSB) & IW_T2I4_A3_UNSHIFTED_MASK) +#define SET_IW_T2I4_A3(V) (((V) & IW_T2I4_A3_UNSHIFTED_MASK) << IW_T2I4_A3_LSB) + +#define IW_T2I4_B3_LSB 9 +#define IW_T2I4_B3_SIZE 3 +#define IW_T2I4_B3_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T2I4_B3_SIZE)) +#define IW_T2I4_B3_SHIFTED_MASK (IW_T2I4_B3_UNSHIFTED_MASK << IW_T2I4_B3_LSB) +#define GET_IW_T2I4_B3(W) (((W) >> IW_T2I4_B3_LSB) & IW_T2I4_B3_UNSHIFTED_MASK) +#define SET_IW_T2I4_B3(V) (((V) & IW_T2I4_B3_UNSHIFTED_MASK) << IW_T2I4_B3_LSB) + +#define IW_T2I4_IMM4_LSB 12 +#define IW_T2I4_IMM4_SIZE 4 +#define IW_T2I4_IMM4_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T2I4_IMM4_SIZE)) +#define IW_T2I4_IMM4_SHIFTED_MASK (IW_T2I4_IMM4_UNSHIFTED_MASK << IW_T2I4_IMM4_LSB) +#define GET_IW_T2I4_IMM4(W) (((W) >> IW_T2I4_IMM4_LSB) & IW_T2I4_IMM4_UNSHIFTED_MASK) +#define SET_IW_T2I4_IMM4(V) (((V) & IW_T2I4_IMM4_UNSHIFTED_MASK) << IW_T2I4_IMM4_LSB) + +#define IW_T1X1I6_A3_LSB 6 +#define IW_T1X1I6_A3_SIZE 3 +#define IW_T1X1I6_A3_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T1X1I6_A3_SIZE)) +#define IW_T1X1I6_A3_SHIFTED_MASK (IW_T1X1I6_A3_UNSHIFTED_MASK << IW_T1X1I6_A3_LSB) +#define GET_IW_T1X1I6_A3(W) (((W) >> IW_T1X1I6_A3_LSB) & IW_T1X1I6_A3_UNSHIFTED_MASK) +#define SET_IW_T1X1I6_A3(V) (((V) & IW_T1X1I6_A3_UNSHIFTED_MASK) << IW_T1X1I6_A3_LSB) + +#define IW_T1X1I6_IMM6_LSB 9 +#define IW_T1X1I6_IMM6_SIZE 6 +#define IW_T1X1I6_IMM6_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T1X1I6_IMM6_SIZE)) +#define IW_T1X1I6_IMM6_SHIFTED_MASK (IW_T1X1I6_IMM6_UNSHIFTED_MASK << IW_T1X1I6_IMM6_LSB) +#define GET_IW_T1X1I6_IMM6(W) (((W) >> IW_T1X1I6_IMM6_LSB) & IW_T1X1I6_IMM6_UNSHIFTED_MASK) +#define SET_IW_T1X1I6_IMM6(V) (((V) & IW_T1X1I6_IMM6_UNSHIFTED_MASK) << IW_T1X1I6_IMM6_LSB) + +#define IW_T1X1I6_X_LSB 15 +#define IW_T1X1I6_X_SIZE 1 +#define IW_T1X1I6_X_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T1X1I6_X_SIZE)) +#define IW_T1X1I6_X_SHIFTED_MASK (IW_T1X1I6_X_UNSHIFTED_MASK << IW_T1X1I6_X_LSB) +#define GET_IW_T1X1I6_X(W) (((W) >> IW_T1X1I6_X_LSB) & IW_T1X1I6_X_UNSHIFTED_MASK) +#define SET_IW_T1X1I6_X(V) (((V) & IW_T1X1I6_X_UNSHIFTED_MASK) << IW_T1X1I6_X_LSB) + +#define IW_X1I7_IMM7_LSB 6 +#define 
IW_X1I7_IMM7_SIZE 7 +#define IW_X1I7_IMM7_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_X1I7_IMM7_SIZE)) +#define IW_X1I7_IMM7_SHIFTED_MASK (IW_X1I7_IMM7_UNSHIFTED_MASK << IW_X1I7_IMM7_LSB) +#define GET_IW_X1I7_IMM7(W) (((W) >> IW_X1I7_IMM7_LSB) & IW_X1I7_IMM7_UNSHIFTED_MASK) +#define SET_IW_X1I7_IMM7(V) (((V) & IW_X1I7_IMM7_UNSHIFTED_MASK) << IW_X1I7_IMM7_LSB) + +#define IW_X1I7_RSV_LSB 13 +#define IW_X1I7_RSV_SIZE 2 +#define IW_X1I7_RSV_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_X1I7_RSV_SIZE)) +#define IW_X1I7_RSV_SHIFTED_MASK (IW_X1I7_RSV_UNSHIFTED_MASK << IW_X1I7_RSV_LSB) +#define GET_IW_X1I7_RSV(W) (((W) >> IW_X1I7_RSV_LSB) & IW_X1I7_RSV_UNSHIFTED_MASK) +#define SET_IW_X1I7_RSV(V) (((V) & IW_X1I7_RSV_UNSHIFTED_MASK) << IW_X1I7_RSV_LSB) + +#define IW_X1I7_X_LSB 15 +#define IW_X1I7_X_SIZE 1 +#define IW_X1I7_X_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_X1I7_X_SIZE)) +#define IW_X1I7_X_SHIFTED_MASK (IW_X1I7_X_UNSHIFTED_MASK << IW_X1I7_X_LSB) +#define GET_IW_X1I7_X(W) (((W) >> IW_X1I7_X_LSB) & IW_X1I7_X_UNSHIFTED_MASK) +#define SET_IW_X1I7_X(V) (((V) & IW_X1I7_X_UNSHIFTED_MASK) << IW_X1I7_X_LSB) + +#define IW_L5I4X1_IMM4_LSB 6 +#define IW_L5I4X1_IMM4_SIZE 4 +#define IW_L5I4X1_IMM4_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_L5I4X1_IMM4_SIZE)) +#define IW_L5I4X1_IMM4_SHIFTED_MASK (IW_L5I4X1_IMM4_UNSHIFTED_MASK << IW_L5I4X1_IMM4_LSB) +#define GET_IW_L5I4X1_IMM4(W) (((W) >> IW_L5I4X1_IMM4_LSB) & IW_L5I4X1_IMM4_UNSHIFTED_MASK) +#define SET_IW_L5I4X1_IMM4(V) (((V) & IW_L5I4X1_IMM4_UNSHIFTED_MASK) << IW_L5I4X1_IMM4_LSB) + +#define IW_L5I4X1_REGRANGE_LSB 10 +#define IW_L5I4X1_REGRANGE_SIZE 3 +#define IW_L5I4X1_REGRANGE_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_L5I4X1_REGRANGE_SIZE)) +#define IW_L5I4X1_REGRANGE_SHIFTED_MASK (IW_L5I4X1_REGRANGE_UNSHIFTED_MASK << IW_L5I4X1_REGRANGE_LSB) +#define GET_IW_L5I4X1_REGRANGE(W) (((W) >> IW_L5I4X1_REGRANGE_LSB) & IW_L5I4X1_REGRANGE_UNSHIFTED_MASK) +#define SET_IW_L5I4X1_REGRANGE(V) (((V) & IW_L5I4X1_REGRANGE_UNSHIFTED_MASK) << IW_L5I4X1_REGRANGE_LSB) + +#define IW_L5I4X1_FP_LSB 13 +#define IW_L5I4X1_FP_SIZE 1 +#define IW_L5I4X1_FP_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_L5I4X1_FP_SIZE)) +#define IW_L5I4X1_FP_SHIFTED_MASK (IW_L5I4X1_FP_UNSHIFTED_MASK << IW_L5I4X1_FP_LSB) +#define GET_IW_L5I4X1_FP(W) (((W) >> IW_L5I4X1_FP_LSB) & IW_L5I4X1_FP_UNSHIFTED_MASK) +#define SET_IW_L5I4X1_FP(V) (((V) & IW_L5I4X1_FP_UNSHIFTED_MASK) << IW_L5I4X1_FP_LSB) + +#define IW_L5I4X1_CS_LSB 14 +#define IW_L5I4X1_CS_SIZE 1 +#define IW_L5I4X1_CS_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_L5I4X1_CS_SIZE)) +#define IW_L5I4X1_CS_SHIFTED_MASK (IW_L5I4X1_CS_UNSHIFTED_MASK << IW_L5I4X1_CS_LSB) +#define GET_IW_L5I4X1_CS(W) (((W) >> IW_L5I4X1_CS_LSB) & IW_L5I4X1_CS_UNSHIFTED_MASK) +#define SET_IW_L5I4X1_CS(V) (((V) & IW_L5I4X1_CS_UNSHIFTED_MASK) << IW_L5I4X1_CS_LSB) + +#define IW_L5I4X1_X_LSB 15 +#define IW_L5I4X1_X_SIZE 1 +#define IW_L5I4X1_X_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_L5I4X1_X_SIZE)) +#define IW_L5I4X1_X_SHIFTED_MASK (IW_L5I4X1_X_UNSHIFTED_MASK << IW_L5I4X1_X_LSB) +#define GET_IW_L5I4X1_X(W) (((W) >> IW_L5I4X1_X_LSB) & IW_L5I4X1_X_UNSHIFTED_MASK) +#define SET_IW_L5I4X1_X(V) (((V) & IW_L5I4X1_X_UNSHIFTED_MASK) << IW_L5I4X1_X_LSB) + +#define IW_T2X1L3_A3_LSB 6 +#define IW_T2X1L3_A3_SIZE 3 +#define IW_T2X1L3_A3_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T2X1L3_A3_SIZE)) +#define IW_T2X1L3_A3_SHIFTED_MASK (IW_T2X1L3_A3_UNSHIFTED_MASK << IW_T2X1L3_A3_LSB) +#define GET_IW_T2X1L3_A3(W) (((W) >> IW_T2X1L3_A3_LSB) & IW_T2X1L3_A3_UNSHIFTED_MASK) +#define SET_IW_T2X1L3_A3(V) (((V) & 
IW_T2X1L3_A3_UNSHIFTED_MASK) << IW_T2X1L3_A3_LSB) + +#define IW_T2X1L3_B3_LSB 9 +#define IW_T2X1L3_B3_SIZE 3 +#define IW_T2X1L3_B3_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T2X1L3_B3_SIZE)) +#define IW_T2X1L3_B3_SHIFTED_MASK (IW_T2X1L3_B3_UNSHIFTED_MASK << IW_T2X1L3_B3_LSB) +#define GET_IW_T2X1L3_B3(W) (((W) >> IW_T2X1L3_B3_LSB) & IW_T2X1L3_B3_UNSHIFTED_MASK) +#define SET_IW_T2X1L3_B3(V) (((V) & IW_T2X1L3_B3_UNSHIFTED_MASK) << IW_T2X1L3_B3_LSB) + +#define IW_T2X1L3_SHAMT_LSB 12 +#define IW_T2X1L3_SHAMT_SIZE 3 +#define IW_T2X1L3_SHAMT_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T2X1L3_SHAMT_SIZE)) +#define IW_T2X1L3_SHAMT_SHIFTED_MASK (IW_T2X1L3_SHAMT_UNSHIFTED_MASK << IW_T2X1L3_SHAMT_LSB) +#define GET_IW_T2X1L3_SHAMT(W) (((W) >> IW_T2X1L3_SHAMT_LSB) & IW_T2X1L3_SHAMT_UNSHIFTED_MASK) +#define SET_IW_T2X1L3_SHAMT(V) (((V) & IW_T2X1L3_SHAMT_UNSHIFTED_MASK) << IW_T2X1L3_SHAMT_LSB) + +#define IW_T2X1L3_X_LSB 15 +#define IW_T2X1L3_X_SIZE 1 +#define IW_T2X1L3_X_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T2X1L3_X_SIZE)) +#define IW_T2X1L3_X_SHIFTED_MASK (IW_T2X1L3_X_UNSHIFTED_MASK << IW_T2X1L3_X_LSB) +#define GET_IW_T2X1L3_X(W) (((W) >> IW_T2X1L3_X_LSB) & IW_T2X1L3_X_UNSHIFTED_MASK) +#define SET_IW_T2X1L3_X(V) (((V) & IW_T2X1L3_X_UNSHIFTED_MASK) << IW_T2X1L3_X_LSB) + +#define IW_T2X1I3_A3_LSB 6 +#define IW_T2X1I3_A3_SIZE 3 +#define IW_T2X1I3_A3_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T2X1I3_A3_SIZE)) +#define IW_T2X1I3_A3_SHIFTED_MASK (IW_T2X1I3_A3_UNSHIFTED_MASK << IW_T2X1I3_A3_LSB) +#define GET_IW_T2X1I3_A3(W) (((W) >> IW_T2X1I3_A3_LSB) & IW_T2X1I3_A3_UNSHIFTED_MASK) +#define SET_IW_T2X1I3_A3(V) (((V) & IW_T2X1I3_A3_UNSHIFTED_MASK) << IW_T2X1I3_A3_LSB) + +#define IW_T2X1I3_B3_LSB 9 +#define IW_T2X1I3_B3_SIZE 3 +#define IW_T2X1I3_B3_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T2X1I3_B3_SIZE)) +#define IW_T2X1I3_B3_SHIFTED_MASK (IW_T2X1I3_B3_UNSHIFTED_MASK << IW_T2X1I3_B3_LSB) +#define GET_IW_T2X1I3_B3(W) (((W) >> IW_T2X1I3_B3_LSB) & IW_T2X1I3_B3_UNSHIFTED_MASK) +#define SET_IW_T2X1I3_B3(V) (((V) & IW_T2X1I3_B3_UNSHIFTED_MASK) << IW_T2X1I3_B3_LSB) + +#define IW_T2X1I3_IMM3_LSB 12 +#define IW_T2X1I3_IMM3_SIZE 3 +#define IW_T2X1I3_IMM3_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T2X1I3_IMM3_SIZE)) +#define IW_T2X1I3_IMM3_SHIFTED_MASK (IW_T2X1I3_IMM3_UNSHIFTED_MASK << IW_T2X1I3_IMM3_LSB) +#define GET_IW_T2X1I3_IMM3(W) (((W) >> IW_T2X1I3_IMM3_LSB) & IW_T2X1I3_IMM3_UNSHIFTED_MASK) +#define SET_IW_T2X1I3_IMM3(V) (((V) & IW_T2X1I3_IMM3_UNSHIFTED_MASK) << IW_T2X1I3_IMM3_LSB) + +#define IW_T2X1I3_X_LSB 15 +#define IW_T2X1I3_X_SIZE 1 +#define IW_T2X1I3_X_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T2X1I3_X_SIZE)) +#define IW_T2X1I3_X_SHIFTED_MASK (IW_T2X1I3_X_UNSHIFTED_MASK << IW_T2X1I3_X_LSB) +#define GET_IW_T2X1I3_X(W) (((W) >> IW_T2X1I3_X_LSB) & IW_T2X1I3_X_UNSHIFTED_MASK) +#define SET_IW_T2X1I3_X(V) (((V) & IW_T2X1I3_X_UNSHIFTED_MASK) << IW_T2X1I3_X_LSB) + +#define IW_T3X1_A3_LSB 6 +#define IW_T3X1_A3_SIZE 3 +#define IW_T3X1_A3_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T3X1_A3_SIZE)) +#define IW_T3X1_A3_SHIFTED_MASK (IW_T3X1_A3_UNSHIFTED_MASK << IW_T3X1_A3_LSB) +#define GET_IW_T3X1_A3(W) (((W) >> IW_T3X1_A3_LSB) & IW_T3X1_A3_UNSHIFTED_MASK) +#define SET_IW_T3X1_A3(V) (((V) & IW_T3X1_A3_UNSHIFTED_MASK) << IW_T3X1_A3_LSB) + +#define IW_T3X1_B3_LSB 9 +#define IW_T3X1_B3_SIZE 3 +#define IW_T3X1_B3_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T3X1_B3_SIZE)) +#define IW_T3X1_B3_SHIFTED_MASK (IW_T3X1_B3_UNSHIFTED_MASK << IW_T3X1_B3_LSB) +#define GET_IW_T3X1_B3(W) (((W) >> IW_T3X1_B3_LSB) & IW_T3X1_B3_UNSHIFTED_MASK) +#define 
SET_IW_T3X1_B3(V) (((V) & IW_T3X1_B3_UNSHIFTED_MASK) << IW_T3X1_B3_LSB) + +#define IW_T3X1_C3_LSB 12 +#define IW_T3X1_C3_SIZE 3 +#define IW_T3X1_C3_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T3X1_C3_SIZE)) +#define IW_T3X1_C3_SHIFTED_MASK (IW_T3X1_C3_UNSHIFTED_MASK << IW_T3X1_C3_LSB) +#define GET_IW_T3X1_C3(W) (((W) >> IW_T3X1_C3_LSB) & IW_T3X1_C3_UNSHIFTED_MASK) +#define SET_IW_T3X1_C3(V) (((V) & IW_T3X1_C3_UNSHIFTED_MASK) << IW_T3X1_C3_LSB) + +#define IW_T3X1_X_LSB 15 +#define IW_T3X1_X_SIZE 1 +#define IW_T3X1_X_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T3X1_X_SIZE)) +#define IW_T3X1_X_SHIFTED_MASK (IW_T3X1_X_UNSHIFTED_MASK << IW_T3X1_X_LSB) +#define GET_IW_T3X1_X(W) (((W) >> IW_T3X1_X_LSB) & IW_T3X1_X_UNSHIFTED_MASK) +#define SET_IW_T3X1_X(V) (((V) & IW_T3X1_X_UNSHIFTED_MASK) << IW_T3X1_X_LSB) + +/* The X field for all three R.N-class instruction formats is represented + here as 4 bits, including the bits defined as constant 0 or 1 that + determine which of the formats T2X3, F1X1, or X2L5 it is. */ +#define IW_R_N_X_LSB 12 +#define IW_R_N_X_SIZE 4 +#define IW_R_N_X_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_R_N_X_SIZE)) +#define IW_R_N_X_SHIFTED_MASK (IW_R_N_X_UNSHIFTED_MASK << IW_R_N_X_LSB) +#define GET_IW_R_N_X(W) (((W) >> IW_R_N_X_LSB) & IW_R_N_X_UNSHIFTED_MASK) +#define SET_IW_R_N_X(V) (((V) & IW_R_N_X_UNSHIFTED_MASK) << IW_R_N_X_LSB) + +#define IW_T2X3_A3_LSB 6 +#define IW_T2X3_A3_SIZE 3 +#define IW_T2X3_A3_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T2X3_A3_SIZE)) +#define IW_T2X3_A3_SHIFTED_MASK (IW_T2X3_A3_UNSHIFTED_MASK << IW_T2X3_A3_LSB) +#define GET_IW_T2X3_A3(W) (((W) >> IW_T2X3_A3_LSB) & IW_T2X3_A3_UNSHIFTED_MASK) +#define SET_IW_T2X3_A3(V) (((V) & IW_T2X3_A3_UNSHIFTED_MASK) << IW_T2X3_A3_LSB) + +#define IW_T2X3_B3_LSB 9 +#define IW_T2X3_B3_SIZE 3 +#define IW_T2X3_B3_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T2X3_B3_SIZE)) +#define IW_T2X3_B3_SHIFTED_MASK (IW_T2X3_B3_UNSHIFTED_MASK << IW_T2X3_B3_LSB) +#define GET_IW_T2X3_B3(W) (((W) >> IW_T2X3_B3_LSB) & IW_T2X3_B3_UNSHIFTED_MASK) +#define SET_IW_T2X3_B3(V) (((V) & IW_T2X3_B3_UNSHIFTED_MASK) << IW_T2X3_B3_LSB) + +#define IW_F1X1_A_LSB 6 +#define IW_F1X1_A_SIZE 5 +#define IW_F1X1_A_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F1X1_A_SIZE)) +#define IW_F1X1_A_SHIFTED_MASK (IW_F1X1_A_UNSHIFTED_MASK << IW_F1X1_A_LSB) +#define GET_IW_F1X1_A(W) (((W) >> IW_F1X1_A_LSB) & IW_F1X1_A_UNSHIFTED_MASK) +#define SET_IW_F1X1_A(V) (((V) & IW_F1X1_A_UNSHIFTED_MASK) << IW_F1X1_A_LSB) + +#define IW_F1X1_RSV_LSB 11 +#define IW_F1X1_RSV_SIZE 1 +#define IW_F1X1_RSV_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F1X1_RSV_SIZE)) +#define IW_F1X1_RSV_SHIFTED_MASK (IW_F1X1_RSV_UNSHIFTED_MASK << IW_F1X1_RSV_LSB) +#define GET_IW_F1X1_RSV(W) (((W) >> IW_F1X1_RSV_LSB) & IW_F1X1_RSV_UNSHIFTED_MASK) +#define SET_IW_F1X1_RSV(V) (((V) & IW_F1X1_RSV_UNSHIFTED_MASK) << IW_F1X1_RSV_LSB) + +#define IW_X2L5_IMM5_LSB 6 +#define IW_X2L5_IMM5_SIZE 5 +#define IW_X2L5_IMM5_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_X2L5_IMM5_SIZE)) +#define IW_X2L5_IMM5_SHIFTED_MASK (IW_X2L5_IMM5_UNSHIFTED_MASK << IW_X2L5_IMM5_LSB) +#define GET_IW_X2L5_IMM5(W) (((W) >> IW_X2L5_IMM5_LSB) & IW_X2L5_IMM5_UNSHIFTED_MASK) +#define SET_IW_X2L5_IMM5(V) (((V) & IW_X2L5_IMM5_UNSHIFTED_MASK) << IW_X2L5_IMM5_LSB) + +#define IW_X2L5_RSV_LSB 11 +#define IW_X2L5_RSV_SIZE 1 +#define IW_X2L5_RSV_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_X2L5_RSV_SIZE)) +#define IW_X2L5_RSV_SHIFTED_MASK (IW_X2L5_RSV_UNSHIFTED_MASK << IW_X2L5_RSV_LSB) +#define GET_IW_X2L5_RSV(W) (((W) >> IW_X2L5_RSV_LSB) & IW_X2L5_RSV_UNSHIFTED_MASK) +#define 
SET_IW_X2L5_RSV(V) (((V) & IW_X2L5_RSV_UNSHIFTED_MASK) << IW_X2L5_RSV_LSB) + +#define IW_F1I5_IMM5_LSB 6 +#define IW_F1I5_IMM5_SIZE 5 +#define IW_F1I5_IMM5_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F1I5_IMM5_SIZE)) +#define IW_F1I5_IMM5_SHIFTED_MASK (IW_F1I5_IMM5_UNSHIFTED_MASK << IW_F1I5_IMM5_LSB) +#define GET_IW_F1I5_IMM5(W) (((W) >> IW_F1I5_IMM5_LSB) & IW_F1I5_IMM5_UNSHIFTED_MASK) +#define SET_IW_F1I5_IMM5(V) (((V) & IW_F1I5_IMM5_UNSHIFTED_MASK) << IW_F1I5_IMM5_LSB) + +#define IW_F1I5_B_LSB 11 +#define IW_F1I5_B_SIZE 5 +#define IW_F1I5_B_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F1I5_B_SIZE)) +#define IW_F1I5_B_SHIFTED_MASK (IW_F1I5_B_UNSHIFTED_MASK << IW_F1I5_B_LSB) +#define GET_IW_F1I5_B(W) (((W) >> IW_F1I5_B_LSB) & IW_F1I5_B_UNSHIFTED_MASK) +#define SET_IW_F1I5_B(V) (((V) & IW_F1I5_B_UNSHIFTED_MASK) << IW_F1I5_B_LSB) + +#define IW_F2_A_LSB 6 +#define IW_F2_A_SIZE 5 +#define IW_F2_A_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F2_A_SIZE)) +#define IW_F2_A_SHIFTED_MASK (IW_F2_A_UNSHIFTED_MASK << IW_F2_A_LSB) +#define GET_IW_F2_A(W) (((W) >> IW_F2_A_LSB) & IW_F2_A_UNSHIFTED_MASK) +#define SET_IW_F2_A(V) (((V) & IW_F2_A_UNSHIFTED_MASK) << IW_F2_A_LSB) + +#define IW_F2_B_LSB 11 +#define IW_F2_B_SIZE 5 +#define IW_F2_B_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F2_B_SIZE)) +#define IW_F2_B_SHIFTED_MASK (IW_F2_B_UNSHIFTED_MASK << IW_F2_B_LSB) +#define GET_IW_F2_B(W) (((W) >> IW_F2_B_LSB) & IW_F2_B_UNSHIFTED_MASK) +#define SET_IW_F2_B(V) (((V) & IW_F2_B_UNSHIFTED_MASK) << IW_F2_B_LSB) + +/* R2 opcodes. */ +#define R2_OP_CALL 0 +#define R2_OP_AS_N 1 +#define R2_OP_BR 2 +#define R2_OP_BR_N 3 +#define R2_OP_ADDI 4 +#define R2_OP_LDBU_N 5 +#define R2_OP_LDBU 6 +#define R2_OP_LDB 7 +#define R2_OP_JMPI 8 +#define R2_OP_R_N 9 +#define R2_OP_ANDI_N 11 +#define R2_OP_ANDI 12 +#define R2_OP_LDHU_N 13 +#define R2_OP_LDHU 14 +#define R2_OP_LDH 15 +#define R2_OP_ASI_N 17 +#define R2_OP_BGE 18 +#define R2_OP_LDWSP_N 19 +#define R2_OP_ORI 20 +#define R2_OP_LDW_N 21 +#define R2_OP_CMPGEI 22 +#define R2_OP_LDW 23 +#define R2_OP_SHI_N 25 +#define R2_OP_BLT 26 +#define R2_OP_MOVI_N 27 +#define R2_OP_XORI 28 +#define R2_OP_STZ_N 29 +#define R2_OP_CMPLTI 30 +#define R2_OP_ANDCI 31 +#define R2_OP_OPX 32 +#define R2_OP_PP_N 33 +#define R2_OP_BNE 34 +#define R2_OP_BNEZ_N 35 +#define R2_OP_MULI 36 +#define R2_OP_STB_N 37 +#define R2_OP_CMPNEI 38 +#define R2_OP_STB 39 +#define R2_OP_I12 40 +#define R2_OP_SPI_N 41 +#define R2_OP_BEQ 42 +#define R2_OP_BEQZ_N 43 +#define R2_OP_ANDHI 44 +#define R2_OP_STH_N 45 +#define R2_OP_CMPEQI 46 +#define R2_OP_STH 47 +#define R2_OP_CUSTOM 48 +#define R2_OP_BGEU 50 +#define R2_OP_STWSP_N 51 +#define R2_OP_ORHI 52 +#define R2_OP_STW_N 53 +#define R2_OP_CMPGEUI 54 +#define R2_OP_STW 55 +#define R2_OP_BLTU 58 +#define R2_OP_MOV_N 59 +#define R2_OP_XORHI 60 +#define R2_OP_SPADDI_N 61 +#define R2_OP_CMPLTUI 62 +#define R2_OP_ANDCHI 63 + +#define R2_OPX_WRPIE 0 +#define R2_OPX_ERET 1 +#define R2_OPX_ROLI 2 +#define R2_OPX_ROL 3 +#define R2_OPX_FLUSHP 4 +#define R2_OPX_RET 5 +#define R2_OPX_NOR 6 +#define R2_OPX_MULXUU 7 +#define R2_OPX_ENI 8 +#define R2_OPX_BRET 9 +#define R2_OPX_ROR 11 +#define R2_OPX_FLUSHI 12 +#define R2_OPX_JMP 13 +#define R2_OPX_AND 14 +#define R2_OPX_CMPGE 16 +#define R2_OPX_SLLI 18 +#define R2_OPX_SLL 19 +#define R2_OPX_WRPRS 20 +#define R2_OPX_OR 22 +#define R2_OPX_MULXSU 23 +#define R2_OPX_CMPLT 24 +#define R2_OPX_SRLI 26 +#define R2_OPX_SRL 27 +#define R2_OPX_NEXTPC 28 +#define R2_OPX_CALLR 29 +#define R2_OPX_XOR 30 +#define R2_OPX_MULXSS 31 +#define R2_OPX_CMPNE 32 
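The R2_OP_* values above are the 6-bit primary opcodes that GET_IW_R2_OP returns (the disassembler hash tables later in this patch key on exactly that), and R2_OP_OPX selects the register-register group whose secondary code lives in the OPX field; the R2_OPX_* values are those secondary codes. A minimal editorial sketch of the first two decode steps follows; it is not part of the patch, and it assumes a GET_IW_OPX_X accessor built with the same GET_/SET_ pattern as the field macros above.

/* Editorial sketch, not part of the patch.  First-level decode of an R2
   instruction word: test the primary opcode, then (for the OPX group)
   the secondary code.  Recognising a specific instruction additionally
   checks its fixed register/immediate fields through the MATCH_/MASK_
   pairs defined further down.  GET_IW_OPX_X is assumed to exist,
   following the same pattern as the accessors above. */
static int r2_is_opx_insn (unsigned int iw, unsigned int opx)
{
  return GET_IW_R2_OP (iw) == R2_OP_OPX   /* register-register group */
         && GET_IW_OPX_X (iw) == opx;     /* e.g. R2_OPX_JMP == 13 */
}

For example, r2_is_opx_insn (iw, R2_OPX_JMP) says only that the word carries the jmp secondary code; the per-instruction mask test still has to rule out nonzero values in the unused fields.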
+#define R2_OPX_INSERT 35 +#define R2_OPX_DIVU 36 +#define R2_OPX_DIV 37 +#define R2_OPX_RDCTL 38 +#define R2_OPX_MUL 39 +#define R2_OPX_CMPEQ 40 +#define R2_OPX_INITI 41 +#define R2_OPX_MERGE 43 +#define R2_OPX_HBREAK 44 +#define R2_OPX_TRAP 45 +#define R2_OPX_WRCTL 46 +#define R2_OPX_CMPGEU 48 +#define R2_OPX_ADD 49 +#define R2_OPX_EXTRACT 51 +#define R2_OPX_BREAK 52 +#define R2_OPX_LDEX 53 +#define R2_OPX_SYNC 54 +#define R2_OPX_LDSEX 55 +#define R2_OPX_CMPLTU 56 +#define R2_OPX_SUB 57 +#define R2_OPX_SRAI 58 +#define R2_OPX_SRA 59 +#define R2_OPX_STEX 61 +#define R2_OPX_STSEX 63 + +#define R2_I12_LDBIO 0 +#define R2_I12_STBIO 1 +#define R2_I12_LDBUIO 2 +#define R2_I12_DCACHE 3 +#define R2_I12_LDHIO 4 +#define R2_I12_STHIO 5 +#define R2_I12_LDHUIO 6 +#define R2_I12_RDPRS 7 +#define R2_I12_LDWIO 8 +#define R2_I12_STWIO 9 +#define R2_I12_LDWM 12 +#define R2_I12_STWM 13 + +#define R2_DCACHE_INITD 0 +#define R2_DCACHE_INITDA 1 +#define R2_DCACHE_FLUSHD 2 +#define R2_DCACHE_FLUSHDA 3 + +#define R2_AS_N_ADD_N 0 +#define R2_AS_N_SUB_N 1 + +#define R2_R_N_AND_N 0 +#define R2_R_N_OR_N 2 +#define R2_R_N_XOR_N 3 +#define R2_R_N_SLL_N 4 +#define R2_R_N_SRL_N 5 +#define R2_R_N_NOT_N 6 +#define R2_R_N_NEG_N 7 +#define R2_R_N_CALLR_N 8 +#define R2_R_N_JMPR_N 10 +#define R2_R_N_BREAK_N 12 +#define R2_R_N_TRAP_N 13 +#define R2_R_N_RET_N 14 + +#define R2_SPI_N_SPINCI_N 0 +#define R2_SPI_N_SPDECI_N 1 + +#define R2_ASI_N_ADDI_N 0 +#define R2_ASI_N_SUBI_N 1 + +#define R2_SHI_N_SLLI_N 0 +#define R2_SHI_N_SRLI_N 1 + +#define R2_PP_N_POP_N 0 +#define R2_PP_N_PUSH_N 1 + +#define R2_STZ_N_STWZ_N 0 +#define R2_STZ_N_STBZ_N 1 + +/* Convenience macros for R2 encodings. */ + +#define MATCH_R2_OP(NAME) \ + (SET_IW_R2_OP (R2_OP_##NAME)) +#define MASK_R2_OP \ + IW_R2_OP_SHIFTED_MASK + +#define MATCH_R2_OPX0(NAME) \ + (SET_IW_R2_OP (R2_OP_OPX) | SET_IW_OPX_X (R2_OPX_##NAME)) +#define MASK_R2_OPX0 \ + (IW_R2_OP_SHIFTED_MASK | IW_OPX_X_SHIFTED_MASK \ + | IW_F3X6L5_IMM5_SHIFTED_MASK) + +#define MATCH_R2_OPX(NAME, A, B, C) \ + (MATCH_R2_OPX0 (NAME) | SET_IW_F3X6L5_A (A) | SET_IW_F3X6L5_B (B) \ + | SET_IW_F3X6L5_C (C)) +#define MASK_R2_OPX(A, B, C, N) \ + (IW_R2_OP_SHIFTED_MASK | IW_OPX_X_SHIFTED_MASK \ + | (A ? IW_F3X6L5_A_SHIFTED_MASK : 0) \ + | (B ? IW_F3X6L5_B_SHIFTED_MASK : 0) \ + | (C ? IW_F3X6L5_C_SHIFTED_MASK : 0) \ + | (N ? IW_F3X6L5_IMM5_SHIFTED_MASK : 0)) + +#define MATCH_R2_I12(NAME) \ + (SET_IW_R2_OP (R2_OP_I12) | SET_IW_I12_X (R2_I12_##NAME)) +#define MASK_R2_I12 \ + (IW_R2_OP_SHIFTED_MASK | IW_I12_X_SHIFTED_MASK ) + +#define MATCH_R2_DCACHE(NAME) \ + (MATCH_R2_I12(DCACHE) | SET_IW_F1X4I12_X (R2_DCACHE_##NAME)) +#define MASK_R2_DCACHE \ + (MASK_R2_I12 | IW_F1X4I12_X_SHIFTED_MASK) + +#define MATCH_R2_R_N(NAME) \ + (SET_IW_R2_OP (R2_OP_R_N) | SET_IW_R_N_X (R2_R_N_##NAME)) +#define MASK_R2_R_N \ + (IW_R2_OP_SHIFTED_MASK | IW_R_N_X_SHIFTED_MASK ) + +/* Match/mask macros for R2 instructions. 
*/ + +#define MATCH_R2_ADD MATCH_R2_OPX0 (ADD) +#define MASK_R2_ADD MASK_R2_OPX0 +#define MATCH_R2_ADDI MATCH_R2_OP (ADDI) +#define MASK_R2_ADDI MASK_R2_OP +#define MATCH_R2_ADD_N (MATCH_R2_OP (AS_N) | SET_IW_T3X1_X (R2_AS_N_ADD_N)) +#define MASK_R2_ADD_N (MASK_R2_OP | IW_T3X1_X_SHIFTED_MASK) +#define MATCH_R2_ADDI_N (MATCH_R2_OP (ASI_N) | SET_IW_T2X1I3_X (R2_ASI_N_ADDI_N)) +#define MASK_R2_ADDI_N (MASK_R2_OP | IW_T2X1I3_X_SHIFTED_MASK) +#define MATCH_R2_AND MATCH_R2_OPX0 (AND) +#define MASK_R2_AND MASK_R2_OPX0 +#define MATCH_R2_ANDCHI MATCH_R2_OP (ANDCHI) +#define MASK_R2_ANDCHI MASK_R2_OP +#define MATCH_R2_ANDCI MATCH_R2_OP (ANDCI) +#define MASK_R2_ANDCI MASK_R2_OP +#define MATCH_R2_ANDHI MATCH_R2_OP (ANDHI) +#define MASK_R2_ANDHI MASK_R2_OP +#define MATCH_R2_ANDI MATCH_R2_OP (ANDI) +#define MASK_R2_ANDI MASK_R2_OP +#define MATCH_R2_ANDI_N MATCH_R2_OP (ANDI_N) +#define MASK_R2_ANDI_N MASK_R2_OP +#define MATCH_R2_AND_N MATCH_R2_R_N (AND_N) +#define MASK_R2_AND_N MASK_R2_R_N +#define MATCH_R2_BEQ MATCH_R2_OP (BEQ) +#define MASK_R2_BEQ MASK_R2_OP +#define MATCH_R2_BEQZ_N MATCH_R2_OP (BEQZ_N) +#define MASK_R2_BEQZ_N MASK_R2_OP +#define MATCH_R2_BGE MATCH_R2_OP (BGE) +#define MASK_R2_BGE MASK_R2_OP +#define MATCH_R2_BGEU MATCH_R2_OP (BGEU) +#define MASK_R2_BGEU MASK_R2_OP +#define MATCH_R2_BGT MATCH_R2_OP (BLT) +#define MASK_R2_BGT MASK_R2_OP +#define MATCH_R2_BGTU MATCH_R2_OP (BLTU) +#define MASK_R2_BGTU MASK_R2_OP +#define MATCH_R2_BLE MATCH_R2_OP (BGE) +#define MASK_R2_BLE MASK_R2_OP +#define MATCH_R2_BLEU MATCH_R2_OP (BGEU) +#define MASK_R2_BLEU MASK_R2_OP +#define MATCH_R2_BLT MATCH_R2_OP (BLT) +#define MASK_R2_BLT MASK_R2_OP +#define MATCH_R2_BLTU MATCH_R2_OP (BLTU) +#define MASK_R2_BLTU MASK_R2_OP +#define MATCH_R2_BNE MATCH_R2_OP (BNE) +#define MASK_R2_BNE MASK_R2_OP +#define MATCH_R2_BNEZ_N MATCH_R2_OP (BNEZ_N) +#define MASK_R2_BNEZ_N MASK_R2_OP +#define MATCH_R2_BR MATCH_R2_OP (BR) +#define MASK_R2_BR MASK_R2_OP | IW_F2I16_A_SHIFTED_MASK | IW_F2I16_B_SHIFTED_MASK +#define MATCH_R2_BREAK MATCH_R2_OPX (BREAK, 0, 0, 0x1e) +#define MASK_R2_BREAK MASK_R2_OPX (1, 1, 1, 0) +#define MATCH_R2_BREAK_N MATCH_R2_R_N (BREAK_N) +#define MASK_R2_BREAK_N MASK_R2_R_N +#define MATCH_R2_BRET MATCH_R2_OPX (BRET, 0x1e, 0, 0) +#define MASK_R2_BRET MASK_R2_OPX (1, 1, 1, 1) +#define MATCH_R2_BR_N MATCH_R2_OP (BR_N) +#define MASK_R2_BR_N MASK_R2_OP +#define MATCH_R2_CALL MATCH_R2_OP (CALL) +#define MASK_R2_CALL MASK_R2_OP +#define MATCH_R2_CALLR MATCH_R2_OPX (CALLR, 0, 0, 0x1f) +#define MASK_R2_CALLR MASK_R2_OPX (0, 1, 1, 1) +#define MATCH_R2_CALLR_N MATCH_R2_R_N (CALLR_N) +#define MASK_R2_CALLR_N MASK_R2_R_N +#define MATCH_R2_CMPEQ MATCH_R2_OPX0 (CMPEQ) +#define MASK_R2_CMPEQ MASK_R2_OPX0 +#define MATCH_R2_CMPEQI MATCH_R2_OP (CMPEQI) +#define MASK_R2_CMPEQI MASK_R2_OP +#define MATCH_R2_CMPGE MATCH_R2_OPX0 (CMPGE) +#define MASK_R2_CMPGE MASK_R2_OPX0 +#define MATCH_R2_CMPGEI MATCH_R2_OP (CMPGEI) +#define MASK_R2_CMPGEI MASK_R2_OP +#define MATCH_R2_CMPGEU MATCH_R2_OPX0 (CMPGEU) +#define MASK_R2_CMPGEU MASK_R2_OPX0 +#define MATCH_R2_CMPGEUI MATCH_R2_OP (CMPGEUI) +#define MASK_R2_CMPGEUI MASK_R2_OP +#define MATCH_R2_CMPGT MATCH_R2_OPX0 (CMPLT) +#define MASK_R2_CMPGT MASK_R2_OPX0 +#define MATCH_R2_CMPGTI MATCH_R2_OP (CMPGEI) +#define MASK_R2_CMPGTI MASK_R2_OP +#define MATCH_R2_CMPGTU MATCH_R2_OPX0 (CMPLTU) +#define MASK_R2_CMPGTU MASK_R2_OPX0 +#define MATCH_R2_CMPGTUI MATCH_R2_OP (CMPGEUI) +#define MASK_R2_CMPGTUI MASK_R2_OP +#define MATCH_R2_CMPLE MATCH_R2_OPX0 (CMPGE) +#define MASK_R2_CMPLE MASK_R2_OPX0 
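Every MATCH_/MASK_ pair above is used through one idiom: an instruction word carries encoding X exactly when (word & MASK_X) == MATCH_X. Pseudo-instructions such as bgt, ble, cmpgti and cmplei deliberately reuse another mnemonic's encoding (the assembler swaps the operands or adjusts the immediate, and the opcode tables below flag them NIOS2_INSN_MACRO), which is why their MATCH values name a different opcode. A minimal editorial sketch of the test, not part of the patch:

/* Editorial sketch, not part of the patch: the generic recognition test,
   specialised to the R2 "br" encoding defined above (primary opcode
   R2_OP_BR with both register fields required to be zero by its mask). */
static int iw_is_r2_br (unsigned int iw)
{
  return (iw & (MASK_R2_BR)) == MATCH_R2_BR;
}

The extra parentheses around MASK_R2_BR are deliberate: that particular mask macro expands to an unparenthesised OR chain, so the caller has to bracket it before masking.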
+#define MATCH_R2_CMPLEI MATCH_R2_OP (CMPLTI) +#define MASK_R2_CMPLEI MASK_R2_OP +#define MATCH_R2_CMPLEU MATCH_R2_OPX0 (CMPGEU) +#define MASK_R2_CMPLEU MASK_R2_OPX0 +#define MATCH_R2_CMPLEUI MATCH_R2_OP (CMPLTUI) +#define MASK_R2_CMPLEUI MASK_R2_OP +#define MATCH_R2_CMPLT MATCH_R2_OPX0 (CMPLT) +#define MASK_R2_CMPLT MASK_R2_OPX0 +#define MATCH_R2_CMPLTI MATCH_R2_OP (CMPLTI) +#define MASK_R2_CMPLTI MASK_R2_OP +#define MATCH_R2_CMPLTU MATCH_R2_OPX0 (CMPLTU) +#define MASK_R2_CMPLTU MASK_R2_OPX0 +#define MATCH_R2_CMPLTUI MATCH_R2_OP (CMPLTUI) +#define MASK_R2_CMPLTUI MASK_R2_OP +#define MATCH_R2_CMPNE MATCH_R2_OPX0 (CMPNE) +#define MASK_R2_CMPNE MASK_R2_OPX0 +#define MATCH_R2_CMPNEI MATCH_R2_OP (CMPNEI) +#define MASK_R2_CMPNEI MASK_R2_OP +#define MATCH_R2_CUSTOM MATCH_R2_OP (CUSTOM) +#define MASK_R2_CUSTOM MASK_R2_OP +#define MATCH_R2_DIV MATCH_R2_OPX0 (DIV) +#define MASK_R2_DIV MASK_R2_OPX0 +#define MATCH_R2_DIVU MATCH_R2_OPX0 (DIVU) +#define MASK_R2_DIVU MASK_R2_OPX0 +#define MATCH_R2_ENI MATCH_R2_OPX (ENI, 0, 0, 0) +#define MASK_R2_ENI MASK_R2_OPX (1, 1, 1, 0) +#define MATCH_R2_ERET MATCH_R2_OPX (ERET, 0x1d, 0x1e, 0) +#define MASK_R2_ERET MASK_R2_OPX (1, 1, 1, 1) +#define MATCH_R2_EXTRACT MATCH_R2_OPX (EXTRACT, 0, 0, 0) +#define MASK_R2_EXTRACT MASK_R2_OPX (0, 0, 0, 0) +#define MATCH_R2_FLUSHD MATCH_R2_DCACHE (FLUSHD) +#define MASK_R2_FLUSHD MASK_R2_DCACHE +#define MATCH_R2_FLUSHDA MATCH_R2_DCACHE (FLUSHDA) +#define MASK_R2_FLUSHDA MASK_R2_DCACHE +#define MATCH_R2_FLUSHI MATCH_R2_OPX (FLUSHI, 0, 0, 0) +#define MASK_R2_FLUSHI MASK_R2_OPX (0, 1, 1, 1) +#define MATCH_R2_FLUSHP MATCH_R2_OPX (FLUSHP, 0, 0, 0) +#define MASK_R2_FLUSHP MASK_R2_OPX (1, 1, 1, 1) +#define MATCH_R2_INITD MATCH_R2_DCACHE (INITD) +#define MASK_R2_INITD MASK_R2_DCACHE +#define MATCH_R2_INITDA MATCH_R2_DCACHE (INITDA) +#define MASK_R2_INITDA MASK_R2_DCACHE +#define MATCH_R2_INITI MATCH_R2_OPX (INITI, 0, 0, 0) +#define MASK_R2_INITI MASK_R2_OPX (0, 1, 1, 1) +#define MATCH_R2_INSERT MATCH_R2_OPX (INSERT, 0, 0, 0) +#define MASK_R2_INSERT MASK_R2_OPX (0, 0, 0, 0) +#define MATCH_R2_JMP MATCH_R2_OPX (JMP, 0, 0, 0) +#define MASK_R2_JMP MASK_R2_OPX (0, 1, 1, 1) +#define MATCH_R2_JMPI MATCH_R2_OP (JMPI) +#define MASK_R2_JMPI MASK_R2_OP +#define MATCH_R2_JMPR_N MATCH_R2_R_N (JMPR_N) +#define MASK_R2_JMPR_N MASK_R2_R_N +#define MATCH_R2_LDB MATCH_R2_OP (LDB) +#define MASK_R2_LDB MASK_R2_OP +#define MATCH_R2_LDBIO MATCH_R2_I12 (LDBIO) +#define MASK_R2_LDBIO MASK_R2_I12 +#define MATCH_R2_LDBU MATCH_R2_OP (LDBU) +#define MASK_R2_LDBU MASK_R2_OP +#define MATCH_R2_LDBUIO MATCH_R2_I12 (LDBUIO) +#define MASK_R2_LDBUIO MASK_R2_I12 +#define MATCH_R2_LDBU_N MATCH_R2_OP (LDBU_N) +#define MASK_R2_LDBU_N MASK_R2_OP +#define MATCH_R2_LDEX MATCH_R2_OPX (LDEX, 0, 0, 0) +#define MASK_R2_LDEX MASK_R2_OPX (0, 1, 0, 1) +#define MATCH_R2_LDH MATCH_R2_OP (LDH) +#define MASK_R2_LDH MASK_R2_OP +#define MATCH_R2_LDHIO MATCH_R2_I12 (LDHIO) +#define MASK_R2_LDHIO MASK_R2_I12 +#define MATCH_R2_LDHU MATCH_R2_OP (LDHU) +#define MASK_R2_LDHU MASK_R2_OP +#define MATCH_R2_LDHUIO MATCH_R2_I12 (LDHUIO) +#define MASK_R2_LDHUIO MASK_R2_I12 +#define MATCH_R2_LDHU_N MATCH_R2_OP (LDHU_N) +#define MASK_R2_LDHU_N MASK_R2_OP +#define MATCH_R2_LDSEX MATCH_R2_OPX (LDSEX, 0, 0, 0) +#define MASK_R2_LDSEX MASK_R2_OPX (0, 1, 0, 1) +#define MATCH_R2_LDW MATCH_R2_OP (LDW) +#define MASK_R2_LDW MASK_R2_OP +#define MATCH_R2_LDWIO MATCH_R2_I12 (LDWIO) +#define MASK_R2_LDWIO MASK_R2_I12 +#define MATCH_R2_LDWM MATCH_R2_I12 (LDWM) +#define MASK_R2_LDWM MASK_R2_I12 +#define MATCH_R2_LDWSP_N 
MATCH_R2_OP (LDWSP_N) +#define MASK_R2_LDWSP_N MASK_R2_OP +#define MATCH_R2_LDW_N MATCH_R2_OP (LDW_N) +#define MASK_R2_LDW_N MASK_R2_OP +#define MATCH_R2_MERGE MATCH_R2_OPX (MERGE, 0, 0, 0) +#define MASK_R2_MERGE MASK_R2_OPX (0, 0, 0, 0) +#define MATCH_R2_MOV MATCH_R2_OPX (ADD, 0, 0, 0) +#define MASK_R2_MOV MASK_R2_OPX (0, 1, 0, 1) +#define MATCH_R2_MOVHI MATCH_R2_OP (ORHI) | SET_IW_F2I16_A (0) +#define MASK_R2_MOVHI MASK_R2_OP | IW_F2I16_A_SHIFTED_MASK +#define MATCH_R2_MOVI MATCH_R2_OP (ADDI) | SET_IW_F2I16_A (0) +#define MASK_R2_MOVI MASK_R2_OP | IW_F2I16_A_SHIFTED_MASK +#define MATCH_R2_MOVUI MATCH_R2_OP (ORI) | SET_IW_F2I16_A (0) +#define MASK_R2_MOVUI MASK_R2_OP | IW_F2I16_A_SHIFTED_MASK +#define MATCH_R2_MOV_N MATCH_R2_OP (MOV_N) +#define MASK_R2_MOV_N MASK_R2_OP +#define MATCH_R2_MOVI_N MATCH_R2_OP (MOVI_N) +#define MASK_R2_MOVI_N MASK_R2_OP +#define MATCH_R2_MUL MATCH_R2_OPX0 (MUL) +#define MASK_R2_MUL MASK_R2_OPX0 +#define MATCH_R2_MULI MATCH_R2_OP (MULI) +#define MASK_R2_MULI MASK_R2_OP +#define MATCH_R2_MULXSS MATCH_R2_OPX0 (MULXSS) +#define MASK_R2_MULXSS MASK_R2_OPX0 +#define MATCH_R2_MULXSU MATCH_R2_OPX0 (MULXSU) +#define MASK_R2_MULXSU MASK_R2_OPX0 +#define MATCH_R2_MULXUU MATCH_R2_OPX0 (MULXUU) +#define MASK_R2_MULXUU MASK_R2_OPX0 +#define MATCH_R2_NEG_N MATCH_R2_R_N (NEG_N) +#define MASK_R2_NEG_N MASK_R2_R_N +#define MATCH_R2_NEXTPC MATCH_R2_OPX (NEXTPC, 0, 0, 0) +#define MASK_R2_NEXTPC MASK_R2_OPX (1, 1, 0, 1) +#define MATCH_R2_NOP MATCH_R2_OPX (ADD, 0, 0, 0) +#define MASK_R2_NOP MASK_R2_OPX (1, 1, 1, 1) +#define MATCH_R2_NOP_N (MATCH_R2_OP (MOV_N) | SET_IW_F2_A (0) | SET_IW_F2_B (0)) +#define MASK_R2_NOP_N (MASK_R2_OP | IW_F2_A_SHIFTED_MASK | IW_F2_B_SHIFTED_MASK) +#define MATCH_R2_NOR MATCH_R2_OPX0 (NOR) +#define MASK_R2_NOR MASK_R2_OPX0 +#define MATCH_R2_NOT_N MATCH_R2_R_N (NOT_N) +#define MASK_R2_NOT_N MASK_R2_R_N +#define MATCH_R2_OR MATCH_R2_OPX0 (OR) +#define MASK_R2_OR MASK_R2_OPX0 +#define MATCH_R2_OR_N MATCH_R2_R_N (OR_N) +#define MASK_R2_OR_N MASK_R2_R_N +#define MATCH_R2_ORHI MATCH_R2_OP (ORHI) +#define MASK_R2_ORHI MASK_R2_OP +#define MATCH_R2_ORI MATCH_R2_OP (ORI) +#define MASK_R2_ORI MASK_R2_OP +#define MATCH_R2_POP_N (MATCH_R2_OP (PP_N) | SET_IW_L5I4X1_X (R2_PP_N_POP_N)) +#define MASK_R2_POP_N (MASK_R2_OP | IW_L5I4X1_X_SHIFTED_MASK) +#define MATCH_R2_PUSH_N (MATCH_R2_OP (PP_N) | SET_IW_L5I4X1_X (R2_PP_N_PUSH_N)) +#define MASK_R2_PUSH_N (MASK_R2_OP | IW_L5I4X1_X_SHIFTED_MASK) +#define MATCH_R2_RDCTL MATCH_R2_OPX (RDCTL, 0, 0, 0) +#define MASK_R2_RDCTL MASK_R2_OPX (1, 1, 0, 0) +#define MATCH_R2_RDPRS MATCH_R2_I12 (RDPRS) +#define MASK_R2_RDPRS MASK_R2_I12 +#define MATCH_R2_RET MATCH_R2_OPX (RET, 0x1f, 0, 0) +#define MASK_R2_RET MASK_R2_OPX (1, 1, 1, 1) +#define MATCH_R2_RET_N (MATCH_R2_R_N (RET_N) | SET_IW_X2L5_IMM5 (0)) +#define MASK_R2_RET_N (MASK_R2_R_N | IW_X2L5_IMM5_SHIFTED_MASK) +#define MATCH_R2_ROL MATCH_R2_OPX0 (ROL) +#define MASK_R2_ROL MASK_R2_OPX0 +#define MATCH_R2_ROLI MATCH_R2_OPX (ROLI, 0, 0, 0) +#define MASK_R2_ROLI MASK_R2_OPX (0, 1, 0, 0) +#define MATCH_R2_ROR MATCH_R2_OPX0 (ROR) +#define MASK_R2_ROR MASK_R2_OPX0 +#define MATCH_R2_SLL MATCH_R2_OPX0 (SLL) +#define MASK_R2_SLL MASK_R2_OPX0 +#define MATCH_R2_SLLI MATCH_R2_OPX (SLLI, 0, 0, 0) +#define MASK_R2_SLLI MASK_R2_OPX (0, 1, 0, 0) +#define MATCH_R2_SLL_N MATCH_R2_R_N (SLL_N) +#define MASK_R2_SLL_N MASK_R2_R_N +#define MATCH_R2_SLLI_N (MATCH_R2_OP (SHI_N) | SET_IW_T2X1L3_X (R2_SHI_N_SLLI_N)) +#define MASK_R2_SLLI_N (MASK_R2_OP | IW_T2X1L3_X_SHIFTED_MASK) +#define MATCH_R2_SPADDI_N 
MATCH_R2_OP (SPADDI_N) +#define MASK_R2_SPADDI_N MASK_R2_OP +#define MATCH_R2_SPDECI_N (MATCH_R2_OP (SPI_N) | SET_IW_X1I7_X (R2_SPI_N_SPDECI_N)) +#define MASK_R2_SPDECI_N (MASK_R2_OP | IW_X1I7_X_SHIFTED_MASK) +#define MATCH_R2_SPINCI_N (MATCH_R2_OP (SPI_N) | SET_IW_X1I7_X (R2_SPI_N_SPINCI_N)) +#define MASK_R2_SPINCI_N (MASK_R2_OP | IW_X1I7_X_SHIFTED_MASK) +#define MATCH_R2_SRA MATCH_R2_OPX0 (SRA) +#define MASK_R2_SRA MASK_R2_OPX0 +#define MATCH_R2_SRAI MATCH_R2_OPX (SRAI, 0, 0, 0) +#define MASK_R2_SRAI MASK_R2_OPX (0, 1, 0, 0) +#define MATCH_R2_SRL MATCH_R2_OPX0 (SRL) +#define MASK_R2_SRL MASK_R2_OPX0 +#define MATCH_R2_SRLI MATCH_R2_OPX (SRLI, 0, 0, 0) +#define MASK_R2_SRLI MASK_R2_OPX (0, 1, 0, 0) +#define MATCH_R2_SRL_N MATCH_R2_R_N (SRL_N) +#define MASK_R2_SRL_N MASK_R2_R_N +#define MATCH_R2_SRLI_N (MATCH_R2_OP (SHI_N) | SET_IW_T2X1L3_X (R2_SHI_N_SRLI_N)) +#define MASK_R2_SRLI_N (MASK_R2_OP | IW_T2X1L3_X_SHIFTED_MASK) +#define MATCH_R2_STB MATCH_R2_OP (STB) +#define MASK_R2_STB MASK_R2_OP +#define MATCH_R2_STBIO MATCH_R2_I12 (STBIO) +#define MASK_R2_STBIO MASK_R2_I12 +#define MATCH_R2_STB_N MATCH_R2_OP (STB_N) +#define MASK_R2_STB_N MASK_R2_OP +#define MATCH_R2_STBZ_N (MATCH_R2_OP (STZ_N) | SET_IW_T1X1I6_X (R2_STZ_N_STBZ_N)) +#define MASK_R2_STBZ_N (MASK_R2_OP | IW_T1X1I6_X_SHIFTED_MASK) +#define MATCH_R2_STEX MATCH_R2_OPX0 (STEX) +#define MASK_R2_STEX MASK_R2_OPX0 +#define MATCH_R2_STH MATCH_R2_OP (STH) +#define MASK_R2_STH MASK_R2_OP +#define MATCH_R2_STHIO MATCH_R2_I12 (STHIO) +#define MASK_R2_STHIO MASK_R2_I12 +#define MATCH_R2_STH_N MATCH_R2_OP (STH_N) +#define MASK_R2_STH_N MASK_R2_OP +#define MATCH_R2_STSEX MATCH_R2_OPX0 (STSEX) +#define MASK_R2_STSEX MASK_R2_OPX0 +#define MATCH_R2_STW MATCH_R2_OP (STW) +#define MASK_R2_STW MASK_R2_OP +#define MATCH_R2_STWIO MATCH_R2_I12 (STWIO) +#define MASK_R2_STWIO MASK_R2_I12 +#define MATCH_R2_STWM MATCH_R2_I12 (STWM) +#define MASK_R2_STWM MASK_R2_I12 +#define MATCH_R2_STWSP_N MATCH_R2_OP (STWSP_N) +#define MASK_R2_STWSP_N MASK_R2_OP +#define MATCH_R2_STW_N MATCH_R2_OP (STW_N) +#define MASK_R2_STW_N MASK_R2_OP +#define MATCH_R2_STWZ_N MATCH_R2_OP (STZ_N) +#define MASK_R2_STWZ_N MASK_R2_OP +#define MATCH_R2_SUB MATCH_R2_OPX0 (SUB) +#define MASK_R2_SUB MASK_R2_OPX0 +#define MATCH_R2_SUBI MATCH_R2_OP (ADDI) +#define MASK_R2_SUBI MASK_R2_OP +#define MATCH_R2_SUB_N (MATCH_R2_OP (AS_N) | SET_IW_T3X1_X (R2_AS_N_SUB_N)) +#define MASK_R2_SUB_N (MASK_R2_OP | IW_T3X1_X_SHIFTED_MASK) +#define MATCH_R2_SUBI_N (MATCH_R2_OP (ASI_N) | SET_IW_T2X1I3_X (R2_ASI_N_SUBI_N)) +#define MASK_R2_SUBI_N (MASK_R2_OP | IW_T2X1I3_X_SHIFTED_MASK) +#define MATCH_R2_SYNC MATCH_R2_OPX (SYNC, 0, 0, 0) +#define MASK_R2_SYNC MASK_R2_OPX (1, 1, 1, 1) +#define MATCH_R2_TRAP MATCH_R2_OPX (TRAP, 0, 0, 0x1d) +#define MASK_R2_TRAP MASK_R2_OPX (1, 1, 1, 0) +#define MATCH_R2_TRAP_N MATCH_R2_R_N (TRAP_N) +#define MASK_R2_TRAP_N MASK_R2_R_N +#define MATCH_R2_WRCTL MATCH_R2_OPX (WRCTL, 0, 0, 0) +#define MASK_R2_WRCTL MASK_R2_OPX (0, 1, 1, 0) +#define MATCH_R2_WRPIE MATCH_R2_OPX (WRPIE, 0, 0, 0) +#define MASK_R2_WRPIE MASK_R2_OPX (0, 1, 0, 1) +#define MATCH_R2_WRPRS MATCH_R2_OPX (WRPRS, 0, 0, 0) +#define MASK_R2_WRPRS MASK_R2_OPX (0, 1, 0, 1) +#define MATCH_R2_XOR MATCH_R2_OPX0 (XOR) +#define MASK_R2_XOR MASK_R2_OPX0 +#define MATCH_R2_XORHI MATCH_R2_OP (XORHI) +#define MASK_R2_XORHI MASK_R2_OP +#define MATCH_R2_XORI MATCH_R2_OP (XORI) +#define MASK_R2_XORI MASK_R2_OP +#define MATCH_R2_XOR_N MATCH_R2_R_N (XOR_N) +#define MASK_R2_XOR_N MASK_R2_R_N + +#endif /* _NIOS2R2_H */ + + +/* These are 
the data structures used to hold the instruction information. */ +extern const struct nios2_opcode nios2_r1_opcodes[]; +extern const int nios2_num_r1_opcodes; +extern const struct nios2_opcode nios2_r2_opcodes[]; +extern const int nios2_num_r2_opcodes; +extern struct nios2_opcode *nios2_opcodes; +extern int nios2_num_opcodes; + +/* These are the data structures used to hold the register information. */ +extern const struct nios2_reg nios2_builtin_regs[]; +extern struct nios2_reg *nios2_regs; +extern const int nios2_num_builtin_regs; +extern int nios2_num_regs; + +/* Return the opcode descriptor for a single instruction. */ +extern const struct nios2_opcode * +nios2_find_opcode_hash (unsigned long, unsigned long); + +/* Lookup tables for R2 immediate decodings. */ +extern unsigned int nios2_r2_asi_n_mappings[]; +extern const int nios2_num_r2_asi_n_mappings; +extern unsigned int nios2_r2_shi_n_mappings[]; +extern const int nios2_num_r2_shi_n_mappings; +extern unsigned int nios2_r2_andi_n_mappings[]; +extern const int nios2_num_r2_andi_n_mappings; + +/* Lookup table for 3-bit register decodings. */ +extern int nios2_r2_reg3_mappings[]; +extern const int nios2_num_r2_reg3_mappings; + +/* Lookup table for REG_RANGE value list decodings. */ +extern unsigned long nios2_r2_reg_range_mappings[]; +extern const int nios2_num_r2_reg_range_mappings; + +#endif /* _NIOS2_H */ + +/*#include "sysdep.h" +#include <stdio.h> +#include "opcode/nios2.h" +*/ +/* Register string table */ + +const struct nios2_reg nios2_builtin_regs[] = { + /* Standard register names. */ + {"zero", 0, REG_NORMAL}, + {"at", 1, REG_NORMAL}, /* assembler temporary */ + {"r2", 2, REG_NORMAL | REG_3BIT | REG_LDWM}, + {"r3", 3, REG_NORMAL | REG_3BIT | REG_LDWM}, + {"r4", 4, REG_NORMAL | REG_3BIT | REG_LDWM}, + {"r5", 5, REG_NORMAL | REG_3BIT | REG_LDWM}, + {"r6", 6, REG_NORMAL | REG_3BIT | REG_LDWM}, + {"r7", 7, REG_NORMAL | REG_3BIT | REG_LDWM}, + {"r8", 8, REG_NORMAL | REG_LDWM}, + {"r9", 9, REG_NORMAL | REG_LDWM}, + {"r10", 10, REG_NORMAL | REG_LDWM}, + {"r11", 11, REG_NORMAL | REG_LDWM}, + {"r12", 12, REG_NORMAL | REG_LDWM}, + {"r13", 13, REG_NORMAL | REG_LDWM}, + {"r14", 14, REG_NORMAL | REG_LDWM}, + {"r15", 15, REG_NORMAL | REG_LDWM}, + {"r16", 16, REG_NORMAL | REG_3BIT | REG_LDWM | REG_POP}, + {"r17", 17, REG_NORMAL | REG_3BIT | REG_LDWM | REG_POP}, + {"r18", 18, REG_NORMAL | REG_LDWM | REG_POP}, + {"r19", 19, REG_NORMAL | REG_LDWM | REG_POP}, + {"r20", 20, REG_NORMAL | REG_LDWM | REG_POP}, + {"r21", 21, REG_NORMAL | REG_LDWM | REG_POP}, + {"r22", 22, REG_NORMAL | REG_LDWM | REG_POP}, + {"r23", 23, REG_NORMAL | REG_LDWM | REG_POP}, + {"et", 24, REG_NORMAL}, + {"bt", 25, REG_NORMAL}, + {"gp", 26, REG_NORMAL}, /* global pointer */ + {"sp", 27, REG_NORMAL}, /* stack pointer */ + {"fp", 28, REG_NORMAL | REG_LDWM | REG_POP}, /* frame pointer */ + {"ea", 29, REG_NORMAL}, /* exception return address */ + {"sstatus", 30, REG_NORMAL}, /* saved processor status */ + {"ra", 31, REG_NORMAL | REG_LDWM | REG_POP}, /* return address */ + + /* Alternative names for special registers. */ + {"r0", 0, REG_NORMAL}, + {"r1", 1, REG_NORMAL}, + {"r24", 24, REG_NORMAL}, + {"r25", 25, REG_NORMAL}, + {"r26", 26, REG_NORMAL}, + {"r27", 27, REG_NORMAL}, + {"r28", 28, REG_NORMAL | REG_LDWM | REG_POP}, + {"r29", 29, REG_NORMAL}, + {"r30", 30, REG_NORMAL}, + {"ba", 30, REG_NORMAL}, /* breakpoint return address */ + {"r31", 31, REG_NORMAL | REG_LDWM | REG_POP}, + + /* Control register names. 
*/ + {"status", 0, REG_CONTROL}, + {"estatus", 1, REG_CONTROL}, + {"bstatus", 2, REG_CONTROL}, + {"ienable", 3, REG_CONTROL}, + {"ipending", 4, REG_CONTROL}, + {"cpuid", 5, REG_CONTROL}, + {"ctl6", 6, REG_CONTROL}, + {"exception", 7, REG_CONTROL}, + {"pteaddr", 8, REG_CONTROL}, + {"tlbacc", 9, REG_CONTROL}, + {"tlbmisc", 10, REG_CONTROL}, + {"eccinj", 11, REG_CONTROL}, + {"badaddr", 12, REG_CONTROL}, + {"config", 13, REG_CONTROL}, + {"mpubase", 14, REG_CONTROL}, + {"mpuacc", 15, REG_CONTROL}, + {"ctl16", 16, REG_CONTROL}, + {"ctl17", 17, REG_CONTROL}, + {"ctl18", 18, REG_CONTROL}, + {"ctl19", 19, REG_CONTROL}, + {"ctl20", 20, REG_CONTROL}, + {"ctl21", 21, REG_CONTROL}, + {"ctl22", 22, REG_CONTROL}, + {"ctl23", 23, REG_CONTROL}, + {"ctl24", 24, REG_CONTROL}, + {"ctl25", 25, REG_CONTROL}, + {"ctl26", 26, REG_CONTROL}, + {"ctl27", 27, REG_CONTROL}, + {"ctl28", 28, REG_CONTROL}, + {"ctl29", 29, REG_CONTROL}, + {"ctl30", 30, REG_CONTROL}, + {"ctl31", 31, REG_CONTROL}, + + /* Alternative names for special control registers. */ + {"ctl0", 0, REG_CONTROL}, + {"ctl1", 1, REG_CONTROL}, + {"ctl2", 2, REG_CONTROL}, + {"ctl3", 3, REG_CONTROL}, + {"ctl4", 4, REG_CONTROL}, + {"ctl5", 5, REG_CONTROL}, + {"ctl7", 7, REG_CONTROL}, + {"ctl8", 8, REG_CONTROL}, + {"ctl9", 9, REG_CONTROL}, + {"ctl10", 10, REG_CONTROL}, + {"ctl11", 11, REG_CONTROL}, + {"ctl12", 12, REG_CONTROL}, + {"ctl13", 13, REG_CONTROL}, + {"ctl14", 14, REG_CONTROL}, + {"ctl15", 15, REG_CONTROL}, + + /* Coprocessor register names. */ + {"c0", 0, REG_COPROCESSOR}, + {"c1", 1, REG_COPROCESSOR}, + {"c2", 2, REG_COPROCESSOR}, + {"c3", 3, REG_COPROCESSOR}, + {"c4", 4, REG_COPROCESSOR}, + {"c5", 5, REG_COPROCESSOR}, + {"c6", 6, REG_COPROCESSOR}, + {"c7", 7, REG_COPROCESSOR}, + {"c8", 8, REG_COPROCESSOR}, + {"c9", 9, REG_COPROCESSOR}, + {"c10", 10, REG_COPROCESSOR}, + {"c11", 11, REG_COPROCESSOR}, + {"c12", 12, REG_COPROCESSOR}, + {"c13", 13, REG_COPROCESSOR}, + {"c14", 14, REG_COPROCESSOR}, + {"c15", 15, REG_COPROCESSOR}, + {"c16", 16, REG_COPROCESSOR}, + {"c17", 17, REG_COPROCESSOR}, + {"c18", 18, REG_COPROCESSOR}, + {"c19", 19, REG_COPROCESSOR}, + {"c20", 20, REG_COPROCESSOR}, + {"c21", 21, REG_COPROCESSOR}, + {"c22", 22, REG_COPROCESSOR}, + {"c23", 23, REG_COPROCESSOR}, + {"c24", 24, REG_COPROCESSOR}, + {"c25", 25, REG_COPROCESSOR}, + {"c26", 26, REG_COPROCESSOR}, + {"c27", 27, REG_COPROCESSOR}, + {"c28", 28, REG_COPROCESSOR}, + {"c29", 29, REG_COPROCESSOR}, + {"c30", 30, REG_COPROCESSOR}, + {"c31", 31, REG_COPROCESSOR}, +}; + +#define NIOS2_NUM_REGS \ + ((sizeof nios2_builtin_regs) / (sizeof (nios2_builtin_regs[0]))) +const int nios2_num_builtin_regs = NIOS2_NUM_REGS; + +/* This is not const in order to allow for dynamic extensions to the + built-in instruction set. */ +struct nios2_reg *nios2_regs = (struct nios2_reg *) nios2_builtin_regs; +int nios2_num_regs = NIOS2_NUM_REGS; +#undef NIOS2_NUM_REGS + +/* This is the opcode table used by the Nios II GNU as, disassembler + and GDB. 
*/ +const struct nios2_opcode nios2_r1_opcodes[] = +{ + /* { name, args, args_test, num_args, size, format, + match, mask, pinfo, overflow } */ + {"add", "d,s,t", "d,s,t,E", 3, 4, iw_r_type, + MATCH_R1_ADD, MASK_R1_ADD, 0, no_overflow}, + {"addi", "t,s,i", "t,s,i,E", 3, 4, iw_i_type, + MATCH_R1_ADDI, MASK_R1_ADDI, 0, signed_immed16_overflow}, + {"and", "d,s,t", "d,s,t,E", 3, 4, iw_r_type, + MATCH_R1_AND, MASK_R1_AND, 0, no_overflow}, + {"andhi", "t,s,u", "t,s,u,E", 3, 4, iw_i_type, + MATCH_R1_ANDHI, MASK_R1_ANDHI, 0, unsigned_immed16_overflow}, + {"andi", "t,s,u", "t,s,u,E", 3, 4, iw_i_type, + MATCH_R1_ANDI, MASK_R1_ANDI, 0, unsigned_immed16_overflow}, + {"beq", "s,t,o", "s,t,o,E", 3, 4, iw_i_type, + MATCH_R1_BEQ, MASK_R1_BEQ, NIOS2_INSN_CBRANCH, branch_target_overflow}, + {"bge", "s,t,o", "s,t,o,E", 3, 4, iw_i_type, + MATCH_R1_BGE, MASK_R1_BGE, NIOS2_INSN_CBRANCH, branch_target_overflow}, + {"bgeu", "s,t,o", "s,t,o,E", 3, 4, iw_i_type, + MATCH_R1_BGEU, MASK_R1_BGEU, NIOS2_INSN_CBRANCH, branch_target_overflow}, + {"bgt", "s,t,o", "s,t,o,E", 3, 4, iw_i_type, + MATCH_R1_BGT, MASK_R1_BGT, + NIOS2_INSN_MACRO|NIOS2_INSN_CBRANCH, branch_target_overflow}, + {"bgtu", "s,t,o", "s,t,o,E", 3, 4, iw_i_type, + MATCH_R1_BGTU, MASK_R1_BGTU, + NIOS2_INSN_MACRO|NIOS2_INSN_CBRANCH, branch_target_overflow}, + {"ble", "s,t,o", "s,t,o,E", 3, 4, iw_i_type, + MATCH_R1_BLE, MASK_R1_BLE, + NIOS2_INSN_MACRO|NIOS2_INSN_CBRANCH, branch_target_overflow}, + {"bleu", "s,t,o", "s,t,o,E", 3, 4, iw_i_type, + MATCH_R1_BLEU, MASK_R1_BLEU, + NIOS2_INSN_MACRO|NIOS2_INSN_CBRANCH, branch_target_overflow}, + {"blt", "s,t,o", "s,t,o,E", 3, 4, iw_i_type, + MATCH_R1_BLT, MASK_R1_BLT, NIOS2_INSN_CBRANCH, branch_target_overflow}, + {"bltu", "s,t,o", "s,t,o,E", 3, 4, iw_i_type, + MATCH_R1_BLTU, MASK_R1_BLTU, NIOS2_INSN_CBRANCH, branch_target_overflow}, + {"bne", "s,t,o", "s,t,o,E", 3, 4, iw_i_type, + MATCH_R1_BNE, MASK_R1_BNE, NIOS2_INSN_CBRANCH, branch_target_overflow}, + {"br", "o", "o,E", 1, 4, iw_i_type, + MATCH_R1_BR, MASK_R1_BR, NIOS2_INSN_UBRANCH, branch_target_overflow}, + {"break", "j", "j,E", 1, 4, iw_r_type, + MATCH_R1_BREAK, MASK_R1_BREAK, NIOS2_INSN_OPTARG, no_overflow}, + {"bret", "", "E", 0, 4, iw_r_type, + MATCH_R1_BRET, MASK_R1_BRET, 0, no_overflow}, + {"call", "m", "m,E", 1, 4, iw_j_type, + MATCH_R1_CALL, MASK_R1_CALL, NIOS2_INSN_CALL, call_target_overflow}, + {"callr", "s", "s,E", 1, 4, iw_r_type, + MATCH_R1_CALLR, MASK_R1_CALLR, 0, no_overflow}, + {"cmpeq", "d,s,t", "d,s,t,E", 3, 4, iw_r_type, + MATCH_R1_CMPEQ, MASK_R1_CMPEQ, 0, no_overflow}, + {"cmpeqi", "t,s,i", "t,s,i,E", 3, 4, iw_i_type, + MATCH_R1_CMPEQI, MASK_R1_CMPEQI, 0, signed_immed16_overflow}, + {"cmpge", "d,s,t", "d,s,t,E", 3, 4, iw_r_type, + MATCH_R1_CMPGE, MASK_R1_CMPGE, 0, no_overflow}, + {"cmpgei", "t,s,i", "t,s,i,E", 3, 4, iw_i_type, + MATCH_R1_CMPGEI, MASK_R1_CMPGEI, 0, signed_immed16_overflow}, + {"cmpgeu", "d,s,t", "d,s,t,E", 3, 4, iw_r_type, + MATCH_R1_CMPGEU, MASK_R1_CMPGEU, 0, no_overflow}, + {"cmpgeui", "t,s,u", "t,s,u,E", 3, 4, iw_i_type, + MATCH_R1_CMPGEUI, MASK_R1_CMPGEUI, 0, unsigned_immed16_overflow}, + {"cmpgt", "d,s,t", "d,s,t,E", 3, 4, iw_r_type, + MATCH_R1_CMPGT, MASK_R1_CMPGT, NIOS2_INSN_MACRO, no_overflow}, + {"cmpgti", "t,s,i", "t,s,i,E", 3, 4, iw_i_type, + MATCH_R1_CMPGTI, MASK_R1_CMPGTI, NIOS2_INSN_MACRO, signed_immed16_overflow}, + {"cmpgtu", "d,s,t", "d,s,t,E", 3, 4, iw_r_type, + MATCH_R1_CMPGTU, MASK_R1_CMPGTU, NIOS2_INSN_MACRO, no_overflow}, + {"cmpgtui", "t,s,u", "t,s,u,E", 3, 4, iw_i_type, + MATCH_R1_CMPGTUI, 
MASK_R1_CMPGTUI, + NIOS2_INSN_MACRO, unsigned_immed16_overflow}, + {"cmple", "d,s,t", "d,s,t,E", 3, 4, iw_r_type, + MATCH_R1_CMPLE, MASK_R1_CMPLE, NIOS2_INSN_MACRO, no_overflow}, + {"cmplei", "t,s,i", "t,s,i,E", 3, 4, iw_i_type, + MATCH_R1_CMPLEI, MASK_R1_CMPLEI, NIOS2_INSN_MACRO, signed_immed16_overflow}, + {"cmpleu", "d,s,t", "d,s,t,E", 3, 4, iw_r_type, + MATCH_R1_CMPLEU, MASK_R1_CMPLEU, NIOS2_INSN_MACRO, no_overflow}, + {"cmpleui", "t,s,u", "t,s,u,E", 3, 4, iw_i_type, + MATCH_R1_CMPLEUI, MASK_R1_CMPLEUI, + NIOS2_INSN_MACRO, unsigned_immed16_overflow}, + {"cmplt", "d,s,t", "d,s,t,E", 3, 4, iw_r_type, + MATCH_R1_CMPLT, MASK_R1_CMPLT, 0, no_overflow}, + {"cmplti", "t,s,i", "t,s,i,E", 3, 4, iw_i_type, + MATCH_R1_CMPLTI, MASK_R1_CMPLTI, 0, signed_immed16_overflow}, + {"cmpltu", "d,s,t", "d,s,t,E", 3, 4, iw_r_type, + MATCH_R1_CMPLTU, MASK_R1_CMPLTU, 0, no_overflow}, + {"cmpltui", "t,s,u", "t,s,u,E", 3, 4, iw_i_type, + MATCH_R1_CMPLTUI, MASK_R1_CMPLTUI, 0, unsigned_immed16_overflow}, + {"cmpne", "d,s,t", "d,s,t,E", 3, 4, iw_r_type, + MATCH_R1_CMPNE, MASK_R1_CMPNE, 0, no_overflow}, + {"cmpnei", "t,s,i", "t,s,i,E", 3, 4, iw_i_type, + MATCH_R1_CMPNEI, MASK_R1_CMPNEI, 0, signed_immed16_overflow}, + {"custom", "l,d,s,t", "l,d,s,t,E", 4, 4, iw_custom_type, + MATCH_R1_CUSTOM, MASK_R1_CUSTOM, 0, custom_opcode_overflow}, + {"div", "d,s,t", "d,s,t,E", 3, 4, iw_r_type, + MATCH_R1_DIV, MASK_R1_DIV, 0, no_overflow}, + {"divu", "d,s,t", "d,s,t,E", 3, 4, iw_r_type, + MATCH_R1_DIVU, MASK_R1_DIVU, 0, no_overflow}, + {"eret", "", "E", 0, 4, iw_r_type, + MATCH_R1_ERET, MASK_R1_ERET, 0, no_overflow}, + {"flushd", "i(s)", "i(s),E", 2, 4, iw_i_type, + MATCH_R1_FLUSHD, MASK_R1_FLUSHD, 0, address_offset_overflow}, + {"flushda", "i(s)", "i(s),E", 2, 4, iw_i_type, + MATCH_R1_FLUSHDA, MASK_R1_FLUSHDA, 0, address_offset_overflow}, + {"flushi", "s", "s,E", 1, 4, iw_r_type, + MATCH_R1_FLUSHI, MASK_R1_FLUSHI, 0, no_overflow}, + {"flushp", "", "E", 0, 4, iw_r_type, + MATCH_R1_FLUSHP, MASK_R1_FLUSHP, 0, no_overflow}, + {"initd", "i(s)", "i(s),E", 2, 4, iw_i_type, + MATCH_R1_INITD, MASK_R1_INITD, 0, address_offset_overflow}, + {"initda", "i(s)", "i(s),E", 2, 4, iw_i_type, + MATCH_R1_INITDA, MASK_R1_INITDA, 0, address_offset_overflow}, + {"initi", "s", "s,E", 1, 4, iw_r_type, + MATCH_R1_INITI, MASK_R1_INITI, 0, no_overflow}, + {"jmp", "s", "s,E", 1, 4, iw_r_type, + MATCH_R1_JMP, MASK_R1_JMP, 0, no_overflow}, + {"jmpi", "m", "m,E", 1, 4, iw_j_type, + MATCH_R1_JMPI, MASK_R1_JMPI, 0, call_target_overflow}, + {"ldb", "t,i(s)", "t,i(s),E", 3, 4, iw_i_type, + MATCH_R1_LDB, MASK_R1_LDB, 0, address_offset_overflow}, + {"ldbio", "t,i(s)", "t,i(s),E", 3, 4, iw_i_type, + MATCH_R1_LDBIO, MASK_R1_LDBIO, 0, address_offset_overflow}, + {"ldbu", "t,i(s)", "t,i(s),E", 3, 4, iw_i_type, + MATCH_R1_LDBU, MASK_R1_LDBU, 0, address_offset_overflow}, + {"ldbuio", "t,i(s)", "t,i(s),E", 3, 4, iw_i_type, + MATCH_R1_LDBUIO, MASK_R1_LDBUIO, 0, address_offset_overflow}, + {"ldh", "t,i(s)", "t,i(s),E", 3, 4, iw_i_type, + MATCH_R1_LDH, MASK_R1_LDH, 0, address_offset_overflow}, + {"ldhio", "t,i(s)", "t,i(s),E", 3, 4, iw_i_type, + MATCH_R1_LDHIO, MASK_R1_LDHIO, 0, address_offset_overflow}, + {"ldhu", "t,i(s)", "t,i(s),E", 3, 4, iw_i_type, + MATCH_R1_LDHU, MASK_R1_LDHU, 0, address_offset_overflow}, + {"ldhuio", "t,i(s)", "t,i(s),E", 3, 4, iw_i_type, + MATCH_R1_LDHUIO, MASK_R1_LDHUIO, 0, address_offset_overflow}, + {"ldw", "t,i(s)", "t,i(s),E", 3, 4, iw_i_type, + MATCH_R1_LDW, MASK_R1_LDW, 0, address_offset_overflow}, + {"ldwio", "t,i(s)", "t,i(s),E", 3, 4, 
iw_i_type, + MATCH_R1_LDWIO, MASK_R1_LDWIO, 0, address_offset_overflow}, + {"mov", "d,s", "d,s,E", 2, 4, iw_r_type, + MATCH_R1_MOV, MASK_R1_MOV, NIOS2_INSN_MACRO_MOV, no_overflow}, + {"movhi", "t,u", "t,u,E", 2, 4, iw_i_type, + MATCH_R1_MOVHI, MASK_R1_MOVHI, + NIOS2_INSN_MACRO_MOVI, unsigned_immed16_overflow}, + {"movi", "t,i", "t,i,E", 2, 4, iw_i_type, + MATCH_R1_MOVI, MASK_R1_MOVI, NIOS2_INSN_MACRO_MOVI, signed_immed16_overflow}, + {"movia", "t,o", "t,o,E", 2, 4, iw_i_type, + MATCH_R1_ORHI, MASK_R1_ORHI, NIOS2_INSN_MACRO_MOVIA, no_overflow}, + {"movui", "t,u", "t,u,E", 2, 4, iw_i_type, + MATCH_R1_MOVUI, MASK_R1_MOVUI, + NIOS2_INSN_MACRO_MOVI, unsigned_immed16_overflow}, + {"mul", "d,s,t", "d,s,t,E", 3, 4, iw_r_type, + MATCH_R1_MUL, MASK_R1_MUL, 0, no_overflow}, + {"muli", "t,s,i", "t,s,i,E", 3, 4, iw_i_type, + MATCH_R1_MULI, MASK_R1_MULI, 0, signed_immed16_overflow}, + {"mulxss", "d,s,t", "d,s,t,E", 3, 4, iw_r_type, + MATCH_R1_MULXSS, MASK_R1_MULXSS, 0, no_overflow}, + {"mulxsu", "d,s,t", "d,s,t,E", 3, 4, iw_r_type, + MATCH_R1_MULXSU, MASK_R1_MULXSU, 0, no_overflow}, + {"mulxuu", "d,s,t", "d,s,t,E", 3, 4, iw_r_type, + MATCH_R1_MULXUU, MASK_R1_MULXUU, 0, no_overflow}, + {"nextpc", "d", "d,E", 1, 4, iw_r_type, + MATCH_R1_NEXTPC, MASK_R1_NEXTPC, 0, no_overflow}, + {"nop", "", "E", 0, 4, iw_r_type, + MATCH_R1_NOP, MASK_R1_NOP, NIOS2_INSN_MACRO_MOV, no_overflow}, + {"nor", "d,s,t", "d,s,t,E", 3, 4, iw_r_type, + MATCH_R1_NOR, MASK_R1_NOR, 0, no_overflow}, + {"or", "d,s,t", "d,s,t,E", 3, 4, iw_r_type, + MATCH_R1_OR, MASK_R1_OR, 0, no_overflow}, + {"orhi", "t,s,u", "t,s,u,E", 3, 4, iw_i_type, + MATCH_R1_ORHI, MASK_R1_ORHI, 0, unsigned_immed16_overflow}, + {"ori", "t,s,u", "t,s,u,E", 3, 4, iw_i_type, + MATCH_R1_ORI, MASK_R1_ORI, 0, unsigned_immed16_overflow}, + {"rdctl", "d,c", "d,c,E", 2, 4, iw_r_type, + MATCH_R1_RDCTL, MASK_R1_RDCTL, 0, no_overflow}, + {"rdprs", "t,s,i", "t,s,i,E", 3, 4, iw_i_type, + MATCH_R1_RDPRS, MASK_R1_RDPRS, 0, signed_immed16_overflow}, + {"ret", "", "E", 0, 4, iw_r_type, + MATCH_R1_RET, MASK_R1_RET, 0, no_overflow}, + {"rol", "d,s,t", "d,s,t,E", 3, 4, iw_r_type, + MATCH_R1_ROL, MASK_R1_ROL, 0, no_overflow}, + {"roli", "d,s,j", "d,s,j,E", 3, 4, iw_r_type, + MATCH_R1_ROLI, MASK_R1_ROLI, 0, unsigned_immed5_overflow}, + {"ror", "d,s,t", "d,s,t,E", 3, 4, iw_r_type, + MATCH_R1_ROR, MASK_R1_ROR, 0, no_overflow}, + {"sll", "d,s,t", "d,s,t,E", 3, 4, iw_r_type, + MATCH_R1_SLL, MASK_R1_SLL, 0, no_overflow}, + {"slli", "d,s,j", "d,s,j,E", 3, 4, iw_r_type, + MATCH_R1_SLLI, MASK_R1_SLLI, 0, unsigned_immed5_overflow}, + {"sra", "d,s,t", "d,s,t,E", 3, 4, iw_r_type, + MATCH_R1_SRA, MASK_R1_SRA, 0, no_overflow}, + {"srai", "d,s,j", "d,s,j,E", 3, 4, iw_r_type, + MATCH_R1_SRAI, MASK_R1_SRAI, 0, unsigned_immed5_overflow}, + {"srl", "d,s,t", "d,s,t,E", 3, 4, iw_r_type, + MATCH_R1_SRL, MASK_R1_SRL, 0, no_overflow}, + {"srli", "d,s,j", "d,s,j,E", 3, 4, iw_r_type, + MATCH_R1_SRLI, MASK_R1_SRLI, 0, unsigned_immed5_overflow}, + {"stb", "t,i(s)", "t,i(s),E", 3, 4, iw_i_type, + MATCH_R1_STB, MASK_R1_STB, 0, address_offset_overflow}, + {"stbio", "t,i(s)", "t,i(s),E", 3, 4, iw_i_type, + MATCH_R1_STBIO, MASK_R1_STBIO, 0, address_offset_overflow}, + {"sth", "t,i(s)", "t,i(s),E", 3, 4, iw_i_type, + MATCH_R1_STH, MASK_R1_STH, 0, address_offset_overflow}, + {"sthio", "t,i(s)", "t,i(s),E", 3, 4, iw_i_type, + MATCH_R1_STHIO, MASK_R1_STHIO, 0, address_offset_overflow}, + {"stw", "t,i(s)", "t,i(s),E", 3, 4, iw_i_type, + MATCH_R1_STW, MASK_R1_STW, 0, address_offset_overflow}, + {"stwio", "t,i(s)", "t,i(s),E", 
3, 4, iw_i_type, + MATCH_R1_STWIO, MASK_R1_STWIO, 0, address_offset_overflow}, + {"sub", "d,s,t", "d,s,t,E", 3, 4, iw_r_type, + MATCH_R1_SUB, MASK_R1_SUB, 0, no_overflow}, + {"subi", "t,s,i", "t,s,i,E", 3, 4, iw_i_type, + MATCH_R1_SUBI, MASK_R1_SUBI, NIOS2_INSN_MACRO, signed_immed16_overflow}, + {"sync", "", "E", 0, 4, iw_r_type, + MATCH_R1_SYNC, MASK_R1_SYNC, 0, no_overflow}, + {"trap", "j", "j,E", 1, 4, iw_r_type, + MATCH_R1_TRAP, MASK_R1_TRAP, NIOS2_INSN_OPTARG, no_overflow}, + {"wrctl", "c,s", "c,s,E", 2, 4, iw_r_type, + MATCH_R1_WRCTL, MASK_R1_WRCTL, 0, no_overflow}, + {"wrprs", "d,s", "d,s,E", 2, 4, iw_r_type, + MATCH_R1_WRPRS, MASK_R1_WRPRS, 0, no_overflow}, + {"xor", "d,s,t", "d,s,t,E", 3, 4, iw_r_type, + MATCH_R1_XOR, MASK_R1_XOR, 0, no_overflow}, + {"xorhi", "t,s,u", "t,s,u,E", 3, 4, iw_i_type, + MATCH_R1_XORHI, MASK_R1_XORHI, 0, unsigned_immed16_overflow}, + {"xori", "t,s,u", "t,s,u,E", 3, 4, iw_i_type, + MATCH_R1_XORI, MASK_R1_XORI, 0, unsigned_immed16_overflow} +}; + +#define NIOS2_NUM_R1_OPCODES \ + ((sizeof nios2_r1_opcodes) / (sizeof (nios2_r1_opcodes[0]))) +const int nios2_num_r1_opcodes = NIOS2_NUM_R1_OPCODES; + + +const struct nios2_opcode nios2_r2_opcodes[] = +{ + /* { name, args, args_test, num_args, size, format, + match, mask, pinfo, overflow } */ + {"add", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type, + MATCH_R2_ADD, MASK_R2_ADD, 0, no_overflow}, + {"addi", "t,s,i", "t,s,i,E", 3, 4, iw_F2I16_type, + MATCH_R2_ADDI, MASK_R2_ADDI, 0, signed_immed16_overflow}, + {"add.n", "D,S,T", "D,S,T,E", 3, 2, iw_T3X1_type, + MATCH_R2_ADD_N, MASK_R2_ADD_N, 0, no_overflow}, + {"addi.n", "D,S,e", "D,S,e,E", 3, 2, iw_T2X1I3_type, + MATCH_R2_ADDI_N, MASK_R2_ADDI_N, 0, enumeration_overflow}, + {"and", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type, + MATCH_R2_AND, MASK_R2_AND, 0, no_overflow}, + {"andchi", "t,s,u", "t,s,u,E", 3, 4, iw_F2I16_type, + MATCH_R2_ANDCHI, MASK_R2_ANDCHI, 0, unsigned_immed16_overflow}, + {"andci", "t,s,u", "t,s,u,E", 3, 4, iw_F2I16_type, + MATCH_R2_ANDCI, MASK_R2_ANDCI, 0, unsigned_immed16_overflow}, + {"andhi", "t,s,u", "t,s,u,E", 3, 4, iw_F2I16_type, + MATCH_R2_ANDHI, MASK_R2_ANDHI, 0, unsigned_immed16_overflow}, + {"andi", "t,s,u", "t,s,u,E", 3, 4, iw_F2I16_type, + MATCH_R2_ANDI, MASK_R2_ANDI, 0, unsigned_immed16_overflow}, + {"andi.n", "T,S,g", "T,S,g,E", 3, 2, iw_T2I4_type, + MATCH_R2_ANDI_N, MASK_R2_ANDI_N, 0, enumeration_overflow}, + {"and.n", "D,S,T", "D,S,T,E", 3, 2, iw_T2X3_type, + MATCH_R2_AND_N, MASK_R2_AND_N, 0, no_overflow}, + {"beq", "s,t,o", "s,t,o,E", 3, 4, iw_F2I16_type, + MATCH_R2_BEQ, MASK_R2_BEQ, NIOS2_INSN_CBRANCH, branch_target_overflow}, + {"beqz.n", "S,P", "S,P,E", 2, 2, iw_T1I7_type, + MATCH_R2_BEQZ_N, MASK_R2_BEQZ_N, NIOS2_INSN_CBRANCH, branch_target_overflow}, + {"bge", "s,t,o", "s,t,o,E", 3, 4, iw_F2I16_type, + MATCH_R2_BGE, MASK_R2_BGE, NIOS2_INSN_CBRANCH, branch_target_overflow}, + {"bgeu", "s,t,o", "s,t,o,E", 3, 4, iw_F2I16_type, + MATCH_R2_BGEU, MASK_R2_BGEU, NIOS2_INSN_CBRANCH, branch_target_overflow}, + {"bgt", "s,t,o", "s,t,o,E", 3, 4, iw_F2I16_type, + MATCH_R2_BGT, MASK_R2_BGT, + NIOS2_INSN_MACRO|NIOS2_INSN_CBRANCH, branch_target_overflow}, + {"bgtu", "s,t,o", "s,t,o,E", 3, 4, iw_F2I16_type, + MATCH_R2_BGTU, MASK_R2_BGTU, + NIOS2_INSN_MACRO|NIOS2_INSN_CBRANCH, branch_target_overflow}, + {"ble", "s,t,o", "s,t,o,E", 3, 4, iw_F2I16_type, + MATCH_R2_BLE, MASK_R2_BLE, + NIOS2_INSN_MACRO|NIOS2_INSN_CBRANCH, branch_target_overflow}, + {"bleu", "s,t,o", "s,t,o,E", 3, 4, iw_F2I16_type, + MATCH_R2_BLEU, MASK_R2_BLEU, + 
NIOS2_INSN_MACRO|NIOS2_INSN_CBRANCH, branch_target_overflow}, + {"blt", "s,t,o", "s,t,o,E", 3, 4, iw_F2I16_type, + MATCH_R2_BLT, MASK_R2_BLT, NIOS2_INSN_CBRANCH, branch_target_overflow}, + {"bltu", "s,t,o", "s,t,o,E", 3, 4, iw_F2I16_type, + MATCH_R2_BLTU, MASK_R2_BLTU, NIOS2_INSN_CBRANCH, branch_target_overflow}, + {"bne", "s,t,o", "s,t,o,E", 3, 4, iw_F2I16_type, + MATCH_R2_BNE, MASK_R2_BNE, NIOS2_INSN_CBRANCH, branch_target_overflow}, + {"bnez.n", "S,P", "S,P,E", 2, 2, iw_T1I7_type, + MATCH_R2_BNEZ_N, MASK_R2_BNEZ_N, NIOS2_INSN_CBRANCH, branch_target_overflow}, + {"br", "o", "o,E", 1, 4, iw_F2I16_type, + MATCH_R2_BR, MASK_R2_BR, NIOS2_INSN_UBRANCH, branch_target_overflow}, + {"break", "j", "j,E", 1, 4, iw_F3X6L5_type, + MATCH_R2_BREAK, MASK_R2_BREAK, NIOS2_INSN_OPTARG, no_overflow}, + {"break.n", "j", "j,E", 1, 2, iw_X2L5_type, + MATCH_R2_BREAK_N, MASK_R2_BREAK_N, NIOS2_INSN_OPTARG, no_overflow}, + {"bret", "", "E", 0, 4, iw_F3X6_type, + MATCH_R2_BRET, MASK_R2_BRET, 0, no_overflow}, + {"br.n", "O", "O,E", 1, 2, iw_I10_type, + MATCH_R2_BR_N, MASK_R2_BR_N, NIOS2_INSN_UBRANCH, branch_target_overflow}, + {"call", "m", "m,E", 1, 4, iw_L26_type, + MATCH_R2_CALL, MASK_R2_CALL, NIOS2_INSN_CALL, call_target_overflow}, + {"callr", "s", "s,E", 1, 4, iw_F3X6_type, + MATCH_R2_CALLR, MASK_R2_CALLR, 0, no_overflow}, + {"callr.n", "s", "s,E", 1, 2, iw_F1X1_type, + MATCH_R2_CALLR_N, MASK_R2_CALLR_N, 0, no_overflow}, + {"cmpeq", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type, + MATCH_R2_CMPEQ, MASK_R2_CMPEQ, 0, no_overflow}, + {"cmpeqi", "t,s,i", "t,s,i,E", 3, 4, iw_F2I16_type, + MATCH_R2_CMPEQI, MASK_R2_CMPEQI, 0, signed_immed16_overflow}, + {"cmpge", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type, + MATCH_R2_CMPGE, MASK_R2_CMPGE, 0, no_overflow}, + {"cmpgei", "t,s,i", "t,s,i,E", 3, 4, iw_F2I16_type, + MATCH_R2_CMPGEI, MASK_R2_CMPGEI, 0, signed_immed16_overflow}, + {"cmpgeu", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type, + MATCH_R2_CMPGEU, MASK_R2_CMPGEU, 0, no_overflow}, + {"cmpgeui", "t,s,u", "t,s,u,E", 3, 4, iw_F2I16_type, + MATCH_R2_CMPGEUI, MASK_R2_CMPGEUI, 0, unsigned_immed16_overflow}, + {"cmpgt", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type, + MATCH_R2_CMPGT, MASK_R2_CMPGT, NIOS2_INSN_MACRO, no_overflow}, + {"cmpgti", "t,s,i", "t,s,i,E", 3, 4, iw_F2I16_type, + MATCH_R2_CMPGTI, MASK_R2_CMPGTI, NIOS2_INSN_MACRO, signed_immed16_overflow}, + {"cmpgtu", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type, + MATCH_R2_CMPGTU, MASK_R2_CMPGTU, NIOS2_INSN_MACRO, no_overflow}, + {"cmpgtui", "t,s,u", "t,s,u,E", 3, 4, iw_F2I16_type, + MATCH_R2_CMPGTUI, MASK_R2_CMPGTUI, + NIOS2_INSN_MACRO, unsigned_immed16_overflow}, + {"cmple", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type, + MATCH_R2_CMPLE, MASK_R2_CMPLE, NIOS2_INSN_MACRO, no_overflow}, + {"cmplei", "t,s,i", "t,s,i,E", 3, 4, iw_F2I16_type, + MATCH_R2_CMPLEI, MASK_R2_CMPLEI, NIOS2_INSN_MACRO, signed_immed16_overflow}, + {"cmpleu", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type, + MATCH_R2_CMPLEU, MASK_R2_CMPLEU, NIOS2_INSN_MACRO, no_overflow}, + {"cmpleui", "t,s,u", "t,s,u,E", 3, 4, iw_F2I16_type, + MATCH_R2_CMPLEUI, MASK_R2_CMPLEUI, + NIOS2_INSN_MACRO, unsigned_immed16_overflow}, + {"cmplt", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type, + MATCH_R2_CMPLT, MASK_R2_CMPLT, 0, no_overflow}, + {"cmplti", "t,s,i", "t,s,i,E", 3, 4, iw_F2I16_type, + MATCH_R2_CMPLTI, MASK_R2_CMPLTI, 0, signed_immed16_overflow}, + {"cmpltu", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type, + MATCH_R2_CMPLTU, MASK_R2_CMPLTU, 0, no_overflow}, + {"cmpltui", "t,s,u", "t,s,u,E", 3, 4, iw_F2I16_type, + MATCH_R2_CMPLTUI, MASK_R2_CMPLTUI, 0, 
unsigned_immed16_overflow}, + {"cmpne", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type, + MATCH_R2_CMPNE, MASK_R2_CMPNE, 0, no_overflow}, + {"cmpnei", "t,s,i", "t,s,i,E", 3, 4, iw_F2I16_type, + MATCH_R2_CMPNEI, MASK_R2_CMPNEI, 0, signed_immed16_overflow}, + {"custom", "l,d,s,t", "l,d,s,t,E", 4, 4, iw_F3X8_type, + MATCH_R2_CUSTOM, MASK_R2_CUSTOM, 0, custom_opcode_overflow}, + {"div", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type, + MATCH_R2_DIV, MASK_R2_DIV, 0, no_overflow}, + {"divu", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type, + MATCH_R2_DIVU, MASK_R2_DIVU, 0, no_overflow}, + {"eni", "j", "j,E", 1, 4, iw_F3X6L5_type, + MATCH_R2_ENI, MASK_R2_ENI, NIOS2_INSN_OPTARG, no_overflow}, + {"eret", "", "E", 0, 4, iw_F3X6_type, + MATCH_R2_ERET, MASK_R2_ERET, 0, no_overflow}, + {"extract", "t,s,j,k", "t,s,j,k,E", 4, 4, iw_F2X6L10_type, + MATCH_R2_EXTRACT, MASK_R2_EXTRACT, 0, no_overflow}, + {"flushd", "I(s)", "I(s),E", 2, 4, iw_F1X4I12_type, + MATCH_R2_FLUSHD, MASK_R2_FLUSHD, 0, address_offset_overflow}, + {"flushda", "I(s)", "I(s),E", 2, 4, iw_F1X4I12_type, + MATCH_R2_FLUSHDA, MASK_R2_FLUSHDA, 0, address_offset_overflow}, + {"flushi", "s", "s,E", 1, 4, iw_F3X6_type, + MATCH_R2_FLUSHI, MASK_R2_FLUSHI, 0, no_overflow}, + {"flushp", "", "E", 0, 4, iw_F3X6_type, + MATCH_R2_FLUSHP, MASK_R2_FLUSHP, 0, no_overflow}, + {"initd", "I(s)", "I(s),E", 2, 4, iw_F1X4I12_type, + MATCH_R2_INITD, MASK_R2_INITD, 0, address_offset_overflow}, + {"initda", "I(s)", "I(s),E", 2, 4, iw_F1X4I12_type, + MATCH_R2_INITDA, MASK_R2_INITDA, 0, address_offset_overflow}, + {"initi", "s", "s,E", 1, 4, iw_F3X6_type, + MATCH_R2_INITI, MASK_R2_INITI, 0, no_overflow}, + {"insert", "t,s,j,k", "t,s,j,k,E", 4, 4, iw_F2X6L10_type, + MATCH_R2_INSERT, MASK_R2_INSERT, 0, no_overflow}, + {"jmp", "s", "s,E", 1, 4, iw_F3X6_type, + MATCH_R2_JMP, MASK_R2_JMP, 0, no_overflow}, + {"jmpi", "m", "m,E", 1, 4, iw_L26_type, + MATCH_R2_JMPI, MASK_R2_JMPI, 0, call_target_overflow}, + {"jmpr.n", "s", "s,E", 1, 2, iw_F1X1_type, + MATCH_R2_JMPR_N, MASK_R2_JMPR_N, 0, no_overflow}, + {"ldb", "t,i(s)", "t,i(s),E", 3, 4, iw_F2I16_type, + MATCH_R2_LDB, MASK_R2_LDB, 0, address_offset_overflow}, + {"ldbio", "t,I(s)", "t,I(s),E", 3, 4, iw_F2X4I12_type, + MATCH_R2_LDBIO, MASK_R2_LDBIO, 0, signed_immed12_overflow}, + {"ldbu", "t,i(s)", "t,i(s),E", 3, 4, iw_F2I16_type, + MATCH_R2_LDBU, MASK_R2_LDBU, 0, address_offset_overflow}, + {"ldbuio", "t,I(s)", "t,I(s),E", 3, 4, iw_F2X4I12_type, + MATCH_R2_LDBUIO, MASK_R2_LDBUIO, 0, signed_immed12_overflow}, + {"ldbu.n", "T,Y(S)", "T,Y(S),E", 3, 2, iw_T2I4_type, + MATCH_R2_LDBU_N, MASK_R2_LDBU_N, 0, address_offset_overflow}, + {"ldex", "d,(s)", "d,(s),E", 2, 4, iw_F3X6_type, + MATCH_R2_LDEX, MASK_R2_LDEX, 0, no_overflow}, + {"ldh", "t,i(s)", "t,i(s),E", 3, 4, iw_F2I16_type, + MATCH_R2_LDH, MASK_R2_LDH, 0, address_offset_overflow}, + {"ldhio", "t,I(s)", "t,I(s),E", 3, 4, iw_F2X4I12_type, + MATCH_R2_LDHIO, MASK_R2_LDHIO, 0, signed_immed12_overflow}, + {"ldhu", "t,i(s)", "t,i(s),E", 3, 4, iw_F2I16_type, + MATCH_R2_LDHU, MASK_R2_LDHU, 0, address_offset_overflow}, + {"ldhuio", "t,I(s)", "t,I(s),E", 3, 4, iw_F2X4I12_type, + MATCH_R2_LDHUIO, MASK_R2_LDHUIO, 0, signed_immed12_overflow}, + {"ldhu.n", "T,X(S)", "T,X(S),E", 3, 2, iw_T2I4_type, + MATCH_R2_LDHU_N, MASK_R2_LDHU_N, 0, address_offset_overflow}, + {"ldsex", "d,(s)", "d,(s),E", 2, 4, iw_F3X6_type, + MATCH_R2_LDSEX, MASK_R2_LDSEX, 0, no_overflow}, + {"ldw", "t,i(s)", "t,i(s),E", 3, 4, iw_F2I16_type, + MATCH_R2_LDW, MASK_R2_LDW, 0, address_offset_overflow}, + {"ldwio", "t,I(s)", "t,I(s),E", 3, 4, 
iw_F2X4I12_type, + MATCH_R2_LDWIO, MASK_R2_LDWIO, 0, signed_immed12_overflow}, + {"ldwm", "R,B", "R,B,E", 2, 4, iw_F1X4L17_type, + MATCH_R2_LDWM, MASK_R2_LDWM, 0, no_overflow}, + {"ldw.n", "T,W(S)", "T,W(S),E", 3, 2, iw_T2I4_type, + MATCH_R2_LDW_N, MASK_R2_LDW_N, 0, address_offset_overflow}, + {"ldwsp.n", "t,V(s)", "t,V(s),E", 3, 2, iw_F1I5_type, + MATCH_R2_LDWSP_N, MASK_R2_LDWSP_N, 0, address_offset_overflow}, + {"merge", "t,s,j,k", "t,s,j,k,E", 4, 4, iw_F2X6L10_type, + MATCH_R2_MERGE, MASK_R2_MERGE, 0, no_overflow}, + {"mov", "d,s", "d,s,E", 2, 4, iw_F3X6_type, + MATCH_R2_MOV, MASK_R2_MOV, NIOS2_INSN_MACRO_MOV, no_overflow}, + {"mov.n", "d,s", "d,s,E", 2, 2, iw_F2_type, + MATCH_R2_MOV_N, MASK_R2_MOV_N, 0, no_overflow}, + {"movi.n", "D,h", "D,h,E", 2, 2, iw_T1I7_type, + MATCH_R2_MOVI_N, MASK_R2_MOVI_N, 0, enumeration_overflow}, + {"movhi", "t,u", "t,u,E", 2, 4, iw_F2I16_type, + MATCH_R2_MOVHI, MASK_R2_MOVHI, + NIOS2_INSN_MACRO_MOVI, unsigned_immed16_overflow}, + {"movi", "t,i", "t,i,E", 2, 4, iw_F2I16_type, + MATCH_R2_MOVI, MASK_R2_MOVI, NIOS2_INSN_MACRO_MOVI, signed_immed16_overflow}, + {"movia", "t,o", "t,o,E", 2, 4, iw_F2I16_type, + MATCH_R2_ORHI, MASK_R2_ORHI, NIOS2_INSN_MACRO_MOVIA, no_overflow}, + {"movui", "t,u", "t,u,E", 2, 4, iw_F2I16_type, + MATCH_R2_MOVUI, MASK_R2_MOVUI, + NIOS2_INSN_MACRO_MOVI, unsigned_immed16_overflow}, + {"mul", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type, + MATCH_R2_MUL, MASK_R2_MUL, 0, no_overflow}, + {"muli", "t,s,i", "t,s,i,E", 3, 4, iw_F2I16_type, + MATCH_R2_MULI, MASK_R2_MULI, 0, signed_immed16_overflow}, + {"mulxss", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type, + MATCH_R2_MULXSS, MASK_R2_MULXSS, 0, no_overflow}, + {"mulxsu", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type, + MATCH_R2_MULXSU, MASK_R2_MULXSU, 0, no_overflow}, + {"mulxuu", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type, + MATCH_R2_MULXUU, MASK_R2_MULXUU, 0, no_overflow}, + /* The encoding of the neg.n operands is backwards, not + the interpretation -- the first operand is still the + destination and the second the source. 
*/ + {"neg.n", "S,D", "S,D,E", 2, 2, iw_T2X3_type, + MATCH_R2_NEG_N, MASK_R2_NEG_N, 0, no_overflow}, + {"nextpc", "d", "d,E", 1, 4, iw_F3X6_type, + MATCH_R2_NEXTPC, MASK_R2_NEXTPC, 0, no_overflow}, + {"nop", "", "E", 0, 4, iw_F3X6_type, + MATCH_R2_NOP, MASK_R2_NOP, NIOS2_INSN_MACRO_MOV, no_overflow}, + {"nop.n", "", "E", 0, 2, iw_F2_type, + MATCH_R2_NOP_N, MASK_R2_NOP_N, NIOS2_INSN_MACRO_MOV, no_overflow}, + {"nor", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type, + MATCH_R2_NOR, MASK_R2_NOR, 0, no_overflow}, + {"not.n", "D,S", "D,S,E", 2, 2, iw_T2X3_type, + MATCH_R2_NOT_N, MASK_R2_NOT_N, 0, no_overflow}, + {"or", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type, + MATCH_R2_OR, MASK_R2_OR, 0, no_overflow}, + {"orhi", "t,s,u", "t,s,u,E", 3, 4, iw_F2I16_type, + MATCH_R2_ORHI, MASK_R2_ORHI, 0, unsigned_immed16_overflow}, + {"ori", "t,s,u", "t,s,u,E", 3, 4, iw_F2I16_type, + MATCH_R2_ORI, MASK_R2_ORI, 0, unsigned_immed16_overflow}, + {"or.n", "D,S,T", "D,S,T,E", 3, 2, iw_T2X3_type, + MATCH_R2_OR_N, MASK_R2_OR_N, 0, no_overflow}, + {"pop.n", "R,W", "R,W,E", 2, 2, iw_L5I4X1_type, + MATCH_R2_POP_N, MASK_R2_POP_N, NIOS2_INSN_OPTARG, no_overflow}, + {"push.n", "R,W", "R,W,E", 2, 2, iw_L5I4X1_type, + MATCH_R2_PUSH_N, MASK_R2_PUSH_N, NIOS2_INSN_OPTARG, no_overflow}, + {"rdctl", "d,c", "d,c,E", 2, 4, iw_F3X6L5_type, + MATCH_R2_RDCTL, MASK_R2_RDCTL, 0, no_overflow}, + {"rdprs", "t,s,I", "t,s,I,E", 3, 4, iw_F2X4I12_type, + MATCH_R2_RDPRS, MASK_R2_RDPRS, 0, signed_immed12_overflow}, + {"ret", "", "E", 0, 4, iw_F3X6_type, + MATCH_R2_RET, MASK_R2_RET, 0, no_overflow}, + {"ret.n", "", "E", 0, 2, iw_X2L5_type, + MATCH_R2_RET_N, MASK_R2_RET_N, 0, no_overflow}, + {"rol", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type, + MATCH_R2_ROL, MASK_R2_ROL, 0, no_overflow}, + {"roli", "d,s,j", "d,s,j,E", 3, 4, iw_F3X6L5_type, + MATCH_R2_ROLI, MASK_R2_ROLI, 0, unsigned_immed5_overflow}, + {"ror", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type, + MATCH_R2_ROR, MASK_R2_ROR, 0, no_overflow}, + {"sll", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type, + MATCH_R2_SLL, MASK_R2_SLL, 0, no_overflow}, + {"slli", "d,s,j", "d,s,j,E", 3, 4, iw_F3X6L5_type, + MATCH_R2_SLLI, MASK_R2_SLLI, 0, unsigned_immed5_overflow}, + {"sll.n", "D,S,T", "D,S,T,E", 3, 2, iw_T2X3_type, + MATCH_R2_SLL_N, MASK_R2_SLL_N, 0, no_overflow}, + {"slli.n", "D,S,f", "D,S,f,E", 3, 2, iw_T2X1L3_type, + MATCH_R2_SLLI_N, MASK_R2_SLLI_N, 0, enumeration_overflow}, + {"spaddi.n", "D,U", "D,U,E", 2, 2, iw_T1I7_type, + MATCH_R2_SPADDI_N, MASK_R2_SPADDI_N, 0, address_offset_overflow}, + {"spdeci.n", "U", "U,E", 1, 2, iw_X1I7_type, + MATCH_R2_SPDECI_N, MASK_R2_SPDECI_N, 0, address_offset_overflow}, + {"spinci.n", "U", "U,E", 1, 2, iw_X1I7_type, + MATCH_R2_SPINCI_N, MASK_R2_SPINCI_N, 0, address_offset_overflow}, + {"sra", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type, + MATCH_R2_SRA, MASK_R2_SRA, 0, no_overflow}, + {"srai", "d,s,j", "d,s,j,E", 3, 4, iw_F3X6L5_type, + MATCH_R2_SRAI, MASK_R2_SRAI, 0, unsigned_immed5_overflow}, + {"srl", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type, + MATCH_R2_SRL, MASK_R2_SRL, 0, no_overflow}, + {"srli", "d,s,j", "d,s,j,E", 3, 4, iw_F3X6L5_type, + MATCH_R2_SRLI, MASK_R2_SRLI, 0, unsigned_immed5_overflow}, + {"srl.n", "D,S,T", "D,S,T,E", 3, 2, iw_T2X3_type, + MATCH_R2_SRL_N, MASK_R2_SRL_N, 0, no_overflow}, + {"srli.n", "D,S,f", "D,S,f,E", 3, 2, iw_T2X1L3_type, + MATCH_R2_SRLI_N, MASK_R2_SRLI_N, 0, enumeration_overflow}, + {"stb", "t,i(s)", "t,i(s),E", 3, 4, iw_F2I16_type, + MATCH_R2_STB, MASK_R2_STB, 0, address_offset_overflow}, + {"stbio", "t,I(s)", "t,I(s),E", 3, 4, iw_F2X4I12_type, + 
MATCH_R2_STBIO, MASK_R2_STBIO, 0, signed_immed12_overflow}, + {"stb.n", "T,Y(S)", "T,Y(S),E", 3, 2, iw_T2I4_type, + MATCH_R2_STB_N, MASK_R2_STB_N, 0, address_offset_overflow}, + {"stbz.n", "t,M(S)", "t,M(S),E", 3, 2, iw_T1X1I6_type, + MATCH_R2_STBZ_N, MASK_R2_STBZ_N, 0, address_offset_overflow}, + {"stex", "d,t,(s)", "d,t,(s),E", 3, 4, iw_F3X6_type, + MATCH_R2_STEX, MASK_R2_STEX, 0, no_overflow}, + {"sth", "t,i(s)", "t,i(s),E", 3, 4, iw_F2I16_type, + MATCH_R2_STH, MASK_R2_STH, 0, address_offset_overflow}, + {"sthio", "t,I(s)", "t,I(s),E", 3, 4, iw_F2X4I12_type, + MATCH_R2_STHIO, MASK_R2_STHIO, 0, signed_immed12_overflow}, + {"sth.n", "T,X(S)", "T,X(S),E", 3, 2, iw_T2I4_type, + MATCH_R2_STH_N, MASK_R2_STH_N, 0, address_offset_overflow}, + {"stsex", "d,t,(s)", "d,t,(s),E", 3, 4, iw_F3X6_type, + MATCH_R2_STSEX, MASK_R2_STSEX, 0, no_overflow}, + {"stw", "t,i(s)", "t,i(s),E", 3, 4, iw_F2I16_type, + MATCH_R2_STW, MASK_R2_STW, 0, address_offset_overflow}, + {"stwio", "t,I(s)", "t,I(s),E", 3, 4, iw_F2X4I12_type, + MATCH_R2_STWIO, MASK_R2_STWIO, 0, signed_immed12_overflow}, + {"stwm", "R,B", "R,B,E", 2, 4, iw_F1X4L17_type, + MATCH_R2_STWM, MASK_R2_STWM, 0, no_overflow}, + {"stwsp.n", "t,V(s)", "t,V(s),E", 3, 2, iw_F1I5_type, + MATCH_R2_STWSP_N, MASK_R2_STWSP_N, 0, address_offset_overflow}, + {"stw.n", "T,W(S)", "T,W(S),E", 3, 2, iw_T2I4_type, + MATCH_R2_STW_N, MASK_R2_STW_N, 0, address_offset_overflow}, + {"stwz.n", "t,N(S)", "t,N(S),E", 3, 2, iw_T1X1I6_type, + MATCH_R2_STWZ_N, MASK_R2_STWZ_N, 0, address_offset_overflow}, + {"sub", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type, + MATCH_R2_SUB, MASK_R2_SUB, 0, no_overflow}, + {"subi", "t,s,i", "t,s,i,E", 3, 4, iw_F2I16_type, + MATCH_R2_SUBI, MASK_R2_SUBI, NIOS2_INSN_MACRO, signed_immed16_overflow}, + {"sub.n", "D,S,T", "D,S,T,E", 3, 2, iw_T3X1_type, + MATCH_R2_SUB_N, MASK_R2_SUB_N, 0, no_overflow}, + {"subi.n", "D,S,e", "D,S,e,E", 3, 2, iw_T2X1I3_type, + MATCH_R2_SUBI_N, MASK_R2_SUBI_N, 0, enumeration_overflow}, + {"sync", "", "E", 0, 4, iw_F3X6_type, + MATCH_R2_SYNC, MASK_R2_SYNC, 0, no_overflow}, + {"trap", "j", "j,E", 1, 4, iw_F3X6L5_type, + MATCH_R2_TRAP, MASK_R2_TRAP, NIOS2_INSN_OPTARG, no_overflow}, + {"trap.n", "j", "j,E", 1, 2, iw_X2L5_type, + MATCH_R2_TRAP_N, MASK_R2_TRAP_N, NIOS2_INSN_OPTARG, no_overflow}, + {"wrctl", "c,s", "c,s,E", 2, 4, iw_F3X6L5_type, + MATCH_R2_WRCTL, MASK_R2_WRCTL, 0, no_overflow}, + {"wrpie", "d,s", "d,s,E", 2, 4, iw_F3X6L5_type, + MATCH_R2_WRPIE, MASK_R2_WRPIE, 0, no_overflow}, + {"wrprs", "d,s", "d,s,E", 2, 4, iw_F3X6_type, + MATCH_R2_WRPRS, MASK_R2_WRPRS, 0, no_overflow}, + {"xor", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type, + MATCH_R2_XOR, MASK_R2_XOR, 0, no_overflow}, + {"xorhi", "t,s,u", "t,s,u,E", 3, 4, iw_F2I16_type, + MATCH_R2_XORHI, MASK_R2_XORHI, 0, unsigned_immed16_overflow}, + {"xori", "t,s,u", "t,s,u,E", 3, 4, iw_F2I16_type, + MATCH_R2_XORI, MASK_R2_XORI, 0, unsigned_immed16_overflow}, + {"xor.n", "D,S,T", "D,S,T,E", 3, 2, iw_T2X3_type, + MATCH_R2_XOR_N, MASK_R2_XOR_N, 0, no_overflow}, +}; + +#define NIOS2_NUM_R2_OPCODES \ + ((sizeof nios2_r2_opcodes) / (sizeof (nios2_r2_opcodes[0]))) +const int nios2_num_r2_opcodes = NIOS2_NUM_R2_OPCODES; + +/* Default to using the R1 instruction tables. */ +struct nios2_opcode *nios2_opcodes = (struct nios2_opcode *) nios2_r1_opcodes; +int nios2_num_opcodes = NIOS2_NUM_R1_OPCODES; +#undef NIOS2_NUM_R1_OPCODES +#undef NIOS2_NUM_R2_OPCODES + +/* Decodings for R2 asi.n (addi.n/subi.n) immediate values. 
*/ +unsigned int nios2_r2_asi_n_mappings[] = + {1, 2, 4, 8, 16, 32, 64, 128}; +const int nios2_num_r2_asi_n_mappings = 8; + +/* Decodings for R2 shi.n (slli.n/srli.n) immediate values. */ +unsigned int nios2_r2_shi_n_mappings[] = + {1, 2, 3, 8, 12, 16, 24, 31}; +const int nios2_num_r2_shi_n_mappings = 8; + +/* Decodings for R2 andi.n immediate values. */ +unsigned int nios2_r2_andi_n_mappings[] = + {1, 2, 3, 4, 8, 0xf, 0x10, 0x1f, + 0x20, 0x3f, 0x7f, 0x80, 0xff, 0x7ff, 0xff00, 0xffff}; +const int nios2_num_r2_andi_n_mappings = 16; + +/* Decodings for R2 3-bit register fields. */ +int nios2_r2_reg3_mappings[] = + {16, 17, 2, 3, 4, 5, 6, 7}; +const int nios2_num_r2_reg3_mappings = 8; + +/* Decodings for R2 push.n/pop.n REG_RANGE value list. */ +unsigned long nios2_r2_reg_range_mappings[] = { + 0x00010000, + 0x00030000, + 0x00070000, + 0x000f0000, + 0x001f0000, + 0x003f0000, + 0x007f0000, + 0x00ff0000 +}; +const int nios2_num_r2_reg_range_mappings = 8; + +/*#include "sysdep.h" +#include "dis-asm.h" +#include "opcode/nios2.h" +#include "libiberty.h" +#include <string.h> +#include <assert.h> +*/ +/* No symbol table is available when this code runs out in an embedded + system as when it is used for disassembler support in a monitor. */ +#if !defined(EMBEDDED_ENV) +#define SYMTAB_AVAILABLE 1 +/* +#include "elf-bfd.h" +#include "elf/nios2.h" +*/ +#endif + +/* Default length of Nios II instruction in bytes. */ +#define INSNLEN 4 + +/* Data structures used by the opcode hash table. */ +typedef struct _nios2_opcode_hash +{ + const struct nios2_opcode *opcode; + struct _nios2_opcode_hash *next; +} nios2_opcode_hash; + +/* Hash table size. */ +#define OPCODE_HASH_SIZE (IW_R1_OP_UNSHIFTED_MASK + 1) + +/* Extract the opcode from an instruction word. */ +static unsigned int +nios2_r1_extract_opcode (unsigned int x) +{ + return GET_IW_R1_OP (x); +} + +static unsigned int +nios2_r2_extract_opcode (unsigned int x) +{ + return GET_IW_R2_OP (x); +} + +/* We maintain separate hash tables for R1 and R2 opcodes, and pseudo-ops + are stored in a different table than regular instructions. */ + +typedef struct _nios2_disassembler_state +{ + const struct nios2_opcode *opcodes; + const int *num_opcodes; + unsigned int (*extract_opcode) (unsigned int); + nios2_opcode_hash *hash[OPCODE_HASH_SIZE]; + nios2_opcode_hash *ps_hash[OPCODE_HASH_SIZE]; + const struct nios2_opcode *nop; + bfd_boolean init; +} nios2_disassembler_state; + +static nios2_disassembler_state +nios2_r1_disassembler_state = { + nios2_r1_opcodes, + &nios2_num_r1_opcodes, + nios2_r1_extract_opcode, + {}, + {}, + NULL, + 0 +}; + +static nios2_disassembler_state +nios2_r2_disassembler_state = { + nios2_r2_opcodes, + &nios2_num_r2_opcodes, + nios2_r2_extract_opcode, + {}, + {}, + NULL, + 0 +}; + +/* Function to initialize the opcode hash table. 
*/ +static void +nios2_init_opcode_hash (nios2_disassembler_state *state) +{ + unsigned int i; + register const struct nios2_opcode *op; + + for (i = 0; i < OPCODE_HASH_SIZE; i++) + for (op = state->opcodes; op < &state->opcodes[*(state->num_opcodes)]; op++) + { + nios2_opcode_hash *new_hash; + nios2_opcode_hash **bucket = NULL; + + if ((op->pinfo & NIOS2_INSN_MACRO) == NIOS2_INSN_MACRO) + { + if (i == state->extract_opcode (op->match) + && (op->pinfo & (NIOS2_INSN_MACRO_MOV | NIOS2_INSN_MACRO_MOVI) + & 0x7fffffff)) + { + bucket = &(state->ps_hash[i]); + if (strcmp (op->name, "nop") == 0) + state->nop = op; + } + } + else if (i == state->extract_opcode (op->match)) + bucket = &(state->hash[i]); + + if (bucket) + { + new_hash = + (nios2_opcode_hash *) malloc (sizeof (nios2_opcode_hash)); + if (new_hash == NULL) + { + fprintf (stderr, + "error allocating memory...broken disassembler\n"); + abort (); + } + new_hash->opcode = op; + new_hash->next = NULL; + while (*bucket) + bucket = &((*bucket)->next); + *bucket = new_hash; + } + } + state->init = 1; + +#ifdef DEBUG_HASHTABLE + for (i = 0; i < OPCODE_HASH_SIZE; ++i) + { + nios2_opcode_hash *tmp_hash = state->hash[i]; + printf ("index: 0x%02X ops: ", i); + while (tmp_hash != NULL) + { + printf ("%s ", tmp_hash->opcode->name); + tmp_hash = tmp_hash->next; + } + printf ("\n"); + } + + for (i = 0; i < OPCODE_HASH_SIZE; ++i) + { + nios2_opcode_hash *tmp_hash = state->ps_hash[i]; + printf ("index: 0x%02X ops: ", i); + while (tmp_hash != NULL) + { + printf ("%s ", tmp_hash->opcode->name); + tmp_hash = tmp_hash->next; + } + printf ("\n"); + } +#endif /* DEBUG_HASHTABLE */ +} + +/* Return a pointer to an nios2_opcode struct for a given instruction + word OPCODE for bfd machine MACH, or NULL if there is an error. */ +const struct nios2_opcode * +nios2_find_opcode_hash (unsigned long opcode, unsigned long mach) +{ + nios2_opcode_hash *entry; + nios2_disassembler_state *state; + + /* Select the right instruction set, hash tables, and opcode accessor + for the mach variant. */ + if (mach == bfd_mach_nios2r2) + state = &nios2_r2_disassembler_state; + else + state = &nios2_r1_disassembler_state; + + /* Build a hash table to shorten the search time. */ + if (!state->init) + nios2_init_opcode_hash (state); + + /* Check for NOP first. Both NOP and MOV are macros that expand into + an ADD instruction, and we always want to give priority to NOP. */ + if (state->nop->match == (opcode & state->nop->mask)) + return state->nop; + + /* First look in the pseudo-op hashtable. */ + for (entry = state->ps_hash[state->extract_opcode (opcode)]; + entry; entry = entry->next) + if (entry->opcode->match == (opcode & entry->opcode->mask)) + return entry->opcode; + + /* Otherwise look in the main hashtable. */ + for (entry = state->hash[state->extract_opcode (opcode)]; + entry; entry = entry->next) + if (entry->opcode->match == (opcode & entry->opcode->mask)) + return entry->opcode; + + return NULL; +} + +/* There are 32 regular registers, 32 coprocessor registers, + and 32 control registers. */ +#define NUMREGNAMES 32 + +/* Return a pointer to the base of the coprocessor register name array. */ +static struct nios2_reg * +nios2_coprocessor_regs (void) +{ + static struct nios2_reg *cached = NULL; + + if (!cached) + { + int i; + for (i = NUMREGNAMES; i < nios2_num_regs; i++) + if (!strcmp (nios2_regs[i].name, "c0")) + { + cached = nios2_regs + i; + break; + } + assert (cached); + } + return cached; +} + +/* Return a pointer to the base of the control register name array. 
*/ +static struct nios2_reg * +nios2_control_regs (void) +{ + static struct nios2_reg *cached = NULL; + + if (!cached) + { + int i; + for (i = NUMREGNAMES; i < nios2_num_regs; i++) + if (!strcmp (nios2_regs[i].name, "status")) + { + cached = nios2_regs + i; + break; + } + assert (cached); + } + return cached; +} + +/* Helper routine to report internal errors. */ +static void +bad_opcode (const struct nios2_opcode *op) +{ + fprintf (stderr, "Internal error: broken opcode descriptor for `%s %s'\n", + op->name, op->args); + abort (); +} + +/* The function nios2_print_insn_arg uses the character pointed + to by ARGPTR to determine how it prints the next token or separator + character in the arguments to an instruction. */ +static int +nios2_print_insn_arg (const char *argptr, + unsigned long opcode, bfd_vma address, + disassemble_info *info, + const struct nios2_opcode *op) +{ + unsigned long i = 0; + struct nios2_reg *reg_base; + + switch (*argptr) + { + case ',': + case '(': + case ')': + (*info->fprintf_func) (info->stream, "%c", *argptr); + break; + + case 'c': + /* Control register index. */ + switch (op->format) + { + case iw_r_type: + i = GET_IW_R_IMM5 (opcode); + break; + case iw_F3X6L5_type: + i = GET_IW_F3X6L5_IMM5 (opcode); + break; + default: + bad_opcode (op); + } + reg_base = nios2_control_regs (); + (*info->fprintf_func) (info->stream, "%s", reg_base[i].name); + break; + + case 'd': + reg_base = nios2_regs; + switch (op->format) + { + case iw_r_type: + i = GET_IW_R_C (opcode); + break; + case iw_custom_type: + i = GET_IW_CUSTOM_C (opcode); + if (GET_IW_CUSTOM_READC (opcode) == 0) + reg_base = nios2_coprocessor_regs (); + break; + case iw_F3X6L5_type: + case iw_F3X6_type: + i = GET_IW_F3X6L5_C (opcode); + break; + case iw_F3X8_type: + i = GET_IW_F3X8_C (opcode); + if (GET_IW_F3X8_READC (opcode) == 0) + reg_base = nios2_coprocessor_regs (); + break; + case iw_F2_type: + i = GET_IW_F2_B (opcode); + break; + default: + bad_opcode (op); + } + if (i < NUMREGNAMES) + (*info->fprintf_func) (info->stream, "%s", reg_base[i].name); + else + (*info->fprintf_func) (info->stream, "unknown"); + break; + + case 's': + reg_base = nios2_regs; + switch (op->format) + { + case iw_r_type: + i = GET_IW_R_A (opcode); + break; + case iw_i_type: + i = GET_IW_I_A (opcode); + break; + case iw_custom_type: + i = GET_IW_CUSTOM_A (opcode); + if (GET_IW_CUSTOM_READA (opcode) == 0) + reg_base = nios2_coprocessor_regs (); + break; + case iw_F2I16_type: + i = GET_IW_F2I16_A (opcode); + break; + case iw_F2X4I12_type: + i = GET_IW_F2X4I12_A (opcode); + break; + case iw_F1X4I12_type: + i = GET_IW_F1X4I12_A (opcode); + break; + case iw_F1X4L17_type: + i = GET_IW_F1X4L17_A (opcode); + break; + case iw_F3X6L5_type: + case iw_F3X6_type: + i = GET_IW_F3X6L5_A (opcode); + break; + case iw_F2X6L10_type: + i = GET_IW_F2X6L10_A (opcode); + break; + case iw_F3X8_type: + i = GET_IW_F3X8_A (opcode); + if (GET_IW_F3X8_READA (opcode) == 0) + reg_base = nios2_coprocessor_regs (); + break; + case iw_F1X1_type: + i = GET_IW_F1X1_A (opcode); + break; + case iw_F1I5_type: + i = 27; /* Implicit stack pointer reference. 
*/ + break; + case iw_F2_type: + i = GET_IW_F2_A (opcode); + break; + default: + bad_opcode (op); + } + if (i < NUMREGNAMES) + (*info->fprintf_func) (info->stream, "%s", reg_base[i].name); + else + (*info->fprintf_func) (info->stream, "unknown"); + break; + + case 't': + reg_base = nios2_regs; + switch (op->format) + { + case iw_r_type: + i = GET_IW_R_B (opcode); + break; + case iw_i_type: + i = GET_IW_I_B (opcode); + break; + case iw_custom_type: + i = GET_IW_CUSTOM_B (opcode); + if (GET_IW_CUSTOM_READB (opcode) == 0) + reg_base = nios2_coprocessor_regs (); + break; + case iw_F2I16_type: + i = GET_IW_F2I16_B (opcode); + break; + case iw_F2X4I12_type: + i = GET_IW_F2X4I12_B (opcode); + break; + case iw_F3X6L5_type: + case iw_F3X6_type: + i = GET_IW_F3X6L5_B (opcode); + break; + case iw_F2X6L10_type: + i = GET_IW_F2X6L10_B (opcode); + break; + case iw_F3X8_type: + i = GET_IW_F3X8_B (opcode); + if (GET_IW_F3X8_READB (opcode) == 0) + reg_base = nios2_coprocessor_regs (); + break; + case iw_F1I5_type: + i = GET_IW_F1I5_B (opcode); + break; + case iw_F2_type: + i = GET_IW_F2_B (opcode); + break; + case iw_T1X1I6_type: + i = 0; + break; + default: + bad_opcode (op); + } + if (i < NUMREGNAMES) + (*info->fprintf_func) (info->stream, "%s", reg_base[i].name); + else + (*info->fprintf_func) (info->stream, "unknown"); + break; + + case 'D': + switch (op->format) + { + case iw_T1I7_type: + i = GET_IW_T1I7_A3 (opcode); + break; + case iw_T2X1L3_type: + i = GET_IW_T2X1L3_B3 (opcode); + break; + case iw_T2X1I3_type: + i = GET_IW_T2X1I3_B3 (opcode); + break; + case iw_T3X1_type: + i = GET_IW_T3X1_C3 (opcode); + break; + case iw_T2X3_type: + if (op->num_args == 3) + i = GET_IW_T2X3_A3 (opcode); + else + i = GET_IW_T2X3_B3 (opcode); + break; + default: + bad_opcode (op); + } + i = nios2_r2_reg3_mappings[i]; + (*info->fprintf_func) (info->stream, "%s", nios2_regs[i].name); + break; + + case 'M': + /* 6-bit unsigned immediate with no shift. */ + switch (op->format) + { + case iw_T1X1I6_type: + i = GET_IW_T1X1I6_IMM6 (opcode); + break; + default: + bad_opcode (op); + } + (*info->fprintf_func) (info->stream, "%ld", i); + break; + + case 'N': + /* 6-bit unsigned immediate with 2-bit shift. */ + switch (op->format) + { + case iw_T1X1I6_type: + i = GET_IW_T1X1I6_IMM6 (opcode) << 2; + break; + default: + bad_opcode (op); + } + (*info->fprintf_func) (info->stream, "%ld", i); + break; + + case 'S': + switch (op->format) + { + case iw_T1I7_type: + i = GET_IW_T1I7_A3 (opcode); + break; + case iw_T2I4_type: + i = GET_IW_T2I4_A3 (opcode); + break; + case iw_T2X1L3_type: + i = GET_IW_T2X1L3_A3 (opcode); + break; + case iw_T2X1I3_type: + i = GET_IW_T2X1I3_A3 (opcode); + break; + case iw_T3X1_type: + i = GET_IW_T3X1_A3 (opcode); + break; + case iw_T2X3_type: + i = GET_IW_T2X3_A3 (opcode); + break; + case iw_T1X1I6_type: + i = GET_IW_T1X1I6_A3 (opcode); + break; + default: + bad_opcode (op); + } + i = nios2_r2_reg3_mappings[i]; + (*info->fprintf_func) (info->stream, "%s", nios2_regs[i].name); + break; + + case 'T': + switch (op->format) + { + case iw_T2I4_type: + i = GET_IW_T2I4_B3 (opcode); + break; + case iw_T3X1_type: + i = GET_IW_T3X1_B3 (opcode); + break; + case iw_T2X3_type: + i = GET_IW_T2X3_B3 (opcode); + break; + default: + bad_opcode (op); + } + i = nios2_r2_reg3_mappings[i]; + (*info->fprintf_func) (info->stream, "%s", nios2_regs[i].name); + break; + + case 'i': + /* 16-bit signed immediate. 
*/ + switch (op->format) + { + case iw_i_type: + i = (signed) (GET_IW_I_IMM16 (opcode) << 16) >> 16; + break; + case iw_F2I16_type: + i = (signed) (GET_IW_F2I16_IMM16 (opcode) << 16) >> 16; + break; + default: + bad_opcode (op); + } + (*info->fprintf_func) (info->stream, "%ld", i); + break; + + case 'I': + /* 12-bit signed immediate. */ + switch (op->format) + { + case iw_F2X4I12_type: + i = (signed) (GET_IW_F2X4I12_IMM12 (opcode) << 20) >> 20; + break; + case iw_F1X4I12_type: + i = (signed) (GET_IW_F1X4I12_IMM12 (opcode) << 20) >> 20; + break; + default: + bad_opcode (op); + } + (*info->fprintf_func) (info->stream, "%ld", i); + break; + + case 'u': + /* 16-bit unsigned immediate. */ + switch (op->format) + { + case iw_i_type: + i = GET_IW_I_IMM16 (opcode); + break; + case iw_F2I16_type: + i = GET_IW_F2I16_IMM16 (opcode); + break; + default: + bad_opcode (op); + } + (*info->fprintf_func) (info->stream, "%ld", i); + break; + + case 'U': + /* 7-bit unsigned immediate with 2-bit shift. */ + switch (op->format) + { + case iw_T1I7_type: + i = GET_IW_T1I7_IMM7 (opcode) << 2; + break; + case iw_X1I7_type: + i = GET_IW_X1I7_IMM7 (opcode) << 2; + break; + default: + bad_opcode (op); + } + (*info->fprintf_func) (info->stream, "%ld", i); + break; + + case 'V': + /* 5-bit unsigned immediate with 2-bit shift. */ + switch (op->format) + { + case iw_F1I5_type: + i = GET_IW_F1I5_IMM5 (opcode) << 2; + break; + default: + bad_opcode (op); + } + (*info->fprintf_func) (info->stream, "%ld", i); + break; + + case 'W': + /* 4-bit unsigned immediate with 2-bit shift. */ + switch (op->format) + { + case iw_T2I4_type: + i = GET_IW_T2I4_IMM4 (opcode) << 2; + break; + case iw_L5I4X1_type: + i = GET_IW_L5I4X1_IMM4 (opcode) << 2; + break; + default: + bad_opcode (op); + } + (*info->fprintf_func) (info->stream, "%ld", i); + break; + + case 'X': + /* 4-bit unsigned immediate with 1-bit shift. */ + switch (op->format) + { + case iw_T2I4_type: + i = GET_IW_T2I4_IMM4 (opcode) << 1; + break; + default: + bad_opcode (op); + } + (*info->fprintf_func) (info->stream, "%ld", i); + break; + + case 'Y': + /* 4-bit unsigned immediate without shift. */ + switch (op->format) + { + case iw_T2I4_type: + i = GET_IW_T2I4_IMM4 (opcode); + break; + default: + bad_opcode (op); + } + (*info->fprintf_func) (info->stream, "%ld", i); + break; + + case 'o': + /* 16-bit signed immediate address offset. */ + switch (op->format) + { + case iw_i_type: + i = (signed) (GET_IW_I_IMM16 (opcode) << 16) >> 16; + break; + case iw_F2I16_type: + i = (signed) (GET_IW_F2I16_IMM16 (opcode) << 16) >> 16; + break; + default: + bad_opcode (op); + } + address = address + 4 + i; + (*info->print_address_func) (address, info); + break; + + case 'O': + /* 10-bit signed address offset with 1-bit shift. */ + switch (op->format) + { + case iw_I10_type: + i = (signed) (GET_IW_I10_IMM10 (opcode) << 22) >> 21; + break; + default: + bad_opcode (op); + } + address = address + 2 + i; + (*info->print_address_func) (address, info); + break; + + case 'P': + /* 7-bit signed address offset with 1-bit shift. */ + switch (op->format) + { + case iw_T1I7_type: + i = (signed) (GET_IW_T1I7_IMM7 (opcode) << 25) >> 24; + break; + default: + bad_opcode (op); + } + address = address + 2 + i; + (*info->print_address_func) (address, info); + break; + + case 'j': + /* 5-bit unsigned immediate. 
*/ + switch (op->format) + { + case iw_r_type: + i = GET_IW_R_IMM5 (opcode); + break; + case iw_F3X6L5_type: + i = GET_IW_F3X6L5_IMM5 (opcode); + break; + case iw_F2X6L10_type: + i = GET_IW_F2X6L10_MSB (opcode); + break; + case iw_X2L5_type: + i = GET_IW_X2L5_IMM5 (opcode); + break; + default: + bad_opcode (op); + } + (*info->fprintf_func) (info->stream, "%ld", i); + break; + + case 'k': + /* Second 5-bit unsigned immediate field. */ + switch (op->format) + { + case iw_F2X6L10_type: + i = GET_IW_F2X6L10_LSB (opcode); + break; + default: + bad_opcode (op); + } + (*info->fprintf_func) (info->stream, "%ld", i); + break; + + case 'l': + /* 8-bit unsigned immediate. */ + switch (op->format) + { + case iw_custom_type: + i = GET_IW_CUSTOM_N (opcode); + break; + case iw_F3X8_type: + i = GET_IW_F3X8_N (opcode); + break; + default: + bad_opcode (op); + } + (*info->fprintf_func) (info->stream, "%lu", i); + break; + + case 'm': + /* 26-bit unsigned immediate. */ + switch (op->format) + { + case iw_j_type: + i = GET_IW_J_IMM26 (opcode); + break; + case iw_L26_type: + i = GET_IW_L26_IMM26 (opcode); + break; + default: + bad_opcode (op); + } + /* This translates to an address because it's only used in call + instructions. */ + address = (address & 0xf0000000) | (i << 2); + (*info->print_address_func) (address, info); + break; + + case 'e': + /* Encoded enumeration for addi.n/subi.n. */ + switch (op->format) + { + case iw_T2X1I3_type: + i = nios2_r2_asi_n_mappings[GET_IW_T2X1I3_IMM3 (opcode)]; + break; + default: + bad_opcode (op); + } + (*info->fprintf_func) (info->stream, "%lu", i); + break; + + case 'f': + /* Encoded enumeration for slli.n/srli.n. */ + switch (op->format) + { + case iw_T2X1L3_type: + i = nios2_r2_shi_n_mappings[GET_IW_T2X1I3_IMM3 (opcode)]; + break; + default: + bad_opcode (op); + } + (*info->fprintf_func) (info->stream, "%lu", i); + break; + + case 'g': + /* Encoded enumeration for andi.n. */ + switch (op->format) + { + case iw_T2I4_type: + i = nios2_r2_andi_n_mappings[GET_IW_T2I4_IMM4 (opcode)]; + break; + default: + bad_opcode (op); + } + (*info->fprintf_func) (info->stream, "%lu", i); + break; + + case 'h': + /* Encoded enumeration for movi.n. */ + switch (op->format) + { + case iw_T1I7_type: + i = GET_IW_T1I7_IMM7 (opcode); + if (i == 125) + i = 0xff; + else if (i == 126) + i = -2; + else if (i == 127) + i = -1; + break; + default: + bad_opcode (op); + } + (*info->fprintf_func) (info->stream, "%ld", i); + break; + + case 'R': + { + unsigned long reglist = 0; + int dir = 1; + int k, t; + + switch (op->format) + { + case iw_F1X4L17_type: + /* Encoding for ldwm/stwm. */ + i = GET_IW_F1X4L17_REGMASK (opcode); + if (GET_IW_F1X4L17_RS (opcode)) + { + reglist = ((i << 14) & 0x00ffc000); + if (i & (1 << 10)) + reglist |= (1 << 28); + if (i & (1 << 11)) + reglist |= (1 << 31); + } + else + reglist = i << 2; + dir = GET_IW_F1X4L17_REGMASK (opcode) ? 1 : -1; + break; + + case iw_L5I4X1_type: + /* Encoding for push.n/pop.n. */ + reglist |= (1 << 31); + if (GET_IW_L5I4X1_FP (opcode)) + reglist |= (1 << 28); + if (GET_IW_L5I4X1_CS (opcode)) + { + int val = GET_IW_L5I4X1_REGRANGE (opcode); + reglist |= nios2_r2_reg_range_mappings[val]; + } + dir = (op->match == MATCH_R2_POP_N ? 1 : -1); + break; + + default: + bad_opcode (op); + } + + t = 0; + (*info->fprintf_func) (info->stream, "{"); + for (k = (dir == 1 ? 
0 : 31); + (dir == 1 && k < 32) || (dir == -1 && k >= 0); + k += dir) + if (reglist & (1 << k)) + { + if (t) + (*info->fprintf_func) (info->stream, ","); + else + t++; + (*info->fprintf_func) (info->stream, "%s", nios2_regs[k].name); + } + (*info->fprintf_func) (info->stream, "}"); + break; + } + + case 'B': + /* Base register and options for ldwm/stwm. */ + switch (op->format) + { + case iw_F1X4L17_type: + if (GET_IW_F1X4L17_ID (opcode) == 0) + (*info->fprintf_func) (info->stream, "--"); + + i = GET_IW_F1X4I12_A (opcode); + (*info->fprintf_func) (info->stream, "(%s)", + nios2_builtin_regs[i].name); + + if (GET_IW_F1X4L17_ID (opcode)) + (*info->fprintf_func) (info->stream, "++"); + if (GET_IW_F1X4L17_WB (opcode)) + (*info->fprintf_func) (info->stream, ",writeback"); + if (GET_IW_F1X4L17_PC (opcode)) + (*info->fprintf_func) (info->stream, ",ret"); + break; + default: + bad_opcode (op); + } + break; + + default: + (*info->fprintf_func) (info->stream, "unknown"); + break; + } + return 0; +} + +/* nios2_disassemble does all the work of disassembling a Nios II + instruction opcode. */ +static int +nios2_disassemble (bfd_vma address, unsigned long opcode, + disassemble_info *info) +{ + const struct nios2_opcode *op; + + info->bytes_per_line = INSNLEN; + info->bytes_per_chunk = INSNLEN; + info->display_endian = info->endian; + info->insn_info_valid = 1; + info->branch_delay_insns = 0; + info->data_size = 0; + info->insn_type = dis_nonbranch; + info->target = 0; + info->target2 = 0; + + /* Find the major opcode and use this to disassemble + the instruction and its arguments. */ + op = nios2_find_opcode_hash (opcode, info->mach); + + if (op != NULL) + { + const char *argstr = op->args; + (*info->fprintf_func) (info->stream, "%s", op->name); + if (argstr != NULL && *argstr != '\0') + { + (*info->fprintf_func) (info->stream, "\t"); + while (*argstr != '\0') + { + nios2_print_insn_arg (argstr, opcode, address, info, op); + ++argstr; + } + } + /* Tell the caller how far to advance the program counter. */ + info->bytes_per_chunk = op->size; + return op->size; + } + else + { + /* Handle undefined instructions. */ + info->insn_type = dis_noninsn; + (*info->fprintf_func) (info->stream, "0x%lx", opcode); + return INSNLEN; + } +} + + +/* print_insn_nios2 is the main disassemble function for Nios II. + The function disassembler(abfd) (source in disassemble.c) returns a + pointer to either print_insn_big_nios2 or + print_insn_little_nios2, which in turn call this function when the + bfd machine type is Nios II. print_insn_nios2 reads the + instruction word at the address given, and prints the disassembled + instruction on the stream info->stream using info->fprintf_func. */ + +static int +print_insn_nios2 (bfd_vma address, disassemble_info *info, + enum bfd_endian endianness) +{ + bfd_byte buffer[INSNLEN]; + int status; + + status = (*info->read_memory_func) (address, buffer, INSNLEN, info); + if (status == 0) + { + unsigned long insn; + if (endianness == BFD_ENDIAN_BIG) + insn = (unsigned long) bfd_getb32 (buffer); + else + insn = (unsigned long) bfd_getl32 (buffer); + return nios2_disassemble (address, insn, info); + } + + /* We might have a 16-bit R2 instruction at the end of memory. Try that. 
*/ + if (info->mach == bfd_mach_nios2r2) + { + status = (*info->read_memory_func) (address, buffer, 2, info); + if (status == 0) + { + unsigned long insn; + if (endianness == BFD_ENDIAN_BIG) + insn = (unsigned long) bfd_getb16 (buffer); + else + insn = (unsigned long) bfd_getl16 (buffer); + return nios2_disassemble (address, insn, info); + } + } + + /* If we got here, we couldn't read anything. */ + (*info->memory_error_func) (status, address, info); + return -1; +} + +/* These two functions are the main entry points, accessed from + disassemble.c. */ +int +print_insn_big_nios2 (bfd_vma address, disassemble_info *info) +{ + return print_insn_nios2 (address, info, BFD_ENDIAN_BIG); +} + +int +print_insn_little_nios2 (bfd_vma address, disassemble_info *info) +{ + return print_insn_nios2 (address, info, BFD_ENDIAN_LITTLE); +} diff --git a/docs/specs/pci-ids.txt b/docs/specs/pci-ids.txt index fd27c677d4..16fdb0c93f 100644 --- a/docs/specs/pci-ids.txt +++ b/docs/specs/pci-ids.txt @@ -57,7 +57,10 @@ PCI devices (other than virtio): 1b36:0005 PCI test device (docs/specs/pci-testdev.txt) 1b36:0006 PCI Rocker Ethernet switch device 1b36:0007 PCI SD Card Host Controller Interface (SDHCI) +1b36:0008 PCIe host bridge +1b36:0009 PCI Expander Bridge (-device pxb) 1b36:000a PCI-PCI bridge (multiseat) +1b36:000b PCIe Expander Bridge (-device pxb-pcie) All these devices are documented in docs/specs. diff --git a/docs/usb-storage.txt b/docs/usb-storage.txt index fbc1f2edd8..551af6f88b 100644 --- a/docs/usb-storage.txt +++ b/docs/usb-storage.txt @@ -34,7 +34,7 @@ with tree logical units: Number three emulates the classic bulk-only transport protocol too. It's called "usb-bot". It shares most code with "usb-storage", and the guest will not be able to see the difference. The qemu command -line interface is simliar to usb-uas though, i.e. no automatic scsi +line interface is similar to usb-uas though, i.e. no automatic scsi disk creation. It also features support for up to 16 LUNs. The LUN numbers must be continuous, i.e. for three devices you must use 0+1+2. The 0+1+5 numbering from the "usb-uas" example isn't going to work diff --git a/docs/usb2.txt b/docs/usb2.txt index c7a445afcd..b9e7548073 100644 --- a/docs/usb2.txt +++ b/docs/usb2.txt @@ -19,7 +19,7 @@ the controller so the USB 2.0 bus gets a individual name, for example '-device usb-ehci,id=ehci". This will give you a USB 2.0 bus named "ehci.0". -I strongly recomment to also use -device to attach usb devices because +I strongly recommend to also use -device to attach usb devices because you can specify the bus they should be attached to this way. 
Here is a complete example: diff --git a/gdbstub.c b/gdbstub.c index de9b62b8f8..2d18ed73be 100644 --- a/gdbstub.c +++ b/gdbstub.c @@ -18,6 +18,7 @@ */ #include "qemu/osdep.h" #include "qapi/error.h" +#include "qemu/error-report.h" #include "qemu/cutils.h" #include "cpu.h" #ifdef CONFIG_USER_ONLY @@ -637,8 +638,8 @@ void gdb_register_coprocessor(CPUState *cpu, *p = s; if (g_pos) { if (g_pos != s->base_reg) { - fprintf(stderr, "Error: Bad gdb register numbering for '%s'\n" - "Expected %d got %d\n", xml, g_pos, s->base_reg); + error_report("Error: Bad gdb register numbering for '%s', " + "expected %d got %d", xml, g_pos, s->base_reg); } else { cpu->gdb_num_g_regs = cpu->gdb_num_regs; } @@ -890,7 +891,7 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf) } case 'k': /* Kill the target */ - fprintf(stderr, "\nQEMU: Terminated via GDBstub\n"); + error_report("QEMU: Terminated via GDBstub"); exit(0); case 'D': /* Detach packet */ @@ -1358,8 +1359,8 @@ void gdb_do_syscallv(gdb_syscall_complete_cb cb, const char *fmt, va_list va) break; default: bad_format: - fprintf(stderr, "gdbstub: Bad syscall format string '%s'\n", - fmt - 1); + error_report("gdbstub: Bad syscall format string '%s'", + fmt - 1); break; } } else { @@ -1732,6 +1733,12 @@ int gdbserver_start(const char *device) CharDriverState *mon_chr; ChardevCommon common = { 0 }; + if (!first_cpu) { + error_report("gdbstub: meaningless to attach gdb to a " + "machine without any CPU."); + return -1; + } + if (!device) return -1; if (strcmp(device, "none") != 0) { diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c index 138d8e825d..99e94723b9 100644 --- a/hw/9pfs/9p.c +++ b/hw/9pfs/9p.c @@ -979,6 +979,7 @@ static void coroutine_fn v9fs_attach(void *opaque) size_t offset = 7; V9fsQID qid; ssize_t err; + Error *local_err = NULL; v9fs_string_init(&uname); v9fs_string_init(&aname); @@ -1007,26 +1008,36 @@ static void coroutine_fn v9fs_attach(void *opaque) clunk_fid(s, fid); goto out; } - err = pdu_marshal(pdu, offset, "Q", &qid); - if (err < 0) { - clunk_fid(s, fid); - goto out; - } - err += offset; - memcpy(&s->root_qid, &qid, sizeof(qid)); - trace_v9fs_attach_return(pdu->tag, pdu->id, - qid.type, qid.version, qid.path); + /* * disable migration if we haven't done already. * attach could get called multiple times for the same export. */ if (!s->migration_blocker) { - s->root_fid = fid; error_setg(&s->migration_blocker, "Migration is disabled when VirtFS export path '%s' is mounted in the guest using mount_tag '%s'", s->ctx.fs_root ? 
s->ctx.fs_root : "NULL", s->tag); - migrate_add_blocker(s->migration_blocker); + err = migrate_add_blocker(s->migration_blocker, &local_err); + if (local_err) { + error_free(local_err); + error_free(s->migration_blocker); + s->migration_blocker = NULL; + clunk_fid(s, fid); + goto out; + } + s->root_fid = fid; + } + + err = pdu_marshal(pdu, offset, "Q", &qid); + if (err < 0) { + clunk_fid(s, fid); + goto out; } + err += offset; + + memcpy(&s->root_qid, &qid, sizeof(qid)); + trace_v9fs_attach_return(pdu->tag, pdu->id, + qid.type, qid.version, qid.path); out: put_fid(pdu, fidp); out_nofid: diff --git a/hw/block/m25p80.c b/hw/block/m25p80.c index e90451496e..2d6eb46a04 100644 --- a/hw/block/m25p80.c +++ b/hw/block/m25p80.c @@ -147,7 +147,7 @@ typedef struct FlashPartInfo { #define CFG_DUMMY_CLK_LEN 4 #define NVCFG_DUMMY_CLK_POS 12 #define VCFG_DUMMY_CLK_POS 4 -#define EVCFG_OUT_DRIVER_STRENGHT_DEF 7 +#define EVCFG_OUT_DRIVER_STRENGTH_DEF 7 #define EVCFG_VPP_ACCELERATOR (1 << 3) #define EVCFG_RESET_HOLD_ENABLED (1 << 4) #define NVCFG_DUAL_IO_MASK (1 << 2) @@ -747,7 +747,7 @@ static void reset_memory(Flash *s) ); s->enh_volatile_cfg = 0; - s->enh_volatile_cfg |= EVCFG_OUT_DRIVER_STRENGHT_DEF; + s->enh_volatile_cfg |= EVCFG_OUT_DRIVER_STRENGTH_DEF; s->enh_volatile_cfg |= EVCFG_VPP_ACCELERATOR; s->enh_volatile_cfg |= EVCFG_RESET_HOLD_ENABLED; if (s->nonvolatile_cfg & NVCFG_DUAL_IO_MASK) { diff --git a/hw/core/generic-loader.c b/hw/core/generic-loader.c index 208f549dff..58f1f02902 100644 --- a/hw/core/generic-loader.c +++ b/hw/core/generic-loader.c @@ -27,7 +27,7 @@ * this it needs a backend to manage the datas, the same as other * memory-related devices. In this case as the backend is so trivial we * have merged it with the frontend instead of creating and maintaining a - * seperate backend. + * separate backend. */ #include "qemu/osdep.h" @@ -79,7 +79,7 @@ static void generic_loader_realize(DeviceState *dev, Error **errp) "loading memory values"); return; } else if (!s->data_len) { - /* We cant' check for !data here as a value of 0 is still valid. */ + /* We can't check for !data here as a value of 0 is still valid. */ error_setg(errp, "Both data and data-len must be specified"); return; } else if (s->data_len > 8) { diff --git a/hw/core/qdev-properties.c b/hw/core/qdev-properties.c index 2a82768067..6ab4265eb4 100644 --- a/hw/core/qdev-properties.c +++ b/hw/core/qdev-properties.c @@ -711,7 +711,7 @@ static void get_pci_host_devaddr(Object *obj, Visitor *v, const char *name, /* * Catch "invalid" device reference from vfio-pci and allow the - * default buffer representing the non-existant device to be used. + * default buffer representing the non-existent device to be used. */ if (~addr->domain || ~addr->bus || ~addr->slot || ~addr->function) { rc = snprintf(buffer, sizeof(buffer), "%04x:%02x:%02x.%0d", diff --git a/hw/display/framebuffer.c b/hw/display/framebuffer.c index df51358e72..25aa46c8c7 100644 --- a/hw/display/framebuffer.c +++ b/hw/display/framebuffer.c @@ -78,7 +78,7 @@ void framebuffer_update_display( i = *first_row; *first_row = -1; - src_len = src_width * rows; + src_len = (hwaddr)src_width * rows; mem = mem_section->mr; if (!mem) { diff --git a/hw/display/milkymist-tmu2.c b/hw/display/milkymist-tmu2.c index 5c666f9b24..920374b985 100644 --- a/hw/display/milkymist-tmu2.c +++ b/hw/display/milkymist-tmu2.c @@ -257,7 +257,7 @@ static void tmu2_start(MilkymistTMU2State *s) glColor4f(m, m, m, (float)(s->regs[R_ALPHA] + 1) / 64.0f); /* Read the QEMU dest. 
framebuffer into the OpenGL framebuffer */ - fb_len = 2 * s->regs[R_DSTHRES] * s->regs[R_DSTVRES]; + fb_len = 2ULL * s->regs[R_DSTHRES] * s->regs[R_DSTVRES]; fb = cpu_physical_memory_map(s->regs[R_DSTFBUF], &fb_len, 0); if (fb == NULL) { glDeleteTextures(1, &texture); diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c index 7a15c61c76..444ca064c1 100644 --- a/hw/display/virtio-gpu.c +++ b/hw/display/virtio-gpu.c @@ -1003,7 +1003,8 @@ static const VMStateDescription vmstate_virtio_gpu_scanouts = { }, }; -static void virtio_gpu_save(QEMUFile *f, void *opaque, size_t size) +static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size, + VMStateField *field, QJSON *vmdesc) { VirtIOGPU *g = opaque; struct virtio_gpu_simple_resource *res; @@ -1028,9 +1029,12 @@ static void virtio_gpu_save(QEMUFile *f, void *opaque, size_t size) qemu_put_be32(f, 0); /* end of list */ vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL); + + return 0; } -static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size) +static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size, + VMStateField *field) { VirtIOGPU *g = opaque; struct virtio_gpu_simple_resource *res; @@ -1132,6 +1136,7 @@ static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp) VirtIODevice *vdev = VIRTIO_DEVICE(qdev); VirtIOGPU *g = VIRTIO_GPU(qdev); bool have_virgl; + Error *local_err = NULL; int i; if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) { @@ -1139,14 +1144,6 @@ static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp) return; } - g->config_size = sizeof(struct virtio_gpu_config); - g->virtio_config.num_scanouts = g->conf.max_outputs; - virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU, - g->config_size); - - g->req_state[0].width = 1024; - g->req_state[0].height = 768; - g->use_virgl_renderer = false; #if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN) have_virgl = false; @@ -1158,6 +1155,24 @@ static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp) } if (virtio_gpu_virgl_enabled(g->conf)) { + error_setg(&g->migration_blocker, "virgl is not yet migratable"); + migrate_add_blocker(g->migration_blocker, &local_err); + if (local_err) { + error_propagate(errp, local_err); + error_free(g->migration_blocker); + return; + } + } + + g->config_size = sizeof(struct virtio_gpu_config); + g->virtio_config.num_scanouts = g->conf.max_outputs; + virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU, + g->config_size); + + g->req_state[0].width = 1024; + g->req_state[0].height = 768; + + if (virtio_gpu_virgl_enabled(g->conf)) { /* use larger control queue in 3d mode */ g->ctrl_vq = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb); g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb); @@ -1183,11 +1198,6 @@ static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp) dpy_gfx_replace_surface(g->scanout[i].con, NULL); } } - - if (virtio_gpu_virgl_enabled(g->conf)) { - error_setg(&g->migration_blocker, "virgl is not yet migratable"); - migrate_add_blocker(g->migration_blocker); - } } static void virtio_gpu_device_unrealize(DeviceState *qdev, Error **errp) diff --git a/hw/display/xlnx_dp.c b/hw/display/xlnx_dp.c index f43eb09304..f7b7b80c68 100644 --- a/hw/display/xlnx_dp.c +++ b/hw/display/xlnx_dp.c @@ -555,7 +555,7 @@ static void xlnx_dp_recreate_surface(XlnxDPState *s) if ((width != 0) && (height != 0)) { /* * As dpy_gfx_replace_surface calls qemu_free_displaysurface on the - * surface we need to be carefull and don't free 
the surface associated + * surface we need to be careful and don't free the surface associated * to the console or double free will happen. */ if (s->bout_plane.surface != current_console_surface) { @@ -1160,7 +1160,7 @@ static void xlnx_dp_update_display(void *opaque) */ if (!xlnx_dpdma_start_operation(s->dpdma, 3, false)) { /* - * An error occured don't do anything with the data.. + * An error occurred don't do anything with the data.. * Trigger an underflow interrupt. */ s->core_registers[DP_INT_STATUS] |= (1 << 21); diff --git a/hw/i386/kvmvapic.c b/hw/i386/kvmvapic.c index 2f767b620e..702e281dc8 100644 --- a/hw/i386/kvmvapic.c +++ b/hw/i386/kvmvapic.c @@ -535,7 +535,6 @@ static int patch_hypercalls(VAPICROMState *s) uint8_t alternates[2]; const uint8_t *pattern; const uint8_t *patch; - int patches = 0; off_t pos; uint8_t *rom; @@ -566,11 +565,6 @@ static int patch_hypercalls(VAPICROMState *s) } g_free(rom); - - if (patches != 0 && patches != 2) { - return -1; - } - return 0; } diff --git a/hw/i386/pc.c b/hw/i386/pc.c index c949cf0ecc..706e2330ac 100644 --- a/hw/i386/pc.c +++ b/hw/i386/pc.c @@ -1777,7 +1777,7 @@ static int pc_apic_cmp(const void *a, const void *b) /* returns pointer to CPUArchId descriptor that matches CPU's apic_id * in pcms->possible_cpus->cpus, if pcms->possible_cpus->cpus has no - * entry correponding to CPU's apic_id returns NULL. + * entry corresponding to CPU's apic_id returns NULL. */ static CPUArchId *pc_find_cpu_slot(PCMachineState *pcms, CPUState *cpu, int *idx) diff --git a/hw/i386/pci-assign-load-rom.c b/hw/i386/pci-assign-load-rom.c index 0d8e4b2826..fd59076e7a 100644 --- a/hw/i386/pci-assign-load-rom.c +++ b/hw/i386/pci-assign-load-rom.c @@ -39,19 +39,19 @@ void *pci_assign_dev_load_option_rom(PCIDevice *dev, struct Object *owner, "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/rom", domain, bus, slot, function); - if (stat(rom_file, &st)) { - if (errno != ENOENT) { - error_report("pci-assign: Invalid ROM."); - } - return NULL; - } - /* Write "1" to the ROM file to enable it */ fp = fopen(rom_file, "r+"); if (fp == NULL) { - error_report("pci-assign: Cannot open %s: %s", rom_file, strerror(errno)); + if (errno != ENOENT) { + error_report("pci-assign: Cannot open %s: %s", rom_file, strerror(errno)); + } return NULL; } + if (fstat(fileno(fp), &st) == -1) { + error_report("pci-assign: Cannot stat %s: %s", rom_file, strerror(errno)); + goto close_rom; + } + val = 1; if (fwrite(&val, 1, 1, fp) != 1) { goto close_rom; diff --git a/hw/intc/Makefile.objs b/hw/intc/Makefile.objs index 2f44a2da26..8948106ac4 100644 --- a/hw/intc/Makefile.objs +++ b/hw/intc/Makefile.objs @@ -41,3 +41,4 @@ obj-$(CONFIG_S390_FLIC_KVM) += s390_flic_kvm.o obj-$(CONFIG_ASPEED_SOC) += aspeed_vic.o obj-$(CONFIG_ARM_GIC) += arm_gicv3_cpuif.o obj-$(CONFIG_MIPS_CPS) += mips_gic.o +obj-$(CONFIG_NIOS2) += nios2_iic.o diff --git a/hw/intc/arm_gic_kvm.c b/hw/intc/arm_gic_kvm.c index 11729ee902..ec952ece93 100644 --- a/hw/intc/arm_gic_kvm.c +++ b/hw/intc/arm_gic_kvm.c @@ -510,6 +510,17 @@ static void kvm_arm_gic_realize(DeviceState *dev, Error **errp) return; } + if (!kvm_arm_gic_can_save_restore(s)) { + error_setg(&s->migration_blocker, "This operating system kernel does " + "not support vGICv2 migration"); + migrate_add_blocker(s->migration_blocker, &local_err); + if (local_err) { + error_propagate(errp, local_err); + error_free(s->migration_blocker); + return; + } + } + gic_init_irqs_and_mmio(s, kvm_arm_gicv2_set_irq, NULL); for (i = 0; i < s->num_irq - GIC_INTERNAL; i++) { @@ -558,12 +569,6 @@ 
static void kvm_arm_gic_realize(DeviceState *dev, Error **errp) KVM_VGIC_V2_ADDR_TYPE_CPU, s->dev_fd); - if (!kvm_arm_gic_can_save_restore(s)) { - error_setg(&s->migration_blocker, "This operating system kernel does " - "not support vGICv2 migration"); - migrate_add_blocker(s->migration_blocker); - } - if (kvm_has_gsi_routing()) { /* set up irq routing */ kvm_init_irq_routing(kvm_state); diff --git a/hw/intc/arm_gicv3_its_kvm.c b/hw/intc/arm_gicv3_its_kvm.c index fc246e0cb5..bd4f3aafc6 100644 --- a/hw/intc/arm_gicv3_its_kvm.c +++ b/hw/intc/arm_gicv3_its_kvm.c @@ -56,6 +56,19 @@ static int kvm_its_send_msi(GICv3ITSState *s, uint32_t value, uint16_t devid) static void kvm_arm_its_realize(DeviceState *dev, Error **errp) { GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev); + Error *local_err = NULL; + + /* + * Block migration of a KVM GICv3 ITS device: the API for saving and + * restoring the state in the kernel is not yet available + */ + error_setg(&s->migration_blocker, "vITS migration is not implemented"); + migrate_add_blocker(s->migration_blocker, &local_err); + if (local_err) { + error_propagate(errp, local_err); + error_free(s->migration_blocker); + return; + } s->dev_fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_ARM_VGIC_ITS, false); if (s->dev_fd < 0) { @@ -73,13 +86,6 @@ static void kvm_arm_its_realize(DeviceState *dev, Error **errp) gicv3_its_init_mmio(s, NULL); - /* - * Block migration of a KVM GICv3 ITS device: the API for saving and - * restoring the state in the kernel is not yet available - */ - error_setg(&s->migration_blocker, "vITS migration is not implemented"); - migrate_add_blocker(s->migration_blocker); - kvm_msi_use_devid = true; kvm_gsi_direct_mapping = false; kvm_msi_via_irqfd_allowed = kvm_irqfds_enabled(); diff --git a/hw/intc/arm_gicv3_kvm.c b/hw/intc/arm_gicv3_kvm.c index 199a439ccf..d69dc47370 100644 --- a/hw/intc/arm_gicv3_kvm.c +++ b/hw/intc/arm_gicv3_kvm.c @@ -103,6 +103,18 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp) gicv3_init_irqs_and_mmio(s, kvm_arm_gicv3_set_irq, NULL); + /* Block migration of a KVM GICv3 device: the API for saving and restoring + * the state in the kernel is not yet finalised in the kernel or + * implemented in QEMU. + */ + error_setg(&s->migration_blocker, "vGICv3 migration is not implemented"); + migrate_add_blocker(s->migration_blocker, &local_err); + if (local_err) { + error_propagate(errp, local_err); + error_free(s->migration_blocker); + return; + } + /* Try to create the device via the device control API */ s->dev_fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_ARM_VGIC_V3, false); if (s->dev_fd < 0) { @@ -122,13 +134,6 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp) kvm_arm_register_device(&s->iomem_redist, -1, KVM_DEV_ARM_VGIC_GRP_ADDR, KVM_VGIC_V3_ADDR_TYPE_REDIST, s->dev_fd); - /* Block migration of a KVM GICv3 device: the API for saving and restoring - * the state in the kernel is not yet finalised in the kernel or - * implemented in QEMU. - */ - error_setg(&s->migration_blocker, "vGICv3 migration is not implemented"); - migrate_add_blocker(s->migration_blocker); - if (kvm_has_gsi_routing()) { /* set up irq routing */ kvm_init_irq_routing(kvm_state); diff --git a/hw/intc/nios2_iic.c b/hw/intc/nios2_iic.c new file mode 100644 index 0000000000..818ab1b315 --- /dev/null +++ b/hw/intc/nios2_iic.c @@ -0,0 +1,103 @@ +/* + * QEMU Altera Internal Interrupt Controller. 
+ * + * Copyright (c) 2012 Chris Wulff <crwulff@gmail.com> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * <http://www.gnu.org/licenses/lgpl-2.1.html> + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qapi/error.h" + +#include "hw/sysbus.h" +#include "cpu.h" + +#define TYPE_ALTERA_IIC "altera,iic" +#define ALTERA_IIC(obj) \ + OBJECT_CHECK(AlteraIIC, (obj), TYPE_ALTERA_IIC) + +typedef struct AlteraIIC { + SysBusDevice parent_obj; + void *cpu; + qemu_irq parent_irq; +} AlteraIIC; + +static void update_irq(AlteraIIC *pv) +{ + CPUNios2State *env = &((Nios2CPU *)(pv->cpu))->env; + + qemu_set_irq(pv->parent_irq, + env->regs[CR_IPENDING] & env->regs[CR_IENABLE]); +} + +static void irq_handler(void *opaque, int irq, int level) +{ + AlteraIIC *pv = opaque; + CPUNios2State *env = &((Nios2CPU *)(pv->cpu))->env; + + env->regs[CR_IPENDING] &= ~(1 << irq); + env->regs[CR_IPENDING] |= !!level << irq; + + update_irq(pv); +} + +static void altera_iic_init(Object *obj) +{ + AlteraIIC *pv = ALTERA_IIC(obj); + + qdev_init_gpio_in(DEVICE(pv), irq_handler, 32); + sysbus_init_irq(SYS_BUS_DEVICE(obj), &pv->parent_irq); +} + +static Property altera_iic_properties[] = { + DEFINE_PROP_PTR("cpu", AlteraIIC, cpu), + DEFINE_PROP_END_OF_LIST(), +}; + +static void altera_iic_realize(DeviceState *dev, Error **errp) +{ + struct AlteraIIC *pv = ALTERA_IIC(dev); + + if (!pv->cpu) { + error_setg(errp, "altera,iic: CPU not connected"); + return; + } +} + +static void altera_iic_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->props = altera_iic_properties; + /* Reason: pointer property "cpu" */ + dc->cannot_instantiate_with_device_add_yet = true; + dc->realize = altera_iic_realize; +} + +static TypeInfo altera_iic_info = { + .name = "altera,iic", + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AlteraIIC), + .instance_init = altera_iic_init, + .class_init = altera_iic_class_init, +}; + +static void altera_iic_register(void) +{ + type_register_static(&altera_iic_info); +} + +type_init(altera_iic_register) diff --git a/hw/intc/s390_flic_kvm.c b/hw/intc/s390_flic_kvm.c index c313166fbe..da8e4dfab6 100644 --- a/hw/intc/s390_flic_kvm.c +++ b/hw/intc/s390_flic_kvm.c @@ -286,7 +286,8 @@ static void kvm_s390_release_adapter_routes(S390FLICState *fs, * increase until buffer is sufficient or maxium size is * reached */ -static void kvm_flic_save(QEMUFile *f, void *opaque, size_t size) +static int kvm_flic_save(QEMUFile *f, void *opaque, size_t size, + VMStateField *field, QJSON *vmdesc) { KVMS390FLICState *flic = opaque; int len = FLIC_SAVE_INITIAL_SIZE; @@ -319,6 +320,8 @@ static void kvm_flic_save(QEMUFile *f, void *opaque, size_t size) count * sizeof(struct kvm_s390_irq)); } g_free(buf); + + return 0; } /** @@ -331,7 +334,8 @@ static void kvm_flic_save(QEMUFile *f, void *opaque, size_t size) * Note: Do nothing when no interrupts where stored * in QEMUFile */ 
-static int kvm_flic_load(QEMUFile *f, void *opaque, size_t size) +static int kvm_flic_load(QEMUFile *f, void *opaque, size_t size, + VMStateField *field) { uint64_t len = 0; uint64_t count = 0; diff --git a/hw/isa/isa-bus.c b/hw/isa/isa-bus.c index 9d07b118c0..0ffbc8dd28 100644 --- a/hw/isa/isa-bus.c +++ b/hw/isa/isa-bus.c @@ -219,6 +219,7 @@ static void isabus_bridge_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); dc->fw_name = "isa"; } diff --git a/hw/misc/ivshmem.c b/hw/misc/ivshmem.c index abeaf3da08..fd14d7a07e 100644 --- a/hw/misc/ivshmem.c +++ b/hw/misc/ivshmem.c @@ -840,6 +840,7 @@ static void ivshmem_common_realize(PCIDevice *dev, Error **errp) uint8_t *pci_conf; uint8_t attr = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_PREFETCH; + Error *local_err = NULL; /* IRQFD requires MSI */ if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD) && @@ -903,9 +904,6 @@ static void ivshmem_common_realize(PCIDevice *dev, Error **errp) } } - vmstate_register_ram(s->ivshmem_bar2, DEVICE(s)); - pci_register_bar(PCI_DEVICE(s), 2, attr, s->ivshmem_bar2); - if (s->master == ON_OFF_AUTO_AUTO) { s->master = s->vm_id == 0 ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; } @@ -913,8 +911,16 @@ static void ivshmem_common_realize(PCIDevice *dev, Error **errp) if (!ivshmem_is_master(s)) { error_setg(&s->migration_blocker, "Migration is disabled when using feature 'peer mode' in device 'ivshmem'"); - migrate_add_blocker(s->migration_blocker); + migrate_add_blocker(s->migration_blocker, &local_err); + if (local_err) { + error_propagate(errp, local_err); + error_free(s->migration_blocker); + return; + } } + + vmstate_register_ram(s->ivshmem_bar2, DEVICE(s)); + pci_register_bar(PCI_DEVICE(s), 2, attr, s->ivshmem_bar2); } static void ivshmem_exit(PCIDevice *dev) diff --git a/hw/net/cadence_gem.c b/hw/net/cadence_gem.c index 7915732f74..e99d4544a2 100644 --- a/hw/net/cadence_gem.c +++ b/hw/net/cadence_gem.c @@ -896,7 +896,7 @@ static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size) DB_PRINT("config bufsize: %d packet size: %ld\n", rxbufsize, size); - /* Find which queue we are targetting */ + /* Find which queue we are targeting */ q = get_queue_from_screen(s, rxbuf_ptr, rxbufsize); while (bytes_to_copy) { diff --git a/hw/net/e1000e.c b/hw/net/e1000e.c index 77a4b3e5bf..0e9a25b7ab 100644 --- a/hw/net/e1000e.c +++ b/hw/net/e1000e.c @@ -593,7 +593,7 @@ static const VMStateDescription e1000e_vmstate = { .pre_save = e1000e_pre_save, .post_load = e1000e_post_load, .fields = (VMStateField[]) { - VMSTATE_PCIE_DEVICE(parent_obj, E1000EState), + VMSTATE_PCI_DEVICE(parent_obj, E1000EState), VMSTATE_MSIX(parent_obj, E1000EState), VMSTATE_UINT32(ioaddr, E1000EState), diff --git a/hw/net/spapr_llan.c b/hw/net/spapr_llan.c index 01ecb02773..058908d8d7 100644 --- a/hw/net/spapr_llan.c +++ b/hw/net/spapr_llan.c @@ -105,7 +105,7 @@ typedef struct VIOsPAPRVLANDevice { uint32_t add_buf_ptr, use_buf_ptr, rx_bufs; hwaddr rxq_ptr; QEMUTimer *rxp_timer; - uint32_t compat_flags; /* Compatability flags for migration */ + uint32_t compat_flags; /* Compatibility flags for migration */ RxBufPool *rx_pool[RX_MAX_POOLS]; /* Receive buffer descriptor pools */ } VIOsPAPRVLANDevice; @@ -559,7 +559,7 @@ static target_long spapr_vlan_add_rxbuf_to_pool(VIOsPAPRVLANDevice *dev, if (pool < 0) { /* * No matching pool found? Try to use a new one. 
If the guest used all - * pools before, but changed the size of one pool inbetween, we might + * pools before, but changed the size of one pool in the meantime, we might * need to recycle that pool here (if it's empty already). Thus scan * all buffer pools now, starting with the last (likely empty) one. */ diff --git a/hw/net/vmxnet3.c b/hw/net/vmxnet3.c index 92f6af9620..2cb2731e29 100644 --- a/hw/net/vmxnet3.c +++ b/hw/net/vmxnet3.c @@ -2451,7 +2451,8 @@ static void vmxnet3_put_tx_stats_to_file(QEMUFile *f, qemu_put_be64(f, tx_stat->pktsTxDiscard); } -static int vmxnet3_get_txq_descr(QEMUFile *f, void *pv, size_t size) +static int vmxnet3_get_txq_descr(QEMUFile *f, void *pv, size_t size, + VMStateField *field) { Vmxnet3TxqDescr *r = pv; @@ -2465,7 +2466,8 @@ static int vmxnet3_get_txq_descr(QEMUFile *f, void *pv, size_t size) return 0; } -static void vmxnet3_put_txq_descr(QEMUFile *f, void *pv, size_t size) +static int vmxnet3_put_txq_descr(QEMUFile *f, void *pv, size_t size, + VMStateField *field, QJSON *vmdesc) { Vmxnet3TxqDescr *r = pv; @@ -2474,6 +2476,8 @@ static void vmxnet3_put_txq_descr(QEMUFile *f, void *pv, size_t size) qemu_put_byte(f, r->intr_idx); qemu_put_be64(f, r->tx_stats_pa); vmxnet3_put_tx_stats_to_file(f, &r->txq_stats); + + return 0; } static const VMStateInfo txq_descr_info = { @@ -2512,7 +2516,8 @@ static void vmxnet3_put_rx_stats_to_file(QEMUFile *f, qemu_put_be64(f, rx_stat->pktsRxError); } -static int vmxnet3_get_rxq_descr(QEMUFile *f, void *pv, size_t size) +static int vmxnet3_get_rxq_descr(QEMUFile *f, void *pv, size_t size, + VMStateField *field) { Vmxnet3RxqDescr *r = pv; int i; @@ -2530,7 +2535,8 @@ static int vmxnet3_get_rxq_descr(QEMUFile *f, void *pv, size_t size) return 0; } -static void vmxnet3_put_rxq_descr(QEMUFile *f, void *pv, size_t size) +static int vmxnet3_put_rxq_descr(QEMUFile *f, void *pv, size_t size, + VMStateField *field, QJSON *vmdesc) { Vmxnet3RxqDescr *r = pv; int i; @@ -2543,6 +2549,8 @@ static void vmxnet3_put_rxq_descr(QEMUFile *f, void *pv, size_t size) qemu_put_byte(f, r->intr_idx); qemu_put_be64(f, r->rx_stats_pa); vmxnet3_put_rx_stats_to_file(f, &r->rxq_stats); + + return 0; } static int vmxnet3_post_load(void *opaque, int version_id) @@ -2575,7 +2583,8 @@ static const VMStateInfo rxq_descr_info = { .put = vmxnet3_put_rxq_descr }; -static int vmxnet3_get_int_state(QEMUFile *f, void *pv, size_t size) +static int vmxnet3_get_int_state(QEMUFile *f, void *pv, size_t size, + VMStateField *field) { Vmxnet3IntState *r = pv; @@ -2586,13 +2595,16 @@ static int vmxnet3_get_int_state(QEMUFile *f, void *pv, size_t size) return 0; } -static void vmxnet3_put_int_state(QEMUFile *f, void *pv, size_t size) +static int vmxnet3_put_int_state(QEMUFile *f, void *pv, size_t size, + VMStateField *field, QJSON *vmdesc) { Vmxnet3IntState *r = pv; qemu_put_byte(f, r->is_masked); qemu_put_byte(f, r->is_pending); qemu_put_byte(f, r->is_asserted); + + return 0; } static const VMStateInfo int_state_info = { @@ -2619,7 +2631,7 @@ static const VMStateDescription vmstate_vmxnet3_pcie_device = { .minimum_version_id = 1, .needed = vmxnet3_vmstate_need_pcie_device, .fields = (VMStateField[]) { - VMSTATE_PCIE_DEVICE(parent_obj, VMXNET3State), + VMSTATE_PCI_DEVICE(parent_obj, VMXNET3State), VMSTATE_END_OF_LIST() } }; diff --git a/hw/nios2/10m50_devboard.c b/hw/nios2/10m50_devboard.c new file mode 100644 index 0000000000..62e5738b65 --- /dev/null +++ b/hw/nios2/10m50_devboard.c @@ -0,0 +1,126 @@ +/* + * Altera 10M50 Nios2 GHRD + * + * Copyright (c) 2016 Marek Vasut 
<marek.vasut@gmail.com> + * + * Based on LabX device code + * + * Copyright (c) 2012 Chris Wulff <crwulff@gmail.com> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * <http://www.gnu.org/licenses/lgpl-2.1.html> + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu-common.h" +#include "cpu.h" + +#include "hw/sysbus.h" +#include "hw/hw.h" +#include "hw/char/serial.h" +#include "sysemu/sysemu.h" +#include "hw/boards.h" +#include "exec/memory.h" +#include "exec/address-spaces.h" +#include "qemu/config-file.h" + +#include "boot.h" + +#define BINARY_DEVICE_TREE_FILE "10m50-devboard.dtb" + +static void nios2_10m50_ghrd_init(MachineState *machine) +{ + Nios2CPU *cpu; + DeviceState *dev; + MemoryRegion *address_space_mem = get_system_memory(); + MemoryRegion *phys_tcm = g_new(MemoryRegion, 1); + MemoryRegion *phys_tcm_alias = g_new(MemoryRegion, 1); + MemoryRegion *phys_ram = g_new(MemoryRegion, 1); + MemoryRegion *phys_ram_alias = g_new(MemoryRegion, 1); + ram_addr_t tcm_base = 0x0; + ram_addr_t tcm_size = 0x1000; /* 1 kiB, but QEMU limit is 4 kiB */ + ram_addr_t ram_base = 0x08000000; + ram_addr_t ram_size = 0x08000000; + qemu_irq *cpu_irq, irq[32]; + int i; + + /* Physical TCM (tb_ram_1k) with alias at 0xc0000000 */ + memory_region_init_ram(phys_tcm, NULL, "nios2.tcm", tcm_size, &error_abort); + memory_region_init_alias(phys_tcm_alias, NULL, "nios2.tcm.alias", + phys_tcm, 0, tcm_size); + vmstate_register_ram_global(phys_tcm); + memory_region_add_subregion(address_space_mem, tcm_base, phys_tcm); + memory_region_add_subregion(address_space_mem, 0xc0000000 + tcm_base, + phys_tcm_alias); + + /* Physical DRAM with alias at 0xc0000000 */ + memory_region_init_ram(phys_ram, NULL, "nios2.ram", ram_size, &error_abort); + memory_region_init_alias(phys_ram_alias, NULL, "nios2.ram.alias", + phys_ram, 0, ram_size); + vmstate_register_ram_global(phys_ram); + memory_region_add_subregion(address_space_mem, ram_base, phys_ram); + memory_region_add_subregion(address_space_mem, 0xc0000000 + ram_base, + phys_ram_alias); + + /* Create CPU -- FIXME */ + cpu = cpu_nios2_init("nios2"); + + /* Register: CPU interrupt controller (PIC) */ + cpu_irq = nios2_cpu_pic_init(cpu); + + /* Register: Internal Interrupt Controller (IIC) */ + dev = qdev_create(NULL, "altera,iic"); + qdev_prop_set_ptr(dev, "cpu", cpu); + qdev_init_nofail(dev); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, cpu_irq[0]); + for (i = 0; i < 32; i++) { + irq[i] = qdev_get_gpio_in(dev, i); + } + + /* Register: Altera 16550 UART */ + serial_mm_init(address_space_mem, 0xf8001600, 2, irq[1], 115200, + serial_hds[0], DEVICE_NATIVE_ENDIAN); + + /* Register: Timer sys_clk_timer */ + dev = qdev_create(NULL, "ALTR.timer"); + qdev_prop_set_uint32(dev, "clock-frequency", 75 * 1000000); + qdev_init_nofail(dev); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0xf8001440); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[0]); + + /* Register: Timer sys_clk_timer_1 
*/ + dev = qdev_create(NULL, "ALTR.timer"); + qdev_prop_set_uint32(dev, "clock-frequency", 75 * 1000000); + qdev_init_nofail(dev); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0xe0000880); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[5]); + + /* Configure new exception vectors and reset CPU for it to take effect. */ + cpu->reset_addr = 0xd4000000; + cpu->exception_addr = 0xc8000120; + cpu->fast_tlb_miss_addr = 0xc0000100; + + nios2_load_kernel(cpu, ram_base, ram_size, machine->initrd_filename, + BINARY_DEVICE_TREE_FILE, NULL); +} + +static void nios2_10m50_ghrd_machine_init(struct MachineClass *mc) +{ + mc->desc = "Altera 10M50 GHRD Nios II design"; + mc->init = nios2_10m50_ghrd_init; + mc->is_default = 1; +} + +DEFINE_MACHINE("10m50-ghrd", nios2_10m50_ghrd_machine_init); diff --git a/hw/nios2/Makefile.objs b/hw/nios2/Makefile.objs new file mode 100644 index 0000000000..6b5c421760 --- /dev/null +++ b/hw/nios2/Makefile.objs @@ -0,0 +1 @@ +obj-y = boot.o cpu_pic.o 10m50_devboard.o diff --git a/hw/nios2/boot.c b/hw/nios2/boot.c new file mode 100644 index 0000000000..e0a9aff2f4 --- /dev/null +++ b/hw/nios2/boot.c @@ -0,0 +1,223 @@ +/* + * Nios2 kernel loader + * + * Copyright (c) 2016 Marek Vasut <marek.vasut@gmail.com> + * + * Based on microblaze kernel loader + * + * Copyright (c) 2012 Peter Crosthwaite <peter.crosthwaite@petalogix.com> + * Copyright (c) 2012 PetaLogix + * Copyright (c) 2009 Edgar E. Iglesias. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "cpu.h" +#include "qemu/option.h" +#include "qemu/config-file.h" +#include "qemu/error-report.h" +#include "qemu-common.h" +#include "sysemu/device_tree.h" +#include "sysemu/sysemu.h" +#include "hw/loader.h" +#include "elf.h" +#include "qemu/cutils.h" + +#include "boot.h" + +#define NIOS2_MAGIC 0x534f494e + +static struct nios2_boot_info { + void (*machine_cpu_reset)(Nios2CPU *); + uint32_t bootstrap_pc; + uint32_t cmdline; + uint32_t initrd_start; + uint32_t initrd_end; + uint32_t fdt; +} boot_info; + +static void main_cpu_reset(void *opaque) +{ + Nios2CPU *cpu = opaque; + CPUState *cs = CPU(cpu); + CPUNios2State *env = &cpu->env; + + cpu_reset(CPU(cpu)); + + env->regs[R_ARG0] = NIOS2_MAGIC; + env->regs[R_ARG1] = boot_info.initrd_start; + env->regs[R_ARG2] = boot_info.fdt; + env->regs[R_ARG3] = boot_info.cmdline; + + cpu_set_pc(cs, boot_info.bootstrap_pc); + if (boot_info.machine_cpu_reset) { + boot_info.machine_cpu_reset(cpu); + } +} + +static uint64_t translate_kernel_address(void *opaque, uint64_t addr) +{ + return addr - 0xc0000000LL; +} + +static int nios2_load_dtb(struct nios2_boot_info bi, const uint32_t ramsize, + const char *kernel_cmdline, const char *dtb_filename) +{ + int fdt_size; + void *fdt = NULL; + int r; + + if (dtb_filename) { + fdt = load_device_tree(dtb_filename, &fdt_size); + } + if (!fdt) { + return 0; + } + + if (kernel_cmdline) { + r = qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", + kernel_cmdline); + if (r < 0) { + fprintf(stderr, "couldn't set /chosen/bootargs\n"); + } + } + + if (bi.initrd_start) { + qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-start", + translate_kernel_address(NULL, bi.initrd_start)); + + qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-end", + translate_kernel_address(NULL, bi.initrd_end)); + } + + cpu_physical_memory_write(bi.fdt, fdt, fdt_size); + return fdt_size; +} + +void nios2_load_kernel(Nios2CPU *cpu, hwaddr ddr_base, + uint32_t ramsize, + const char *initrd_filename, + const char *dtb_filename, + void (*machine_cpu_reset)(Nios2CPU *)) +{ + QemuOpts *machine_opts; + const char *kernel_filename; + const char *kernel_cmdline; + const char *dtb_arg; + char *filename = NULL; + + machine_opts = qemu_get_machine_opts(); + kernel_filename = qemu_opt_get(machine_opts, "kernel"); + kernel_cmdline = qemu_opt_get(machine_opts, "append"); + dtb_arg = qemu_opt_get(machine_opts, "dtb"); + /* default to pcbios dtb as passed by machine_init */ + if (!dtb_arg) { + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, dtb_filename); + } + + boot_info.machine_cpu_reset = machine_cpu_reset; + qemu_register_reset(main_cpu_reset, cpu); + + if (kernel_filename) { + int kernel_size, fdt_size; + uint64_t entry, low, high; + uint32_t base32; + int big_endian = 0; + +#ifdef TARGET_WORDS_BIGENDIAN + big_endian = 1; +#endif + + /* Boots a kernel elf binary. */ + kernel_size = load_elf(kernel_filename, NULL, NULL, + &entry, &low, &high, + big_endian, EM_ALTERA_NIOS2, 0, 0); + base32 = entry; + if (base32 == 0xc0000000) { + kernel_size = load_elf(kernel_filename, translate_kernel_address, + NULL, &entry, NULL, NULL, + big_endian, EM_ALTERA_NIOS2, 0, 0); + } + + /* Always boot into physical ram. */ + boot_info.bootstrap_pc = ddr_base + 0xc0000000 + (entry & 0x07ffffff); + + /* If it wasn't an ELF image, try an u-boot image. 
*/ + if (kernel_size < 0) { + hwaddr uentry, loadaddr; + + kernel_size = load_uimage(kernel_filename, &uentry, &loadaddr, 0, + NULL, NULL); + boot_info.bootstrap_pc = uentry; + high = loadaddr + kernel_size; + } + + /* Not an ELF image nor an u-boot image, try a RAW image. */ + if (kernel_size < 0) { + kernel_size = load_image_targphys(kernel_filename, ddr_base, + ram_size); + boot_info.bootstrap_pc = ddr_base; + high = ddr_base + kernel_size; + } + + high = ROUND_UP(high, 1024 * 1024); + + /* If initrd is available, it goes after the kernel, aligned to 1M. */ + if (initrd_filename) { + int initrd_size; + uint32_t initrd_offset; + + boot_info.initrd_start = high; + initrd_offset = boot_info.initrd_start - ddr_base; + + initrd_size = load_ramdisk(initrd_filename, + boot_info.initrd_start, + ram_size - initrd_offset); + if (initrd_size < 0) { + initrd_size = load_image_targphys(initrd_filename, + boot_info.initrd_start, + ram_size - initrd_offset); + } + if (initrd_size < 0) { + error_report("qemu: could not load initrd '%s'", + initrd_filename); + exit(EXIT_FAILURE); + } + high += initrd_size; + } + high = ROUND_UP(high, 4); + boot_info.initrd_end = high; + + /* Device tree must be placed right after initrd (if available) */ + boot_info.fdt = high; + fdt_size = nios2_load_dtb(boot_info, ram_size, kernel_cmdline, + /* Preference a -dtb argument */ + dtb_arg ? dtb_arg : filename); + high += fdt_size; + + /* Kernel command is at the end, 4k aligned. */ + boot_info.cmdline = ROUND_UP(high, 4096); + if (kernel_cmdline && strlen(kernel_cmdline)) { + pstrcpy_targphys("cmdline", boot_info.cmdline, 256, kernel_cmdline); + } + } + g_free(filename); +} diff --git a/hw/nios2/boot.h b/hw/nios2/boot.h new file mode 100644 index 0000000000..3116753818 --- /dev/null +++ b/hw/nios2/boot.h @@ -0,0 +1,11 @@ +#ifndef NIOS2_BOOT_H +#define NIOS2_BOOT_H + +#include "hw/hw.h" +#include "cpu.h" + +void nios2_load_kernel(Nios2CPU *cpu, hwaddr ddr_base, uint32_t ramsize, + const char *initrd_filename, const char *dtb_filename, + void (*machine_cpu_reset)(Nios2CPU *)); + +#endif /* NIOS2_BOOT_H */ diff --git a/hw/nios2/cpu_pic.c b/hw/nios2/cpu_pic.c new file mode 100644 index 0000000000..0f95987ef3 --- /dev/null +++ b/hw/nios2/cpu_pic.c @@ -0,0 +1,70 @@ +/* + * Altera Nios2 CPU PIC + * + * Copyright (c) 2016 Marek Vasut <marek.vasut@gmail.com> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * <http://www.gnu.org/licenses/lgpl-2.1.html> + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu-common.h" +#include "cpu.h" + +#include "qemu/config-file.h" + +#include "boot.h" + +static void nios2_pic_cpu_handler(void *opaque, int irq, int level) +{ + Nios2CPU *cpu = opaque; + CPUNios2State *env = &cpu->env; + CPUState *cs = CPU(cpu); + int type = irq ? 
CPU_INTERRUPT_NMI : CPU_INTERRUPT_HARD; + + if (type == CPU_INTERRUPT_HARD) { + env->irq_pending = level; + + if (level && (env->regs[CR_STATUS] & CR_STATUS_PIE)) { + env->irq_pending = 0; + cpu_interrupt(cs, type); + } else if (!level) { + env->irq_pending = 0; + cpu_reset_interrupt(cs, type); + } + } else { + if (level) { + cpu_interrupt(cs, type); + } else { + cpu_reset_interrupt(cs, type); + } + } +} + +void nios2_check_interrupts(CPUNios2State *env) +{ + Nios2CPU *cpu = nios2_env_get_cpu(env); + CPUState *cs = CPU(cpu); + + if (env->irq_pending) { + env->irq_pending = 0; + cpu_interrupt(cs, CPU_INTERRUPT_HARD); + } +} + +qemu_irq *nios2_cpu_pic_init(Nios2CPU *cpu) +{ + return qemu_allocate_irqs(nios2_pic_cpu_handler, cpu, 2); +} diff --git a/hw/nvram/eeprom93xx.c b/hw/nvram/eeprom93xx.c index 2c16fc23df..848692abc0 100644 --- a/hw/nvram/eeprom93xx.c +++ b/hw/nvram/eeprom93xx.c @@ -94,18 +94,22 @@ struct _eeprom_t { This is a Big hack, but it is how the old state did it. */ -static int get_uint16_from_uint8(QEMUFile *f, void *pv, size_t size) +static int get_uint16_from_uint8(QEMUFile *f, void *pv, size_t size, + VMStateField *field) { uint16_t *v = pv; *v = qemu_get_ubyte(f); return 0; } -static void put_unused(QEMUFile *f, void *pv, size_t size) +static int put_unused(QEMUFile *f, void *pv, size_t size, VMStateField *field, + QJSON *vmdesc) { fprintf(stderr, "uint16_from_uint8 is used only for backwards compatibility.\n"); fprintf(stderr, "Never should be used to write a new state.\n"); exit(0); + + return 0; } static const VMStateInfo vmstate_hack_uint16_from_uint8 = { diff --git a/hw/nvram/fw_cfg.c b/hw/nvram/fw_cfg.c index 523d585dcf..316fca9bc1 100644 --- a/hw/nvram/fw_cfg.c +++ b/hw/nvram/fw_cfg.c @@ -555,17 +555,21 @@ static void fw_cfg_reset(DeviceState *d) Or we broke compatibility in the state, or we can't use struct tm */ -static int get_uint32_as_uint16(QEMUFile *f, void *pv, size_t size) +static int get_uint32_as_uint16(QEMUFile *f, void *pv, size_t size, + VMStateField *field) { uint32_t *v = pv; *v = qemu_get_be16(f); return 0; } -static void put_unused(QEMUFile *f, void *pv, size_t size) +static int put_unused(QEMUFile *f, void *pv, size_t size, VMStateField *field, + QJSON *vmdesc) { fprintf(stderr, "uint32_as_uint16 is only used for backward compatibility.\n"); fprintf(stderr, "This functions shouldn't be called.\n"); + + return 0; } static const VMStateInfo vmstate_hack_uint32_as_uint16 = { diff --git a/hw/pci-bridge/ioh3420.c b/hw/pci-bridge/ioh3420.c index 84b7946c31..0eef87a4f8 100644 --- a/hw/pci-bridge/ioh3420.c +++ b/hw/pci-bridge/ioh3420.c @@ -180,7 +180,7 @@ static const VMStateDescription vmstate_ioh3420 = { .minimum_version_id = 1, .post_load = pcie_cap_slot_post_load, .fields = (VMStateField[]) { - VMSTATE_PCIE_DEVICE(parent_obj.parent_obj.parent_obj, PCIESlot), + VMSTATE_PCI_DEVICE(parent_obj.parent_obj.parent_obj, PCIESlot), VMSTATE_STRUCT(parent_obj.parent_obj.parent_obj.exp.aer_log, PCIESlot, 0, vmstate_pcie_aer_log, PCIEAERLog), VMSTATE_END_OF_LIST() diff --git a/hw/pci-bridge/xio3130_downstream.c b/hw/pci-bridge/xio3130_downstream.c index 04b8e5b847..cfe8a3657f 100644 --- a/hw/pci-bridge/xio3130_downstream.c +++ b/hw/pci-bridge/xio3130_downstream.c @@ -166,7 +166,7 @@ static const VMStateDescription vmstate_xio3130_downstream = { .minimum_version_id = 1, .post_load = pcie_cap_slot_post_load, .fields = (VMStateField[]) { - VMSTATE_PCIE_DEVICE(parent_obj.parent_obj.parent_obj, PCIESlot), + VMSTATE_PCI_DEVICE(parent_obj.parent_obj.parent_obj, 
PCIESlot), VMSTATE_STRUCT(parent_obj.parent_obj.parent_obj.exp.aer_log, PCIESlot, 0, vmstate_pcie_aer_log, PCIEAERLog), VMSTATE_END_OF_LIST() diff --git a/hw/pci-bridge/xio3130_upstream.c b/hw/pci-bridge/xio3130_upstream.c index d1f59c8834..401c78452b 100644 --- a/hw/pci-bridge/xio3130_upstream.c +++ b/hw/pci-bridge/xio3130_upstream.c @@ -138,7 +138,7 @@ static const VMStateDescription vmstate_xio3130_upstream = { .version_id = 1, .minimum_version_id = 1, .fields = (VMStateField[]) { - VMSTATE_PCIE_DEVICE(parent_obj.parent_obj, PCIEPort), + VMSTATE_PCI_DEVICE(parent_obj.parent_obj, PCIEPort), VMSTATE_STRUCT(parent_obj.parent_obj.exp.aer_log, PCIEPort, 0, vmstate_pcie_aer_log, PCIEAERLog), VMSTATE_END_OF_LIST() diff --git a/hw/pci/msix.c b/hw/pci/msix.c index 0ec1cb14fc..ee1714d2cf 100644 --- a/hw/pci/msix.c +++ b/hw/pci/msix.c @@ -587,12 +587,16 @@ void msix_unset_vector_notifiers(PCIDevice *dev) dev->msix_vector_poll_notifier = NULL; } -static void put_msix_state(QEMUFile *f, void *pv, size_t size) +static int put_msix_state(QEMUFile *f, void *pv, size_t size, + VMStateField *field, QJSON *vmdesc) { msix_save(pv, f); + + return 0; } -static int get_msix_state(QEMUFile *f, void *pv, size_t size) +static int get_msix_state(QEMUFile *f, void *pv, size_t size, + VMStateField *field) { msix_load(pv, f); return 0; diff --git a/hw/pci/pci.c b/hw/pci/pci.c index fe9acecbbf..47ca3af69a 100644 --- a/hw/pci/pci.c +++ b/hw/pci/pci.c @@ -445,7 +445,8 @@ int pci_bus_numa_node(PCIBus *bus) return PCI_BUS_GET_CLASS(bus)->numa_node(bus); } -static int get_pci_config_device(QEMUFile *f, void *pv, size_t size) +static int get_pci_config_device(QEMUFile *f, void *pv, size_t size, + VMStateField *field) { PCIDevice *s = container_of(pv, PCIDevice, config); PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(s); @@ -484,11 +485,14 @@ static int get_pci_config_device(QEMUFile *f, void *pv, size_t size) } /* just put buffer */ -static void put_pci_config_device(QEMUFile *f, void *pv, size_t size) +static int put_pci_config_device(QEMUFile *f, void *pv, size_t size, + VMStateField *field, QJSON *vmdesc) { const uint8_t **v = pv; assert(size == pci_config_size(container_of(pv, PCIDevice, config))); qemu_put_buffer(f, *v, size); + + return 0; } static VMStateInfo vmstate_info_pci_config = { @@ -497,7 +501,8 @@ static VMStateInfo vmstate_info_pci_config = { .put = put_pci_config_device, }; -static int get_pci_irq_state(QEMUFile *f, void *pv, size_t size) +static int get_pci_irq_state(QEMUFile *f, void *pv, size_t size, + VMStateField *field) { PCIDevice *s = container_of(pv, PCIDevice, irq_state); uint32_t irq_state[PCI_NUM_PINS]; @@ -518,7 +523,8 @@ static int get_pci_irq_state(QEMUFile *f, void *pv, size_t size) return 0; } -static void put_pci_irq_state(QEMUFile *f, void *pv, size_t size) +static int put_pci_irq_state(QEMUFile *f, void *pv, size_t size, + VMStateField *field, QJSON *vmdesc) { int i; PCIDevice *s = container_of(pv, PCIDevice, irq_state); @@ -526,6 +532,8 @@ static void put_pci_irq_state(QEMUFile *f, void *pv, size_t size) for (i = 0; i < PCI_NUM_PINS; ++i) { qemu_put_be32(f, pci_irq_state(s, i)); } + + return 0; } static VMStateInfo vmstate_info_pci_irq_state = { @@ -534,30 +542,29 @@ static VMStateInfo vmstate_info_pci_irq_state = { .put = put_pci_irq_state, }; +static bool migrate_is_pcie(void *opaque, int version_id) +{ + return pci_is_express((PCIDevice *)opaque); +} + +static bool migrate_is_not_pcie(void *opaque, int version_id) +{ + return !pci_is_express((PCIDevice *)opaque); +} + const 
VMStateDescription vmstate_pci_device = { .name = "PCIDevice", .version_id = 2, .minimum_version_id = 1, .fields = (VMStateField[]) { VMSTATE_INT32_POSITIVE_LE(version_id, PCIDevice), - VMSTATE_BUFFER_UNSAFE_INFO(config, PCIDevice, 0, - vmstate_info_pci_config, + VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice, + migrate_is_not_pcie, + 0, vmstate_info_pci_config, PCI_CONFIG_SPACE_SIZE), - VMSTATE_BUFFER_UNSAFE_INFO(irq_state, PCIDevice, 2, - vmstate_info_pci_irq_state, - PCI_NUM_PINS * sizeof(int32_t)), - VMSTATE_END_OF_LIST() - } -}; - -const VMStateDescription vmstate_pcie_device = { - .name = "PCIEDevice", - .version_id = 2, - .minimum_version_id = 1, - .fields = (VMStateField[]) { - VMSTATE_INT32_POSITIVE_LE(version_id, PCIDevice), - VMSTATE_BUFFER_UNSAFE_INFO(config, PCIDevice, 0, - vmstate_info_pci_config, + VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice, + migrate_is_pcie, + 0, vmstate_info_pci_config, PCIE_CONFIG_SPACE_SIZE), VMSTATE_BUFFER_UNSAFE_INFO(irq_state, PCIDevice, 2, vmstate_info_pci_irq_state, @@ -566,10 +573,6 @@ const VMStateDescription vmstate_pcie_device = { } }; -static inline const VMStateDescription *pci_get_vmstate(PCIDevice *s) -{ - return pci_is_express(s) ? &vmstate_pcie_device : &vmstate_pci_device; -} void pci_device_save(PCIDevice *s, QEMUFile *f) { @@ -578,7 +581,7 @@ void pci_device_save(PCIDevice *s, QEMUFile *f) * This makes us compatible with old devices * which never set or clear this bit. */ s->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT; - vmstate_save_state(f, pci_get_vmstate(s), s, NULL); + vmstate_save_state(f, &vmstate_pci_device, s, NULL); /* Restore the interrupt status bit. */ pci_update_irq_status(s); } @@ -586,7 +589,7 @@ void pci_device_save(PCIDevice *s, QEMUFile *f) int pci_device_load(PCIDevice *s, QEMUFile *f) { int ret; - ret = vmstate_load_state(f, pci_get_vmstate(s), s, s->version_id); + ret = vmstate_load_state(f, &vmstate_pci_device, s, s->version_id); /* Restore the interrupt status bit. */ pci_update_irq_status(s); return ret; diff --git a/hw/pci/pcie.c b/hw/pci/pcie.c index adeda04036..cbd4bb4f8c 100644 --- a/hw/pci/pcie.c +++ b/hw/pci/pcie.c @@ -656,7 +656,7 @@ static void pcie_ext_cap_set_next(PCIDevice *dev, uint16_t pos, uint16_t next) } /* - * caller must supply valid (offset, size) * such that the range shouldn't + * Caller must supply valid (offset, size) such that the range wouldn't * overlap with other capability or other registers. * This function doesn't check it. 
*/ diff --git a/hw/pci/shpc.c b/hw/pci/shpc.c index 3dcd472eba..42fafac91b 100644 --- a/hw/pci/shpc.c +++ b/hw/pci/shpc.c @@ -695,13 +695,16 @@ void shpc_cap_write_config(PCIDevice *d, uint32_t addr, uint32_t val, int l) shpc_cap_update_dword(d); } -static void shpc_save(QEMUFile *f, void *pv, size_t size) +static int shpc_save(QEMUFile *f, void *pv, size_t size, VMStateField *field, + QJSON *vmdesc) { PCIDevice *d = container_of(pv, PCIDevice, shpc); qemu_put_buffer(f, d->shpc->config, SHPC_SIZEOF(d)); + + return 0; } -static int shpc_load(QEMUFile *f, void *pv, size_t size) +static int shpc_load(QEMUFile *f, void *pv, size_t size, VMStateField *field) { PCIDevice *d = container_of(pv, PCIDevice, shpc); int ret = qemu_get_buffer(f, d->shpc->config, SHPC_SIZEOF(d)); diff --git a/hw/ppc/spapr_drc.c b/hw/ppc/spapr_drc.c index a0c44ee593..2de6377cca 100644 --- a/hw/ppc/spapr_drc.c +++ b/hw/ppc/spapr_drc.c @@ -59,7 +59,7 @@ static uint32_t set_isolation_state(sPAPRDRConnector *drc, trace_spapr_drc_set_isolation_state(get_index(drc), state); if (state == SPAPR_DR_ISOLATION_STATE_UNISOLATED) { - /* cannot unisolate a non-existant resource, and, or resources + /* cannot unisolate a non-existent resource, and, or resources * which are in an 'UNUSABLE' allocation state. (PAPR 2.7, 13.5.3.5) */ if (!drc->dev || diff --git a/hw/s390x/s390-pci-bus.h b/hw/s390x/s390-pci-bus.h index b0adefa788..0aad9cc272 100644 --- a/hw/s390x/s390-pci-bus.h +++ b/hw/s390x/s390-pci-bus.h @@ -183,8 +183,8 @@ enum ZpciIoatDtype { * may enter an error state * blocked: ignore all DMA and interrupts; transition back to enabled or from * error state via mpcifc - * error: an error occured; transition back to enabled via mpcifc - * permanent error: an unrecoverable error occured; transition to standby via + * error: an error occurred; transition back to enabled via mpcifc + * permanent error: an unrecoverable error occurred; transition to standby via * sclp deconfigure */ typedef enum { diff --git a/hw/scsi/megasas.c b/hw/scsi/megasas.c index 6233865494..6aad7c9a06 100644 --- a/hw/scsi/megasas.c +++ b/hw/scsi/megasas.c @@ -2289,7 +2289,7 @@ static const VMStateDescription vmstate_megasas_gen2 = { .minimum_version_id = 0, .minimum_version_id_old = 0, .fields = (VMStateField[]) { - VMSTATE_PCIE_DEVICE(parent_obj, MegasasState), + VMSTATE_PCI_DEVICE(parent_obj, MegasasState), VMSTATE_MSIX(parent_obj, MegasasState), VMSTATE_INT32(fw_state, MegasasState), diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c index 297216dfcb..5940cb160c 100644 --- a/hw/scsi/scsi-bus.c +++ b/hw/scsi/scsi-bus.c @@ -1945,7 +1945,8 @@ SCSIDevice *scsi_device_find(SCSIBus *bus, int channel, int id, int lun) /* SCSI request list. 
For simplicity, pv points to the whole device */ -static void put_scsi_requests(QEMUFile *f, void *pv, size_t size) +static int put_scsi_requests(QEMUFile *f, void *pv, size_t size, + VMStateField *field, QJSON *vmdesc) { SCSIDevice *s = pv; SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus); @@ -1968,9 +1969,12 @@ static void put_scsi_requests(QEMUFile *f, void *pv, size_t size) } } qemu_put_sbyte(f, 0); + + return 0; } -static int get_scsi_requests(QEMUFile *f, void *pv, size_t size) +static int get_scsi_requests(QEMUFile *f, void *pv, size_t size, + VMStateField *field) { SCSIDevice *s = pv; SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus); diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c index c080888413..cc06fe5f6c 100644 --- a/hw/scsi/scsi-disk.c +++ b/hw/scsi/scsi-disk.c @@ -2157,6 +2157,7 @@ static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf) DPRINTF("Write %s(sector %" PRId64 ", count %u)\n", (command & 0xe) == 0xe ? "And Verify " : "", r->req.cmd.lba, len); + /* fall through */ case VERIFY_10: case VERIFY_12: case VERIFY_16: diff --git a/hw/scsi/vhost-scsi.c b/hw/scsi/vhost-scsi.c index 5b2694615f..c491ece1f2 100644 --- a/hw/scsi/vhost-scsi.c +++ b/hw/scsi/vhost-scsi.c @@ -238,8 +238,16 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp) vhost_dummy_handle_output); if (err != NULL) { error_propagate(errp, err); - close(vhostfd); - return; + goto close_fd; + } + + error_setg(&s->migration_blocker, + "vhost-scsi does not support migration"); + migrate_add_blocker(s->migration_blocker, &err); + if (err) { + error_propagate(errp, err); + error_free(s->migration_blocker); + goto close_fd; } s->dev.nvqs = VHOST_SCSI_VQ_NUM_FIXED + vs->conf.num_queues; @@ -252,7 +260,7 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp) if (ret < 0) { error_setg(errp, "vhost-scsi: vhost initialization failed: %s", strerror(-ret)); - return; + goto free_vqs; } /* At present, channel and lun both are 0 for bootable vhost-scsi disk */ @@ -261,9 +269,14 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp) /* Note: we can also get the minimum tpgt from kernel */ s->target = vs->conf.boot_tpgt; - error_setg(&s->migration_blocker, - "vhost-scsi does not support migration"); - migrate_add_blocker(s->migration_blocker); + return; + + free_vqs: + migrate_del_blocker(s->migration_blocker); + g_free(s->dev.vqs); + close_fd: + close(vhostfd); + return; } static void vhost_scsi_unrealize(DeviceState *dev, Error **errp) diff --git a/hw/scsi/vmw_pvscsi.c b/hw/scsi/vmw_pvscsi.c index a5ce7dea8e..75575461e2 100644 --- a/hw/scsi/vmw_pvscsi.c +++ b/hw/scsi/vmw_pvscsi.c @@ -1207,7 +1207,7 @@ static const VMStateDescription vmstate_pvscsi_pcie_device = { .name = "pvscsi/pcie", .needed = pvscsi_vmstate_need_pcie_device, .fields = (VMStateField[]) { - VMSTATE_PCIE_DEVICE(parent_obj, PVSCSIState), + VMSTATE_PCI_DEVICE(parent_obj, PVSCSIState), VMSTATE_END_OF_LIST() } }; diff --git a/hw/timer/Makefile.objs b/hw/timer/Makefile.objs index c1e93a3924..71994f2d88 100644 --- a/hw/timer/Makefile.objs +++ b/hw/timer/Makefile.objs @@ -18,6 +18,7 @@ common-obj-$(CONFIG_IMX) += imx_gpt.o common-obj-$(CONFIG_LM32) += lm32_timer.o common-obj-$(CONFIG_MILKYMIST) += milkymist-sysctl.o +obj-$(CONFIG_ALTERA_TIMER) += altera_timer.o obj-$(CONFIG_EXYNOS4) += exynos4210_mct.o obj-$(CONFIG_EXYNOS4) += exynos4210_pwm.o obj-$(CONFIG_EXYNOS4) += exynos4210_rtc.o diff --git a/hw/timer/altera_timer.c b/hw/timer/altera_timer.c new file mode 100644 index 
0000000000..6d4862661d --- /dev/null +++ b/hw/timer/altera_timer.c @@ -0,0 +1,237 @@ +/* + * QEMU model of the Altera timer. + * + * Copyright (c) 2012 Chris Wulff <crwulff@gmail.com> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * <http://www.gnu.org/licenses/lgpl-2.1.html> + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qapi/error.h" + +#include "hw/sysbus.h" +#include "sysemu/sysemu.h" +#include "hw/ptimer.h" + +#define R_STATUS 0 +#define R_CONTROL 1 +#define R_PERIODL 2 +#define R_PERIODH 3 +#define R_SNAPL 4 +#define R_SNAPH 5 +#define R_MAX 6 + +#define STATUS_TO 0x0001 +#define STATUS_RUN 0x0002 + +#define CONTROL_ITO 0x0001 +#define CONTROL_CONT 0x0002 +#define CONTROL_START 0x0004 +#define CONTROL_STOP 0x0008 + +#define TYPE_ALTERA_TIMER "ALTR.timer" +#define ALTERA_TIMER(obj) \ + OBJECT_CHECK(AlteraTimer, (obj), TYPE_ALTERA_TIMER) + +typedef struct AlteraTimer { + SysBusDevice busdev; + MemoryRegion mmio; + qemu_irq irq; + uint32_t freq_hz; + QEMUBH *bh; + ptimer_state *ptimer; + uint32_t regs[R_MAX]; +} AlteraTimer; + +static int timer_irq_state(AlteraTimer *t) +{ + bool irq = (t->regs[R_STATUS] & STATUS_TO) && + (t->regs[R_CONTROL] & CONTROL_ITO); + return irq; +} + +static uint64_t timer_read(void *opaque, hwaddr addr, + unsigned int size) +{ + AlteraTimer *t = opaque; + uint64_t r = 0; + + addr >>= 2; + + switch (addr) { + case R_CONTROL: + r = t->regs[R_CONTROL] & (CONTROL_ITO | CONTROL_CONT); + break; + + default: + if (addr < ARRAY_SIZE(t->regs)) { + r = t->regs[addr]; + } + break; + } + + return r; +} + +static void timer_write(void *opaque, hwaddr addr, + uint64_t value, unsigned int size) +{ + AlteraTimer *t = opaque; + uint64_t tvalue; + uint32_t count = 0; + int irqState = timer_irq_state(t); + + addr >>= 2; + + switch (addr) { + case R_STATUS: + /* The timeout bit is cleared by writing the status register. 
*/ + t->regs[R_STATUS] &= ~STATUS_TO; + break; + + case R_CONTROL: + t->regs[R_CONTROL] = value & (CONTROL_ITO | CONTROL_CONT); + if ((value & CONTROL_START) && + !(t->regs[R_STATUS] & STATUS_RUN)) { + ptimer_run(t->ptimer, 1); + t->regs[R_STATUS] |= STATUS_RUN; + } + if ((value & CONTROL_STOP) && (t->regs[R_STATUS] & STATUS_RUN)) { + ptimer_stop(t->ptimer); + t->regs[R_STATUS] &= ~STATUS_RUN; + } + break; + + case R_PERIODL: + case R_PERIODH: + t->regs[addr] = value & 0xFFFF; + if (t->regs[R_STATUS] & STATUS_RUN) { + ptimer_stop(t->ptimer); + t->regs[R_STATUS] &= ~STATUS_RUN; + } + tvalue = (t->regs[R_PERIODH] << 16) | t->regs[R_PERIODL]; + ptimer_set_limit(t->ptimer, tvalue + 1, 1); + break; + + case R_SNAPL: + case R_SNAPH: + count = ptimer_get_count(t->ptimer); + t->regs[R_SNAPL] = count & 0xFFFF; + t->regs[R_SNAPH] = count >> 16; + break; + + default: + break; + } + + if (irqState != timer_irq_state(t)) { + qemu_set_irq(t->irq, timer_irq_state(t)); + } +} + +static const MemoryRegionOps timer_ops = { + .read = timer_read, + .write = timer_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4 + } +}; + +static void timer_hit(void *opaque) +{ + AlteraTimer *t = opaque; + const uint64_t tvalue = (t->regs[R_PERIODH] << 16) | t->regs[R_PERIODL]; + + t->regs[R_STATUS] |= STATUS_TO; + + ptimer_set_limit(t->ptimer, tvalue + 1, 1); + + if (!(t->regs[R_CONTROL] & CONTROL_CONT)) { + t->regs[R_STATUS] &= ~STATUS_RUN; + ptimer_set_count(t->ptimer, tvalue); + } else { + ptimer_run(t->ptimer, 1); + } + + qemu_set_irq(t->irq, timer_irq_state(t)); +} + +static void altera_timer_realize(DeviceState *dev, Error **errp) +{ + AlteraTimer *t = ALTERA_TIMER(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + if (t->freq_hz == 0) { + error_setg(errp, "\"clock-frequency\" property must be provided."); + return; + } + + t->bh = qemu_bh_new(timer_hit, t); + t->ptimer = ptimer_init(t->bh, PTIMER_POLICY_DEFAULT); + ptimer_set_freq(t->ptimer, t->freq_hz); + + memory_region_init_io(&t->mmio, OBJECT(t), &timer_ops, t, + TYPE_ALTERA_TIMER, R_MAX * sizeof(uint32_t)); + sysbus_init_mmio(sbd, &t->mmio); +} + +static void altera_timer_init(Object *obj) +{ + AlteraTimer *t = ALTERA_TIMER(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + sysbus_init_irq(sbd, &t->irq); +} + +static void altera_timer_reset(DeviceState *dev) +{ + AlteraTimer *t = ALTERA_TIMER(dev); + + ptimer_stop(t->ptimer); + ptimer_set_limit(t->ptimer, 0xffffffff, 1); + memset(t->regs, 0, ARRAY_SIZE(t->regs)); +} + +static Property altera_timer_properties[] = { + DEFINE_PROP_UINT32("clock-frequency", AlteraTimer, freq_hz, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void altera_timer_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = altera_timer_realize; + dc->props = altera_timer_properties; + dc->reset = altera_timer_reset; +} + +static const TypeInfo altera_timer_info = { + .name = TYPE_ALTERA_TIMER, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AlteraTimer), + .instance_init = altera_timer_init, + .class_init = altera_timer_class_init, +}; + +static void altera_timer_register(void) +{ + type_register_static(&altera_timer_info); +} + +type_init(altera_timer_register) diff --git a/hw/timer/twl92230.c b/hw/timer/twl92230.c index b8d914e49b..c0aa8ae3de 100644 --- a/hw/timer/twl92230.c +++ b/hw/timer/twl92230.c @@ -749,17 +749,21 @@ static int menelaus_rx(I2CSlave *i2c) Or we broke compatibility in the state, or we can't use struct tm 
*/ -static int get_int32_as_uint16(QEMUFile *f, void *pv, size_t size) +static int get_int32_as_uint16(QEMUFile *f, void *pv, size_t size, + VMStateField *field) { int *v = pv; *v = qemu_get_be16(f); return 0; } -static void put_int32_as_uint16(QEMUFile *f, void *pv, size_t size) +static int put_int32_as_uint16(QEMUFile *f, void *pv, size_t size, + VMStateField *field, QJSON *vmdesc) { int *v = pv; qemu_put_be16(f, *v); + + return 0; } static const VMStateInfo vmstate_hack_int32_as_uint16 = { diff --git a/hw/usb/bus.c b/hw/usb/bus.c index 25913ad488..1dcc35c8f8 100644 --- a/hw/usb/bus.c +++ b/hw/usb/bus.c @@ -8,6 +8,7 @@ #include "monitor/monitor.h" #include "trace.h" #include "qemu/cutils.h" +#include "migration/migration.h" static void usb_bus_dev_print(Monitor *mon, DeviceState *qdev, int indent); @@ -686,6 +687,8 @@ USBDevice *usbdevice_create(const char *cmdline) const char *params; int len; USBDevice *dev; + ObjectClass *klass; + DeviceClass *dc; params = strchr(cmdline,':'); if (params) { @@ -720,6 +723,22 @@ USBDevice *usbdevice_create(const char *cmdline) return NULL; } + klass = object_class_by_name(f->name); + if (klass == NULL) { + error_report("Device '%s' not found", f->name); + return NULL; + } + + dc = DEVICE_CLASS(klass); + + if (only_migratable) { + if (dc->vmsd->unmigratable) { + error_report("Device %s is not migratable, but --only-migratable " + "was specified", f->name); + return NULL; + } + } + if (f->usbdevice_init) { dev = f->usbdevice_init(bus, params); } else { diff --git a/hw/usb/dev-mtp.c b/hw/usb/dev-mtp.c index 9cb0f50750..94c2e94f10 100644 --- a/hw/usb/dev-mtp.c +++ b/hw/usb/dev-mtp.c @@ -1093,7 +1093,7 @@ static MTPData *usb_mtp_get_object_prop_value(MTPState *s, MTPControl *c, } break; case PROP_PERSISTENT_UNIQUE_OBJECT_IDENTIFIER: - /* Should be persistant between sessions, + /* Should be persistent between sessions, * but using our objedt ID is "good enough" * for now */ usb_mtp_add_u64(d, 0x0000000000000000); @@ -1580,6 +1580,8 @@ static void usb_mtp_class_initfn(ObjectClass *klass, void *data) uc->handle_reset = usb_mtp_handle_reset; uc->handle_control = usb_mtp_handle_control; uc->handle_data = usb_mtp_handle_data; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); + dc->desc = "USB Media Transfer Protocol device"; dc->fw_name = "mtp"; dc->vmsd = &vmstate_usb_mtp; dc->props = mtp_properties; diff --git a/hw/usb/hcd-xhci.c b/hw/usb/hcd-xhci.c index 4acf0c6dd8..e0b516987f 100644 --- a/hw/usb/hcd-xhci.c +++ b/hw/usb/hcd-xhci.c @@ -3894,7 +3894,7 @@ static const VMStateDescription vmstate_xhci = { .version_id = 1, .post_load = usb_xhci_post_load, .fields = (VMStateField[]) { - VMSTATE_PCIE_DEVICE(parent_obj, XHCIState), + VMSTATE_PCI_DEVICE(parent_obj, XHCIState), VMSTATE_MSIX(parent_obj, XHCIState), VMSTATE_STRUCT_VARRAY_UINT32(ports, XHCIState, numports, 1, diff --git a/hw/usb/redirect.c b/hw/usb/redirect.c index a65723781e..4a0ebbfb32 100644 --- a/hw/usb/redirect.c +++ b/hw/usb/redirect.c @@ -2165,7 +2165,8 @@ static int usbredir_post_load(void *priv, int version_id) } /* For usbredirparser migration */ -static void usbredir_put_parser(QEMUFile *f, void *priv, size_t unused) +static int usbredir_put_parser(QEMUFile *f, void *priv, size_t unused, + VMStateField *field, QJSON *vmdesc) { USBRedirDevice *dev = priv; uint8_t *data; @@ -2173,7 +2174,7 @@ static void usbredir_put_parser(QEMUFile *f, void *priv, size_t unused) if (dev->parser == NULL) { qemu_put_be32(f, 0); - return; + return 0; } usbredirparser_serialize(dev->parser, &data, &len); @@ -2183,9 
+2184,12 @@ static void usbredir_put_parser(QEMUFile *f, void *priv, size_t unused) qemu_put_buffer(f, data, len); free(data); + + return 0; } -static int usbredir_get_parser(QEMUFile *f, void *priv, size_t unused) +static int usbredir_get_parser(QEMUFile *f, void *priv, size_t unused, + VMStateField *field) { USBRedirDevice *dev = priv; uint8_t *data; @@ -2228,7 +2232,8 @@ static const VMStateInfo usbredir_parser_vmstate_info = { /* For buffered packets (iso/irq) queue migration */ -static void usbredir_put_bufpq(QEMUFile *f, void *priv, size_t unused) +static int usbredir_put_bufpq(QEMUFile *f, void *priv, size_t unused, + VMStateField *field, QJSON *vmdesc) { struct endp_data *endp = priv; USBRedirDevice *dev = endp->dev; @@ -2246,9 +2251,12 @@ static void usbredir_put_bufpq(QEMUFile *f, void *priv, size_t unused) i++; } assert(i == endp->bufpq_size); + + return 0; } -static int usbredir_get_bufpq(QEMUFile *f, void *priv, size_t unused) +static int usbredir_get_bufpq(QEMUFile *f, void *priv, size_t unused, + VMStateField *field) { struct endp_data *endp = priv; USBRedirDevice *dev = endp->dev; @@ -2351,7 +2359,8 @@ static const VMStateDescription usbredir_ep_vmstate = { /* For PacketIdQueue migration */ -static void usbredir_put_packet_id_q(QEMUFile *f, void *priv, size_t unused) +static int usbredir_put_packet_id_q(QEMUFile *f, void *priv, size_t unused, + VMStateField *field, QJSON *vmdesc) { struct PacketIdQueue *q = priv; USBRedirDevice *dev = q->dev; @@ -2365,9 +2374,12 @@ static void usbredir_put_packet_id_q(QEMUFile *f, void *priv, size_t unused) remain--; } assert(remain == 0); + + return 0; } -static int usbredir_get_packet_id_q(QEMUFile *f, void *priv, size_t unused) +static int usbredir_get_packet_id_q(QEMUFile *f, void *priv, size_t unused, + VMStateField *field) { struct PacketIdQueue *q = priv; USBRedirDevice *dev = q->dev; diff --git a/hw/vfio/pci-quirks.c b/hw/vfio/pci-quirks.c index 811eecd1b4..6c771f778b 100644 --- a/hw/vfio/pci-quirks.c +++ b/hw/vfio/pci-quirks.c @@ -1171,7 +1171,7 @@ static int vfio_pci_igd_host_init(VFIOPCIDevice *vdev, * IGD LPC/ISA bridge support code. The vBIOS needs this, but we can't write * arbitrary values into just any bridge, so we must create our own. We try * to handle if the user has created it for us, which they might want to do - * to enable multifuction so we don't occupy the whole PCI slot. + * to enable multifunction so we don't occupy the whole PCI slot. */ static void vfio_pci_igd_lpc_bridge_realize(PCIDevice *pdev, Error **errp) { diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c index d7dbe0e3e0..882d3a91b6 100644 --- a/hw/vfio/pci.c +++ b/hw/vfio/pci.c @@ -1881,8 +1881,8 @@ static void vfio_add_ext_cap(VFIOPCIDevice *vdev) * 0 is reserved for this since absence of capabilities is indicated by * 0 for the ID, version, AND next pointer. However, pcie_add_capability() * uses ID 0 as reserved for list management and will incorrectly match and - * assert if we attempt to pre-load the head of the chain with with this - * ID. Use ID 0xFFFF temporarily since it is also seems to be reserved in + * assert if we attempt to pre-load the head of the chain with this ID. + * Use ID 0xFFFF temporarily since it is also seems to be reserved in * part for identifying absence of capabilities in a root complex register * block. If the ID still exists after adding capabilities, switch back to * zero. We'll mark this entire first dword as emulated for this purpose. 
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c index 9cacf557f2..b124d97d7c 100644 --- a/hw/virtio/vhost.c +++ b/hw/virtio/vhost.c @@ -1176,6 +1176,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque, { uint64_t features; int i, r, n_initialized_vqs = 0; + Error *local_err = NULL; hdev->vdev = NULL; hdev->migration_blocker = NULL; @@ -1256,7 +1257,12 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque, } if (hdev->migration_blocker != NULL) { - migrate_add_blocker(hdev->migration_blocker); + r = migrate_add_blocker(hdev->migration_blocker, &local_err); + if (local_err) { + error_report_err(local_err); + error_free(hdev->migration_blocker); + goto fail_busyloop; + } } hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions)); diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c index 296472fc6e..0353eb6d5d 100644 --- a/hw/virtio/virtio-crypto.c +++ b/hw/virtio/virtio-crypto.c @@ -786,7 +786,7 @@ static void virtio_crypto_device_realize(DeviceState *dev, Error **errp) vcrypto->max_queues = MAX(vcrypto->cryptodev->conf.peers.queues, 1); if (vcrypto->max_queues + 1 > VIRTIO_QUEUE_MAX) { error_setg(errp, "Invalid number of queues (= %" PRIu32 "), " - "must be a postive integer less than %d.", + "must be a positive integer less than %d.", vcrypto->max_queues, VIRTIO_QUEUE_MAX); return; } diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c index 09230c05df..b5af2a00f3 100644 --- a/hw/virtio/virtio-pci.c +++ b/hw/virtio/virtio-pci.c @@ -108,7 +108,8 @@ static bool virtio_pci_has_extra_state(DeviceState *d) return proxy->flags & VIRTIO_PCI_FLAG_MIGRATE_EXTRA; } -static int get_virtio_pci_modern_state(QEMUFile *f, void *pv, size_t size) +static int get_virtio_pci_modern_state(QEMUFile *f, void *pv, size_t size, + VMStateField *field) { VirtIOPCIProxy *proxy = pv; int i; @@ -137,7 +138,8 @@ static void virtio_pci_save_modern_queue_state(VirtIOPCIQueue *vq, qemu_put_be32(f, vq->used[1]); } -static void put_virtio_pci_modern_state(QEMUFile *f, void *pv, size_t size) +static int put_virtio_pci_modern_state(QEMUFile *f, void *pv, size_t size, + VMStateField *field, QJSON *vmdesc) { VirtIOPCIProxy *proxy = pv; int i; @@ -149,6 +151,8 @@ static void put_virtio_pci_modern_state(QEMUFile *f, void *pv, size_t size) for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { virtio_pci_save_modern_queue_state(&proxy->vqs[i], f); } + + return 0; } static const VMStateInfo vmstate_info_virtio_pci_modern_state = { diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c index cc17b97899..f292a53940 100644 --- a/hw/virtio/virtio.c +++ b/hw/virtio/virtio.c @@ -1555,7 +1555,8 @@ static const VMStateDescription vmstate_virtio_ringsize = { } }; -static int get_extra_state(QEMUFile *f, void *pv, size_t size) +static int get_extra_state(QEMUFile *f, void *pv, size_t size, + VMStateField *field) { VirtIODevice *vdev = pv; BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); @@ -1568,13 +1569,15 @@ static int get_extra_state(QEMUFile *f, void *pv, size_t size) } } -static void put_extra_state(QEMUFile *f, void *pv, size_t size) +static int put_extra_state(QEMUFile *f, void *pv, size_t size, + VMStateField *field, QJSON *vmdesc) { VirtIODevice *vdev = pv; BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); k->save_extra_state(qbus->parent, f); + return 0; } static const VMStateInfo vmstate_info_extra_state = { @@ -1709,13 +1712,17 @@ void virtio_save(VirtIODevice *vdev, QEMUFile *f) } /* A wrapper for use as a VMState .put function */ -static void 
virtio_device_put(QEMUFile *f, void *opaque, size_t size) +static int virtio_device_put(QEMUFile *f, void *opaque, size_t size, + VMStateField *field, QJSON *vmdesc) { virtio_save(VIRTIO_DEVICE(opaque), f); + + return 0; } /* A wrapper for use as a VMState .get function */ -static int virtio_device_get(QEMUFile *f, void *opaque, size_t size) +static int virtio_device_get(QEMUFile *f, void *opaque, size_t size, + VMStateField *field) { VirtIODevice *vdev = VIRTIO_DEVICE(opaque); DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev)); diff --git a/hw/xtensa/pic_cpu.c b/hw/xtensa/pic_cpu.c index 2bed64f15b..0e812d7f06 100644 --- a/hw/xtensa/pic_cpu.c +++ b/hw/xtensa/pic_cpu.c @@ -31,22 +31,6 @@ #include "qemu/log.h" #include "qemu/timer.h" -void xtensa_advance_ccount(CPUXtensaState *env, uint32_t d) -{ - uint32_t old_ccount = env->sregs[CCOUNT] + 1; - - env->sregs[CCOUNT] += d; - - if (xtensa_option_enabled(env->config, XTENSA_OPTION_TIMER_INTERRUPT)) { - int i; - for (i = 0; i < env->config->nccompare; ++i) { - if (env->sregs[CCOMPARE + i] - old_ccount < d) { - xtensa_timer_irq(env, i, 1); - } - } - } -} - void check_interrupts(CPUXtensaState *env) { CPUState *cs = CPU(xtensa_env_get_cpu(env)); @@ -54,17 +38,6 @@ void check_interrupts(CPUXtensaState *env) uint32_t int_set_enabled = env->sregs[INTSET] & env->sregs[INTENABLE]; int level; - /* If the CPU is halted advance CCOUNT according to the QEMU_CLOCK_VIRTUAL time - * elapsed since the moment when it was advanced last time. - */ - if (cs->halted) { - int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); - - xtensa_advance_ccount(env, - muldiv64(now - env->halt_clock, - env->config->clock_freq_khz, 1000000)); - env->halt_clock = now; - } for (level = env->config->nlevel; level > minlevel; --level) { if (env->config->level_mask[level] & int_set_enabled) { env->pending_irq_level = level; @@ -109,49 +82,29 @@ void xtensa_timer_irq(CPUXtensaState *env, uint32_t id, uint32_t active) qemu_set_irq(env->irq_inputs[env->config->timerint[id]], active); } -void xtensa_rearm_ccompare_timer(CPUXtensaState *env) -{ - int i; - uint32_t wake_ccount = env->sregs[CCOUNT] - 1; - - for (i = 0; i < env->config->nccompare; ++i) { - if (env->sregs[CCOMPARE + i] - env->sregs[CCOUNT] < - wake_ccount - env->sregs[CCOUNT]) { - wake_ccount = env->sregs[CCOMPARE + i]; - } - } - env->wake_ccount = wake_ccount; - timer_mod(env->ccompare_timer, env->halt_clock + - (uint64_t)(wake_ccount - env->sregs[CCOUNT]) * - 1000000 / env->config->clock_freq_khz); -} - static void xtensa_ccompare_cb(void *opaque) { - XtensaCPU *cpu = opaque; - CPUXtensaState *env = &cpu->env; - CPUState *cs = CPU(cpu); + XtensaCcompareTimer *ccompare = opaque; + CPUXtensaState *env = ccompare->env; + unsigned i = ccompare - env->ccompare; - if (cs->halted) { - env->halt_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); - xtensa_advance_ccount(env, env->wake_ccount - env->sregs[CCOUNT]); - if (!cpu_has_work(cs)) { - env->sregs[CCOUNT] = env->wake_ccount + 1; - xtensa_rearm_ccompare_timer(env); - } - } + xtensa_timer_irq(env, i, 1); } void xtensa_irq_init(CPUXtensaState *env) { - XtensaCPU *cpu = xtensa_env_get_cpu(env); - env->irq_inputs = (void **)qemu_allocate_irqs( xtensa_set_irq, env, env->config->ninterrupt); - if (xtensa_option_enabled(env->config, XTENSA_OPTION_TIMER_INTERRUPT) && - env->config->nccompare > 0) { - env->ccompare_timer = - timer_new_ns(QEMU_CLOCK_VIRTUAL, &xtensa_ccompare_cb, cpu); + if (xtensa_option_enabled(env->config, XTENSA_OPTION_TIMER_INTERRUPT)) { + unsigned i; + + 
env->time_base = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + env->ccount_base = env->sregs[CCOUNT]; + for (i = 0; i < env->config->nccompare; ++i) { + env->ccompare[i].env = env; + env->ccompare[i].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + xtensa_ccompare_cb, env->ccompare + i); + } } } diff --git a/include/disas/bfd.h b/include/disas/bfd.h index 8a3488c2c5..0435b8c9f9 100644 --- a/include/disas/bfd.h +++ b/include/disas/bfd.h @@ -222,6 +222,10 @@ enum bfd_architecture bfd_arch_ia64, /* HP/Intel ia64 */ #define bfd_mach_ia64_elf64 64 #define bfd_mach_ia64_elf32 32 + bfd_arch_nios2, /* Nios II */ +#define bfd_mach_nios2 0 +#define bfd_mach_nios2r1 1 +#define bfd_mach_nios2r2 2 bfd_arch_lm32, /* Lattice Mico32 */ #define bfd_mach_lm32 1 bfd_arch_last @@ -415,6 +419,8 @@ int print_insn_crisv10 (bfd_vma, disassemble_info*); int print_insn_microblaze (bfd_vma, disassemble_info*); int print_insn_ia64 (bfd_vma, disassemble_info*); int print_insn_lm32 (bfd_vma, disassemble_info*); +int print_insn_big_nios2 (bfd_vma, disassemble_info*); +int print_insn_little_nios2 (bfd_vma, disassemble_info*); #if 0 /* Fetch the disassembler for a given BFD, if that support is available. */ diff --git a/include/elf.h b/include/elf.h index 1c2975dc82..0dbd3e968b 100644 --- a/include/elf.h +++ b/include/elf.h @@ -126,6 +126,8 @@ typedef int64_t Elf64_Sxword; */ #define EM_S390_OLD 0xA390 +#define EM_ALTERA_NIOS2 113 /* Altera Nios II soft-core processor */ + #define EM_MICROBLAZE 189 #define EM_MICROBLAZE_OLD 0xBAAB diff --git a/include/glib-compat.h b/include/glib-compat.h index acf254d2a0..0cd24ffbe9 100644 --- a/include/glib-compat.h +++ b/include/glib-compat.h @@ -39,7 +39,7 @@ static inline gint64 qemu_g_get_monotonic_time(void) #define g_get_monotonic_time() qemu_g_get_monotonic_time() #endif -#ifdef _WIN32 +#if defined(_WIN32) && !GLIB_CHECK_VERSION(2, 50, 0) /* * g_poll has a problem on Windows when using * timeouts < 10ms, so use wrapper. diff --git a/include/hw/dma/xlnx_dpdma.h b/include/hw/dma/xlnx_dpdma.h index 664df28ae6..7a304a5bb4 100644 --- a/include/hw/dma/xlnx_dpdma.h +++ b/include/hw/dma/xlnx_dpdma.h @@ -53,7 +53,8 @@ typedef struct XlnxDPDMAState XlnxDPDMAState; * data to the buffer specified by * dpdma_set_host_data_location(). * - * Returns The number of bytes transfered by the DPDMA or 0 if an error occured. + * Returns The number of bytes transferred by the DPDMA + * or 0 if an error occurred. * * @s The DPDMA state. * @channel The channel to start. diff --git a/include/hw/pci-host/q35.h b/include/hw/pci-host/q35.h index 94486fdd37..53b6760c16 100644 --- a/include/hw/pci-host/q35.h +++ b/include/hw/pci-host/q35.h @@ -180,7 +180,7 @@ typedef struct Q35PCIHost { uint64_t mch_mcfg_base(void); /* - * Arbitary but unique BNF number for IOAPIC device. + * Arbitrary but unique BNF number for IOAPIC device. 
* * TODO: make sure there would have no conflict with real PCI bus */ diff --git a/include/hw/pci/pcie.h b/include/hw/pci/pcie.h index b08451d2c5..163c5195b6 100644 --- a/include/hw/pci/pcie.h +++ b/include/hw/pci/pcie.h @@ -125,16 +125,6 @@ void pcie_ari_init(PCIDevice *dev, uint16_t offset, uint16_t nextfn); void pcie_dev_ser_num_init(PCIDevice *dev, uint16_t offset, uint64_t ser_num); void pcie_ats_init(PCIDevice *dev, uint16_t offset); -extern const VMStateDescription vmstate_pcie_device; - -#define VMSTATE_PCIE_DEVICE(_field, _state) { \ - .name = (stringify(_field)), \ - .size = sizeof(PCIDevice), \ - .vmsd = &vmstate_pcie_device, \ - .flags = VMS_STRUCT, \ - .offset = vmstate_offset_value(_state, _field, PCIDevice), \ -} - void pcie_cap_slot_hotplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp); void pcie_cap_slot_hot_unplug_request_cb(HotplugHandler *hotplug_dev, diff --git a/include/hw/register.h b/include/hw/register.h index 8c12233b75..5b6dc32091 100644 --- a/include/hw/register.h +++ b/include/hw/register.h @@ -92,7 +92,7 @@ struct RegisterInfo { * This structure is used to group all of the individual registers which are * modeled using the RegisterInfo structure. * - * @r is an aray containing of all the relevent RegisterInfo structures. + * @r is an array containing of all the relevant RegisterInfo structures. * * @num_elements is the number of elements in the array r * diff --git a/include/migration/migration.h b/include/migration/migration.h index c309d23370..af9135f0a7 100644 --- a/include/migration/migration.h +++ b/include/migration/migration.h @@ -38,6 +38,9 @@ #define QEMU_VM_COMMAND 0x08 #define QEMU_VM_SECTION_FOOTER 0x7e +/* for vl.c */ +extern int only_migratable; + struct MigrationParams { bool blk; bool shared; @@ -177,6 +180,9 @@ struct MigrationState /* Flag set once the migration thread is running (and needs joining) */ bool migration_thread_running; + /* Flag set once the migration thread called bdrv_inactivate_all */ + bool block_inactive; + /* Queue of outstanding page requests from the destination */ QemuMutex src_page_req_mutex; QSIMPLEQ_HEAD(src_page_requests, MigrationSrcPageRequest) src_page_requests; @@ -240,6 +246,7 @@ void remove_migration_state_change_notifier(Notifier *notify); MigrationState *migrate_init(const MigrationParams *params); bool migration_is_blocked(Error **errp); bool migration_in_setup(MigrationState *); +bool migration_is_idle(MigrationState *s); bool migration_has_finished(MigrationState *); bool migration_has_failed(MigrationState *); /* True if outgoing migration has entered postcopy phase */ @@ -284,8 +291,12 @@ int ram_postcopy_incoming_init(MigrationIncomingState *mis); * @migrate_add_blocker - prevent migration from proceeding * * @reason - an error to be returned whenever migration is attempted + * + * @errp - [out] The reason (if any) we cannot block migration right now. + * + * @returns - 0 on success, -EBUSY/-EACCES on failure, with errp set. 
*/ -void migrate_add_blocker(Error *reason); +int migrate_add_blocker(Error *reason, Error **errp); /** * @migrate_del_blocker - remove a blocking error from migration diff --git a/include/migration/vmstate.h b/include/migration/vmstate.h index 2125829a16..3bbe3ed984 100644 --- a/include/migration/vmstate.h +++ b/include/migration/vmstate.h @@ -81,11 +81,20 @@ void unregister_savevm(DeviceState *dev, const char *idstr, void *opaque); typedef struct VMStateInfo VMStateInfo; typedef struct VMStateDescription VMStateDescription; - +typedef struct VMStateField VMStateField; + +/* VMStateInfo allows customized migration of objects that don't fit in + * any category in VMStateFlags. Additional information is always passed + * into get and put in terms of field and vmdesc parameters. However + * these two parameters should only be used in cases when customized + * handling is needed, such as QTAILQ. For primitive data types such as + * integer, field and vmdesc parameters should be ignored inside get/put. + */ struct VMStateInfo { const char *name; - int (*get)(QEMUFile *f, void *pv, size_t size); - void (*put)(QEMUFile *f, void *pv, size_t size); + int (*get)(QEMUFile *f, void *pv, size_t size, VMStateField *field); + int (*put)(QEMUFile *f, void *pv, size_t size, VMStateField *field, + QJSON *vmdesc); }; enum VMStateFlags { @@ -192,7 +201,7 @@ typedef enum { MIG_PRI_MAX, } MigrationPriority; -typedef struct { +struct VMStateField { const char *name; size_t offset; size_t size; @@ -205,7 +214,7 @@ typedef struct { const VMStateDescription *vmsd; int version_id; bool (*field_exists)(void *opaque, int version_id); -} VMStateField; +}; struct VMStateDescription { const char *name; @@ -251,6 +260,7 @@ extern const VMStateInfo vmstate_info_timer; extern const VMStateInfo vmstate_info_buffer; extern const VMStateInfo vmstate_info_unused_buffer; extern const VMStateInfo vmstate_info_bitmap; +extern const VMStateInfo vmstate_info_qtailq; #define type_check_2darray(t1,t2,n,m) ((t1(*)[n][m])0 - (t2*)0) #define type_check_array(t1,t2,n) ((t1(*)[n])0 - (t2*)0) @@ -662,6 +672,25 @@ extern const VMStateInfo vmstate_info_bitmap; .offset = offsetof(_state, _field), \ } +/* For migrating a QTAILQ. + * Target QTAILQ needs be properly initialized. + * _type: type of QTAILQ element + * _next: name of QTAILQ entry field in QTAILQ element + * _vmsd: VMSD for QTAILQ element + * size: size of QTAILQ element + * start: offset of QTAILQ entry in QTAILQ element + */ +#define VMSTATE_QTAILQ_V(_field, _state, _version, _vmsd, _type, _next) \ +{ \ + .name = (stringify(_field)), \ + .version_id = (_version), \ + .vmsd = &(_vmsd), \ + .size = sizeof(_type), \ + .info = &vmstate_info_qtailq, \ + .offset = offsetof(_state, _field), \ + .start = offsetof(_type, _next), \ +} + /* _f : field name _f_n : num of elements field_name _n : num of elements diff --git a/include/qapi/dealloc-visitor.h b/include/qapi/dealloc-visitor.h index b3e5c85fd8..c36715fdf3 100644 --- a/include/qapi/dealloc-visitor.h +++ b/include/qapi/dealloc-visitor.h @@ -19,7 +19,7 @@ typedef struct QapiDeallocVisitor QapiDeallocVisitor; /* - * The dealloc visitor is primarly used only by generated + * The dealloc visitor is primarily used only by generated * qapi_free_FOO() functions, and is the only visitor designed to work * correctly in the face of a partially-constructed QAPI tree. 
*/ diff --git a/include/qemu/qht.h b/include/qemu/qht.h index 311139b85a..56c2c7784c 100644 --- a/include/qemu/qht.h +++ b/include/qemu/qht.h @@ -72,7 +72,7 @@ void qht_destroy(struct qht *ht); * In case of successful operation, smp_wmb() is implied before the pointer is * inserted into the hash table. * - * Returns true on sucess. + * Returns true on success. * Returns false if the @p-@hash pair already exists in the hash table. */ bool qht_insert(struct qht *ht, void *p, uint32_t hash); diff --git a/include/qemu/queue.h b/include/qemu/queue.h index 342073fb4d..35292c3155 100644 --- a/include/qemu/queue.h +++ b/include/qemu/queue.h @@ -438,4 +438,64 @@ struct { \ #define QTAILQ_PREV(elm, headname, field) \ (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) +#define field_at_offset(base, offset, type) \ + ((type) (((char *) (base)) + (offset))) + +typedef struct DUMMY_Q_ENTRY DUMMY_Q_ENTRY; +typedef struct DUMMY_Q DUMMY_Q; + +struct DUMMY_Q_ENTRY { + QTAILQ_ENTRY(DUMMY_Q_ENTRY) next; +}; + +struct DUMMY_Q { + QTAILQ_HEAD(DUMMY_Q_HEAD, DUMMY_Q_ENTRY) head; +}; + +#define dummy_q ((DUMMY_Q *) 0) +#define dummy_qe ((DUMMY_Q_ENTRY *) 0) + +/* + * Offsets of layout of a tail queue head. + */ +#define QTAILQ_FIRST_OFFSET (offsetof(typeof(dummy_q->head), tqh_first)) +#define QTAILQ_LAST_OFFSET (offsetof(typeof(dummy_q->head), tqh_last)) +/* + * Raw access of elements of a tail queue + */ +#define QTAILQ_RAW_FIRST(head) \ + (*field_at_offset(head, QTAILQ_FIRST_OFFSET, void **)) +#define QTAILQ_RAW_TQH_LAST(head) \ + (*field_at_offset(head, QTAILQ_LAST_OFFSET, void ***)) + +/* + * Offsets of layout of a tail queue element. + */ +#define QTAILQ_NEXT_OFFSET (offsetof(typeof(dummy_qe->next), tqe_next)) +#define QTAILQ_PREV_OFFSET (offsetof(typeof(dummy_qe->next), tqe_prev)) + +/* + * Raw access of elements of a tail entry + */ +#define QTAILQ_RAW_NEXT(elm, entry) \ + (*field_at_offset(elm, entry + QTAILQ_NEXT_OFFSET, void **)) +#define QTAILQ_RAW_TQE_PREV(elm, entry) \ + (*field_at_offset(elm, entry + QTAILQ_PREV_OFFSET, void ***)) +/* + * Tail queue tranversal using pointer arithmetic. + */ +#define QTAILQ_RAW_FOREACH(elm, head, entry) \ + for ((elm) = QTAILQ_RAW_FIRST(head); \ + (elm); \ + (elm) = QTAILQ_RAW_NEXT(elm, entry)) +/* + * Tail queue insertion using pointer arithmetic. + */ +#define QTAILQ_RAW_INSERT_TAIL(head, elm, entry) do { \ + QTAILQ_RAW_NEXT(elm, entry) = NULL; \ + QTAILQ_RAW_TQE_PREV(elm, entry) = QTAILQ_RAW_TQH_LAST(head); \ + *QTAILQ_RAW_TQH_LAST(head) = (elm); \ + QTAILQ_RAW_TQH_LAST(head) = &QTAILQ_RAW_NEXT(elm, entry); \ +} while (/*CONSTCOND*/0) + #endif /* QEMU_SYS_QUEUE_H */ diff --git a/include/qemu/xattr.h b/include/qemu/xattr.h index 83cf98cbd8..a83fe8e749 100644 --- a/include/qemu/xattr.h +++ b/include/qemu/xattr.h @@ -14,7 +14,7 @@ #define QEMU_XATTR_H /* - * Modern distributions (e.g. Fedora 15, have no libattr.so, place attr.h + * Modern distributions (e.g. Fedora 15), have no libattr.so, place attr.h * in /usr/include/sys, and don't have ENOATTR. */ diff --git a/include/qom/object.h b/include/qom/object.h index 5ecc2d166d..cd0f412ce9 100644 --- a/include/qom/object.h +++ b/include/qom/object.h @@ -432,7 +432,7 @@ struct Object * @class_base_init: This function is called for all base classes after all * parent class initialization has occurred, but before the class itself * is initialized. This is the function to use to undo the effects of - * memcpy from the parent class to the descendents. + * memcpy from the parent class to the descendants. 
* @class_finalize: This function is called during class destruction and is * meant to release and dynamic parameters allocated by @class_init. * @class_data: Data to pass to the @class_init, @class_base_init and @@ -587,18 +587,6 @@ struct InterfaceClass Object *object_new(const char *typename); /** - * object_new_with_type: - * @type: The type of the object to instantiate. - * - * This function will initialize a new object using heap allocated memory. - * The returned object has a reference count of 1, and will be freed when - * the last reference is dropped. - * - * Returns: The newly allocated and instantiated object. - */ -Object *object_new_with_type(Type type); - -/** * object_new_with_props: * @typename: The name of the type of the object to instantiate. * @parent: the parent object @@ -727,18 +715,6 @@ int object_set_propv(Object *obj, va_list vargs); /** - * object_initialize_with_type: - * @data: A pointer to the memory to be used for the object. - * @size: The maximum size available at @data for the object. - * @type: The type of the object to instantiate. - * - * This function will initialize an object. The memory for the object should - * have already been allocated. The returned object has a reference count of 1, - * and will be finalized when the last reference is dropped. - */ -void object_initialize_with_type(void *data, size_t size, Type type); - -/** * object_initialize: * @obj: A pointer to the memory to be used for the object. * @size: The maximum size available at @obj for the object. diff --git a/include/sysemu/arch_init.h b/include/sysemu/arch_init.h index 20b01e3004..2bf16b203c 100644 --- a/include/sysemu/arch_init.h +++ b/include/sysemu/arch_init.h @@ -23,6 +23,7 @@ enum { QEMU_ARCH_UNICORE32 = (1 << 14), QEMU_ARCH_MOXIE = (1 << 15), QEMU_ARCH_TRICORE = (1 << 16), + QEMU_ARCH_NIOS2 = (1 << 17), }; extern const uint32_t arch_type; diff --git a/linux-user/elfload.c b/linux-user/elfload.c index 51794bbb45..c66cbbe84b 100644 --- a/linux-user/elfload.c +++ b/linux-user/elfload.c @@ -967,6 +967,63 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMBState *env #endif /* TARGET_MICROBLAZE */ +#ifdef TARGET_NIOS2 + +#define ELF_START_MMAP 0x80000000 + +#define elf_check_arch(x) ((x) == EM_ALTERA_NIOS2) + +#define ELF_CLASS ELFCLASS32 +#define ELF_ARCH EM_ALTERA_NIOS2 + +static void init_thread(struct target_pt_regs *regs, struct image_info *infop) +{ + regs->ea = infop->entry; + regs->sp = infop->start_stack; + regs->estatus = 0x3; +} + +#define ELF_EXEC_PAGESIZE 4096 + +#define USE_ELF_CORE_DUMP +#define ELF_NREG 49 +typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; + +/* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs. 
*/ +static void elf_core_copy_regs(target_elf_gregset_t *regs, + const CPUNios2State *env) +{ + int i; + + (*regs)[0] = -1; + for (i = 1; i < 8; i++) /* r0-r7 */ + (*regs)[i] = tswapreg(env->regs[i + 7]); + + for (i = 8; i < 16; i++) /* r8-r15 */ + (*regs)[i] = tswapreg(env->regs[i - 8]); + + for (i = 16; i < 24; i++) /* r16-r23 */ + (*regs)[i] = tswapreg(env->regs[i + 7]); + (*regs)[24] = -1; /* R_ET */ + (*regs)[25] = -1; /* R_BT */ + (*regs)[26] = tswapreg(env->regs[R_GP]); + (*regs)[27] = tswapreg(env->regs[R_SP]); + (*regs)[28] = tswapreg(env->regs[R_FP]); + (*regs)[29] = tswapreg(env->regs[R_EA]); + (*regs)[30] = -1; /* R_SSTATUS */ + (*regs)[31] = tswapreg(env->regs[R_RA]); + + (*regs)[32] = tswapreg(env->regs[R_PC]); + + (*regs)[33] = -1; /* R_STATUS */ + (*regs)[34] = tswapreg(env->regs[CR_ESTATUS]); + + for (i = 35; i < 49; i++) /* ... */ + (*regs)[i] = -1; +} + +#endif /* TARGET_NIOS2 */ + #ifdef TARGET_OPENRISC #define ELF_START_MMAP 0x08000000 diff --git a/linux-user/main.c b/linux-user/main.c index db4eb682a2..f5c85574f9 100644 --- a/linux-user/main.c +++ b/linux-user/main.c @@ -68,8 +68,11 @@ do { \ * This way we will never overlap with our own libraries or binaries or stack * or anything else that QEMU maps. */ -# ifdef TARGET_MIPS -/* MIPS only supports 31 bits of virtual address space for user space */ +# if defined(TARGET_MIPS) || defined(TARGET_NIOS2) +/* + * MIPS only supports 31 bits of virtual address space for user space. + * Nios2 also only supports 31 bits. + */ unsigned long reserved_va = 0x77000000; # else unsigned long reserved_va = 0xf7000000; @@ -2462,6 +2465,109 @@ error: } #endif +#ifdef TARGET_NIOS2 + +void cpu_loop(CPUNios2State *env) +{ + CPUState *cs = ENV_GET_CPU(env); + Nios2CPU *cpu = NIOS2_CPU(cs); + target_siginfo_t info; + int trapnr, gdbsig, ret; + + for (;;) { + cpu_exec_start(cs); + trapnr = cpu_exec(cs); + cpu_exec_end(cs); + gdbsig = 0; + + switch (trapnr) { + case EXCP_INTERRUPT: + /* just indicate that signals should be handled asap */ + break; + case EXCP_TRAP: + if (env->regs[R_AT] == 0) { + abi_long ret; + qemu_log_mask(CPU_LOG_INT, "\nSyscall\n"); + + ret = do_syscall(env, env->regs[2], + env->regs[4], env->regs[5], env->regs[6], + env->regs[7], env->regs[8], env->regs[9], + 0, 0); + + if (env->regs[2] == 0) { /* FIXME: syscall 0 workaround */ + ret = 0; + } + + env->regs[2] = abs(ret); + /* Return value is 0..4096 */ + env->regs[7] = (ret > 0xfffffffffffff000ULL); + env->regs[CR_ESTATUS] = env->regs[CR_STATUS]; + env->regs[CR_STATUS] &= ~0x3; + env->regs[R_EA] = env->regs[R_PC] + 4; + env->regs[R_PC] += 4; + break; + } else { + qemu_log_mask(CPU_LOG_INT, "\nTrap\n"); + + env->regs[CR_ESTATUS] = env->regs[CR_STATUS]; + env->regs[CR_STATUS] &= ~0x3; + env->regs[R_EA] = env->regs[R_PC] + 4; + env->regs[R_PC] = cpu->exception_addr; + + gdbsig = TARGET_SIGTRAP; + break; + } + case 0xaa: + switch (env->regs[R_PC]) { + /*case 0x1000:*/ /* TODO:__kuser_helper_version */ + case 0x1004: /* __kuser_cmpxchg */ + start_exclusive(); + if (env->regs[4] & 0x3) { + goto kuser_fail; + } + ret = get_user_u32(env->regs[2], env->regs[4]); + if (ret) { + end_exclusive(); + goto kuser_fail; + } + env->regs[2] -= env->regs[5]; + if (env->regs[2] == 0) { + put_user_u32(env->regs[6], env->regs[4]); + } + end_exclusive(); + env->regs[R_PC] = env->regs[R_RA]; + break; + /*case 0x1040:*/ /* TODO:__kuser_sigtramp */ + default: + ; +kuser_fail: + info.si_signo = TARGET_SIGSEGV; + info.si_errno = 0; + /* TODO: check env->error_code */ + info.si_code = 
TARGET_SEGV_MAPERR; + info._sifields._sigfault._addr = env->regs[R_PC]; + queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + } + break; + default: + EXCP_DUMP(env, "\nqemu: unhandled CPU exception %#x - aborting\n", + trapnr); + gdbsig = TARGET_SIGILL; + break; + } + if (gdbsig) { + gdb_handlesig(cs, gdbsig); + if (gdbsig != TARGET_SIGTRAP) { + exit(EXIT_FAILURE); + } + } + + process_pending_signals(env); + } +} + +#endif /* TARGET_NIOS2 */ + #ifdef TARGET_OPENRISC void cpu_loop(CPUOpenRISCState *env) @@ -4632,6 +4738,36 @@ int main(int argc, char **argv, char **envp) restore_snan_bit_mode(env); } } +#elif defined(TARGET_NIOS2) + { + env->regs[0] = 0; + env->regs[1] = regs->r1; + env->regs[2] = regs->r2; + env->regs[3] = regs->r3; + env->regs[4] = regs->r4; + env->regs[5] = regs->r5; + env->regs[6] = regs->r6; + env->regs[7] = regs->r7; + env->regs[8] = regs->r8; + env->regs[9] = regs->r9; + env->regs[10] = regs->r10; + env->regs[11] = regs->r11; + env->regs[12] = regs->r12; + env->regs[13] = regs->r13; + env->regs[14] = regs->r14; + env->regs[15] = regs->r15; + /* TODO: unsigned long orig_r2; */ + env->regs[R_RA] = regs->ra; + env->regs[R_FP] = regs->fp; + env->regs[R_SP] = regs->sp; + env->regs[R_GP] = regs->gp; + env->regs[CR_ESTATUS] = regs->estatus; + env->regs[R_EA] = regs->ea; + /* TODO: unsigned long orig_r7; */ + + /* Emulate eret when starting thread. */ + env->regs[R_PC] = regs->ea; + } #elif defined(TARGET_OPENRISC) { int i; diff --git a/linux-user/nios2/syscall_nr.h b/linux-user/nios2/syscall_nr.h new file mode 100644 index 0000000000..8b46763673 --- /dev/null +++ b/linux-user/nios2/syscall_nr.h @@ -0,0 +1,329 @@ +#define TARGET_NR_io_setup 0 +#define TARGET_NR_io_destroy 1 +#define TARGET_NR_io_submit 2 +#define TARGET_NR_io_cancel 3 +#define TARGET_NR_io_getevents 4 +#define TARGET_NR_setxattr 5 +#define TARGET_NR_lsetxattr 6 +#define TARGET_NR_fsetxattr 7 +#define TARGET_NR_getxattr 8 +#define TARGET_NR_lgetxattr 9 +#define TARGET_NR_fgetxattr 10 +#define TARGET_NR_listxattr 11 +#define TARGET_NR_llistxattr 12 +#define TARGET_NR_flistxattr 13 +#define TARGET_NR_removexattr 14 +#define TARGET_NR_lremovexattr 15 +#define TARGET_NR_fremovexattr 16 +#define TARGET_NR_getcwd 17 +#define TARGET_NR_lookup_dcookie 18 +#define TARGET_NR_eventfd2 19 +#define TARGET_NR_epoll_create1 20 +#define TARGET_NR_epoll_ctl 21 +#define TARGET_NR_epoll_pwait 22 +#define TARGET_NR_dup 23 +#define TARGET_NR_dup3 24 +#define TARGET_NR_fcntl64 25 +#define TARGET_NR_inotify_init1 26 +#define TARGET_NR_inotify_add_watch 27 +#define TARGET_NR_inotify_rm_watch 28 +#define TARGET_NR_ioctl 29 +#define TARGET_NR_ioprio_set 30 +#define TARGET_NR_ioprio_get 31 +#define TARGET_NR_flock 32 +#define TARGET_NR_mknodat 33 +#define TARGET_NR_mkdirat 34 +#define TARGET_NR_unlinkat 35 +#define TARGET_NR_symlinkat 36 +#define TARGET_NR_linkat 37 +#define TARGET_NR_renameat 38 +#define TARGET_NR_umount2 39 +#define TARGET_NR_mount 40 +#define TARGET_NR_pivot_root 41 +#define TARGET_NR_nfsservctl 42 +#define TARGET_NR_statfs64 43 +#define TARGET_NR_fstatfs64 44 +#define TARGET_NR_truncate64 45 +#define TARGET_NR_ftruncate64 46 +#define TARGET_NR_fallocate 47 +#define TARGET_NR_faccessat 48 +#define TARGET_NR_chdir 49 +#define TARGET_NR_fchdir 50 +#define TARGET_NR_chroot 51 +#define TARGET_NR_fchmod 52 +#define TARGET_NR_fchmodat 53 +#define TARGET_NR_fchownat 54 +#define TARGET_NR_fchown 55 +#define TARGET_NR_openat 56 +#define TARGET_NR_close 57 +#define TARGET_NR_vhangup 58 +#define TARGET_NR_pipe2 59 
+#define TARGET_NR_quotactl 60 +#define TARGET_NR_getdents64 61 +#define TARGET_NR_read 63 +#define TARGET_NR_write 64 +#define TARGET_NR_readv 65 +#define TARGET_NR_writev 66 +#define TARGET_NR_pread64 67 +#define TARGET_NR_pwrite64 68 +#define TARGET_NR_preadv 69 +#define TARGET_NR_pwritev 70 +#define TARGET_NR_sendfile64 71 +#define TARGET_NR_pselect6 72 +#define TARGET_NR_ppoll 73 +#define TARGET_NR_signalfd4 74 +#define TARGET_NR_vmsplice 75 +#define TARGET_NR_splice 76 +#define TARGET_NR_tee 77 +#define TARGET_NR_readlinkat 78 +#define TARGET_NR_fstatat64 79 +#define TARGET_NR_fstat64 80 +#define TARGET_NR_sync 81 +#define TARGET_NR_fsync 82 +#define TARGET_NR_fdatasync 83 +#define TARGET_NR_sync_file_range 84 +#define TARGET_NR_timerfd_create 85 +#define TARGET_NR_timerfd_settime 86 +#define TARGET_NR_timerfd_gettime 87 +#define TARGET_NR_utimensat 88 +#define TARGET_NR_acct 89 +#define TARGET_NR_capget 90 +#define TARGET_NR_capset 91 +#define TARGET_NR_personality 92 +#define TARGET_NR_exit 93 +#define TARGET_NR_exit_group 94 +#define TARGET_NR_waitid 95 +#define TARGET_NR_set_tid_address 96 +#define TARGET_NR_unshare 97 +#define TARGET_NR_futex 98 +#define TARGET_NR_set_robust_list 99 +#define TARGET_NR_get_robust_list 100 +#define TARGET_NR_nanosleep 101 +#define TARGET_NR_getitimer 102 +#define TARGET_NR_setitimer 103 +#define TARGET_NR_kexec_load 104 +#define TARGET_NR_init_module 105 +#define TARGET_NR_delete_module 106 +#define TARGET_NR_timer_create 107 +#define TARGET_NR_timer_gettime 108 +#define TARGET_NR_timer_getoverrun 109 +#define TARGET_NR_timer_settime 110 +#define TARGET_NR_timer_delete 111 +#define TARGET_NR_clock_settime 112 +#define TARGET_NR_clock_gettime 113 +#define TARGET_NR_clock_getres 114 +#define TARGET_NR_clock_nanosleep 115 +#define TARGET_NR_syslog 116 +#define TARGET_NR_ptrace 117 +#define TARGET_NR_sched_setparam 118 +#define TARGET_NR_sched_setscheduler 119 +#define TARGET_NR_sched_getscheduler 120 +#define TARGET_NR_sched_getparam 121 +#define TARGET_NR_sched_setaffinity 122 +#define TARGET_NR_sched_getaffinity 123 +#define TARGET_NR_sched_yield 124 +#define TARGET_NR_sched_get_priority_max 125 +#define TARGET_NR_sched_get_priority_min 126 +#define TARGET_NR_sched_rr_get_interval 127 +#define TARGET_NR_restart_syscall 128 +#define TARGET_NR_kill 129 +#define TARGET_NR_tkill 130 +#define TARGET_NR_tgkill 131 +#define TARGET_NR_sigaltstack 132 +#define TARGET_NR_rt_sigsuspend 133 +#define TARGET_NR_rt_sigaction 134 +#define TARGET_NR_rt_sigprocmask 135 +#define TARGET_NR_rt_sigpending 136 +#define TARGET_NR_rt_sigtimedwait 137 +#define TARGET_NR_rt_sigqueueinfo 138 +#define TARGET_NR_rt_sigreturn 139 +#define TARGET_NR_setpriority 140 +#define TARGET_NR_getpriority 141 +#define TARGET_NR_reboot 142 +#define TARGET_NR_setregid 143 +#define TARGET_NR_setgid 144 +#define TARGET_NR_setreuid 145 +#define TARGET_NR_setuid 146 +#define TARGET_NR_setresuid 147 +#define TARGET_NR_getresuid 148 +#define TARGET_NR_setresgid 149 +#define TARGET_NR_getresgid 150 +#define TARGET_NR_setfsuid 151 +#define TARGET_NR_setfsgid 152 +#define TARGET_NR_times 153 +#define TARGET_NR_setpgid 154 +#define TARGET_NR_getpgid 155 +#define TARGET_NR_getsid 156 +#define TARGET_NR_setsid 157 +#define TARGET_NR_getgroups 158 +#define TARGET_NR_setgroups 159 +#define TARGET_NR_uname 160 +#define TARGET_NR_sethostname 161 +#define TARGET_NR_setdomainname 162 +#define TARGET_NR_getrlimit 163 +#define TARGET_NR_setrlimit 164 +#define TARGET_NR_getrusage 165 +#define TARGET_NR_umask 166 
+#define TARGET_NR_prctl 167 +#define TARGET_NR_getcpu 168 +#define TARGET_NR_gettimeofday 169 +#define TARGET_NR_settimeofday 170 +#define TARGET_NR_adjtimex 171 +#define TARGET_NR_getpid 172 +#define TARGET_NR_getppid 173 +#define TARGET_NR_getuid 174 +#define TARGET_NR_geteuid 175 +#define TARGET_NR_getgid 176 +#define TARGET_NR_getegid 177 +#define TARGET_NR_gettid 178 +#define TARGET_NR_sysinfo 179 +#define TARGET_NR_mq_open 180 +#define TARGET_NR_mq_unlink 181 +#define TARGET_NR_mq_timedsend 182 +#define TARGET_NR_mq_timedreceive 183 +#define TARGET_NR_mq_notify 184 +#define TARGET_NR_mq_getsetattr 185 +#define TARGET_NR_msgget 186 +#define TARGET_NR_msgctl 187 +#define TARGET_NR_msgrcv 188 +#define TARGET_NR_msgsnd 189 +#define TARGET_NR_semget 190 +#define TARGET_NR_semctl 191 +#define TARGET_NR_semtimedop 192 +#define TARGET_NR_semop 193 +#define TARGET_NR_shmget 194 +#define TARGET_NR_shmctl 195 +#define TARGET_NR_shmat 196 +#define TARGET_NR_shmdt 197 +#define TARGET_NR_socket 198 +#define TARGET_NR_socketpair 199 +#define TARGET_NR_bind 200 +#define TARGET_NR_listen 201 +#define TARGET_NR_accept 202 +#define TARGET_NR_connect 203 +#define TARGET_NR_getsockname 204 +#define TARGET_NR_getpeername 205 +#define TARGET_NR_sendto 206 +#define TARGET_NR_recvfrom 207 +#define TARGET_NR_setsockopt 208 +#define TARGET_NR_getsockopt 209 +#define TARGET_NR_shutdown 210 +#define TARGET_NR_sendmsg 211 +#define TARGET_NR_recvmsg 212 +#define TARGET_NR_readahead 213 +#define TARGET_NR_brk 214 +#define TARGET_NR_munmap 215 +#define TARGET_NR_mremap 216 +#define TARGET_NR_add_key 217 +#define TARGET_NR_request_key 218 +#define TARGET_NR_keyctl 219 +#define TARGET_NR_clone 220 +#define TARGET_NR_execve 221 +#define TARGET_NR_mmap2 222 +#define TARGET_NR_fadvise64_64 223 +#define TARGET_NR_swapon 224 +#define TARGET_NR_swapoff 225 +#define TARGET_NR_mprotect 226 +#define TARGET_NR_msync 227 +#define TARGET_NR_mlock 228 +#define TARGET_NR_munlock 229 +#define TARGET_NR_mlockall 230 +#define TARGET_NR_munlockall 231 +#define TARGET_NR_mincore 232 +#define TARGET_NR_madvise 233 +#define TARGET_NR_remap_file_pages 234 +#define TARGET_NR_mbind 235 +#define TARGET_NR_get_mempolicy 236 +#define TARGET_NR_set_mempolicy 237 +#define TARGET_NR_migrate_pages 238 +#define TARGET_NR_move_pages 239 +#define TARGET_NR_rt_tgsigqueueinfo 240 +#define TARGET_NR_perf_event_open 241 +#define TARGET_NR_accept4 242 +#define TARGET_NR_recvmmsg 243 +#define TARGET_NR_cacheflush 244 +#define TARGET_NR_arch_specific_syscall 244 +#define TARGET_NR_wait4 260 +#define TARGET_NR_prlimit64 261 +#define TARGET_NR_fanotify_init 262 +#define TARGET_NR_fanotify_mark 263 +#define TARGET_NR_name_to_handle_at 264 +#define TARGET_NR_open_by_handle_at 265 +#define TARGET_NR_clock_adjtime 266 +#define TARGET_NR_syncfs 267 +#define TARGET_NR_setns 268 +#define TARGET_NR_sendmmsg 269 +#define TARGET_NR_process_vm_readv 270 +#define TARGET_NR_process_vm_writev 271 +#define TARGET_NR_kcmp 272 +#define TARGET_NR_finit_module 273 +#define TARGET_NR_sched_setattr 274 +#define TARGET_NR_sched_getattr 275 +#define TARGET_NR_renameat2 276 +#define TARGET_NR_seccomp 277 +#define TARGET_NR_getrandom 278 +#define TARGET_NR_memfd_create 279 +#define TARGET_NR_bpf 280 +#define TARGET_NR_execveat 281 +#define TARGET_NR_userfaultfd 282 +#define TARGET_NR_membarrier 283 +#define TARGET_NR_mlock2 284 +#define TARGET_NR_copy_file_range 285 +#define TARGET_NR_preadv2 286 +#define TARGET_NR_pwritev2 287 +#define TARGET_NR_open 1024 +#define TARGET_NR_link 
1025 +#define TARGET_NR_unlink 1026 +#define TARGET_NR_mknod 1027 +#define TARGET_NR_chmod 1028 +#define TARGET_NR_chown 1029 +#define TARGET_NR_mkdir 1030 +#define TARGET_NR_rmdir 1031 +#define TARGET_NR_lchown 1032 +#define TARGET_NR_access 1033 +#define TARGET_NR_rename 1034 +#define TARGET_NR_readlink 1035 +#define TARGET_NR_symlink 1036 +#define TARGET_NR_utimes 1037 +#define TARGET_NR_3264_stat 1038 +#define TARGET_NR_3264_lstat 1039 +#define TARGET_NR_pipe 1040 +#define TARGET_NR_dup2 1041 +#define TARGET_NR_epoll_create 1042 +#define TARGET_NR_inotify_init 1043 +#define TARGET_NR_eventfd 1044 +#define TARGET_NR_signalfd 1045 +#define TARGET_NR_sendfile 1046 +#define TARGET_NR_ftruncate 1047 +#define TARGET_NR_truncate 1048 +#define TARGET_NR_stat 1049 +#define TARGET_NR_lstat 1050 +#define TARGET_NR_fstat 1051 +#define TARGET_NR_fcntl 1052 +#define TARGET_NR_fadvise64 1053 +#define TARGET_NR_newfstatat 1054 +#define TARGET_NR_fstatfs 1055 +#define TARGET_NR_statfs 1056 +#define TARGET_NR_lseek 1057 +#define TARGET_NR_mmap 1058 +#define TARGET_NR_alarm 1059 +#define TARGET_NR_getpgrp 1060 +#define TARGET_NR_pause 1061 +#define TARGET_NR_time 1062 +#define TARGET_NR_utime 1063 +#define TARGET_NR_creat 1064 +#define TARGET_NR_getdents 1065 +#define TARGET_NR_futimesat 1066 +#define TARGET_NR_select 1067 +#define TARGET_NR_poll 1068 +#define TARGET_NR_epoll_wait 1069 +#define TARGET_NR_ustat 1070 +#define TARGET_NR_vfork 1071 +#define TARGET_NR_oldwait4 1072 +#define TARGET_NR_recv 1073 +#define TARGET_NR_send 1074 +#define TARGET_NR_bdflush 1075 +#define TARGET_NR_umount 1076 +#define TARGET_NR_uselib 1077 +#define TARGET_NR__sysctl 1078 +#define TARGET_NR_fork 1079 diff --git a/linux-user/nios2/target_cpu.h b/linux-user/nios2/target_cpu.h new file mode 100644 index 0000000000..20ab4790a9 --- /dev/null +++ b/linux-user/nios2/target_cpu.h @@ -0,0 +1,39 @@ +/* + * Nios2 specific CPU ABI and functions for linux-user + * + * Copyright (c) 2016 Marek Vasut <marex@denx.de> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef TARGET_CPU_H +#define TARGET_CPU_H + +static inline void cpu_clone_regs(CPUNios2State *env, target_ulong newsp) +{ + if (newsp) { + env->regs[R_SP] = newsp; + } + env->regs[R_RET0] = 0; +} + +static inline void cpu_set_tls(CPUNios2State *env, target_ulong newtls) +{ + /* + * Linux kernel 3.10 does not pay any attention to CLONE_SETTLS + * in copy_thread(), so QEMU need not do so either. 
+ */ +} + +#endif diff --git a/linux-user/nios2/target_signal.h b/linux-user/nios2/target_signal.h new file mode 100644 index 0000000000..23a8267696 --- /dev/null +++ b/linux-user/nios2/target_signal.h @@ -0,0 +1,26 @@ +#ifndef TARGET_SIGNAL_H +#define TARGET_SIGNAL_H + +#include "cpu.h" + +/* this struct defines a stack used during syscall handling */ + +typedef struct target_sigaltstack { + abi_long ss_sp; + abi_ulong ss_size; + abi_long ss_flags; +} target_stack_t; + +/* sigaltstack controls */ +#define TARGET_SS_ONSTACK 1 +#define TARGET_SS_DISABLE 2 + +#define TARGET_MINSIGSTKSZ 2048 +#define TARGET_SIGSTKSZ 8192 + +static inline abi_ulong get_sp_from_cpustate(CPUNios2State *state) +{ + return state->regs[R_SP]; +} + +#endif /* TARGET_SIGNAL_H */ diff --git a/linux-user/nios2/target_structs.h b/linux-user/nios2/target_structs.h new file mode 100644 index 0000000000..8713772089 --- /dev/null +++ b/linux-user/nios2/target_structs.h @@ -0,0 +1,58 @@ +/* + * Nios2 specific structures for linux-user + * + * Copyright (c) 2016 Marek Vasut <marex@denx.de> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ +#ifndef TARGET_STRUCTS_H +#define TARGET_STRUCTS_H + +struct target_ipc_perm { + abi_int __key; /* Key. */ + abi_uint uid; /* Owner's user ID. */ + abi_uint gid; /* Owner's group ID. */ + abi_uint cuid; /* Creator's user ID. */ + abi_uint cgid; /* Creator's group ID. */ + abi_ushort mode; /* Read/write permission. */ + abi_ushort __pad1; + abi_ushort __seq; /* Sequence number. 
*/ + abi_ushort __pad2; + abi_ulong __unused1; + abi_ulong __unused2; +}; + +struct target_shmid_ds { + struct target_ipc_perm shm_perm; /* operation permission struct */ + abi_long shm_segsz; /* size of segment in bytes */ + abi_ulong shm_atime; /* time of last shmat() */ +#if TARGET_ABI_BITS == 32 + abi_ulong __unused1; +#endif + abi_ulong shm_dtime; /* time of last shmdt() */ +#if TARGET_ABI_BITS == 32 + abi_ulong __unused2; +#endif + abi_ulong shm_ctime; /* time of last change by shmctl() */ +#if TARGET_ABI_BITS == 32 + abi_ulong __unused3; +#endif + abi_int shm_cpid; /* pid of creator */ + abi_int shm_lpid; /* pid of last shmop */ + abi_ulong shm_nattch; /* number of current attaches */ + abi_ulong __unused4; + abi_ulong __unused5; +}; + +#endif diff --git a/linux-user/nios2/target_syscall.h b/linux-user/nios2/target_syscall.h new file mode 100644 index 0000000000..ca6b7e69f6 --- /dev/null +++ b/linux-user/nios2/target_syscall.h @@ -0,0 +1,37 @@ +#ifndef TARGET_SYSCALL_H +#define TARGET_SYSCALL_H + +#define UNAME_MACHINE "nios2" +#define UNAME_MINIMUM_RELEASE "3.19.0" + +struct target_pt_regs { + unsigned long r8; /* r8-r15 Caller-saved GP registers */ + unsigned long r9; + unsigned long r10; + unsigned long r11; + unsigned long r12; + unsigned long r13; + unsigned long r14; + unsigned long r15; + unsigned long r1; /* Assembler temporary */ + unsigned long r2; /* Retval LS 32bits */ + unsigned long r3; /* Retval MS 32bits */ + unsigned long r4; /* r4-r7 Register arguments */ + unsigned long r5; + unsigned long r6; + unsigned long r7; + unsigned long orig_r2; /* Copy of r2 ?? */ + unsigned long ra; /* Return address */ + unsigned long fp; /* Frame pointer */ + unsigned long sp; /* Stack pointer */ + unsigned long gp; /* Global pointer */ + unsigned long estatus; + unsigned long ea; /* Exception return address (pc) */ + unsigned long orig_r7; +}; + +#define TARGET_MINSIGSTKSZ 2048 +#define TARGET_MLOCKALL_MCL_CURRENT 1 +#define TARGET_MLOCKALL_MCL_FUTURE 2 + +#endif /* TARGET_SYSCALL_H */ diff --git a/linux-user/nios2/termbits.h b/linux-user/nios2/termbits.h new file mode 100644 index 0000000000..b64ba974cf --- /dev/null +++ b/linux-user/nios2/termbits.h @@ -0,0 +1,220 @@ +/* from asm/termbits.h */ +/* NOTE: exactly the same as i386 */ + +#define TARGET_NCCS 19 + +struct target_termios { + unsigned int c_iflag; /* input mode flags */ + unsigned int c_oflag; /* output mode flags */ + unsigned int c_cflag; /* control mode flags */ + unsigned int c_lflag; /* local mode flags */ + unsigned char c_line; /* line discipline */ + unsigned char c_cc[TARGET_NCCS]; /* control characters */ +}; + +/* c_iflag bits */ +#define TARGET_IGNBRK 0000001 +#define TARGET_BRKINT 0000002 +#define TARGET_IGNPAR 0000004 +#define TARGET_PARMRK 0000010 +#define TARGET_INPCK 0000020 +#define TARGET_ISTRIP 0000040 +#define TARGET_INLCR 0000100 +#define TARGET_IGNCR 0000200 +#define TARGET_ICRNL 0000400 +#define TARGET_IUCLC 0001000 +#define TARGET_IXON 0002000 +#define TARGET_IXANY 0004000 +#define TARGET_IXOFF 0010000 +#define TARGET_IMAXBEL 0020000 +#define TARGET_IUTF8 0040000 + +/* c_oflag bits */ +#define TARGET_OPOST 0000001 +#define TARGET_OLCUC 0000002 +#define TARGET_ONLCR 0000004 +#define TARGET_OCRNL 0000010 +#define TARGET_ONOCR 0000020 +#define TARGET_ONLRET 0000040 +#define TARGET_OFILL 0000100 +#define TARGET_OFDEL 0000200 +#define TARGET_NLDLY 0000400 +#define TARGET_NL0 0000000 +#define TARGET_NL1 0000400 +#define TARGET_CRDLY 0003000 +#define TARGET_CR0 0000000 +#define TARGET_CR1 0001000 +#define 
TARGET_CR2 0002000 +#define TARGET_CR3 0003000 +#define TARGET_TABDLY 0014000 +#define TARGET_TAB0 0000000 +#define TARGET_TAB1 0004000 +#define TARGET_TAB2 0010000 +#define TARGET_TAB3 0014000 +#define TARGET_XTABS 0014000 +#define TARGET_BSDLY 0020000 +#define TARGET_BS0 0000000 +#define TARGET_BS1 0020000 +#define TARGET_VTDLY 0040000 +#define TARGET_VT0 0000000 +#define TARGET_VT1 0040000 +#define TARGET_FFDLY 0100000 +#define TARGET_FF0 0000000 +#define TARGET_FF1 0100000 + +/* c_cflag bit meaning */ +#define TARGET_CBAUD 0010017 +#define TARGET_B0 0000000 /* hang up */ +#define TARGET_B50 0000001 +#define TARGET_B75 0000002 +#define TARGET_B110 0000003 +#define TARGET_B134 0000004 +#define TARGET_B150 0000005 +#define TARGET_B200 0000006 +#define TARGET_B300 0000007 +#define TARGET_B600 0000010 +#define TARGET_B1200 0000011 +#define TARGET_B1800 0000012 +#define TARGET_B2400 0000013 +#define TARGET_B4800 0000014 +#define TARGET_B9600 0000015 +#define TARGET_B19200 0000016 +#define TARGET_B38400 0000017 +#define TARGET_EXTA B19200 +#define TARGET_EXTB B38400 +#define TARGET_CSIZE 0000060 +#define TARGET_CS5 0000000 +#define TARGET_CS6 0000020 +#define TARGET_CS7 0000040 +#define TARGET_CS8 0000060 +#define TARGET_CSTOPB 0000100 +#define TARGET_CREAD 0000200 +#define TARGET_PARENB 0000400 +#define TARGET_PARODD 0001000 +#define TARGET_HUPCL 0002000 +#define TARGET_CLOCAL 0004000 +#define TARGET_CBAUDEX 0010000 +#define TARGET_B57600 0010001 +#define TARGET_B115200 0010002 +#define TARGET_B230400 0010003 +#define TARGET_B460800 0010004 +#define TARGET_CIBAUD 002003600000 /* input baud rate (not used) */ +#define TARGET_CMSPAR 010000000000 /* mark or space (stick) parity */ +#define TARGET_CRTSCTS 020000000000 /* flow control */ + +/* c_lflag bits */ +#define TARGET_ISIG 0000001 +#define TARGET_ICANON 0000002 +#define TARGET_XCASE 0000004 +#define TARGET_ECHO 0000010 +#define TARGET_ECHOE 0000020 +#define TARGET_ECHOK 0000040 +#define TARGET_ECHONL 0000100 +#define TARGET_NOFLSH 0000200 +#define TARGET_TOSTOP 0000400 +#define TARGET_ECHOCTL 0001000 +#define TARGET_ECHOPRT 0002000 +#define TARGET_ECHOKE 0004000 +#define TARGET_FLUSHO 0010000 +#define TARGET_PENDIN 0040000 +#define TARGET_IEXTEN 0100000 + +/* c_cc character offsets */ +#define TARGET_VINTR 0 +#define TARGET_VQUIT 1 +#define TARGET_VERASE 2 +#define TARGET_VKILL 3 +#define TARGET_VEOF 4 +#define TARGET_VTIME 5 +#define TARGET_VMIN 6 +#define TARGET_VSWTC 7 +#define TARGET_VSTART 8 +#define TARGET_VSTOP 9 +#define TARGET_VSUSP 10 +#define TARGET_VEOL 11 +#define TARGET_VREPRINT 12 +#define TARGET_VDISCARD 13 +#define TARGET_VWERASE 14 +#define TARGET_VLNEXT 15 +#define TARGET_VEOL2 16 + +/* ioctls */ + +#define TARGET_TCGETS 0x5401 +#define TARGET_TCSETS 0x5402 +#define TARGET_TCSETSW 0x5403 +#define TARGET_TCSETSF 0x5404 +#define TARGET_TCGETA 0x5405 +#define TARGET_TCSETA 0x5406 +#define TARGET_TCSETAW 0x5407 +#define TARGET_TCSETAF 0x5408 +#define TARGET_TCSBRK 0x5409 +#define TARGET_TCXONC 0x540A +#define TARGET_TCFLSH 0x540B + +#define TARGET_TIOCEXCL 0x540C +#define TARGET_TIOCNXCL 0x540D +#define TARGET_TIOCSCTTY 0x540E +#define TARGET_TIOCGPGRP 0x540F +#define TARGET_TIOCSPGRP 0x5410 +#define TARGET_TIOCOUTQ 0x5411 +#define TARGET_TIOCSTI 0x5412 +#define TARGET_TIOCGWINSZ 0x5413 +#define TARGET_TIOCSWINSZ 0x5414 +#define TARGET_TIOCMGET 0x5415 +#define TARGET_TIOCMBIS 0x5416 +#define TARGET_TIOCMBIC 0x5417 +#define TARGET_TIOCMSET 0x5418 +#define TARGET_TIOCGSOFTCAR 0x5419 +#define TARGET_TIOCSSOFTCAR 0x541A +#define 
TARGET_FIONREAD 0x541B +#define TARGET_TIOCINQ TARGET_FIONREAD +#define TARGET_TIOCLINUX 0x541C +#define TARGET_TIOCCONS 0x541D +#define TARGET_TIOCGSERIAL 0x541E +#define TARGET_TIOCSSERIAL 0x541F +#define TARGET_TIOCPKT 0x5420 +#define TARGET_FIONBIO 0x5421 +#define TARGET_TIOCNOTTY 0x5422 +#define TARGET_TIOCSETD 0x5423 +#define TARGET_TIOCGETD 0x5424 +#define TARGET_TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */ +#define TARGET_TIOCTTYGSTRUCT 0x5426 /* For debugging only */ +#define TARGET_TIOCSBRK 0x5427 /* BSD compatibility */ +#define TARGET_TIOCCBRK 0x5428 /* BSD compatibility */ +#define TARGET_TIOCGSID 0x5429 /* Return the session ID of FD */ +#define TARGET_TIOCGPTN TARGET_IOR('T', 0x30, unsigned int) + /* Get Pty Number (of pty-mux device) */ +#define TARGET_TIOCSPTLCK TARGET_IOW('T', 0x31, int) + /* Lock/unlock Pty */ + +#define TARGET_FIONCLEX 0x5450 /* these numbers need to be adjusted. */ +#define TARGET_FIOCLEX 0x5451 +#define TARGET_FIOASYNC 0x5452 +#define TARGET_TIOCSERCONFIG 0x5453 +#define TARGET_TIOCSERGWILD 0x5454 +#define TARGET_TIOCSERSWILD 0x5455 +#define TARGET_TIOCGLCKTRMIOS 0x5456 +#define TARGET_TIOCSLCKTRMIOS 0x5457 +#define TARGET_TIOCSERGSTRUCT 0x5458 /* For debugging only */ +#define TARGET_TIOCSERGETLSR 0x5459 /* Get line status register */ +#define TARGET_TIOCSERGETMULTI 0x545A /* Get multiport config */ +#define TARGET_TIOCSERSETMULTI 0x545B /* Set multiport config */ + +#define TARGET_TIOCMIWAIT 0x545C + /* wait for a change on serial input line(s) */ +#define TARGET_TIOCGICOUNT 0x545D + /* read serial port inline interrupt counts */ +#define TARGET_TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */ +#define TARGET_TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ + +/* Used for packet mode */ +#define TARGET_TIOCPKT_DATA 0 +#define TARGET_TIOCPKT_FLUSHREAD 1 +#define TARGET_TIOCPKT_FLUSHWRITE 2 +#define TARGET_TIOCPKT_STOP 4 +#define TARGET_TIOCPKT_START 8 +#define TARGET_TIOCPKT_NOSTOP 16 +#define TARGET_TIOCPKT_DOSTOP 32 + +#define TARGET_TIOCSER_TEMT 0x01 /* Transmitter physically empty */ diff --git a/linux-user/signal.c b/linux-user/signal.c index 0a5bb4e26b..5064de0c08 100644 --- a/linux-user/signal.c +++ b/linux-user/signal.c @@ -254,7 +254,7 @@ int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset) } #if !defined(TARGET_OPENRISC) && !defined(TARGET_UNICORE32) && \ - !defined(TARGET_X86_64) + !defined(TARGET_X86_64) && !defined(TARGET_NIOS2) /* Just set the guest's signal mask to the specified value; the * caller is assumed to have called block_signals() already. 
*/ @@ -3922,6 +3922,240 @@ long do_rt_sigreturn(CPUCRISState *env) return -TARGET_ENOSYS; } +#elif defined(TARGET_NIOS2) + +#define MCONTEXT_VERSION 2 + +struct target_sigcontext { + int version; + unsigned long gregs[32]; +}; + +struct target_ucontext { + abi_ulong tuc_flags; + abi_ulong tuc_link; + target_stack_t tuc_stack; + struct target_sigcontext tuc_mcontext; + target_sigset_t tuc_sigmask; /* mask last for extensibility */ +}; + +struct target_rt_sigframe { + struct target_siginfo info; + struct target_ucontext uc; +}; + +static unsigned long sigsp(unsigned long sp, struct target_sigaction *ka) +{ + if (unlikely((ka->sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp)) { +#ifdef CONFIG_STACK_GROWSUP + return target_sigaltstack_used.ss_sp; +#else + return target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size; +#endif + } + return sp; +} + +static int rt_setup_ucontext(struct target_ucontext *uc, CPUNios2State *env) +{ + unsigned long *gregs = uc->tuc_mcontext.gregs; + + __put_user(MCONTEXT_VERSION, &uc->tuc_mcontext.version); + __put_user(env->regs[1], &gregs[0]); + __put_user(env->regs[2], &gregs[1]); + __put_user(env->regs[3], &gregs[2]); + __put_user(env->regs[4], &gregs[3]); + __put_user(env->regs[5], &gregs[4]); + __put_user(env->regs[6], &gregs[5]); + __put_user(env->regs[7], &gregs[6]); + __put_user(env->regs[8], &gregs[7]); + __put_user(env->regs[9], &gregs[8]); + __put_user(env->regs[10], &gregs[9]); + __put_user(env->regs[11], &gregs[10]); + __put_user(env->regs[12], &gregs[11]); + __put_user(env->regs[13], &gregs[12]); + __put_user(env->regs[14], &gregs[13]); + __put_user(env->regs[15], &gregs[14]); + __put_user(env->regs[16], &gregs[15]); + __put_user(env->regs[17], &gregs[16]); + __put_user(env->regs[18], &gregs[17]); + __put_user(env->regs[19], &gregs[18]); + __put_user(env->regs[20], &gregs[19]); + __put_user(env->regs[21], &gregs[20]); + __put_user(env->regs[22], &gregs[21]); + __put_user(env->regs[23], &gregs[22]); + __put_user(env->regs[R_RA], &gregs[23]); + __put_user(env->regs[R_FP], &gregs[24]); + __put_user(env->regs[R_GP], &gregs[25]); + __put_user(env->regs[R_EA], &gregs[27]); + __put_user(env->regs[R_SP], &gregs[28]); + + return 0; +} + +static int rt_restore_ucontext(CPUNios2State *env, struct target_ucontext *uc, + int *pr2) +{ + int temp; + abi_ulong off, frame_addr = env->regs[R_SP]; + unsigned long *gregs = uc->tuc_mcontext.gregs; + int err; + + /* Always make any pending restarted system calls return -EINTR */ + /* current->restart_block.fn = do_no_restart_syscall; */ + + __get_user(temp, &uc->tuc_mcontext.version); + if (temp != MCONTEXT_VERSION) { + return 1; + } + + /* restore passed registers */ + __get_user(env->regs[1], &gregs[0]); + __get_user(env->regs[2], &gregs[1]); + __get_user(env->regs[3], &gregs[2]); + __get_user(env->regs[4], &gregs[3]); + __get_user(env->regs[5], &gregs[4]); + __get_user(env->regs[6], &gregs[5]); + __get_user(env->regs[7], &gregs[6]); + __get_user(env->regs[8], &gregs[7]); + __get_user(env->regs[9], &gregs[8]); + __get_user(env->regs[10], &gregs[9]); + __get_user(env->regs[11], &gregs[10]); + __get_user(env->regs[12], &gregs[11]); + __get_user(env->regs[13], &gregs[12]); + __get_user(env->regs[14], &gregs[13]); + __get_user(env->regs[15], &gregs[14]); + __get_user(env->regs[16], &gregs[15]); + __get_user(env->regs[17], &gregs[16]); + __get_user(env->regs[18], &gregs[17]); + __get_user(env->regs[19], &gregs[18]); + __get_user(env->regs[20], &gregs[19]); + __get_user(env->regs[21], &gregs[20]); + 
__get_user(env->regs[22], &gregs[21]); + __get_user(env->regs[23], &gregs[22]); + /* gregs[23] is handled below */ + /* Verify, should this be settable */ + __get_user(env->regs[R_FP], &gregs[24]); + /* Verify, should this be settable */ + __get_user(env->regs[R_GP], &gregs[25]); + /* Not really necessary no user settable bits */ + __get_user(temp, &gregs[26]); + __get_user(env->regs[R_EA], &gregs[27]); + + __get_user(env->regs[R_RA], &gregs[23]); + __get_user(env->regs[R_SP], &gregs[28]); + + off = offsetof(struct target_rt_sigframe, uc.tuc_stack); + err = do_sigaltstack(frame_addr + off, 0, get_sp_from_cpustate(env)); + if (err == -EFAULT) { + return 1; + } + + *pr2 = env->regs[2]; + return 0; +} + +static void *get_sigframe(struct target_sigaction *ka, CPUNios2State *env, + size_t frame_size) +{ + unsigned long usp; + + /* Default to using normal stack. */ + usp = env->regs[R_SP]; + + /* This is the X/Open sanctioned signal stack switching. */ + usp = sigsp(usp, ka); + + /* Verify, is it 32 or 64 bit aligned */ + return (void *)((usp - frame_size) & -8UL); +} + +static void setup_rt_frame(int sig, struct target_sigaction *ka, + target_siginfo_t *info, + target_sigset_t *set, + CPUNios2State *env) +{ + struct target_rt_sigframe *frame; + int i, err = 0; + + frame = get_sigframe(ka, env, sizeof(*frame)); + + if (ka->sa_flags & SA_SIGINFO) { + tswap_siginfo(&frame->info, info); + } + + /* Create the ucontext. */ + __put_user(0, &frame->uc.tuc_flags); + __put_user(0, &frame->uc.tuc_link); + __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp); + __put_user(sas_ss_flags(env->regs[R_SP]), &frame->uc.tuc_stack.ss_flags); + __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size); + err |= rt_setup_ucontext(&frame->uc, env); + for (i = 0; i < TARGET_NSIG_WORDS; i++) { + __put_user((abi_ulong)set->sig[i], + (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]); + } + + if (err) { + goto give_sigsegv; + } + + /* Set up to return from userspace; jump to fixed address sigreturn + trampoline on kuser page. 
*/ + env->regs[R_RA] = (unsigned long) (0x1044); + + /* Set up registers for signal handler */ + env->regs[R_SP] = (unsigned long) frame; + env->regs[4] = (unsigned long) sig; + env->regs[5] = (unsigned long) &frame->info; + env->regs[6] = (unsigned long) &frame->uc; + env->regs[R_EA] = (unsigned long) ka->_sa_handler; + return; + +give_sigsegv: + if (sig == TARGET_SIGSEGV) { + ka->_sa_handler = TARGET_SIG_DFL; + } + force_sigsegv(sig); + return; +} + +long do_sigreturn(CPUNios2State *env) +{ + trace_user_do_sigreturn(env, 0); + fprintf(stderr, "do_sigreturn: not implemented\n"); + return -TARGET_ENOSYS; +} + +long do_rt_sigreturn(CPUNios2State *env) +{ + /* Verify, can we follow the stack back */ + abi_ulong frame_addr = env->regs[R_SP]; + struct target_rt_sigframe *frame; + sigset_t set; + int rval; + + if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { + goto badframe; + } + + target_to_host_sigset(&set, &frame->uc.tuc_sigmask); + do_sigprocmask(SIG_SETMASK, &set, NULL); + + if (rt_restore_ucontext(env, &frame->uc, &rval)) { + goto badframe; + } + + unlock_user_struct(frame, frame_addr, 0); + return rval; + +badframe: + unlock_user_struct(frame, frame_addr, 0); + force_sig(TARGET_SIGSEGV); + return 0; +} +/* TARGET_NIOS2 */ + #elif defined(TARGET_OPENRISC) struct target_sigcontext { @@ -6178,7 +6412,8 @@ static void handle_pending_signal(CPUArchState *cpu_env, int sig, /* prepare the stack frame of the virtual CPU */ #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \ || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) \ - || defined(TARGET_PPC64) || defined(TARGET_HPPA) + || defined(TARGET_PPC64) || defined(TARGET_HPPA) \ + || defined(TARGET_NIOS2) /* These targets do not have traditional signals. */ setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env); #else diff --git a/linux-user/syscall_defs.h b/linux-user/syscall_defs.h index 39848a8816..4442c22bc3 100644 --- a/linux-user/syscall_defs.h +++ b/linux-user/syscall_defs.h @@ -70,7 +70,8 @@ #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_SH4) \ || defined(TARGET_M68K) || defined(TARGET_CRIS) \ || defined(TARGET_UNICORE32) || defined(TARGET_S390X) \ - || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) + || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) \ + || defined(TARGET_NIOS2) #define TARGET_IOC_SIZEBITS 14 #define TARGET_IOC_DIRBITS 2 @@ -426,7 +427,7 @@ int do_sigaction(int sig, const struct target_sigaction *act, || defined(TARGET_M68K) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) \ || defined(TARGET_MICROBLAZE) || defined(TARGET_UNICORE32) \ || defined(TARGET_S390X) || defined(TARGET_OPENRISC) \ - || defined(TARGET_TILEGX) || defined(TARGET_HPPA) + || defined(TARGET_TILEGX) || defined(TARGET_HPPA) || defined(TARGET_NIOS2) #if defined(TARGET_SPARC) #define TARGET_SA_NOCLDSTOP 8u @@ -2037,7 +2038,8 @@ struct target_stat { abi_ulong target_st_ctime_nsec; unsigned int __unused[2]; }; -#elif defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) +#elif defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) || \ + defined(TARGET_NIOS2) /* These are the asm-generic versions of the stat and stat64 structures */ diff --git a/migration/migration.c b/migration/migration.c index f498ab84f2..2766d2f586 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -1006,6 +1006,16 @@ static void migrate_fd_cancel(MigrationState *s) if (s->state == MIGRATION_STATUS_CANCELLING && f) { qemu_file_shutdown(f); } + if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) { + Error 
*local_err = NULL; + + bdrv_invalidate_cache_all(&local_err); + if (local_err) { + error_report_err(local_err); + } else { + s->block_inactive = false; + } + } } void add_migration_state_change_notifier(Notifier *notify) @@ -1044,6 +1054,31 @@ bool migration_in_postcopy_after_devices(MigrationState *s) return migration_in_postcopy(s) && s->postcopy_after_devices; } +bool migration_is_idle(MigrationState *s) +{ + if (!s) { + s = migrate_get_current(); + } + + switch (s->state) { + case MIGRATION_STATUS_NONE: + case MIGRATION_STATUS_CANCELLED: + case MIGRATION_STATUS_COMPLETED: + case MIGRATION_STATUS_FAILED: + return true; + case MIGRATION_STATUS_SETUP: + case MIGRATION_STATUS_CANCELLING: + case MIGRATION_STATUS_ACTIVE: + case MIGRATION_STATUS_POSTCOPY_ACTIVE: + case MIGRATION_STATUS_COLO: + return false; + case MIGRATION_STATUS__MAX: + g_assert_not_reached(); + } + + return false; +} + MigrationState *migrate_init(const MigrationParams *params) { MigrationState *s = migrate_get_current(); @@ -1086,9 +1121,24 @@ MigrationState *migrate_init(const MigrationParams *params) static GSList *migration_blockers; -void migrate_add_blocker(Error *reason) +int migrate_add_blocker(Error *reason, Error **errp) { - migration_blockers = g_slist_prepend(migration_blockers, reason); + if (only_migratable) { + error_propagate(errp, error_copy(reason)); + error_prepend(errp, "disallowing migration blocker " + "(--only_migratable) for: "); + return -EACCES; + } + + if (migration_is_idle(NULL)) { + migration_blockers = g_slist_prepend(migration_blockers, reason); + return 0; + } + + error_propagate(errp, error_copy(reason)); + error_prepend(errp, "disallowing migration blocker (migration in " + "progress) for: "); + return -EBUSY; } void migrate_del_blocker(Error *reason) @@ -1705,6 +1755,7 @@ static void migration_completion(MigrationState *s, int current_active_state, if (ret >= 0) { qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX); qemu_savevm_state_complete_precopy(s->to_dst_file, false); + s->block_inactive = true; } } qemu_mutex_unlock_iothread(); @@ -1755,10 +1806,14 @@ fail_invalidate: if (s->state == MIGRATION_STATUS_ACTIVE) { Error *local_err = NULL; + qemu_mutex_lock_iothread(); bdrv_invalidate_cache_all(&local_err); if (local_err) { error_report_err(local_err); + } else { + s->block_inactive = false; } + qemu_mutex_unlock_iothread(); } fail: @@ -1969,7 +2024,7 @@ void migrate_fd_connect(MigrationState *s) } migrate_compress_threads_create(); - qemu_thread_create(&s->thread, "migration", migration_thread, s, + qemu_thread_create(&s->thread, "live_migration", migration_thread, s, QEMU_THREAD_JOINABLE); s->migration_thread_running = true; } diff --git a/migration/ram.c b/migration/ram.c index a1c8089010..ef8fadfe69 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -45,14 +45,6 @@ #include "qemu/rcu_queue.h" #include "migration/colo.h" -#ifdef DEBUG_MIGRATION_RAM -#define DPRINTF(fmt, ...) \ - do { fprintf(stdout, "migration_ram: " fmt, ## __VA_ARGS__); } while (0) -#else -#define DPRINTF(fmt, ...) 
\ - do { } while (0) -#endif - static int dirty_rate_high_cnt; static uint64_t bitmap_sync_count; @@ -507,10 +499,10 @@ static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data, TARGET_PAGE_SIZE, XBZRLE.encoded_buf, TARGET_PAGE_SIZE); if (encoded_len == 0) { - DPRINTF("Skipping unmodified page\n"); + trace_save_xbzrle_page_skipping(); return 0; } else if (encoded_len == -1) { - DPRINTF("Overflow\n"); + trace_save_xbzrle_page_overflow(); acct_info.xbzrle_overflows++; /* update data in the cache */ if (!last_stage) { @@ -2020,8 +2012,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque) if ((i & 63) == 0) { uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000; if (t1 > MAX_WAIT) { - DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n", - t1, i); + trace_ram_save_iterate_big_wait(t1, i); break; } } @@ -2594,8 +2585,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id) wait_for_decompress_done(); rcu_read_unlock(); - DPRINTF("Completed load of VM with exit code %d seq iteration " - "%" PRIu64 "\n", ret, seq_iter); + trace_ram_load_complete(ret, seq_iter); return ret; } diff --git a/migration/savevm.c b/migration/savevm.c index f9c06e9f96..455d5bac1e 100644 --- a/migration/savevm.c +++ b/migration/savevm.c @@ -220,17 +220,20 @@ void timer_get(QEMUFile *f, QEMUTimer *ts) * Not in vmstate.c to not add qemu-timer.c as dependency to vmstate.c */ -static int get_timer(QEMUFile *f, void *pv, size_t size) +static int get_timer(QEMUFile *f, void *pv, size_t size, VMStateField *field) { QEMUTimer *v = pv; timer_get(f, v); return 0; } -static void put_timer(QEMUFile *f, void *pv, size_t size) +static int put_timer(QEMUFile *f, void *pv, size_t size, VMStateField *field, + QJSON *vmdesc) { QEMUTimer *v = pv; timer_put(f, v); + + return 0; } const VMStateInfo vmstate_info_timer = { diff --git a/migration/trace-events b/migration/trace-events index 94134f700b..48e531d3b8 100644 --- a/migration/trace-events +++ b/migration/trace-events @@ -40,6 +40,10 @@ savevm_state_iterate(void) "" savevm_state_cleanup(void) "" savevm_state_complete_precopy(void) "" vmstate_save(const char *idstr, const char *vmsd_name) "%s, %s" +vmstate_save_state_loop(const char *name, const char *field, int n_elems) "%s/%s[%d]" +vmstate_save_state_top(const char *idstr) "%s" +vmstate_subsection_save_loop(const char *name, const char *sub) "%s/%s" +vmstate_subsection_save_top(const char *idstr) "%s" vmstate_load(const char *idstr, const char *vmsd_name) "%s, %s" qemu_announce_self_iter(const char *mac) "%s" @@ -52,6 +56,10 @@ vmstate_n_elems(const char *name, int n_elems) "%s: %d" vmstate_subsection_load(const char *parent) "%s" vmstate_subsection_load_bad(const char *parent, const char *sub, const char *sub2) "%s: %s/%s" vmstate_subsection_load_good(const char *parent) "%s" +get_qtailq(const char *name, int version_id) "%s v%d" +get_qtailq_end(const char *name, const char *reason, int val) "%s %s/%d" +put_qtailq(const char *name, int version_id) "%s v%d" +put_qtailq_end(const char *name, const char *reason) "%s %s" # migration/qemu-file.c qemu_file_fclose(void) "" @@ -186,6 +194,10 @@ postcopy_ram_incoming_cleanup_closeuf(void) "" postcopy_ram_incoming_cleanup_entry(void) "" postcopy_ram_incoming_cleanup_exit(void) "" postcopy_ram_incoming_cleanup_join(void) "" +save_xbzrle_page_skipping(void) "" +save_xbzrle_page_overflow(void) "" +ram_save_iterate_big_wait(uint64_t milliconds, int iterations) "big wait: %" PRIu64 " milliseconds, %d iterations" +ram_load_complete(int ret, uint64_t 
seq_iter) "exit_code %d seq iteration %" PRIu64 # migration/exec.c migration_exec_outgoing(const char *cmd) "cmd=%s" diff --git a/migration/vmstate.c b/migration/vmstate.c index 0bc9f35ef8..2b2b3a58e6 100644 --- a/migration/vmstate.c +++ b/migration/vmstate.c @@ -5,7 +5,9 @@ #include "migration/vmstate.h" #include "qemu/bitops.h" #include "qemu/error-report.h" +#include "qemu/queue.h" #include "trace.h" +#include "migration/qjson.h" static void vmstate_subsection_save(QEMUFile *f, const VMStateDescription *vmsd, void *opaque, QJSON *vmdesc); @@ -83,6 +85,9 @@ int vmstate_load_state(QEMUFile *f, const VMStateDescription *vmsd, trace_vmstate_load_state(vmsd->name, version_id); if (version_id > vmsd->version_id) { + error_report("%s: incoming version_id %d is too new " + "for local version_id %d", + vmsd->name, version_id, vmsd->version_id); trace_vmstate_load_state_end(vmsd->name, "too new", -EINVAL); return -EINVAL; } @@ -93,6 +98,9 @@ int vmstate_load_state(QEMUFile *f, const VMStateDescription *vmsd, trace_vmstate_load_state_end(vmsd->name, "old path", ret); return ret; } + error_report("%s: incoming version_id %d is too old " + "for local minimum version_id %d", + vmsd->name, version_id, vmsd->minimum_version_id); trace_vmstate_load_state_end(vmsd->name, "too old", -EINVAL); return -EINVAL; } @@ -122,8 +130,7 @@ int vmstate_load_state(QEMUFile *f, const VMStateDescription *vmsd, ret = vmstate_load_state(f, field->vmsd, addr, field->vmsd->version_id); } else { - ret = field->info->get(f, addr, size); - + ret = field->info->get(f, addr, size, field); } if (ret >= 0) { ret = qemu_file_get_error(f); @@ -299,6 +306,8 @@ void vmstate_save_state(QEMUFile *f, const VMStateDescription *vmsd, { VMStateField *field = vmsd->fields; + trace_vmstate_save_state_top(vmsd->name); + if (vmsd->pre_save) { vmsd->pre_save(opaque); } @@ -318,6 +327,7 @@ void vmstate_save_state(QEMUFile *f, const VMStateDescription *vmsd, int64_t old_offset, written_bytes; QJSON *vmdesc_loop = vmdesc; + trace_vmstate_save_state_loop(vmsd->name, field->name, n_elems); for (i = 0; i < n_elems; i++) { void *addr = base_addr + size * i; @@ -330,7 +340,7 @@ void vmstate_save_state(QEMUFile *f, const VMStateDescription *vmsd, if (field->flags & VMS_STRUCT) { vmstate_save_state(f, field->vmsd, addr, vmdesc_loop); } else { - field->info->put(f, addr, size); + field->info->put(f, addr, size, field, vmdesc_loop); } written_bytes = qemu_ftell_fast(f) - old_offset; @@ -427,11 +437,13 @@ static void vmstate_subsection_save(QEMUFile *f, const VMStateDescription *vmsd, const VMStateDescription **sub = vmsd->subsections; bool subsection_found = false; + trace_vmstate_subsection_save_top(vmsd->name); while (sub && *sub && (*sub)->needed) { if ((*sub)->needed(opaque)) { - const VMStateDescription *vmsd = *sub; + const VMStateDescription *vmsdsub = *sub; uint8_t len; + trace_vmstate_subsection_save_loop(vmsd->name, vmsdsub->name); if (vmdesc) { /* Only create subsection array when we have any */ if (!subsection_found) { @@ -443,11 +455,11 @@ static void vmstate_subsection_save(QEMUFile *f, const VMStateDescription *vmsd, } qemu_put_byte(f, QEMU_VM_SUBSECTION); - len = strlen(vmsd->name); + len = strlen(vmsdsub->name); qemu_put_byte(f, len); - qemu_put_buffer(f, (uint8_t *)vmsd->name, len); - qemu_put_be32(f, vmsd->version_id); - vmstate_save_state(f, vmsd, opaque, vmdesc); + qemu_put_buffer(f, (uint8_t *)vmsdsub->name, len); + qemu_put_be32(f, vmsdsub->version_id); + vmstate_save_state(f, vmsdsub, opaque, vmdesc); if (vmdesc) { 
json_end_object(vmdesc); @@ -463,17 +475,19 @@ static void vmstate_subsection_save(QEMUFile *f, const VMStateDescription *vmsd, /* bool */ -static int get_bool(QEMUFile *f, void *pv, size_t size) +static int get_bool(QEMUFile *f, void *pv, size_t size, VMStateField *field) { bool *v = pv; *v = qemu_get_byte(f); return 0; } -static void put_bool(QEMUFile *f, void *pv, size_t size) +static int put_bool(QEMUFile *f, void *pv, size_t size, VMStateField *field, + QJSON *vmdesc) { bool *v = pv; qemu_put_byte(f, *v); + return 0; } const VMStateInfo vmstate_info_bool = { @@ -484,17 +498,19 @@ const VMStateInfo vmstate_info_bool = { /* 8 bit int */ -static int get_int8(QEMUFile *f, void *pv, size_t size) +static int get_int8(QEMUFile *f, void *pv, size_t size, VMStateField *field) { int8_t *v = pv; qemu_get_s8s(f, v); return 0; } -static void put_int8(QEMUFile *f, void *pv, size_t size) +static int put_int8(QEMUFile *f, void *pv, size_t size, VMStateField *field, + QJSON *vmdesc) { int8_t *v = pv; qemu_put_s8s(f, v); + return 0; } const VMStateInfo vmstate_info_int8 = { @@ -505,17 +521,19 @@ const VMStateInfo vmstate_info_int8 = { /* 16 bit int */ -static int get_int16(QEMUFile *f, void *pv, size_t size) +static int get_int16(QEMUFile *f, void *pv, size_t size, VMStateField *field) { int16_t *v = pv; qemu_get_sbe16s(f, v); return 0; } -static void put_int16(QEMUFile *f, void *pv, size_t size) +static int put_int16(QEMUFile *f, void *pv, size_t size, VMStateField *field, + QJSON *vmdesc) { int16_t *v = pv; qemu_put_sbe16s(f, v); + return 0; } const VMStateInfo vmstate_info_int16 = { @@ -526,17 +544,19 @@ const VMStateInfo vmstate_info_int16 = { /* 32 bit int */ -static int get_int32(QEMUFile *f, void *pv, size_t size) +static int get_int32(QEMUFile *f, void *pv, size_t size, VMStateField *field) { int32_t *v = pv; qemu_get_sbe32s(f, v); return 0; } -static void put_int32(QEMUFile *f, void *pv, size_t size) +static int put_int32(QEMUFile *f, void *pv, size_t size, VMStateField *field, + QJSON *vmdesc) { int32_t *v = pv; qemu_put_sbe32s(f, v); + return 0; } const VMStateInfo vmstate_info_int32 = { @@ -548,7 +568,8 @@ const VMStateInfo vmstate_info_int32 = { /* 32 bit int. See that the received value is the same than the one in the field */ -static int get_int32_equal(QEMUFile *f, void *pv, size_t size) +static int get_int32_equal(QEMUFile *f, void *pv, size_t size, + VMStateField *field) { int32_t *v = pv; int32_t v2; @@ -571,7 +592,7 @@ const VMStateInfo vmstate_info_int32_equal = { * and less than or equal to the one in the field. 
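The same mechanical conversion is applied to every VMStateInfo callback in this file: get() gains the VMStateField pointer, and put() gains the field plus the QJSON description and now returns an error code. As a compact summary of the pattern, a hypothetical out-of-tree info would change along these lines (the "foo" names are illustrative only):

/* Old prototypes (before this series):
 *   static int  get_foo(QEMUFile *f, void *pv, size_t size);
 *   static void put_foo(QEMUFile *f, void *pv, size_t size);
 *
 * New prototypes: primitive infos simply ignore the extra parameters and
 * return 0 from put(). */
static int get_foo(QEMUFile *f, void *pv, size_t size, VMStateField *field)
{
    uint32_t *v = pv;
    qemu_get_be32s(f, v);
    return 0;
}

static int put_foo(QEMUFile *f, void *pv, size_t size, VMStateField *field,
                   QJSON *vmdesc)
{
    uint32_t *v = pv;
    qemu_put_be32s(f, v);
    return 0;
}

static const VMStateInfo vmstate_info_foo = {
    .name = "foo",
    .get  = get_foo,
    .put  = put_foo,
};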
*/ -static int get_int32_le(QEMUFile *f, void *pv, size_t size) +static int get_int32_le(QEMUFile *f, void *pv, size_t size, VMStateField *field) { int32_t *cur = pv; int32_t loaded; @@ -595,17 +616,19 @@ const VMStateInfo vmstate_info_int32_le = { /* 64 bit int */ -static int get_int64(QEMUFile *f, void *pv, size_t size) +static int get_int64(QEMUFile *f, void *pv, size_t size, VMStateField *field) { int64_t *v = pv; qemu_get_sbe64s(f, v); return 0; } -static void put_int64(QEMUFile *f, void *pv, size_t size) +static int put_int64(QEMUFile *f, void *pv, size_t size, VMStateField *field, + QJSON *vmdesc) { int64_t *v = pv; qemu_put_sbe64s(f, v); + return 0; } const VMStateInfo vmstate_info_int64 = { @@ -616,17 +639,19 @@ const VMStateInfo vmstate_info_int64 = { /* 8 bit unsigned int */ -static int get_uint8(QEMUFile *f, void *pv, size_t size) +static int get_uint8(QEMUFile *f, void *pv, size_t size, VMStateField *field) { uint8_t *v = pv; qemu_get_8s(f, v); return 0; } -static void put_uint8(QEMUFile *f, void *pv, size_t size) +static int put_uint8(QEMUFile *f, void *pv, size_t size, VMStateField *field, + QJSON *vmdesc) { uint8_t *v = pv; qemu_put_8s(f, v); + return 0; } const VMStateInfo vmstate_info_uint8 = { @@ -637,17 +662,19 @@ const VMStateInfo vmstate_info_uint8 = { /* 16 bit unsigned int */ -static int get_uint16(QEMUFile *f, void *pv, size_t size) +static int get_uint16(QEMUFile *f, void *pv, size_t size, VMStateField *field) { uint16_t *v = pv; qemu_get_be16s(f, v); return 0; } -static void put_uint16(QEMUFile *f, void *pv, size_t size) +static int put_uint16(QEMUFile *f, void *pv, size_t size, VMStateField *field, + QJSON *vmdesc) { uint16_t *v = pv; qemu_put_be16s(f, v); + return 0; } const VMStateInfo vmstate_info_uint16 = { @@ -658,17 +685,19 @@ const VMStateInfo vmstate_info_uint16 = { /* 32 bit unsigned int */ -static int get_uint32(QEMUFile *f, void *pv, size_t size) +static int get_uint32(QEMUFile *f, void *pv, size_t size, VMStateField *field) { uint32_t *v = pv; qemu_get_be32s(f, v); return 0; } -static void put_uint32(QEMUFile *f, void *pv, size_t size) +static int put_uint32(QEMUFile *f, void *pv, size_t size, VMStateField *field, + QJSON *vmdesc) { uint32_t *v = pv; qemu_put_be32s(f, v); + return 0; } const VMStateInfo vmstate_info_uint32 = { @@ -680,7 +709,8 @@ const VMStateInfo vmstate_info_uint32 = { /* 32 bit uint. See that the received value is the same than the one in the field */ -static int get_uint32_equal(QEMUFile *f, void *pv, size_t size) +static int get_uint32_equal(QEMUFile *f, void *pv, size_t size, + VMStateField *field) { uint32_t *v = pv; uint32_t v2; @@ -701,17 +731,19 @@ const VMStateInfo vmstate_info_uint32_equal = { /* 64 bit unsigned int */ -static int get_uint64(QEMUFile *f, void *pv, size_t size) +static int get_uint64(QEMUFile *f, void *pv, size_t size, VMStateField *field) { uint64_t *v = pv; qemu_get_be64s(f, v); return 0; } -static void put_uint64(QEMUFile *f, void *pv, size_t size) +static int put_uint64(QEMUFile *f, void *pv, size_t size, VMStateField *field, + QJSON *vmdesc) { uint64_t *v = pv; qemu_put_be64s(f, v); + return 0; } const VMStateInfo vmstate_info_uint64 = { @@ -723,7 +755,8 @@ const VMStateInfo vmstate_info_uint64 = { /* 64 bit unsigned int. 
See that the received value is the same than the one in the field */ -static int get_uint64_equal(QEMUFile *f, void *pv, size_t size) +static int get_uint64_equal(QEMUFile *f, void *pv, size_t size, + VMStateField *field) { uint64_t *v = pv; uint64_t v2; @@ -745,7 +778,8 @@ const VMStateInfo vmstate_info_uint64_equal = { /* 8 bit int. See that the received value is the same than the one in the field */ -static int get_uint8_equal(QEMUFile *f, void *pv, size_t size) +static int get_uint8_equal(QEMUFile *f, void *pv, size_t size, + VMStateField *field) { uint8_t *v = pv; uint8_t v2; @@ -767,7 +801,8 @@ const VMStateInfo vmstate_info_uint8_equal = { /* 16 bit unsigned int int. See that the received value is the same than the one in the field */ -static int get_uint16_equal(QEMUFile *f, void *pv, size_t size) +static int get_uint16_equal(QEMUFile *f, void *pv, size_t size, + VMStateField *field) { uint16_t *v = pv; uint16_t v2; @@ -788,7 +823,8 @@ const VMStateInfo vmstate_info_uint16_equal = { /* floating point */ -static int get_float64(QEMUFile *f, void *pv, size_t size) +static int get_float64(QEMUFile *f, void *pv, size_t size, + VMStateField *field) { float64 *v = pv; @@ -796,11 +832,13 @@ static int get_float64(QEMUFile *f, void *pv, size_t size) return 0; } -static void put_float64(QEMUFile *f, void *pv, size_t size) +static int put_float64(QEMUFile *f, void *pv, size_t size, VMStateField *field, + QJSON *vmdesc) { uint64_t *v = pv; qemu_put_be64(f, float64_val(*v)); + return 0; } const VMStateInfo vmstate_info_float64 = { @@ -811,7 +849,8 @@ const VMStateInfo vmstate_info_float64 = { /* CPU_DoubleU type */ -static int get_cpudouble(QEMUFile *f, void *pv, size_t size) +static int get_cpudouble(QEMUFile *f, void *pv, size_t size, + VMStateField *field) { CPU_DoubleU *v = pv; qemu_get_be32s(f, &v->l.upper); @@ -819,11 +858,13 @@ static int get_cpudouble(QEMUFile *f, void *pv, size_t size) return 0; } -static void put_cpudouble(QEMUFile *f, void *pv, size_t size) +static int put_cpudouble(QEMUFile *f, void *pv, size_t size, + VMStateField *field, QJSON *vmdesc) { CPU_DoubleU *v = pv; qemu_put_be32s(f, &v->l.upper); qemu_put_be32s(f, &v->l.lower); + return 0; } const VMStateInfo vmstate_info_cpudouble = { @@ -834,17 +875,20 @@ const VMStateInfo vmstate_info_cpudouble = { /* uint8_t buffers */ -static int get_buffer(QEMUFile *f, void *pv, size_t size) +static int get_buffer(QEMUFile *f, void *pv, size_t size, + VMStateField *field) { uint8_t *v = pv; qemu_get_buffer(f, v, size); return 0; } -static void put_buffer(QEMUFile *f, void *pv, size_t size) +static int put_buffer(QEMUFile *f, void *pv, size_t size, VMStateField *field, + QJSON *vmdesc) { uint8_t *v = pv; qemu_put_buffer(f, v, size); + return 0; } const VMStateInfo vmstate_info_buffer = { @@ -856,7 +900,8 @@ const VMStateInfo vmstate_info_buffer = { /* unused buffers: space that was used for some fields that are not useful anymore */ -static int get_unused_buffer(QEMUFile *f, void *pv, size_t size) +static int get_unused_buffer(QEMUFile *f, void *pv, size_t size, + VMStateField *field) { uint8_t buf[1024]; int block_len; @@ -869,7 +914,8 @@ static int get_unused_buffer(QEMUFile *f, void *pv, size_t size) return 0; } -static void put_unused_buffer(QEMUFile *f, void *pv, size_t size) +static int put_unused_buffer(QEMUFile *f, void *pv, size_t size, + VMStateField *field, QJSON *vmdesc) { static const uint8_t buf[1024]; int block_len; @@ -879,6 +925,8 @@ static void put_unused_buffer(QEMUFile *f, void *pv, size_t size) size -= 
block_len; qemu_put_buffer(f, buf, block_len); } + + return 0; } const VMStateInfo vmstate_info_unused_buffer = { @@ -894,7 +942,7 @@ const VMStateInfo vmstate_info_unused_buffer = { */ /* This is the number of 64 bit words sent over the wire */ #define BITS_TO_U64S(nr) DIV_ROUND_UP(nr, 64) -static int get_bitmap(QEMUFile *f, void *pv, size_t size) +static int get_bitmap(QEMUFile *f, void *pv, size_t size, VMStateField *field) { unsigned long *bmp = pv; int i, idx = 0; @@ -908,7 +956,8 @@ static int get_bitmap(QEMUFile *f, void *pv, size_t size) return 0; } -static void put_bitmap(QEMUFile *f, void *pv, size_t size) +static int put_bitmap(QEMUFile *f, void *pv, size_t size, VMStateField *field, + QJSON *vmdesc) { unsigned long *bmp = pv; int i, idx = 0; @@ -919,6 +968,8 @@ static void put_bitmap(QEMUFile *f, void *pv, size_t size) } qemu_put_be64(f, w); } + + return 0; } const VMStateInfo vmstate_info_bitmap = { @@ -926,3 +977,71 @@ const VMStateInfo vmstate_info_bitmap = { .get = get_bitmap, .put = put_bitmap, }; + +/* get for QTAILQ + * meta data about the QTAILQ is encoded in a VMStateField structure + */ +static int get_qtailq(QEMUFile *f, void *pv, size_t unused_size, + VMStateField *field) +{ + int ret = 0; + const VMStateDescription *vmsd = field->vmsd; + /* size of a QTAILQ element */ + size_t size = field->size; + /* offset of the QTAILQ entry in a QTAILQ element */ + size_t entry_offset = field->start; + int version_id = field->version_id; + void *elm; + + trace_get_qtailq(vmsd->name, version_id); + if (version_id > vmsd->version_id) { + error_report("%s %s", vmsd->name, "too new"); + trace_get_qtailq_end(vmsd->name, "too new", -EINVAL); + + return -EINVAL; + } + if (version_id < vmsd->minimum_version_id) { + error_report("%s %s", vmsd->name, "too old"); + trace_get_qtailq_end(vmsd->name, "too old", -EINVAL); + return -EINVAL; + } + + while (qemu_get_byte(f)) { + elm = g_malloc(size); + ret = vmstate_load_state(f, vmsd, elm, version_id); + if (ret) { + return ret; + } + QTAILQ_RAW_INSERT_TAIL(pv, elm, entry_offset); + } + + trace_get_qtailq_end(vmsd->name, "end", ret); + return ret; +} + +/* put for QTAILQ */ +static int put_qtailq(QEMUFile *f, void *pv, size_t unused_size, + VMStateField *field, QJSON *vmdesc) +{ + const VMStateDescription *vmsd = field->vmsd; + /* offset of the QTAILQ entry in a QTAILQ element*/ + size_t entry_offset = field->start; + void *elm; + + trace_put_qtailq(vmsd->name, vmsd->version_id); + + QTAILQ_RAW_FOREACH(elm, pv, entry_offset) { + qemu_put_byte(f, true); + vmstate_save_state(f, vmsd, elm, vmdesc); + } + qemu_put_byte(f, false); + + trace_put_qtailq_end(vmsd->name, "end"); + + return 0; +} +const VMStateInfo vmstate_info_qtailq = { + .name = "qtailq", + .get = get_qtailq, + .put = put_qtailq, +}; diff --git a/qdev-monitor.c b/qdev-monitor.c index c73410c02e..81d01df928 100644 --- a/qdev-monitor.c +++ b/qdev-monitor.c @@ -29,6 +29,7 @@ #include "qemu/error-report.h" #include "qemu/help_option.h" #include "sysemu/block-backend.h" +#include "migration/migration.h" /* * Aliases were a bad idea from the start. 
Let's keep them @@ -577,6 +578,14 @@ DeviceState *qdev_device_add(QemuOpts *opts, Error **errp) return NULL; } + if (only_migratable) { + if (dc->vmsd->unmigratable) { + error_setg(errp, "Device %s is not migratable, but " + "--only-migratable was specified", driver); + return NULL; + } + } + /* find bus */ path = qemu_opt_get(opts, "bus"); if (path != NULL) { diff --git a/qemu-doc.texi b/qemu-doc.texi index 0b2746f0b1..794ab4a080 100644 --- a/qemu-doc.texi +++ b/qemu-doc.texi @@ -1037,7 +1037,7 @@ qemu-system-i386 -iscsi initiator-name=iqn.qemu.test:my-initiator \ @node disk_images_gluster @subsection GlusterFS disk images -GlusterFS is an user space distributed file system. +GlusterFS is a user space distributed file system. You can boot from the GlusterFS disk image with the command: @example @@ -2901,6 +2901,9 @@ The binary format is detected automatically. @command{qemu-mips} TODO. @command{qemu-mipsel} TODO. +@cindex user mode (NiosII) +@command{qemu-nios2} TODO. + @cindex user mode (PowerPC) @command{qemu-ppc64abi32} TODO. @command{qemu-ppc64} TODO. diff --git a/qemu-img.c b/qemu-img.c index 5df66fe661..74e3362653 100644 --- a/qemu-img.c +++ b/qemu-img.c @@ -3455,13 +3455,11 @@ static int img_amend(int argc, char **argv) create_opts = qemu_opts_append(create_opts, bs->drv->create_opts); opts = qemu_opts_create(create_opts, NULL, 0, &error_abort); - if (options) { - qemu_opts_do_parse(opts, options, NULL, &err); - if (err) { - error_report_err(err); - ret = -1; - goto out; - } + qemu_opts_do_parse(opts, options, NULL, &err); + if (err) { + error_report_err(err); + ret = -1; + goto out; } /* In case the driver does not call amend_status_cb() */ diff --git a/qemu-options.hx b/qemu-options.hx index 780528d6ad..588e5beab3 100644 --- a/qemu-options.hx +++ b/qemu-options.hx @@ -32,7 +32,6 @@ DEF("machine", HAS_ARG, QEMU_OPTION_machine, \ " selects emulated machine ('-machine help' for list)\n" " property accel=accel1[:accel2[:...]] selects accelerator\n" " supported accelerators are kvm, xen, tcg (default: tcg)\n" - " kernel_irqchip=on|off controls accelerated irqchip support\n" " kernel_irqchip=on|off|split controls accelerated irqchip support (default=off)\n" " vmport=on|off|auto controls emulation of vmport (default: auto)\n" " kvm_shadow_mem=size of KVM shadow MMU in bytes\n" @@ -250,7 +249,7 @@ use is discouraged as it may be removed from future versions. ETEXI DEF("m", HAS_ARG, QEMU_OPTION_m, - "-m[emory] [size=]megs[,slots=n,maxmem=size]\n" + "-m [size=]megs[,slots=n,maxmem=size]\n" " configure guest RAM\n" " size: initial amount of guest memory\n" " slots: number of hotplug slots (default: none)\n" @@ -927,7 +926,7 @@ ETEXI DEF("display", HAS_ARG, QEMU_OPTION_display, "-display sdl[,frame=on|off][,alt_grab=on|off][,ctrl_grab=on|off]\n" - " [,window_close=on|off][,gl=on|off]|curses|none|\n" + " [,window_close=on|off][,gl=on|off]\n" "-display gtk[,grab_on_hover=on|off][,gl=on|off]|\n" "-display vnc=<display>[,<optargs>]\n" "-display curses\n" @@ -2589,7 +2588,7 @@ qemu-system-i386 --drive file=sheepdog://192.0.2.1:30000/MyVirtualMachine See also @url{http://http://www.osrg.net/sheepdog/}. @item GlusterFS -GlusterFS is an user space distributed file system. +GlusterFS is a user space distributed file system. QEMU supports the use of GlusterFS volumes for hosting VM disk images using TCP, Unix Domain Sockets and RDMA transport protocols. 
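
Editorial aside, not part of the patch: the new gate in qdev_device_add() above pairs with the -only-migratable switch defined in the next hunk. A minimal sketch of the check follows, assuming the usual QEMU context (the DeviceClass *dc being instantiated, the new only_migratable global, error_setg() from qapi/error.h); the NULL test on dc->vmsd is my own defensive addition and is not in the hunk, which dereferences dc->vmsd directly.

/* Sketch only: reject unmigratable devices when -only-migratable is given. */
static bool device_is_allowed(const DeviceClass *dc, const char *driver,
                              Error **errp)
{
    if (only_migratable && dc->vmsd && dc->vmsd->unmigratable) {
        error_setg(errp, "Device %s is not migratable, but "
                   "--only-migratable was specified", driver);
        return false;
    }
    return true;
}

On the command line this would look like, for example, "qemu-system-i386 -only-migratable -device ...": the -device/device_add path then refuses any device whose VMStateDescription is marked unmigratable.
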
@@ -3585,6 +3584,15 @@ be used to change settings (such as migration parameters) prior to issuing the migrate_incoming to allow the migration to begin. ETEXI +DEF("only-migratable", 0, QEMU_OPTION_only_migratable, \ + "-only-migratable allow only migratable devices\n", QEMU_ARCH_ALL) +STEXI +@item -only-migratable +@findex -only-migratable +Only allow migratable devices. Devices will not be allowed to enter an +unmigratable state. +ETEXI + DEF("nodefaults", 0, QEMU_OPTION_nodefaults, \ "-nodefaults don't create default devices\n", QEMU_ARCH_ALL) STEXI diff --git a/qga/main.c b/qga/main.c index 6caf215575..538e4ee299 100644 --- a/qga/main.c +++ b/qga/main.c @@ -558,8 +558,8 @@ static void process_command(GAState *s, QDict *req) rsp = qmp_dispatch(QOBJECT(req)); if (rsp) { ret = send_response(s, rsp); - if (ret) { - g_warning("error sending response: %s", strerror(ret)); + if (ret < 0) { + g_warning("error sending response: %s", strerror(-ret)); } qobject_decref(rsp); } diff --git a/qom/object.c b/qom/object.c index 760fafb0dc..eb4bc924ff 100644 --- a/qom/object.c +++ b/qom/object.c @@ -357,7 +357,7 @@ static void object_post_init_with_type(Object *obj, TypeImpl *ti) } } -void object_initialize_with_type(void *data, size_t size, TypeImpl *type) +static void object_initialize_with_type(void *data, size_t size, TypeImpl *type) { Object *obj = data; @@ -473,7 +473,7 @@ static void object_finalize(void *data) } } -Object *object_new_with_type(Type type) +static Object *object_new_with_type(Type type) { Object *obj; diff --git a/stubs/migr-blocker.c b/stubs/migr-blocker.c index 8ab3604dfa..a5ba18f53d 100644 --- a/stubs/migr-blocker.c +++ b/stubs/migr-blocker.c @@ -2,8 +2,9 @@ #include "qemu-common.h" #include "migration/migration.h" -void migrate_add_blocker(Error *reason) +int migrate_add_blocker(Error *reason, Error **errp) { + return 0; } void migrate_del_blocker(Error *reason) diff --git a/target/alpha/machine.c b/target/alpha/machine.c index b99a123a39..a102645315 100644 --- a/target/alpha/machine.c +++ b/target/alpha/machine.c @@ -5,17 +5,19 @@ #include "hw/boards.h" #include "migration/cpu.h" -static int get_fpcr(QEMUFile *f, void *opaque, size_t size) +static int get_fpcr(QEMUFile *f, void *opaque, size_t size, VMStateField *field) { CPUAlphaState *env = opaque; cpu_alpha_store_fpcr(env, qemu_get_be64(f)); return 0; } -static void put_fpcr(QEMUFile *f, void *opaque, size_t size) +static int put_fpcr(QEMUFile *f, void *opaque, size_t size, + VMStateField *field, QJSON *vmdesc) { CPUAlphaState *env = opaque; qemu_put_be64(f, cpu_alpha_load_fpcr(env)); + return 0; } static const VMStateInfo vmstate_fpcr = { diff --git a/target/arm/machine.c b/target/arm/machine.c index d90943b6db..487320db1d 100644 --- a/target/arm/machine.c +++ b/target/arm/machine.c @@ -17,7 +17,8 @@ static bool vfp_needed(void *opaque) return arm_feature(env, ARM_FEATURE_VFP); } -static int get_fpscr(QEMUFile *f, void *opaque, size_t size) +static int get_fpscr(QEMUFile *f, void *opaque, size_t size, + VMStateField *field) { ARMCPU *cpu = opaque; CPUARMState *env = &cpu->env; @@ -27,12 +28,14 @@ static int get_fpscr(QEMUFile *f, void *opaque, size_t size) return 0; } -static void put_fpscr(QEMUFile *f, void *opaque, size_t size) +static int put_fpscr(QEMUFile *f, void *opaque, size_t size, + VMStateField *field, QJSON *vmdesc) { ARMCPU *cpu = opaque; CPUARMState *env = &cpu->env; qemu_put_be32(f, vfp_get_fpscr(env)); + return 0; } static const VMStateInfo vmstate_fpscr = { @@ -163,7 +166,8 @@ static const 
VMStateDescription vmstate_pmsav7 = { } }; -static int get_cpsr(QEMUFile *f, void *opaque, size_t size) +static int get_cpsr(QEMUFile *f, void *opaque, size_t size, + VMStateField *field) { ARMCPU *cpu = opaque; CPUARMState *env = &cpu->env; @@ -180,7 +184,8 @@ static int get_cpsr(QEMUFile *f, void *opaque, size_t size) return 0; } -static void put_cpsr(QEMUFile *f, void *opaque, size_t size) +static int put_cpsr(QEMUFile *f, void *opaque, size_t size, + VMStateField *field, QJSON *vmdesc) { ARMCPU *cpu = opaque; CPUARMState *env = &cpu->env; @@ -193,6 +198,7 @@ static void put_cpsr(QEMUFile *f, void *opaque, size_t size) } qemu_put_be32(f, val); + return 0; } static const VMStateInfo vmstate_cpsr = { diff --git a/target/i386/kvm.c b/target/i386/kvm.c index 3b5282186c..8e130ccf9c 100644 --- a/target/i386/kvm.c +++ b/target/i386/kvm.c @@ -710,6 +710,7 @@ int kvm_arch_init_vcpu(CPUState *cs) uint32_t signature[3]; int kvm_base = KVM_CPUID_SIGNATURE; int r; + Error *local_err = NULL; memset(&cpuid_data, 0, sizeof(cpuid_data)); @@ -970,7 +971,12 @@ int kvm_arch_init_vcpu(CPUState *cs) error_setg(&invtsc_mig_blocker, "State blocked by non-migratable CPU device" " (invtsc flag)"); - migrate_add_blocker(invtsc_mig_blocker); + r = migrate_add_blocker(invtsc_mig_blocker, &local_err); + if (local_err) { + error_report_err(local_err); + error_free(invtsc_mig_blocker); + goto fail; + } /* for savevm */ vmstate_x86_cpu.unmigratable = 1; } @@ -979,12 +985,12 @@ int kvm_arch_init_vcpu(CPUState *cs) cpuid_data.cpuid.padding = 0; r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data); if (r) { - return r; + goto fail; } r = kvm_arch_set_tsc_khz(cs); if (r < 0) { - return r; + goto fail; } /* vcpu's TSC frequency is either specified by user, or following @@ -1011,6 +1017,10 @@ int kvm_arch_init_vcpu(CPUState *cs) } return 0; + + fail: + migrate_del_blocker(invtsc_mig_blocker); + return r; } void kvm_arch_reset_vcpu(X86CPU *cpu) diff --git a/target/i386/machine.c b/target/i386/machine.c index e002b4fc6d..78ae2f986b 100644 --- a/target/i386/machine.c +++ b/target/i386/machine.c @@ -136,10 +136,12 @@ static const VMStateDescription vmstate_mtrr_var = { #define VMSTATE_MTRR_VARS(_field, _state, _n, _v) \ VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_mtrr_var, MTRRVar) -static void put_fpreg_error(QEMUFile *f, void *opaque, size_t size) +static int put_fpreg_error(QEMUFile *f, void *opaque, size_t size, + VMStateField *field, QJSON *vmdesc) { fprintf(stderr, "call put_fpreg() with invalid arguments\n"); exit(0); + return 0; } /* XXX: add that in a FPU generic layer */ @@ -164,7 +166,8 @@ static void fp64_to_fp80(union x86_longdouble *p, uint64_t temp) p->exp = e; } -static int get_fpreg(QEMUFile *f, void *opaque, size_t size) +static int get_fpreg(QEMUFile *f, void *opaque, size_t size, + VMStateField *field) { FPReg *fp_reg = opaque; uint64_t mant; @@ -176,7 +179,8 @@ static int get_fpreg(QEMUFile *f, void *opaque, size_t size) return 0; } -static void put_fpreg(QEMUFile *f, void *opaque, size_t size) +static int put_fpreg(QEMUFile *f, void *opaque, size_t size, + VMStateField *field, QJSON *vmdesc) { FPReg *fp_reg = opaque; uint64_t mant; @@ -186,6 +190,8 @@ static void put_fpreg(QEMUFile *f, void *opaque, size_t size) cpu_get_fp80(&mant, &exp, fp_reg->d); qemu_put_be64s(f, &mant); qemu_put_be16s(f, &exp); + + return 0; } static const VMStateInfo vmstate_fpreg = { @@ -194,7 +200,8 @@ static const VMStateInfo vmstate_fpreg = { .put = put_fpreg, }; -static int get_fpreg_1_mmx(QEMUFile *f, void *opaque, 
size_t size) +static int get_fpreg_1_mmx(QEMUFile *f, void *opaque, size_t size, + VMStateField *field) { union x86_longdouble *p = opaque; uint64_t mant; @@ -211,7 +218,8 @@ static const VMStateInfo vmstate_fpreg_1_mmx = { .put = put_fpreg_error, }; -static int get_fpreg_1_no_mmx(QEMUFile *f, void *opaque, size_t size) +static int get_fpreg_1_no_mmx(QEMUFile *f, void *opaque, size_t size, + VMStateField *field) { union x86_longdouble *p = opaque; uint64_t mant; @@ -273,17 +281,21 @@ static bool less_than_7(void *opaque, int version_id) return version_id < 7; } -static int get_uint64_as_uint32(QEMUFile *f, void *pv, size_t size) +static int get_uint64_as_uint32(QEMUFile *f, void *pv, size_t size, + VMStateField *field) { uint64_t *v = pv; *v = qemu_get_be32(f); return 0; } -static void put_uint64_as_uint32(QEMUFile *f, void *pv, size_t size) +static int put_uint64_as_uint32(QEMUFile *f, void *pv, size_t size, + VMStateField *field, QJSON *vmdesc) { uint64_t *v = pv; qemu_put_be32(f, *v); + + return 0; } static const VMStateInfo vmstate_hack_uint64_as_uint32 = { diff --git a/target/mips/machine.c b/target/mips/machine.c index d20d948457..38c8fe9328 100644 --- a/target/mips/machine.c +++ b/target/mips/machine.c @@ -19,7 +19,7 @@ static int cpu_post_load(void *opaque, int version_id) /* FPU state */ -static int get_fpr(QEMUFile *f, void *pv, size_t size) +static int get_fpr(QEMUFile *f, void *pv, size_t size, VMStateField *field) { int i; fpr_t *v = pv; @@ -30,7 +30,8 @@ static int get_fpr(QEMUFile *f, void *pv, size_t size) return 0; } -static void put_fpr(QEMUFile *f, void *pv, size_t size) +static int put_fpr(QEMUFile *f, void *pv, size_t size, VMStateField *field, + QJSON *vmdesc) { int i; fpr_t *v = pv; @@ -38,6 +39,8 @@ static void put_fpr(QEMUFile *f, void *pv, size_t size) for (i = 0; i < MSA_WRLEN/64; i++) { qemu_put_sbe64s(f, &v->wr.d[i]); } + + return 0; } const VMStateInfo vmstate_info_fpr = { @@ -124,7 +127,7 @@ const VMStateDescription vmstate_mvp = { /* TLB state */ -static int get_tlb(QEMUFile *f, void *pv, size_t size) +static int get_tlb(QEMUFile *f, void *pv, size_t size, VMStateField *field) { r4k_tlb_t *v = pv; uint16_t flags; @@ -151,7 +154,8 @@ static int get_tlb(QEMUFile *f, void *pv, size_t size) return 0; } -static void put_tlb(QEMUFile *f, void *pv, size_t size) +static int put_tlb(QEMUFile *f, void *pv, size_t size, VMStateField *field, + QJSON *vmdesc) { r4k_tlb_t *v = pv; @@ -175,6 +179,8 @@ static void put_tlb(QEMUFile *f, void *pv, size_t size) qemu_put_be16s(f, &flags); qemu_put_be64s(f, &v->PFN[0]); qemu_put_be64s(f, &v->PFN[1]); + + return 0; } const VMStateInfo vmstate_info_tlb = { diff --git a/target/nios2/Makefile.objs b/target/nios2/Makefile.objs new file mode 100644 index 0000000000..2a11c5ce08 --- /dev/null +++ b/target/nios2/Makefile.objs @@ -0,0 +1,4 @@ +obj-y += translate.o op_helper.o helper.o cpu.o mmu.o +obj-$(CONFIG_SOFTMMU) += monitor.o + +$(obj)/op_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS) diff --git a/target/nios2/cpu.c b/target/nios2/cpu.c new file mode 100644 index 0000000000..d56bb7245a --- /dev/null +++ b/target/nios2/cpu.c @@ -0,0 +1,237 @@ +/* + * QEMU Nios II CPU + * + * Copyright (c) 2012 Chris Wulff <crwulff@gmail.com> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
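
Editorial aside: the mechanical change running through migration/vmstate.c and the target machine.c files above is a new VMStateInfo callback contract: .get() gains a VMStateField * argument, and .put() additionally receives the QJSON *vmdesc and now returns int instead of void. A minimal custom VMStateInfo written against the new signatures is sketched below; it assumes QEMU's migration/vmstate.h and qemu-file helpers, and the made-up "sat16" field type is only an illustration.

/* Sketch only: a custom scalar VMStateInfo under the new callback contract. */
static int get_sat16(QEMUFile *f, void *pv, size_t size, VMStateField *field)
{
    uint32_t *v = pv;

    *v = qemu_get_be16(f);          /* 16 bit on the wire, widened on load */
    return 0;
}

static int put_sat16(QEMUFile *f, void *pv, size_t size, VMStateField *field,
                     QJSON *vmdesc)
{
    uint32_t *v = pv;

    qemu_put_be16(f, *v > 0xffff ? 0xffff : *v);   /* saturate, don't wrap */
    return 0;                       /* .put() may now report failure */
}

static const VMStateInfo vmstate_info_sat16 = {
    .name = "sat16",
    .get  = get_sat16,
    .put  = put_sat16,
};

The qtailq handlers added at the end of vmstate.c are the first users that actually need the extra field metadata: the element size and the offset of the QTAILQ entry come from field->size and field->start.
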
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * <http://www.gnu.org/licenses/lgpl-2.1.html> + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qapi/error.h" +#include "cpu.h" +#include "exec/log.h" +#include "exec/gdbstub.h" +#include "hw/qdev-properties.h" + +static void nios2_cpu_set_pc(CPUState *cs, vaddr value) +{ + Nios2CPU *cpu = NIOS2_CPU(cs); + CPUNios2State *env = &cpu->env; + + env->regs[R_PC] = value; +} + +static bool nios2_cpu_has_work(CPUState *cs) +{ + return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI); +} + +/* CPUClass::reset() */ +static void nios2_cpu_reset(CPUState *cs) +{ + Nios2CPU *cpu = NIOS2_CPU(cs); + Nios2CPUClass *ncc = NIOS2_CPU_GET_CLASS(cpu); + CPUNios2State *env = &cpu->env; + + if (qemu_loglevel_mask(CPU_LOG_RESET)) { + qemu_log("CPU Reset (CPU %d)\n", cs->cpu_index); + log_cpu_state(cs, 0); + } + + ncc->parent_reset(cs); + + memset(env->regs, 0, sizeof(uint32_t) * NUM_CORE_REGS); + env->regs[R_PC] = cpu->reset_addr; + +#if defined(CONFIG_USER_ONLY) + /* Start in user mode with interrupts enabled. */ + env->regs[CR_STATUS] = CR_STATUS_U | CR_STATUS_PIE; +#else + env->regs[CR_STATUS] = 0; +#endif +} + +static void nios2_cpu_initfn(Object *obj) +{ + CPUState *cs = CPU(obj); + Nios2CPU *cpu = NIOS2_CPU(obj); + CPUNios2State *env = &cpu->env; + static bool tcg_initialized; + + cs->env_ptr = env; + +#if !defined(CONFIG_USER_ONLY) + mmu_init(env); +#endif + + if (tcg_enabled() && !tcg_initialized) { + tcg_initialized = true; + nios2_tcg_init(); + } +} + +Nios2CPU *cpu_nios2_init(const char *cpu_model) +{ + Nios2CPU *cpu = NIOS2_CPU(object_new(TYPE_NIOS2_CPU)); + + object_property_set_bool(OBJECT(cpu), true, "realized", NULL); + + return cpu; +} + +static void nios2_cpu_realizefn(DeviceState *dev, Error **errp) +{ + CPUState *cs = CPU(dev); + Nios2CPUClass *ncc = NIOS2_CPU_GET_CLASS(dev); + Error *local_err = NULL; + + cpu_exec_realizefn(cs, &local_err); + if (local_err != NULL) { + error_propagate(errp, local_err); + return; + } + + qemu_init_vcpu(cs); + cpu_reset(cs); + + ncc->parent_realize(dev, errp); +} + +static bool nios2_cpu_exec_interrupt(CPUState *cs, int interrupt_request) +{ + Nios2CPU *cpu = NIOS2_CPU(cs); + CPUNios2State *env = &cpu->env; + + if ((interrupt_request & CPU_INTERRUPT_HARD) && + (env->regs[CR_STATUS] & CR_STATUS_PIE)) { + cs->exception_index = EXCP_IRQ; + nios2_cpu_do_interrupt(cs); + return true; + } + return false; +} + + +static void nios2_cpu_disas_set_info(CPUState *cpu, disassemble_info *info) +{ + /* NOTE: NiosII R2 is not supported yet. 
*/ + info->mach = bfd_arch_nios2; +#ifdef TARGET_WORDS_BIGENDIAN + info->print_insn = print_insn_big_nios2; +#else + info->print_insn = print_insn_little_nios2; +#endif +} + +static int nios2_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n) +{ + Nios2CPU *cpu = NIOS2_CPU(cs); + CPUClass *cc = CPU_GET_CLASS(cs); + CPUNios2State *env = &cpu->env; + + if (n > cc->gdb_num_core_regs) { + return 0; + } + + if (n < 32) { /* GP regs */ + return gdb_get_reg32(mem_buf, env->regs[n]); + } else if (n == 32) { /* PC */ + return gdb_get_reg32(mem_buf, env->regs[R_PC]); + } else if (n < 49) { /* Status regs */ + return gdb_get_reg32(mem_buf, env->regs[n - 1]); + } + + /* Invalid regs */ + return 0; +} + +static int nios2_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) +{ + Nios2CPU *cpu = NIOS2_CPU(cs); + CPUClass *cc = CPU_GET_CLASS(cs); + CPUNios2State *env = &cpu->env; + + if (n > cc->gdb_num_core_regs) { + return 0; + } + + if (n < 32) { /* GP regs */ + env->regs[n] = ldl_p(mem_buf); + } else if (n == 32) { /* PC */ + env->regs[R_PC] = ldl_p(mem_buf); + } else if (n < 49) { /* Status regs */ + env->regs[n - 1] = ldl_p(mem_buf); + } + + return 4; +} + +static Property nios2_properties[] = { + DEFINE_PROP_BOOL("mmu_present", Nios2CPU, mmu_present, true), + /* ALTR,pid-num-bits */ + DEFINE_PROP_UINT32("mmu_pid_num_bits", Nios2CPU, pid_num_bits, 8), + /* ALTR,tlb-num-ways */ + DEFINE_PROP_UINT32("mmu_tlb_num_ways", Nios2CPU, tlb_num_ways, 16), + /* ALTR,tlb-num-entries */ + DEFINE_PROP_UINT32("mmu_pid_num_entries", Nios2CPU, tlb_num_entries, 256), + DEFINE_PROP_END_OF_LIST(), +}; + + +static void nios2_cpu_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + CPUClass *cc = CPU_CLASS(oc); + Nios2CPUClass *ncc = NIOS2_CPU_CLASS(oc); + + ncc->parent_realize = dc->realize; + dc->realize = nios2_cpu_realizefn; + dc->props = nios2_properties; + ncc->parent_reset = cc->reset; + cc->reset = nios2_cpu_reset; + + cc->has_work = nios2_cpu_has_work; + cc->do_interrupt = nios2_cpu_do_interrupt; + cc->cpu_exec_interrupt = nios2_cpu_exec_interrupt; + cc->dump_state = nios2_cpu_dump_state; + cc->set_pc = nios2_cpu_set_pc; + cc->disas_set_info = nios2_cpu_disas_set_info; +#ifdef CONFIG_USER_ONLY + cc->handle_mmu_fault = nios2_cpu_handle_mmu_fault; +#else + cc->do_unaligned_access = nios2_cpu_do_unaligned_access; + cc->get_phys_page_debug = nios2_cpu_get_phys_page_debug; +#endif + cc->gdb_read_register = nios2_cpu_gdb_read_register; + cc->gdb_write_register = nios2_cpu_gdb_write_register; + cc->gdb_num_core_regs = 49; +} + +static const TypeInfo nios2_cpu_type_info = { + .name = TYPE_NIOS2_CPU, + .parent = TYPE_CPU, + .instance_size = sizeof(Nios2CPU), + .instance_init = nios2_cpu_initfn, + .class_size = sizeof(Nios2CPUClass), + .class_init = nios2_cpu_class_init, +}; + +static void nios2_cpu_register_types(void) +{ + type_register_static(&nios2_cpu_type_info); +} + +type_init(nios2_cpu_register_types) diff --git a/target/nios2/cpu.h b/target/nios2/cpu.h new file mode 100644 index 0000000000..13931f3f0b --- /dev/null +++ b/target/nios2/cpu.h @@ -0,0 +1,272 @@ +/* + * Altera Nios II virtual CPU header + * + * Copyright (c) 2012 Chris Wulff <crwulff@gmail.com> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
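
Editorial aside on the gdbstub hooks above: gdb sees 49 registers, and nios2_cpu_gdb_read_register()/..._write_register() fold them onto the single env->regs[] array. The standalone sketch below merely restates that mapping (R_PC is copied from cpu.h, which follows later in this patch).

#include <stdio.h>

#define R_PC 64                     /* regs[] slot holding the program counter */

/* Return the env->regs[] index backing gdb register number n, or -1. */
static int nios2_gdb_regno_to_index(int n)
{
    if (n < 32) {
        return n;                   /* r0..r31 general purpose registers */
    } else if (n == 32) {
        return R_PC;                /* gdb's pc lives in regs[64]        */
    } else if (n < 49) {
        return n - 1;               /* control regs map to regs[32..47]  */
    }
    return -1;                      /* beyond gdb_num_core_regs          */
}

int main(void)
{
    printf("gdb #33 -> regs[%d]\n", nios2_gdb_regno_to_index(33));  /* 32 */
    printf("gdb #32 -> regs[%d]\n", nios2_gdb_regno_to_index(32));  /* 64 */
    return 0;
}
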
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * <http://www.gnu.org/licenses/lgpl-2.1.html> + */ +#ifndef CPU_NIOS2_H +#define CPU_NIOS2_H + +#include "qemu/osdep.h" +#include "qemu-common.h" + +#define TARGET_LONG_BITS 32 + +#define CPUArchState struct CPUNios2State + +#include "exec/cpu-defs.h" +#include "fpu/softfloat.h" +#include "qom/cpu.h" +struct CPUNios2State; +typedef struct CPUNios2State CPUNios2State; +#if !defined(CONFIG_USER_ONLY) +#include "mmu.h" +#endif + +#define TYPE_NIOS2_CPU "nios2-cpu" + +#define NIOS2_CPU_CLASS(klass) \ + OBJECT_CLASS_CHECK(Nios2CPUClass, (klass), TYPE_NIOS2_CPU) +#define NIOS2_CPU(obj) \ + OBJECT_CHECK(Nios2CPU, (obj), TYPE_NIOS2_CPU) +#define NIOS2_CPU_GET_CLASS(obj) \ + OBJECT_GET_CLASS(Nios2CPUClass, (obj), TYPE_NIOS2_CPU) + +/** + * Nios2CPUClass: + * @parent_reset: The parent class' reset handler. + * + * A Nios2 CPU model. + */ +typedef struct Nios2CPUClass { + /*< private >*/ + CPUClass parent_class; + /*< public >*/ + + DeviceRealize parent_realize; + void (*parent_reset)(CPUState *cpu); +} Nios2CPUClass; + +#define TARGET_HAS_ICE 1 + +/* Configuration options for Nios II */ +#define RESET_ADDRESS 0x00000000 +#define EXCEPTION_ADDRESS 0x00000004 +#define FAST_TLB_MISS_ADDRESS 0x00000008 + + +/* GP regs + CR regs + PC */ +#define NUM_CORE_REGS (32 + 32 + 1) + +/* General purpose register aliases */ +#define R_ZERO 0 +#define R_AT 1 +#define R_RET0 2 +#define R_RET1 3 +#define R_ARG0 4 +#define R_ARG1 5 +#define R_ARG2 6 +#define R_ARG3 7 +#define R_ET 24 +#define R_BT 25 +#define R_GP 26 +#define R_SP 27 +#define R_FP 28 +#define R_EA 29 +#define R_BA 30 +#define R_RA 31 + +/* Control register aliases */ +#define CR_BASE 32 +#define CR_STATUS (CR_BASE + 0) +#define CR_STATUS_PIE (1 << 0) +#define CR_STATUS_U (1 << 1) +#define CR_STATUS_EH (1 << 2) +#define CR_STATUS_IH (1 << 3) +#define CR_STATUS_IL (63 << 4) +#define CR_STATUS_CRS (63 << 10) +#define CR_STATUS_PRS (63 << 16) +#define CR_STATUS_NMI (1 << 22) +#define CR_STATUS_RSIE (1 << 23) +#define CR_ESTATUS (CR_BASE + 1) +#define CR_BSTATUS (CR_BASE + 2) +#define CR_IENABLE (CR_BASE + 3) +#define CR_IPENDING (CR_BASE + 4) +#define CR_CPUID (CR_BASE + 5) +#define CR_CTL6 (CR_BASE + 6) +#define CR_EXCEPTION (CR_BASE + 7) +#define CR_PTEADDR (CR_BASE + 8) +#define CR_PTEADDR_PTBASE_SHIFT 22 +#define CR_PTEADDR_PTBASE_MASK (0x3FF << CR_PTEADDR_PTBASE_SHIFT) +#define CR_PTEADDR_VPN_SHIFT 2 +#define CR_PTEADDR_VPN_MASK (0xFFFFF << CR_PTEADDR_VPN_SHIFT) +#define CR_TLBACC (CR_BASE + 9) +#define CR_TLBACC_IGN_SHIFT 25 +#define CR_TLBACC_IGN_MASK (0x7F << CR_TLBACC_IGN_SHIFT) +#define CR_TLBACC_C (1 << 24) +#define CR_TLBACC_R (1 << 23) +#define CR_TLBACC_W (1 << 22) +#define CR_TLBACC_X (1 << 21) +#define CR_TLBACC_G (1 << 20) +#define CR_TLBACC_PFN_MASK 0x000FFFFF +#define CR_TLBMISC (CR_BASE + 10) +#define CR_TLBMISC_WAY_SHIFT 20 +#define CR_TLBMISC_WAY_MASK (0xF << CR_TLBMISC_WAY_SHIFT) +#define CR_TLBMISC_RD (1 << 19) +#define CR_TLBMISC_WR (1 << 18) +#define CR_TLBMISC_PID_SHIFT 4 +#define CR_TLBMISC_PID_MASK (0x3FFF << CR_TLBMISC_PID_SHIFT) +#define CR_TLBMISC_DBL (1 << 3) +#define CR_TLBMISC_BAD (1 << 2) +#define CR_TLBMISC_PERM (1 << 1) +#define CR_TLBMISC_D 
(1 << 0) +#define CR_ENCINJ (CR_BASE + 11) +#define CR_BADADDR (CR_BASE + 12) +#define CR_CONFIG (CR_BASE + 13) +#define CR_MPUBASE (CR_BASE + 14) +#define CR_MPUACC (CR_BASE + 15) + +/* Other registers */ +#define R_PC 64 + +/* Exceptions */ +#define EXCP_BREAK -1 +#define EXCP_RESET 0 +#define EXCP_PRESET 1 +#define EXCP_IRQ 2 +#define EXCP_TRAP 3 +#define EXCP_UNIMPL 4 +#define EXCP_ILLEGAL 5 +#define EXCP_UNALIGN 6 +#define EXCP_UNALIGND 7 +#define EXCP_DIV 8 +#define EXCP_SUPERA 9 +#define EXCP_SUPERI 10 +#define EXCP_SUPERD 11 +#define EXCP_TLBD 12 +#define EXCP_TLBX 13 +#define EXCP_TLBR 14 +#define EXCP_TLBW 15 +#define EXCP_MPUI 16 +#define EXCP_MPUD 17 + +#define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_3 + +#define NB_MMU_MODES 2 + +struct CPUNios2State { + uint32_t regs[NUM_CORE_REGS]; + +#if !defined(CONFIG_USER_ONLY) + Nios2MMU mmu; + + uint32_t irq_pending; +#endif + + CPU_COMMON +}; + +/** + * Nios2CPU: + * @env: #CPUNios2State + * + * A Nios2 CPU. + */ +typedef struct Nios2CPU { + /*< private >*/ + CPUState parent_obj; + /*< public >*/ + + CPUNios2State env; + bool mmu_present; + uint32_t pid_num_bits; + uint32_t tlb_num_ways; + uint32_t tlb_num_entries; + + /* Addresses that are hard-coded in the FPGA build settings */ + uint32_t reset_addr; + uint32_t exception_addr; + uint32_t fast_tlb_miss_addr; +} Nios2CPU; + +static inline Nios2CPU *nios2_env_get_cpu(CPUNios2State *env) +{ + return NIOS2_CPU(container_of(env, Nios2CPU, env)); +} + +#define ENV_GET_CPU(e) CPU(nios2_env_get_cpu(e)) + +#define ENV_OFFSET offsetof(Nios2CPU, env) + +void nios2_tcg_init(void); +Nios2CPU *cpu_nios2_init(const char *cpu_model); +void nios2_cpu_do_interrupt(CPUState *cs); +int cpu_nios2_signal_handler(int host_signum, void *pinfo, void *puc); +void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUNios2State *env); +void nios2_cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf, + int flags); +hwaddr nios2_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); +void nios2_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, + MMUAccessType access_type, + int mmu_idx, uintptr_t retaddr); + +qemu_irq *nios2_cpu_pic_init(Nios2CPU *cpu); +void nios2_check_interrupts(CPUNios2State *env); + +#define TARGET_PHYS_ADDR_SPACE_BITS 32 +#define TARGET_VIRT_ADDR_SPACE_BITS 32 + +#define cpu_init(cpu_model) CPU(cpu_nios2_init(cpu_model)) + +#define cpu_gen_code cpu_nios2_gen_code +#define cpu_signal_handler cpu_nios2_signal_handler + +#define CPU_SAVE_VERSION 1 + +#define TARGET_PAGE_BITS 12 + +/* MMU modes definitions */ +#define MMU_MODE0_SUFFIX _kernel +#define MMU_MODE1_SUFFIX _user +#define MMU_SUPERVISOR_IDX 0 +#define MMU_USER_IDX 1 + +static inline int cpu_mmu_index(CPUNios2State *env, bool ifetch) +{ + return (env->regs[CR_STATUS] & CR_STATUS_U) ? 
MMU_USER_IDX : + MMU_SUPERVISOR_IDX; +} + +int nios2_cpu_handle_mmu_fault(CPUState *env, vaddr address, + int rw, int mmu_idx); + +static inline int cpu_interrupts_enabled(CPUNios2State *env) +{ + return env->regs[CR_STATUS] & CR_STATUS_PIE; +} + +#include "exec/cpu-all.h" +#include "exec/exec-all.h" + +static inline void cpu_get_tb_cpu_state(CPUNios2State *env, target_ulong *pc, + target_ulong *cs_base, uint32_t *flags) +{ + *pc = env->regs[R_PC]; + *cs_base = 0; + *flags = (env->regs[CR_STATUS] & (CR_STATUS_EH | CR_STATUS_U)); +} + +#endif /* CPU_NIOS2_H */ diff --git a/target/nios2/helper.c b/target/nios2/helper.c new file mode 100644 index 0000000000..ef9ee05798 --- /dev/null +++ b/target/nios2/helper.c @@ -0,0 +1,313 @@ +/* + * Altera Nios II helper routines. + * + * Copyright (c) 2012 Chris Wulff <crwulff@gmail.com> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * <http://www.gnu.org/licenses/lgpl-2.1.html> + */ + +#include <stdio.h> +#include <string.h> +#include <assert.h> + +#include "cpu.h" +#include "qemu/osdep.h" +#include "qemu/host-utils.h" +#include "qapi/error.h" +#include "exec/exec-all.h" +#include "exec/log.h" +#include "exec/helper-proto.h" + +#if defined(CONFIG_USER_ONLY) + +void nios2_cpu_do_interrupt(CPUState *cs) +{ + Nios2CPU *cpu = NIOS2_CPU(cs); + CPUNios2State *env = &cpu->env; + cs->exception_index = -1; + env->regs[R_EA] = env->regs[R_PC] + 4; +} + +int nios2_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, int mmu_idx) +{ + cs->exception_index = 0xaa; + /* Page 0x1000 is kuser helper */ + if (address < 0x1000 || address >= 0x2000) { + cpu_dump_state(cs, stderr, fprintf, 0); + } + return 1; +} + +#else /* !CONFIG_USER_ONLY */ + +void nios2_cpu_do_interrupt(CPUState *cs) +{ + Nios2CPU *cpu = NIOS2_CPU(cs); + CPUNios2State *env = &cpu->env; + + switch (cs->exception_index) { + case EXCP_IRQ: + assert(env->regs[CR_STATUS] & CR_STATUS_PIE); + + qemu_log_mask(CPU_LOG_INT, "interrupt at pc=%x\n", env->regs[R_PC]); + + env->regs[CR_ESTATUS] = env->regs[CR_STATUS]; + env->regs[CR_STATUS] |= CR_STATUS_IH; + env->regs[CR_STATUS] &= ~(CR_STATUS_PIE | CR_STATUS_U); + + env->regs[CR_EXCEPTION] &= ~(0x1F << 2); + env->regs[CR_EXCEPTION] |= (cs->exception_index & 0x1F) << 2; + + env->regs[R_EA] = env->regs[R_PC] + 4; + env->regs[R_PC] = cpu->exception_addr; + break; + + case EXCP_TLBD: + if ((env->regs[CR_STATUS] & CR_STATUS_EH) == 0) { + qemu_log_mask(CPU_LOG_INT, "TLB MISS (fast) at pc=%x\n", + env->regs[R_PC]); + + /* Fast TLB miss */ + /* Variation from the spec. Table 3-35 of the cpu reference shows + * estatus not being changed for TLB miss but this appears to + * be incorrect. 
*/ + env->regs[CR_ESTATUS] = env->regs[CR_STATUS]; + env->regs[CR_STATUS] |= CR_STATUS_EH; + env->regs[CR_STATUS] &= ~(CR_STATUS_PIE | CR_STATUS_U); + + env->regs[CR_EXCEPTION] &= ~(0x1F << 2); + env->regs[CR_EXCEPTION] |= (cs->exception_index & 0x1F) << 2; + + env->regs[CR_TLBMISC] &= ~CR_TLBMISC_DBL; + env->regs[CR_TLBMISC] |= CR_TLBMISC_WR; + + env->regs[R_EA] = env->regs[R_PC] + 4; + env->regs[R_PC] = cpu->fast_tlb_miss_addr; + } else { + qemu_log_mask(CPU_LOG_INT, "TLB MISS (double) at pc=%x\n", + env->regs[R_PC]); + + /* Double TLB miss */ + env->regs[CR_STATUS] |= CR_STATUS_EH; + env->regs[CR_STATUS] &= ~(CR_STATUS_PIE | CR_STATUS_U); + + env->regs[CR_EXCEPTION] &= ~(0x1F << 2); + env->regs[CR_EXCEPTION] |= (cs->exception_index & 0x1F) << 2; + + env->regs[CR_TLBMISC] |= CR_TLBMISC_DBL; + + env->regs[R_PC] = cpu->exception_addr; + } + break; + + case EXCP_TLBR: + case EXCP_TLBW: + case EXCP_TLBX: + qemu_log_mask(CPU_LOG_INT, "TLB PERM at pc=%x\n", env->regs[R_PC]); + + env->regs[CR_ESTATUS] = env->regs[CR_STATUS]; + env->regs[CR_STATUS] |= CR_STATUS_EH; + env->regs[CR_STATUS] &= ~(CR_STATUS_PIE | CR_STATUS_U); + + env->regs[CR_EXCEPTION] &= ~(0x1F << 2); + env->regs[CR_EXCEPTION] |= (cs->exception_index & 0x1F) << 2; + + if ((env->regs[CR_STATUS] & CR_STATUS_EH) == 0) { + env->regs[CR_TLBMISC] |= CR_TLBMISC_WR; + } + + env->regs[R_EA] = env->regs[R_PC] + 4; + env->regs[R_PC] = cpu->exception_addr; + break; + + case EXCP_SUPERA: + case EXCP_SUPERI: + case EXCP_SUPERD: + qemu_log_mask(CPU_LOG_INT, "SUPERVISOR exception at pc=%x\n", + env->regs[R_PC]); + + if ((env->regs[CR_STATUS] & CR_STATUS_EH) == 0) { + env->regs[CR_ESTATUS] = env->regs[CR_STATUS]; + env->regs[R_EA] = env->regs[R_PC] + 4; + } + + env->regs[CR_STATUS] |= CR_STATUS_EH; + env->regs[CR_STATUS] &= ~(CR_STATUS_PIE | CR_STATUS_U); + + env->regs[CR_EXCEPTION] &= ~(0x1F << 2); + env->regs[CR_EXCEPTION] |= (cs->exception_index & 0x1F) << 2; + + env->regs[R_PC] = cpu->exception_addr; + break; + + case EXCP_ILLEGAL: + case EXCP_TRAP: + qemu_log_mask(CPU_LOG_INT, "TRAP exception at pc=%x\n", + env->regs[R_PC]); + + if ((env->regs[CR_STATUS] & CR_STATUS_EH) == 0) { + env->regs[CR_ESTATUS] = env->regs[CR_STATUS]; + env->regs[R_EA] = env->regs[R_PC] + 4; + } + + env->regs[CR_STATUS] |= CR_STATUS_EH; + env->regs[CR_STATUS] &= ~(CR_STATUS_PIE | CR_STATUS_U); + + env->regs[CR_EXCEPTION] &= ~(0x1F << 2); + env->regs[CR_EXCEPTION] |= (cs->exception_index & 0x1F) << 2; + + env->regs[R_PC] = cpu->exception_addr; + break; + + case EXCP_BREAK: + if ((env->regs[CR_STATUS] & CR_STATUS_EH) == 0) { + env->regs[CR_BSTATUS] = env->regs[CR_STATUS]; + env->regs[R_BA] = env->regs[R_PC] + 4; + } + + env->regs[CR_STATUS] |= CR_STATUS_EH; + env->regs[CR_STATUS] &= ~(CR_STATUS_PIE | CR_STATUS_U); + + env->regs[CR_EXCEPTION] &= ~(0x1F << 2); + env->regs[CR_EXCEPTION] |= (cs->exception_index & 0x1F) << 2; + + env->regs[R_PC] = cpu->exception_addr; + break; + + default: + cpu_abort(cs, "unhandled exception type=%d\n", + cs->exception_index); + break; + } +} + +static int cpu_nios2_handle_virtual_page( + CPUState *cs, target_ulong address, int rw, int mmu_idx) +{ + Nios2CPU *cpu = NIOS2_CPU(cs); + CPUNios2State *env = &cpu->env; + target_ulong vaddr, paddr; + Nios2MMULookup lu; + unsigned int hit; + hit = mmu_translate(env, &lu, address, rw, mmu_idx); + if (hit) { + vaddr = address & TARGET_PAGE_MASK; + paddr = lu.paddr + vaddr - lu.vaddr; + + if (((rw == 0) && (lu.prot & PAGE_READ)) || + ((rw == 1) && (lu.prot & PAGE_WRITE)) || + ((rw == 2) && (lu.prot & 
PAGE_EXEC))) { + + tlb_set_page(cs, vaddr, paddr, lu.prot, + mmu_idx, TARGET_PAGE_SIZE); + return 0; + } else { + /* Permission violation */ + cs->exception_index = (rw == 0) ? EXCP_TLBR : + ((rw == 1) ? EXCP_TLBW : + EXCP_TLBX); + } + } else { + cs->exception_index = EXCP_TLBD; + } + + if (rw == 2) { + env->regs[CR_TLBMISC] &= ~CR_TLBMISC_D; + } else { + env->regs[CR_TLBMISC] |= CR_TLBMISC_D; + } + env->regs[CR_PTEADDR] &= CR_PTEADDR_PTBASE_MASK; + env->regs[CR_PTEADDR] |= (address >> 10) & CR_PTEADDR_VPN_MASK; + env->mmu.pteaddr_wr = env->regs[CR_PTEADDR]; + env->regs[CR_BADADDR] = address; + return 1; +} + +int nios2_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, int mmu_idx) +{ + Nios2CPU *cpu = NIOS2_CPU(cs); + CPUNios2State *env = &cpu->env; + + if (cpu->mmu_present) { + if (MMU_SUPERVISOR_IDX == mmu_idx) { + if (address >= 0xC0000000) { + /* Kernel physical page - TLB bypassed */ + address &= TARGET_PAGE_MASK; + tlb_set_page(cs, address, address, PAGE_BITS, + mmu_idx, TARGET_PAGE_SIZE); + } else if (address >= 0x80000000) { + /* Kernel virtual page */ + return cpu_nios2_handle_virtual_page(cs, address, rw, mmu_idx); + } else { + /* User virtual page */ + return cpu_nios2_handle_virtual_page(cs, address, rw, mmu_idx); + } + } else { + if (address >= 0x80000000) { + /* Illegal access from user mode */ + cs->exception_index = EXCP_SUPERA; + env->regs[CR_BADADDR] = address; + return 1; + } else { + /* User virtual page */ + return cpu_nios2_handle_virtual_page(cs, address, rw, mmu_idx); + } + } + } else { + /* No MMU */ + address &= TARGET_PAGE_MASK; + tlb_set_page(cs, address, address, PAGE_BITS, + mmu_idx, TARGET_PAGE_SIZE); + } + + return 0; +} + +hwaddr nios2_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) +{ + Nios2CPU *cpu = NIOS2_CPU(cs); + CPUNios2State *env = &cpu->env; + target_ulong vaddr, paddr = 0; + Nios2MMULookup lu; + unsigned int hit; + + if (cpu->mmu_present && (addr < 0xC0000000)) { + hit = mmu_translate(env, &lu, addr, 0, 0); + if (hit) { + vaddr = addr & TARGET_PAGE_MASK; + paddr = lu.paddr + vaddr - lu.vaddr; + } else { + paddr = -1; + qemu_log("cpu_get_phys_page debug MISS: %#" PRIx64 "\n", addr); + } + } else { + paddr = addr & TARGET_PAGE_MASK; + } + + return paddr; +} + +void nios2_cpu_do_unaligned_access(CPUState *cs, vaddr addr, + MMUAccessType access_type, + int mmu_idx, uintptr_t retaddr) +{ + Nios2CPU *cpu = NIOS2_CPU(cs); + CPUNios2State *env = &cpu->env; + + env->regs[CR_BADADDR] = addr; + env->regs[CR_EXCEPTION] = EXCP_UNALIGN << 2; + helper_raise_exception(env, EXCP_UNALIGN); +} +#endif /* !CONFIG_USER_ONLY */ diff --git a/target/nios2/helper.h b/target/nios2/helper.h new file mode 100644 index 0000000000..b0cb9146a5 --- /dev/null +++ b/target/nios2/helper.h @@ -0,0 +1,27 @@ +/* + * Altera Nios II helper routines header. + * + * Copyright (c) 2012 Chris Wulff <crwulff@gmail.com> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
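
Editorial aside summarizing nios2_cpu_handle_mmu_fault() above: with the MMU present, the top of the 32-bit space bypasses the TLB entirely, the kernel and user virtual windows go through mmu_translate(), and user-mode accesses above 0x80000000 fault with EXCP_SUPERA. The standalone classifier below is my own restatement of that logic, not code from the patch.

#include <stdint.h>
#include <stdio.h>

/* Describe how an access at 'addr' is handled, per the fault handler above. */
static const char *nios2_classify(uint32_t addr, int supervisor)
{
    if (!supervisor) {
        return addr >= 0x80000000u ? "supervisor-only -> EXCP_SUPERA"
                                   : "user virtual    -> TLB lookup";
    }
    if (addr >= 0xC0000000u) {
        return "kernel physical -> TLB bypassed, identity mapped";
    }
    return addr >= 0x80000000u ? "kernel virtual  -> TLB lookup"
                               : "user virtual    -> TLB lookup";
}

int main(void)
{
    printf("0xC0001000 S: %s\n", nios2_classify(0xC0001000u, 1));
    printf("0x00400000 U: %s\n", nios2_classify(0x00400000u, 0));
    printf("0x80001000 U: %s\n", nios2_classify(0x80001000u, 0));
    return 0;
}
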
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * <http://www.gnu.org/licenses/lgpl-2.1.html> + */ + +DEF_HELPER_2(raise_exception, void, env, i32) + +#if !defined(CONFIG_USER_ONLY) +DEF_HELPER_2(mmu_read_debug, void, env, i32) +DEF_HELPER_3(mmu_write, void, env, i32, i32) +DEF_HELPER_1(check_interrupts, void, env) +#endif diff --git a/target/nios2/mmu.c b/target/nios2/mmu.c new file mode 100644 index 0000000000..fe9298af50 --- /dev/null +++ b/target/nios2/mmu.c @@ -0,0 +1,296 @@ +/* + * Altera Nios II MMU emulation for qemu. + * + * Copyright (C) 2012 Chris Wulff <crwulff@gmail.com> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * <http://www.gnu.org/licenses/lgpl-2.1.html> + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "cpu.h" +#include "exec/exec-all.h" +#include "mmu.h" + +#if !defined(CONFIG_USER_ONLY) + +/* Define this to enable MMU debug messages */ +/* #define DEBUG_MMU */ + +#ifdef DEBUG_MMU +#define MMU_LOG(x) x +#else +#define MMU_LOG(x) +#endif + +void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type, + int mmu_idx, uintptr_t retaddr) +{ + int ret; + + ret = nios2_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx); + if (unlikely(ret)) { + if (retaddr) { + /* now we have a real cpu fault */ + cpu_restore_state(cs, retaddr); + } + cpu_loop_exit(cs); + } +} + +void mmu_read_debug(CPUNios2State *env, uint32_t rn) +{ + switch (rn) { + case CR_TLBACC: + MMU_LOG(qemu_log("TLBACC READ %08X\n", env->regs[rn])); + break; + + case CR_TLBMISC: + MMU_LOG(qemu_log("TLBMISC READ %08X\n", env->regs[rn])); + break; + + case CR_PTEADDR: + MMU_LOG(qemu_log("PTEADDR READ %08X\n", env->regs[rn])); + break; + + default: + break; + } +} + +/* rw - 0 = read, 1 = write, 2 = fetch. */ +unsigned int mmu_translate(CPUNios2State *env, + Nios2MMULookup *lu, + target_ulong vaddr, int rw, int mmu_idx) +{ + Nios2CPU *cpu = nios2_env_get_cpu(env); + int pid = (env->mmu.tlbmisc_wr & CR_TLBMISC_PID_MASK) >> 4; + int vpn = vaddr >> 12; + + MMU_LOG(qemu_log("mmu_translate vaddr %08X, pid %08X, vpn %08X\n", + vaddr, pid, vpn)); + + int way; + for (way = 0; way < cpu->tlb_num_ways; way++) { + + Nios2TLBEntry *entry = + &env->mmu.tlb[(way * cpu->tlb_num_ways) + + (vpn & env->mmu.tlb_entry_mask)]; + + MMU_LOG(qemu_log("TLB[%d] TAG %08X, VPN %08X\n", + (way * cpu->tlb_num_ways) + + (vpn & env->mmu.tlb_entry_mask), + entry->tag, (entry->tag >> 12))); + + if (((entry->tag >> 12) != vpn) || + (((entry->tag & (1 << 11)) == 0) && + ((entry->tag & ((1 << cpu->pid_num_bits) - 1)) != pid))) { + continue; + } + lu->vaddr = vaddr & TARGET_PAGE_MASK; + lu->paddr = (entry->data & CR_TLBACC_PFN_MASK) << TARGET_PAGE_BITS; + lu->prot = ((entry->data & CR_TLBACC_R) ? PAGE_READ : 0) | + ((entry->data & CR_TLBACC_W) ? PAGE_WRITE : 0) | + ((entry->data & CR_TLBACC_X) ? 
PAGE_EXEC : 0); + + MMU_LOG(qemu_log("HIT TLB[%d] %08X %08X %08X\n", + (way * cpu->tlb_num_ways) + + (vpn & env->mmu.tlb_entry_mask), + lu->vaddr, lu->paddr, lu->prot)); + return 1; + } + return 0; +} + +static void mmu_flush_pid(CPUNios2State *env, uint32_t pid) +{ + CPUState *cs = ENV_GET_CPU(env); + Nios2CPU *cpu = nios2_env_get_cpu(env); + int idx; + MMU_LOG(qemu_log("TLB Flush PID %d\n", pid)); + + for (idx = 0; idx < cpu->tlb_num_entries; idx++) { + Nios2TLBEntry *entry = &env->mmu.tlb[idx]; + + MMU_LOG(qemu_log("TLB[%d] => %08X %08X\n", + idx, entry->tag, entry->data)); + + if ((entry->tag & (1 << 10)) && (!(entry->tag & (1 << 11))) && + ((entry->tag & ((1 << cpu->pid_num_bits) - 1)) == pid)) { + uint32_t vaddr = entry->tag & TARGET_PAGE_MASK; + + MMU_LOG(qemu_log("TLB Flush Page %08X\n", vaddr)); + + tlb_flush_page(cs, vaddr); + } + } +} + +void mmu_write(CPUNios2State *env, uint32_t rn, uint32_t v) +{ + CPUState *cs = ENV_GET_CPU(env); + Nios2CPU *cpu = nios2_env_get_cpu(env); + + MMU_LOG(qemu_log("mmu_write %08X = %08X\n", rn, v)); + + switch (rn) { + case CR_TLBACC: + MMU_LOG(qemu_log("TLBACC: IG %02X, FLAGS %c%c%c%c%c, PFN %05X\n", + v >> CR_TLBACC_IGN_SHIFT, + (v & CR_TLBACC_C) ? 'C' : '.', + (v & CR_TLBACC_R) ? 'R' : '.', + (v & CR_TLBACC_W) ? 'W' : '.', + (v & CR_TLBACC_X) ? 'X' : '.', + (v & CR_TLBACC_G) ? 'G' : '.', + v & CR_TLBACC_PFN_MASK)); + + /* if tlbmisc.WE == 1 then trigger a TLB write on writes to TLBACC */ + if (env->regs[CR_TLBMISC] & CR_TLBMISC_WR) { + int way = (env->regs[CR_TLBMISC] >> CR_TLBMISC_WAY_SHIFT); + int vpn = (env->mmu.pteaddr_wr & CR_PTEADDR_VPN_MASK) >> 2; + int pid = (env->mmu.tlbmisc_wr & CR_TLBMISC_PID_MASK) >> 4; + int g = (v & CR_TLBACC_G) ? 1 : 0; + int valid = ((vpn & CR_TLBACC_PFN_MASK) < 0xC0000) ? 1 : 0; + Nios2TLBEntry *entry = + &env->mmu.tlb[(way * cpu->tlb_num_ways) + + (vpn & env->mmu.tlb_entry_mask)]; + uint32_t newTag = (vpn << 12) | (g << 11) | (valid << 10) | pid; + uint32_t newData = v & (CR_TLBACC_C | CR_TLBACC_R | CR_TLBACC_W | + CR_TLBACC_X | CR_TLBACC_PFN_MASK); + + if ((entry->tag != newTag) || (entry->data != newData)) { + if (entry->tag & (1 << 10)) { + /* Flush existing entry */ + MMU_LOG(qemu_log("TLB Flush Page (OLD) %08X\n", + entry->tag & TARGET_PAGE_MASK)); + tlb_flush_page(cs, entry->tag & TARGET_PAGE_MASK); + } + entry->tag = newTag; + entry->data = newData; + MMU_LOG(qemu_log("TLB[%d] = %08X %08X\n", + (way * cpu->tlb_num_ways) + + (vpn & env->mmu.tlb_entry_mask), + entry->tag, entry->data)); + } + /* Auto-increment tlbmisc.WAY */ + env->regs[CR_TLBMISC] = + (env->regs[CR_TLBMISC] & ~CR_TLBMISC_WAY_MASK) | + (((way + 1) & (cpu->tlb_num_ways - 1)) << + CR_TLBMISC_WAY_SHIFT); + } + + /* Writes to TLBACC don't change the read-back value */ + env->mmu.tlbacc_wr = v; + break; + + case CR_TLBMISC: + MMU_LOG(qemu_log("TLBMISC: WAY %X, FLAGS %c%c%c%c%c%c, PID %04X\n", + v >> CR_TLBMISC_WAY_SHIFT, + (v & CR_TLBMISC_RD) ? 'R' : '.', + (v & CR_TLBMISC_WR) ? 'W' : '.', + (v & CR_TLBMISC_DBL) ? '2' : '.', + (v & CR_TLBMISC_BAD) ? 'B' : '.', + (v & CR_TLBMISC_PERM) ? 'P' : '.', + (v & CR_TLBMISC_D) ? 
'D' : '.', + (v & CR_TLBMISC_PID_MASK) >> 4)); + + if ((v & CR_TLBMISC_PID_MASK) != + (env->mmu.tlbmisc_wr & CR_TLBMISC_PID_MASK)) { + mmu_flush_pid(env, (env->mmu.tlbmisc_wr & CR_TLBMISC_PID_MASK) >> + CR_TLBMISC_PID_SHIFT); + } + /* if tlbmisc.RD == 1 then trigger a TLB read on writes to TLBMISC */ + if (v & CR_TLBMISC_RD) { + int way = (v >> CR_TLBMISC_WAY_SHIFT); + int vpn = (env->mmu.pteaddr_wr & CR_PTEADDR_VPN_MASK) >> 2; + Nios2TLBEntry *entry = + &env->mmu.tlb[(way * cpu->tlb_num_ways) + + (vpn & env->mmu.tlb_entry_mask)]; + + env->regs[CR_TLBACC] &= CR_TLBACC_IGN_MASK; + env->regs[CR_TLBACC] |= entry->data; + env->regs[CR_TLBACC] |= (entry->tag & (1 << 11)) ? CR_TLBACC_G : 0; + env->regs[CR_TLBMISC] = + (v & ~CR_TLBMISC_PID_MASK) | + ((entry->tag & ((1 << cpu->pid_num_bits) - 1)) << + CR_TLBMISC_PID_SHIFT); + env->regs[CR_PTEADDR] &= ~CR_PTEADDR_VPN_MASK; + env->regs[CR_PTEADDR] |= (entry->tag >> 12) << CR_PTEADDR_VPN_SHIFT; + MMU_LOG(qemu_log("TLB READ way %d, vpn %05X, tag %08X, data %08X, " + "tlbacc %08X, tlbmisc %08X, pteaddr %08X\n", + way, vpn, entry->tag, entry->data, + env->regs[CR_TLBACC], env->regs[CR_TLBMISC], + env->regs[CR_PTEADDR])); + } else { + env->regs[CR_TLBMISC] = v; + } + + env->mmu.tlbmisc_wr = v; + break; + + case CR_PTEADDR: + MMU_LOG(qemu_log("PTEADDR: PTBASE %03X, VPN %05X\n", + v >> CR_PTEADDR_PTBASE_SHIFT, + (v & CR_PTEADDR_VPN_MASK) >> CR_PTEADDR_VPN_SHIFT)); + + /* Writes to PTEADDR don't change the read-back VPN value */ + env->regs[CR_PTEADDR] = (v & ~CR_PTEADDR_VPN_MASK) | + (env->regs[CR_PTEADDR] & CR_PTEADDR_VPN_MASK); + env->mmu.pteaddr_wr = v; + break; + + default: + break; + } +} + +void mmu_init(CPUNios2State *env) +{ + Nios2CPU *cpu = nios2_env_get_cpu(env); + Nios2MMU *mmu = &env->mmu; + + MMU_LOG(qemu_log("mmu_init\n")); + + mmu->tlb_entry_mask = (cpu->tlb_num_entries / cpu->tlb_num_ways) - 1; + mmu->tlb = g_new0(Nios2TLBEntry, cpu->tlb_num_entries); +} + +void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUNios2State *env) +{ + Nios2CPU *cpu = nios2_env_get_cpu(env); + int i; + + cpu_fprintf(f, "MMU: ways %d, entries %d, pid bits %d\n", + cpu->tlb_num_ways, cpu->tlb_num_entries, + cpu->pid_num_bits); + + for (i = 0; i < cpu->tlb_num_entries; i++) { + Nios2TLBEntry *entry = &env->mmu.tlb[i]; + cpu_fprintf(f, "TLB[%d] = %08X %08X %c VPN %05X " + "PID %02X %c PFN %05X %c%c%c%c\n", + i, entry->tag, entry->data, + (entry->tag & (1 << 10)) ? 'V' : '-', + entry->tag >> 12, + entry->tag & ((1 << cpu->pid_num_bits) - 1), + (entry->tag & (1 << 11)) ? 'G' : '-', + entry->data & CR_TLBACC_PFN_MASK, + (entry->data & CR_TLBACC_C) ? 'C' : '-', + (entry->data & CR_TLBACC_R) ? 'R' : '-', + (entry->data & CR_TLBACC_W) ? 'W' : '-', + (entry->data & CR_TLBACC_X) ? 'X' : '-'); + } +} + +#endif /* !CONFIG_USER_ONLY */ diff --git a/target/nios2/mmu.h b/target/nios2/mmu.h new file mode 100644 index 0000000000..51d3d1f43a --- /dev/null +++ b/target/nios2/mmu.h @@ -0,0 +1,50 @@ +/* + * Altera Nios II MMU emulation for qemu. + * + * Copyright (C) 2012 Chris Wulff <crwulff@gmail.com> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * <http://www.gnu.org/licenses/lgpl-2.1.html> + */ +#ifndef MMU_NIOS2_H +#define MMU_NIOS2_H + +typedef struct Nios2TLBEntry { + target_ulong tag; + target_ulong data; +} Nios2TLBEntry; + +typedef struct Nios2MMU { + int tlb_entry_mask; + uint32_t pteaddr_wr; + uint32_t tlbacc_wr; + uint32_t tlbmisc_wr; + Nios2TLBEntry *tlb; +} Nios2MMU; + +typedef struct Nios2MMULookup { + target_ulong vaddr; + target_ulong paddr; + int prot; +} Nios2MMULookup; + +void mmu_flip_um(CPUNios2State *env, unsigned int um); +unsigned int mmu_translate(CPUNios2State *env, + Nios2MMULookup *lu, + target_ulong vaddr, int rw, int mmu_idx); +void mmu_read_debug(CPUNios2State *env, uint32_t rn); +void mmu_write(CPUNios2State *env, uint32_t rn, uint32_t v); +void mmu_init(CPUNios2State *env); + +#endif /* MMU_NIOS2_H */ diff --git a/target/nios2/monitor.c b/target/nios2/monitor.c new file mode 100644 index 0000000000..422c81656a --- /dev/null +++ b/target/nios2/monitor.c @@ -0,0 +1,35 @@ +/* + * QEMU monitor + * + * Copyright (c) 2003-2004 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#include "qemu/osdep.h" +#include "cpu.h" +#include "monitor/monitor.h" +#include "monitor/hmp-target.h" +#include "hmp.h" + +void hmp_info_tlb(Monitor *mon, const QDict *qdict) +{ + CPUArchState *env1 = mon_get_cpu_env(); + + dump_mmu((FILE *)mon, (fprintf_function)monitor_printf, env1); +} diff --git a/target/nios2/op_helper.c b/target/nios2/op_helper.c new file mode 100644 index 0000000000..538853cda7 --- /dev/null +++ b/target/nios2/op_helper.c @@ -0,0 +1,47 @@ +/* + * Altera Nios II helper routines. + * + * Copyright (C) 2012 Chris Wulff <crwulff@gmail.com> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
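
Editorial aside on the Nios2TLBEntry just declared: mmu_write() and dump_mmu() above pack the tag as VPN<<12 | G<<11 | V<<10 | PID and keep the C/R/W/X flags plus the 20-bit PFN in data. The standalone decode below restates that layout (8-bit PID, matching the mmu_pid_num_bits property default); the entry values are invented for illustration and are not from the patch.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Made-up entry: VPN 0x12345, global, valid, PID 0x42; R+W, PFN 0x54321 */
    uint32_t tag  = (0x12345u << 12) | (1u << 11) | (1u << 10) | 0x42u;
    uint32_t data = (1u << 23) | (1u << 22) | 0x54321u;

    printf("VPN %05x  G %u  V %u  PID %02x\n",
           tag >> 12, (tag >> 11) & 1, (tag >> 10) & 1, tag & 0xffu);
    printf("PFN %05x  perm %c%c%c\n",
           data & 0xfffffu,
           (data & (1u << 23)) ? 'R' : '-',
           (data & (1u << 22)) ? 'W' : '-',
           (data & (1u << 21)) ? 'X' : '-');
    return 0;
}
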
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * <http://www.gnu.org/licenses/lgpl-2.1.html> + */ + +#include "cpu.h" +#include "exec/helper-proto.h" +#include "exec/cpu_ldst.h" + +#if !defined(CONFIG_USER_ONLY) +void helper_mmu_read_debug(CPUNios2State *env, uint32_t rn) +{ + mmu_read_debug(env, rn); +} + +void helper_mmu_write(CPUNios2State *env, uint32_t rn, uint32_t v) +{ + mmu_write(env, rn, v); +} + +void helper_check_interrupts(CPUNios2State *env) +{ + nios2_check_interrupts(env); +} +#endif /* !CONFIG_USER_ONLY */ + +void helper_raise_exception(CPUNios2State *env, uint32_t index) +{ + CPUState *cs = ENV_GET_CPU(env); + cs->exception_index = index; + cpu_loop_exit(cs); +} diff --git a/target/nios2/translate.c b/target/nios2/translate.c new file mode 100644 index 0000000000..2d738391ad --- /dev/null +++ b/target/nios2/translate.c @@ -0,0 +1,958 @@ +/* + * Altera Nios II emulation for qemu: main translation routines. + * + * Copyright (C) 2016 Marek Vasut <marex@denx.de> + * Copyright (C) 2012 Chris Wulff <crwulff@gmail.com> + * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch> + * (Portions of this file that were originally from nios2sim-ng.) + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
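A side note on how the wrappers above are wired up (inferred, since the header is not in this excerpt): TCG-generated code can only call functions declared through the DEF_HELPER_* macros, so each wrapper in op_helper.c needs a matching line in target/nios2/helper.h elsewhere in this patch; those declarations are what produce the gen_helper_mmu_write()/gen_helper_raise_exception() emitters used by translate.c below. The declarations would look roughly like this (argument counts inferred from the definitions above):

/* sketch of target/nios2/helper.h -- see the real file in this patch */
DEF_HELPER_2(raise_exception, void, env, i32)
#if !defined(CONFIG_USER_ONLY)
DEF_HELPER_2(mmu_read_debug, void, env, i32)
DEF_HELPER_3(mmu_write, void, env, i32, i32)
DEF_HELPER_1(check_interrupts, void, env)
#endif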
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * <http://www.gnu.org/licenses/lgpl-2.1.html> + */ + +#include "cpu.h" +#include "tcg-op.h" +#include "exec/exec-all.h" +#include "disas/disas.h" +#include "exec/helper-proto.h" +#include "exec/helper-gen.h" +#include "exec/log.h" +#include "exec/cpu_ldst.h" + +#define INSTRUCTION_FLG(func, flags) { (func), (flags) } +#define INSTRUCTION(func) \ + INSTRUCTION_FLG(func, 0) +#define INSTRUCTION_NOP() \ + INSTRUCTION_FLG(nop, 0) +#define INSTRUCTION_UNIMPLEMENTED() \ + INSTRUCTION_FLG(gen_excp, EXCP_UNIMPL) +#define INSTRUCTION_ILLEGAL() \ + INSTRUCTION_FLG(gen_excp, EXCP_ILLEGAL) + +/* Special R-Type instruction opcode */ +#define INSN_R_TYPE 0x3A + +/* I-Type instruction parsing */ +#define I_TYPE(instr, code) \ + struct { \ + uint8_t op; \ + union { \ + uint16_t imm16; \ + int16_t imm16s; \ + }; \ + uint8_t b; \ + uint8_t a; \ + } (instr) = { \ + .op = extract32((code), 0, 6), \ + .imm16 = extract32((code), 6, 16), \ + .b = extract32((code), 22, 5), \ + .a = extract32((code), 27, 5), \ + } + +/* R-Type instruction parsing */ +#define R_TYPE(instr, code) \ + struct { \ + uint8_t op; \ + uint8_t imm5; \ + uint8_t opx; \ + uint8_t c; \ + uint8_t b; \ + uint8_t a; \ + } (instr) = { \ + .op = extract32((code), 0, 6), \ + .imm5 = extract32((code), 6, 5), \ + .opx = extract32((code), 11, 6), \ + .c = extract32((code), 17, 5), \ + .b = extract32((code), 22, 5), \ + .a = extract32((code), 27, 5), \ + } + +/* J-Type instruction parsing */ +#define J_TYPE(instr, code) \ + struct { \ + uint8_t op; \ + uint32_t imm26; \ + } (instr) = { \ + .op = extract32((code), 0, 6), \ + .imm26 = extract32((code), 6, 26), \ + } + +typedef struct DisasContext { + TCGv_ptr cpu_env; + TCGv *cpu_R; + TCGv_i32 zero; + int is_jmp; + target_ulong pc; + TranslationBlock *tb; + int mem_idx; + bool singlestep_enabled; +} DisasContext; + +typedef struct Nios2Instruction { + void (*handler)(DisasContext *dc, uint32_t code, uint32_t flags); + uint32_t flags; +} Nios2Instruction; + +static uint8_t get_opcode(uint32_t code) +{ + I_TYPE(instr, code); + return instr.op; +} + +static uint8_t get_opxcode(uint32_t code) +{ + R_TYPE(instr, code); + return instr.opx; +} + +static TCGv load_zero(DisasContext *dc) +{ + if (TCGV_IS_UNUSED_I32(dc->zero)) { + dc->zero = tcg_const_i32(0); + } + return dc->zero; +} + +static TCGv load_gpr(DisasContext *dc, uint8_t reg) +{ + if (likely(reg != R_ZERO)) { + return dc->cpu_R[reg]; + } else { + return load_zero(dc); + } +} + +static void t_gen_helper_raise_exception(DisasContext *dc, + uint32_t index) +{ + TCGv_i32 tmp = tcg_const_i32(index); + + tcg_gen_movi_tl(dc->cpu_R[R_PC], dc->pc); + gen_helper_raise_exception(dc->cpu_env, tmp); + tcg_temp_free_i32(tmp); + dc->is_jmp = DISAS_UPDATE; +} + +static bool use_goto_tb(DisasContext *dc, uint32_t dest) +{ + if (unlikely(dc->singlestep_enabled)) { + return false; + } + +#ifndef CONFIG_USER_ONLY + return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK); +#else + return true; +#endif +} + +static void gen_goto_tb(DisasContext *dc, int n, uint32_t dest) +{ + TranslationBlock *tb = dc->tb; + + if (use_goto_tb(dc, dest)) { + tcg_gen_goto_tb(n); + tcg_gen_movi_tl(dc->cpu_R[R_PC], dest); + tcg_gen_exit_tb((tcg_target_long)tb + n); + } else { + tcg_gen_movi_tl(dc->cpu_R[R_PC], dest); + tcg_gen_exit_tb(0); + } +} + +static void gen_excp(DisasContext *dc, uint32_t code, uint32_t flags) +{ + t_gen_helper_raise_exception(dc, 
flags); +} + +static void gen_check_supervisor(DisasContext *dc) +{ + if (dc->tb->flags & CR_STATUS_U) { + /* CPU in user mode, privileged instruction called, stop. */ + t_gen_helper_raise_exception(dc, EXCP_SUPERI); + } +} + +/* + * Used as a placeholder for all instructions which do not have + * an effect on the simulator (e.g. flush, sync) + */ +static void nop(DisasContext *dc, uint32_t code, uint32_t flags) +{ + /* Nothing to do here */ +} + +/* + * J-Type instructions + */ +static void jmpi(DisasContext *dc, uint32_t code, uint32_t flags) +{ + J_TYPE(instr, code); + gen_goto_tb(dc, 0, (dc->pc & 0xF0000000) | (instr.imm26 << 2)); + dc->is_jmp = DISAS_TB_JUMP; +} + +static void call(DisasContext *dc, uint32_t code, uint32_t flags) +{ + tcg_gen_movi_tl(dc->cpu_R[R_RA], dc->pc + 4); + jmpi(dc, code, flags); +} + +/* + * I-Type instructions + */ +/* Load instructions */ +static void gen_ldx(DisasContext *dc, uint32_t code, uint32_t flags) +{ + I_TYPE(instr, code); + + TCGv addr = tcg_temp_new(); + TCGv data; + + /* + * WARNING: Loads into R_ZERO are ignored, but we must generate the + * memory access itself to emulate the CPU precisely. Load + * from a protected page to R_ZERO will cause SIGSEGV on + * the Nios2 CPU. + */ + if (likely(instr.b != R_ZERO)) { + data = dc->cpu_R[instr.b]; + } else { + data = tcg_temp_new(); + } + + tcg_gen_addi_tl(addr, load_gpr(dc, instr.a), instr.imm16s); + tcg_gen_qemu_ld_tl(data, addr, dc->mem_idx, flags); + + if (unlikely(instr.b == R_ZERO)) { + tcg_temp_free(data); + } + + tcg_temp_free(addr); +} + +/* Store instructions */ +static void gen_stx(DisasContext *dc, uint32_t code, uint32_t flags) +{ + I_TYPE(instr, code); + TCGv val = load_gpr(dc, instr.b); + + TCGv addr = tcg_temp_new(); + tcg_gen_addi_tl(addr, load_gpr(dc, instr.a), instr.imm16s); + tcg_gen_qemu_st_tl(val, addr, dc->mem_idx, flags); + tcg_temp_free(addr); +} + +/* Branch instructions */ +static void br(DisasContext *dc, uint32_t code, uint32_t flags) +{ + I_TYPE(instr, code); + + gen_goto_tb(dc, 0, dc->pc + 4 + (instr.imm16s & -4)); + dc->is_jmp = DISAS_TB_JUMP; +} + +static void gen_bxx(DisasContext *dc, uint32_t code, uint32_t flags) +{ + I_TYPE(instr, code); + + TCGLabel *l1 = gen_new_label(); + tcg_gen_brcond_tl(flags, dc->cpu_R[instr.a], dc->cpu_R[instr.b], l1); + gen_goto_tb(dc, 0, dc->pc + 4); + gen_set_label(l1); + gen_goto_tb(dc, 1, dc->pc + 4 + (instr.imm16s & -4)); + dc->is_jmp = DISAS_TB_JUMP; +} + +/* Comparison instructions */ +#define gen_i_cmpxx(fname, op3) \ +static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \ +{ \ + I_TYPE(instr, (code)); \ + tcg_gen_setcondi_tl(flags, (dc)->cpu_R[instr.b], (dc)->cpu_R[instr.a], \ + (op3)); \ +} + +gen_i_cmpxx(gen_cmpxxsi, instr.imm16s) +gen_i_cmpxx(gen_cmpxxui, instr.imm16) + +/* Math/logic instructions */ +#define gen_i_math_logic(fname, insn, resimm, op3) \ +static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \ +{ \ + I_TYPE(instr, (code)); \ + if (unlikely(instr.b == R_ZERO)) { /* Store to R_ZERO is ignored */ \ + return; \ + } else if (instr.a == R_ZERO) { /* MOVxI optimizations */ \ + tcg_gen_movi_tl(dc->cpu_R[instr.b], (resimm) ? 
(op3) : 0); \ + } else { \ + tcg_gen_##insn##_tl((dc)->cpu_R[instr.b], (dc)->cpu_R[instr.a], \ + (op3)); \ + } \ +} + +gen_i_math_logic(addi, addi, 1, instr.imm16s) +gen_i_math_logic(muli, muli, 0, instr.imm16s) + +gen_i_math_logic(andi, andi, 0, instr.imm16) +gen_i_math_logic(ori, ori, 1, instr.imm16) +gen_i_math_logic(xori, xori, 1, instr.imm16) + +gen_i_math_logic(andhi, andi, 0, instr.imm16 << 16) +gen_i_math_logic(orhi , ori, 1, instr.imm16 << 16) +gen_i_math_logic(xorhi, xori, 1, instr.imm16 << 16) + +/* Prototype only, defined below */ +static void handle_r_type_instr(DisasContext *dc, uint32_t code, + uint32_t flags); + +static const Nios2Instruction i_type_instructions[] = { + INSTRUCTION(call), /* call */ + INSTRUCTION(jmpi), /* jmpi */ + INSTRUCTION_ILLEGAL(), + INSTRUCTION_FLG(gen_ldx, MO_UB), /* ldbu */ + INSTRUCTION(addi), /* addi */ + INSTRUCTION_FLG(gen_stx, MO_UB), /* stb */ + INSTRUCTION(br), /* br */ + INSTRUCTION_FLG(gen_ldx, MO_SB), /* ldb */ + INSTRUCTION_FLG(gen_cmpxxsi, TCG_COND_GE), /* cmpgei */ + INSTRUCTION_ILLEGAL(), + INSTRUCTION_ILLEGAL(), + INSTRUCTION_FLG(gen_ldx, MO_UW), /* ldhu */ + INSTRUCTION(andi), /* andi */ + INSTRUCTION_FLG(gen_stx, MO_UW), /* sth */ + INSTRUCTION_FLG(gen_bxx, TCG_COND_GE), /* bge */ + INSTRUCTION_FLG(gen_ldx, MO_SW), /* ldh */ + INSTRUCTION_FLG(gen_cmpxxsi, TCG_COND_LT), /* cmplti */ + INSTRUCTION_ILLEGAL(), + INSTRUCTION_ILLEGAL(), + INSTRUCTION_NOP(), /* initda */ + INSTRUCTION(ori), /* ori */ + INSTRUCTION_FLG(gen_stx, MO_UL), /* stw */ + INSTRUCTION_FLG(gen_bxx, TCG_COND_LT), /* blt */ + INSTRUCTION_FLG(gen_ldx, MO_UL), /* ldw */ + INSTRUCTION_FLG(gen_cmpxxsi, TCG_COND_NE), /* cmpnei */ + INSTRUCTION_ILLEGAL(), + INSTRUCTION_ILLEGAL(), + INSTRUCTION_NOP(), /* flushda */ + INSTRUCTION(xori), /* xori */ + INSTRUCTION_ILLEGAL(), + INSTRUCTION_FLG(gen_bxx, TCG_COND_NE), /* bne */ + INSTRUCTION_ILLEGAL(), + INSTRUCTION_FLG(gen_cmpxxsi, TCG_COND_EQ), /* cmpeqi */ + INSTRUCTION_ILLEGAL(), + INSTRUCTION_ILLEGAL(), + INSTRUCTION_FLG(gen_ldx, MO_UB), /* ldbuio */ + INSTRUCTION(muli), /* muli */ + INSTRUCTION_FLG(gen_stx, MO_UB), /* stbio */ + INSTRUCTION_FLG(gen_bxx, TCG_COND_EQ), /* beq */ + INSTRUCTION_FLG(gen_ldx, MO_SB), /* ldbio */ + INSTRUCTION_FLG(gen_cmpxxui, TCG_COND_GEU), /* cmpgeui */ + INSTRUCTION_ILLEGAL(), + INSTRUCTION_ILLEGAL(), + INSTRUCTION_FLG(gen_ldx, MO_UW), /* ldhuio */ + INSTRUCTION(andhi), /* andhi */ + INSTRUCTION_FLG(gen_stx, MO_UW), /* sthio */ + INSTRUCTION_FLG(gen_bxx, TCG_COND_GEU), /* bgeu */ + INSTRUCTION_FLG(gen_ldx, MO_SW), /* ldhio */ + INSTRUCTION_FLG(gen_cmpxxui, TCG_COND_LTU), /* cmpltui */ + INSTRUCTION_ILLEGAL(), + INSTRUCTION_UNIMPLEMENTED(), /* custom */ + INSTRUCTION_NOP(), /* initd */ + INSTRUCTION(orhi), /* orhi */ + INSTRUCTION_FLG(gen_stx, MO_SL), /* stwio */ + INSTRUCTION_FLG(gen_bxx, TCG_COND_LTU), /* bltu */ + INSTRUCTION_FLG(gen_ldx, MO_UL), /* ldwio */ + INSTRUCTION_UNIMPLEMENTED(), /* rdprs */ + INSTRUCTION_ILLEGAL(), + INSTRUCTION_FLG(handle_r_type_instr, 0), /* R-Type */ + INSTRUCTION_NOP(), /* flushd */ + INSTRUCTION(xorhi), /* xorhi */ + INSTRUCTION_ILLEGAL(), + INSTRUCTION_ILLEGAL(), + INSTRUCTION_ILLEGAL(), +}; + +/* + * R-Type instructions + */ +/* + * status <- estatus + * PC <- ea + */ +static void eret(DisasContext *dc, uint32_t code, uint32_t flags) +{ + tcg_gen_mov_tl(dc->cpu_R[CR_STATUS], dc->cpu_R[CR_ESTATUS]); + tcg_gen_mov_tl(dc->cpu_R[R_PC], dc->cpu_R[R_EA]); + + dc->is_jmp = DISAS_JUMP; +} + +/* PC <- ra */ +static void ret(DisasContext *dc, uint32_t code, uint32_t 
flags) +{ + tcg_gen_mov_tl(dc->cpu_R[R_PC], dc->cpu_R[R_RA]); + + dc->is_jmp = DISAS_JUMP; +} + +/* PC <- ba */ +static void bret(DisasContext *dc, uint32_t code, uint32_t flags) +{ + tcg_gen_mov_tl(dc->cpu_R[R_PC], dc->cpu_R[R_BA]); + + dc->is_jmp = DISAS_JUMP; +} + +/* PC <- rA */ +static void jmp(DisasContext *dc, uint32_t code, uint32_t flags) +{ + R_TYPE(instr, code); + + tcg_gen_mov_tl(dc->cpu_R[R_PC], load_gpr(dc, instr.a)); + + dc->is_jmp = DISAS_JUMP; +} + +/* rC <- PC + 4 */ +static void nextpc(DisasContext *dc, uint32_t code, uint32_t flags) +{ + R_TYPE(instr, code); + + if (likely(instr.c != R_ZERO)) { + tcg_gen_movi_tl(dc->cpu_R[instr.c], dc->pc + 4); + } +} + +/* + * ra <- PC + 4 + * PC <- rA + */ +static void callr(DisasContext *dc, uint32_t code, uint32_t flags) +{ + R_TYPE(instr, code); + + tcg_gen_mov_tl(dc->cpu_R[R_PC], load_gpr(dc, instr.a)); + tcg_gen_movi_tl(dc->cpu_R[R_RA], dc->pc + 4); + + dc->is_jmp = DISAS_JUMP; +} + +/* rC <- ctlN */ +static void rdctl(DisasContext *dc, uint32_t code, uint32_t flags) +{ + R_TYPE(instr, code); + + gen_check_supervisor(dc); + + switch (instr.imm5 + CR_BASE) { + case CR_PTEADDR: + case CR_TLBACC: + case CR_TLBMISC: + { +#if !defined(CONFIG_USER_ONLY) + if (likely(instr.c != R_ZERO)) { + tcg_gen_mov_tl(dc->cpu_R[instr.c], dc->cpu_R[instr.imm5 + CR_BASE]); +#ifdef DEBUG_MMU + TCGv_i32 tmp = tcg_const_i32(instr.imm5 + CR_BASE); + gen_helper_mmu_read_debug(dc->cpu_R[instr.c], dc->cpu_env, tmp); + tcg_temp_free_i32(tmp); +#endif + } +#endif + break; + } + + default: + if (likely(instr.c != R_ZERO)) { + tcg_gen_mov_tl(dc->cpu_R[instr.c], dc->cpu_R[instr.imm5 + CR_BASE]); + } + break; + } +} + +/* ctlN <- rA */ +static void wrctl(DisasContext *dc, uint32_t code, uint32_t flags) +{ + R_TYPE(instr, code); + + gen_check_supervisor(dc); + + switch (instr.imm5 + CR_BASE) { + case CR_PTEADDR: + case CR_TLBACC: + case CR_TLBMISC: + { +#if !defined(CONFIG_USER_ONLY) + TCGv_i32 tmp = tcg_const_i32(instr.imm5 + CR_BASE); + gen_helper_mmu_write(dc->cpu_env, tmp, load_gpr(dc, instr.a)); + tcg_temp_free_i32(tmp); +#endif + break; + } + + default: + tcg_gen_mov_tl(dc->cpu_R[instr.imm5 + CR_BASE], load_gpr(dc, instr.a)); + break; + } + + /* If interrupts were enabled using WRCTL, trigger them. 
*/ +#if !defined(CONFIG_USER_ONLY) + if ((instr.imm5 + CR_BASE) == CR_STATUS) { + gen_helper_check_interrupts(dc->cpu_env); + } +#endif +} + +/* Comparison instructions */ +static void gen_cmpxx(DisasContext *dc, uint32_t code, uint32_t flags) +{ + R_TYPE(instr, code); + if (likely(instr.c != R_ZERO)) { + tcg_gen_setcond_tl(flags, dc->cpu_R[instr.c], dc->cpu_R[instr.a], + dc->cpu_R[instr.b]); + } +} + +/* Math/logic instructions */ +#define gen_r_math_logic(fname, insn, op3) \ +static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \ +{ \ + R_TYPE(instr, (code)); \ + if (likely(instr.c != R_ZERO)) { \ + tcg_gen_##insn((dc)->cpu_R[instr.c], load_gpr((dc), instr.a), \ + (op3)); \ + } \ +} + +gen_r_math_logic(add, add_tl, load_gpr(dc, instr.b)) +gen_r_math_logic(sub, sub_tl, load_gpr(dc, instr.b)) +gen_r_math_logic(mul, mul_tl, load_gpr(dc, instr.b)) + +gen_r_math_logic(and, and_tl, load_gpr(dc, instr.b)) +gen_r_math_logic(or, or_tl, load_gpr(dc, instr.b)) +gen_r_math_logic(xor, xor_tl, load_gpr(dc, instr.b)) +gen_r_math_logic(nor, nor_tl, load_gpr(dc, instr.b)) + +gen_r_math_logic(srai, sari_tl, instr.imm5) +gen_r_math_logic(srli, shri_tl, instr.imm5) +gen_r_math_logic(slli, shli_tl, instr.imm5) +gen_r_math_logic(roli, rotli_tl, instr.imm5) + +#define gen_r_mul(fname, insn) \ +static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \ +{ \ + R_TYPE(instr, (code)); \ + if (likely(instr.c != R_ZERO)) { \ + TCGv t0 = tcg_temp_new(); \ + tcg_gen_##insn(t0, dc->cpu_R[instr.c], \ + load_gpr(dc, instr.a), load_gpr(dc, instr.b)); \ + tcg_temp_free(t0); \ + } \ +} + +gen_r_mul(mulxss, muls2_tl) +gen_r_mul(mulxuu, mulu2_tl) +gen_r_mul(mulxsu, mulsu2_tl) + +#define gen_r_shift_s(fname, insn) \ +static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \ +{ \ + R_TYPE(instr, (code)); \ + if (likely(instr.c != R_ZERO)) { \ + TCGv t0 = tcg_temp_new(); \ + tcg_gen_andi_tl(t0, load_gpr((dc), instr.b), 31); \ + tcg_gen_##insn((dc)->cpu_R[instr.c], load_gpr((dc), instr.a), t0); \ + tcg_temp_free(t0); \ + } \ +} + +gen_r_shift_s(sra, sar_tl) +gen_r_shift_s(srl, shr_tl) +gen_r_shift_s(sll, shl_tl) +gen_r_shift_s(rol, rotl_tl) +gen_r_shift_s(ror, rotr_tl) + +static void divs(DisasContext *dc, uint32_t code, uint32_t flags) +{ + R_TYPE(instr, (code)); + + /* Stores into R_ZERO are ignored */ + if (unlikely(instr.c == R_ZERO)) { + return; + } + + TCGv t0 = tcg_temp_new(); + TCGv t1 = tcg_temp_new(); + TCGv t2 = tcg_temp_new(); + TCGv t3 = tcg_temp_new(); + + tcg_gen_ext32s_tl(t0, load_gpr(dc, instr.a)); + tcg_gen_ext32s_tl(t1, load_gpr(dc, instr.b)); + tcg_gen_setcondi_tl(TCG_COND_EQ, t2, t0, INT_MIN); + tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, -1); + tcg_gen_and_tl(t2, t2, t3); + tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0); + tcg_gen_or_tl(t2, t2, t3); + tcg_gen_movi_tl(t3, 0); + tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_div_tl(dc->cpu_R[instr.c], t0, t1); + tcg_gen_ext32s_tl(dc->cpu_R[instr.c], dc->cpu_R[instr.c]); + + tcg_temp_free(t3); + tcg_temp_free(t2); + tcg_temp_free(t1); + tcg_temp_free(t0); +} + +static void divu(DisasContext *dc, uint32_t code, uint32_t flags) +{ + R_TYPE(instr, (code)); + + /* Stores into R_ZERO are ignored */ + if (unlikely(instr.c == R_ZERO)) { + return; + } + + TCGv t0 = tcg_temp_new(); + TCGv t1 = tcg_temp_new(); + TCGv t2 = tcg_const_tl(0); + TCGv t3 = tcg_const_tl(1); + + tcg_gen_ext32u_tl(t0, load_gpr(dc, instr.a)); + tcg_gen_ext32u_tl(t1, load_gpr(dc, instr.b)); + tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1); + 
tcg_gen_divu_tl(dc->cpu_R[instr.c], t0, t1); + tcg_gen_ext32s_tl(dc->cpu_R[instr.c], dc->cpu_R[instr.c]); + + tcg_temp_free(t3); + tcg_temp_free(t2); + tcg_temp_free(t1); + tcg_temp_free(t0); +} + +static const Nios2Instruction r_type_instructions[] = { + INSTRUCTION_ILLEGAL(), + INSTRUCTION(eret), /* eret */ + INSTRUCTION(roli), /* roli */ + INSTRUCTION(rol), /* rol */ + INSTRUCTION_NOP(), /* flushp */ + INSTRUCTION(ret), /* ret */ + INSTRUCTION(nor), /* nor */ + INSTRUCTION(mulxuu), /* mulxuu */ + INSTRUCTION_FLG(gen_cmpxx, TCG_COND_GE), /* cmpge */ + INSTRUCTION(bret), /* bret */ + INSTRUCTION_ILLEGAL(), + INSTRUCTION(ror), /* ror */ + INSTRUCTION_NOP(), /* flushi */ + INSTRUCTION(jmp), /* jmp */ + INSTRUCTION(and), /* and */ + INSTRUCTION_ILLEGAL(), + INSTRUCTION_FLG(gen_cmpxx, TCG_COND_LT), /* cmplt */ + INSTRUCTION_ILLEGAL(), + INSTRUCTION(slli), /* slli */ + INSTRUCTION(sll), /* sll */ + INSTRUCTION_UNIMPLEMENTED(), /* wrprs */ + INSTRUCTION_ILLEGAL(), + INSTRUCTION(or), /* or */ + INSTRUCTION(mulxsu), /* mulxsu */ + INSTRUCTION_FLG(gen_cmpxx, TCG_COND_NE), /* cmpne */ + INSTRUCTION_ILLEGAL(), + INSTRUCTION(srli), /* srli */ + INSTRUCTION(srl), /* srl */ + INSTRUCTION(nextpc), /* nextpc */ + INSTRUCTION(callr), /* callr */ + INSTRUCTION(xor), /* xor */ + INSTRUCTION(mulxss), /* mulxss */ + INSTRUCTION_FLG(gen_cmpxx, TCG_COND_EQ), /* cmpeq */ + INSTRUCTION_ILLEGAL(), + INSTRUCTION_ILLEGAL(), + INSTRUCTION_ILLEGAL(), + INSTRUCTION(divu), /* divu */ + INSTRUCTION(divs), /* div */ + INSTRUCTION(rdctl), /* rdctl */ + INSTRUCTION(mul), /* mul */ + INSTRUCTION_FLG(gen_cmpxx, TCG_COND_GEU), /* cmpgeu */ + INSTRUCTION_NOP(), /* initi */ + INSTRUCTION_ILLEGAL(), + INSTRUCTION_ILLEGAL(), + INSTRUCTION_ILLEGAL(), + INSTRUCTION_FLG(gen_excp, EXCP_TRAP), /* trap */ + INSTRUCTION(wrctl), /* wrctl */ + INSTRUCTION_ILLEGAL(), + INSTRUCTION_FLG(gen_cmpxx, TCG_COND_LTU), /* cmpltu */ + INSTRUCTION(add), /* add */ + INSTRUCTION_ILLEGAL(), + INSTRUCTION_ILLEGAL(), + INSTRUCTION_FLG(gen_excp, EXCP_BREAK), /* break */ + INSTRUCTION_ILLEGAL(), + INSTRUCTION(nop), /* nop */ + INSTRUCTION_ILLEGAL(), + INSTRUCTION_ILLEGAL(), + INSTRUCTION(sub), /* sub */ + INSTRUCTION(srai), /* srai */ + INSTRUCTION(sra), /* sra */ + INSTRUCTION_ILLEGAL(), + INSTRUCTION_ILLEGAL(), + INSTRUCTION_ILLEGAL(), + INSTRUCTION_ILLEGAL(), +}; + +static void handle_r_type_instr(DisasContext *dc, uint32_t code, uint32_t flags) +{ + uint8_t opx; + const Nios2Instruction *instr; + + opx = get_opxcode(code); + if (unlikely(opx >= ARRAY_SIZE(r_type_instructions))) { + goto illegal_op; + } + + instr = &r_type_instructions[opx]; + instr->handler(dc, code, instr->flags); + + return; + +illegal_op: + t_gen_helper_raise_exception(dc, EXCP_ILLEGAL); +} + +static void handle_instruction(DisasContext *dc, CPUNios2State *env) +{ + uint32_t code; + uint8_t op; + const Nios2Instruction *instr; +#if defined(CONFIG_USER_ONLY) + /* FIXME: Is this needed ? 
*/ + if (dc->pc >= 0x1000 && dc->pc < 0x2000) { + env->regs[R_PC] = dc->pc; + t_gen_helper_raise_exception(dc, 0xaa); + return; + } +#endif + code = cpu_ldl_code(env, dc->pc); + op = get_opcode(code); + + if (unlikely(op >= ARRAY_SIZE(i_type_instructions))) { + goto illegal_op; + } + + TCGV_UNUSED_I32(dc->zero); + + instr = &i_type_instructions[op]; + instr->handler(dc, code, instr->flags); + + if (!TCGV_IS_UNUSED_I32(dc->zero)) { + tcg_temp_free(dc->zero); + } + + return; + +illegal_op: + t_gen_helper_raise_exception(dc, EXCP_ILLEGAL); +} + +static const char * const regnames[] = { + "zero", "at", "r2", "r3", + "r4", "r5", "r6", "r7", + "r8", "r9", "r10", "r11", + "r12", "r13", "r14", "r15", + "r16", "r17", "r18", "r19", + "r20", "r21", "r22", "r23", + "et", "bt", "gp", "sp", + "fp", "ea", "ba", "ra", + "status", "estatus", "bstatus", "ienable", + "ipending", "cpuid", "reserved0", "exception", + "pteaddr", "tlbacc", "tlbmisc", "reserved1", + "badaddr", "config", "mpubase", "mpuacc", + "reserved2", "reserved3", "reserved4", "reserved5", + "reserved6", "reserved7", "reserved8", "reserved9", + "reserved10", "reserved11", "reserved12", "reserved13", + "reserved14", "reserved15", "reserved16", "reserved17", + "rpc" +}; + +static TCGv_ptr cpu_env; +static TCGv cpu_R[NUM_CORE_REGS]; + +#include "exec/gen-icount.h" + +static void gen_exception(DisasContext *dc, uint32_t excp) +{ + TCGv_i32 tmp = tcg_const_i32(excp); + + tcg_gen_movi_tl(cpu_R[R_PC], dc->pc); + gen_helper_raise_exception(cpu_env, tmp); + tcg_temp_free_i32(tmp); + dc->is_jmp = DISAS_UPDATE; +} + +/* generate intermediate code for basic block 'tb'. */ +void gen_intermediate_code(CPUNios2State *env, TranslationBlock *tb) +{ + Nios2CPU *cpu = nios2_env_get_cpu(env); + CPUState *cs = CPU(cpu); + DisasContext dc1, *dc = &dc1; + int num_insns; + int max_insns; + + /* Initialize DC */ + dc->cpu_env = cpu_env; + dc->cpu_R = cpu_R; + dc->is_jmp = DISAS_NEXT; + dc->pc = tb->pc; + dc->tb = tb; + dc->mem_idx = cpu_mmu_index(env, false); + dc->singlestep_enabled = cs->singlestep_enabled; + + /* Set up instruction counts */ + num_insns = 0; + if (cs->singlestep_enabled || singlestep) { + max_insns = 1; + } else { + int page_insns = (TARGET_PAGE_SIZE - (tb->pc & TARGET_PAGE_MASK)) / 4; + max_insns = tb->cflags & CF_COUNT_MASK; + if (max_insns == 0) { + max_insns = CF_COUNT_MASK; + } + if (max_insns > page_insns) { + max_insns = page_insns; + } + if (max_insns > TCG_MAX_INSNS) { + max_insns = TCG_MAX_INSNS; + } + } + + gen_tb_start(tb); + do { + tcg_gen_insn_start(dc->pc); + num_insns++; + + if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) { + gen_exception(dc, EXCP_DEBUG); + /* The address covered by the breakpoint must be included in + [tb->pc, tb->pc + tb->size) in order to for it to be + properly cleared -- thus we increment the PC here so that + the logic setting tb->size below does the right thing. */ + dc->pc += 4; + break; + } + + if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) { + gen_io_start(); + } + + /* Decode an instruction */ + handle_instruction(dc, env); + + dc->pc += 4; + + /* Translation stops when a conditional branch is encountered. + * Otherwise the subsequent code could get translated several times. + * Also stop translation when a page boundary is reached. This + * ensures prefetch aborts occur at the right place. 
*/ + } while (!dc->is_jmp && + !tcg_op_buf_full() && + num_insns < max_insns); + + if (tb->cflags & CF_LAST_IO) { + gen_io_end(); + } + + /* Indicate where the next block should start */ + switch (dc->is_jmp) { + case DISAS_NEXT: + /* Save the current PC back into the CPU register */ + tcg_gen_movi_tl(cpu_R[R_PC], dc->pc); + tcg_gen_exit_tb(0); + break; + + default: + case DISAS_JUMP: + case DISAS_UPDATE: + /* The jump will already have updated the PC register */ + tcg_gen_exit_tb(0); + break; + + case DISAS_TB_JUMP: + /* nothing more to generate */ + break; + } + + /* End off the block */ + gen_tb_end(tb, num_insns); + + /* Mark instruction starts for the final generated instruction */ + tb->size = dc->pc - tb->pc; + tb->icount = num_insns; + +#ifdef DEBUG_DISAS + if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) + && qemu_log_in_addr_range(tb->pc)) { + qemu_log_lock(); + qemu_log("IN: %s\n", lookup_symbol(tb->pc)); + log_target_disas(cs, tb->pc, dc->pc - tb->pc, 0); + qemu_log("\n"); + qemu_log_unlock(); + } +#endif +} + +void nios2_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf, + int flags) +{ + Nios2CPU *cpu = NIOS2_CPU(cs); + CPUNios2State *env = &cpu->env; + int i; + + if (!env || !f) { + return; + } + + cpu_fprintf(f, "IN: PC=%x %s\n", + env->regs[R_PC], lookup_symbol(env->regs[R_PC])); + + for (i = 0; i < NUM_CORE_REGS; i++) { + cpu_fprintf(f, "%9s=%8.8x ", regnames[i], env->regs[i]); + if ((i + 1) % 4 == 0) { + cpu_fprintf(f, "\n"); + } + } +#if !defined(CONFIG_USER_ONLY) + cpu_fprintf(f, " mmu write: VPN=%05X PID %02X TLBACC %08X\n", + env->mmu.pteaddr_wr & CR_PTEADDR_VPN_MASK, + (env->mmu.tlbmisc_wr & CR_TLBMISC_PID_MASK) >> 4, + env->mmu.tlbacc_wr); +#endif + cpu_fprintf(f, "\n\n"); +} + +void nios2_tcg_init(void) +{ + int i; + + cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env"); + + for (i = 0; i < NUM_CORE_REGS; i++) { + cpu_R[i] = tcg_global_mem_new(cpu_env, + offsetof(CPUNios2State, regs[i]), + regnames[i]); + } +} + +void restore_state_to_opc(CPUNios2State *env, TranslationBlock *tb, + target_ulong *data) +{ + env->regs[R_PC] = data[0]; +} diff --git a/target/ppc/machine.c b/target/ppc/machine.c index 18c16d2512..df9f7a4e05 100644 --- a/target/ppc/machine.c +++ b/target/ppc/machine.c @@ -105,7 +105,7 @@ static int cpu_load_old(QEMUFile *f, void *opaque, int version_id) return 0; } -static int get_avr(QEMUFile *f, void *pv, size_t size) +static int get_avr(QEMUFile *f, void *pv, size_t size, VMStateField *field) { ppc_avr_t *v = pv; @@ -115,12 +115,14 @@ static int get_avr(QEMUFile *f, void *pv, size_t size) return 0; } -static void put_avr(QEMUFile *f, void *pv, size_t size) +static int put_avr(QEMUFile *f, void *pv, size_t size, VMStateField *field, + QJSON *vmdesc) { ppc_avr_t *v = pv; qemu_put_be64(f, v->u64[0]); qemu_put_be64(f, v->u64[1]); + return 0; } static const VMStateInfo vmstate_info_avr = { @@ -353,7 +355,7 @@ static const VMStateDescription vmstate_sr = { }; #ifdef TARGET_PPC64 -static int get_slbe(QEMUFile *f, void *pv, size_t size) +static int get_slbe(QEMUFile *f, void *pv, size_t size, VMStateField *field) { ppc_slb_t *v = pv; @@ -363,12 +365,14 @@ static int get_slbe(QEMUFile *f, void *pv, size_t size) return 0; } -static void put_slbe(QEMUFile *f, void *pv, size_t size) +static int put_slbe(QEMUFile *f, void *pv, size_t size, VMStateField *field, + QJSON *vmdesc) { ppc_slb_t *v = pv; qemu_put_be64(f, v->esid); qemu_put_be64(f, v->vsid); + return 0; } static const VMStateInfo vmstate_info_slbe = { diff --git a/target/sparc/machine.c 
b/target/sparc/machine.c index 39e262ccd1..6bd6b8ee3e 100644 --- a/target/sparc/machine.c +++ b/target/sparc/machine.c @@ -56,7 +56,7 @@ static const VMStateDescription vmstate_tlb_entry = { }; #endif -static int get_psr(QEMUFile *f, void *opaque, size_t size) +static int get_psr(QEMUFile *f, void *opaque, size_t size, VMStateField *field) { SPARCCPU *cpu = opaque; CPUSPARCState *env = &cpu->env; @@ -69,7 +69,8 @@ static int get_psr(QEMUFile *f, void *opaque, size_t size) return 0; } -static void put_psr(QEMUFile *f, void *opaque, size_t size) +static int put_psr(QEMUFile *f, void *opaque, size_t size, VMStateField *field, + QJSON *vmdesc) { SPARCCPU *cpu = opaque; CPUSPARCState *env = &cpu->env; @@ -78,6 +79,7 @@ static void put_psr(QEMUFile *f, void *opaque, size_t size) val = cpu_get_psr(env); qemu_put_be32(f, val); + return 0; } static const VMStateInfo vmstate_psr = { diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c index e8e9f9175b..cd7f95823f 100644 --- a/target/xtensa/cpu.c +++ b/target/xtensa/cpu.c @@ -47,7 +47,7 @@ static bool xtensa_cpu_has_work(CPUState *cs) { XtensaCPU *cpu = XTENSA_CPU(cs); - return cpu->env.pending_irq_level; + return !cpu->env.runstall && cpu->env.pending_irq_level; } /* CPUClass::reset() */ @@ -60,12 +60,13 @@ static void xtensa_cpu_reset(CPUState *s) xcc->parent_reset(s); env->exception_taken = 0; - env->pc = env->config->exception_vector[EXC_RESET]; + env->pc = env->config->exception_vector[EXC_RESET0 + env->static_vectors]; env->sregs[LITBASE] &= ~1; env->sregs[PS] = xtensa_option_enabled(env->config, XTENSA_OPTION_INTERRUPT) ? 0x1f : 0x10; env->sregs[VECBASE] = env->config->vecbase; env->sregs[IBREAKENABLE] = 0; + env->sregs[MEMCTL] = MEMCTL_IL0EN & env->config->memctl_mask; env->sregs[CACHEATTR] = 0x22222222; env->sregs[ATOMCTL] = xtensa_option_enabled(env->config, XTENSA_OPTION_ATOMCTL) ? 
0x28 : 0x15; @@ -74,6 +75,7 @@ static void xtensa_cpu_reset(CPUState *s) env->pending_irq_level = 0; reset_mmu(env); + s->halted = env->runstall; } static ObjectClass *xtensa_cpu_class_by_name(const char *cpu_model) @@ -125,6 +127,12 @@ static void xtensa_cpu_initfn(Object *obj) cs->env_ptr = env; env->config = xcc->config; + env->address_space_er = g_malloc(sizeof(*env->address_space_er)); + env->system_er = g_malloc(sizeof(*env->system_er)); + memory_region_init_io(env->system_er, NULL, NULL, env, "er", + UINT64_C(0x100000000)); + address_space_init(env->address_space_er, env->system_er, "ER"); + if (tcg_enabled() && !tcg_inited) { tcg_inited = true; xtensa_translate_init(); diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h index 7fe82a37af..7e7131a596 100644 --- a/target/xtensa/cpu.h +++ b/target/xtensa/cpu.h @@ -103,6 +103,7 @@ enum { XTENSA_OPTION_PROCESSOR_ID, XTENSA_OPTION_DEBUG, XTENSA_OPTION_TRACE_PORT, + XTENSA_OPTION_EXTERN_REGS, }; enum { @@ -129,6 +130,7 @@ enum { ITLBCFG = 91, DTLBCFG = 92, IBREAKENABLE = 96, + MEMCTL = 97, CACHEATTR = 98, ATOMCTL = 99, IBREAKA = 128, @@ -189,6 +191,20 @@ enum { #define DBREAKC_SB_LB (DBREAKC_SB | DBREAKC_LB) #define DBREAKC_MASK 0x3f +#define MEMCTL_INIT 0x00800000 +#define MEMCTL_IUSEWAYS_SHIFT 18 +#define MEMCTL_IUSEWAYS_LEN 5 +#define MEMCTL_IUSEWAYS_MASK 0x007c0000 +#define MEMCTL_DALLOCWAYS_SHIFT 13 +#define MEMCTL_DALLOCWAYS_LEN 5 +#define MEMCTL_DALLOCWAYS_MASK 0x0003e000 +#define MEMCTL_DUSEWAYS_SHIFT 8 +#define MEMCTL_DUSEWAYS_LEN 5 +#define MEMCTL_DUSEWAYS_MASK 0x00001f00 +#define MEMCTL_ISNP 0x4 +#define MEMCTL_DSNP 0x2 +#define MEMCTL_IL0EN 0x1 + #define MAX_NAREG 64 #define MAX_NINTERRUPT 32 #define MAX_NLEVEL 6 @@ -209,7 +225,8 @@ enum { enum { /* Static vectors */ - EXC_RESET, + EXC_RESET0, + EXC_RESET1, EXC_MEMORY_ERROR, /* Dynamic vectors */ @@ -268,6 +285,8 @@ typedef enum { INTTYPE_MAX } interrupt_type; +struct CPUXtensaState; + typedef struct xtensa_tlb_entry { uint32_t vaddr; uint32_t paddr; @@ -297,6 +316,11 @@ typedef struct XtensaGdbRegmap { XtensaGdbReg reg[1 + 16 + 64 + 256 + 256]; } XtensaGdbRegmap; +typedef struct XtensaCcompareTimer { + struct CPUXtensaState *env; + QEMUTimer *timer; +} XtensaCcompareTimer; + struct XtensaConfig { const char *name; uint64_t options; @@ -324,6 +348,10 @@ struct XtensaConfig { unsigned nibreak; unsigned ndbreak; + unsigned icache_ways; + unsigned dcache_ways; + uint32_t memctl_mask; + uint32_t configid[2]; uint32_t clock_freq_khz; @@ -365,14 +393,19 @@ typedef struct CPUXtensaState { xtensa_tlb_entry itlb[7][MAX_TLB_WAY_SIZE]; xtensa_tlb_entry dtlb[10][MAX_TLB_WAY_SIZE]; unsigned autorefill_idx; - + bool runstall; + AddressSpace *address_space_er; + MemoryRegion *system_er; int pending_irq_level; /* level of last raised IRQ */ void **irq_inputs; - QEMUTimer *ccompare_timer; - uint32_t wake_ccount; - int64_t halt_clock; + XtensaCcompareTimer ccompare[MAX_NCCOMPARE]; + uint64_t time_base; + uint64_t ccount_time; + uint32_t ccount_base; int exception_taken; + int yield_needed; + unsigned static_vectors; /* Watchpoints for DBREAK registers */ struct CPUWatchpoint *cpu_watchpoint[MAX_NDBREAK]; @@ -437,9 +470,7 @@ void xtensa_register_core(XtensaConfigList *node); void check_interrupts(CPUXtensaState *s); void xtensa_irq_init(CPUXtensaState *env); void *xtensa_get_extint(CPUXtensaState *env, unsigned extint); -void xtensa_advance_ccount(CPUXtensaState *env, uint32_t d); void xtensa_timer_irq(CPUXtensaState *env, uint32_t id, uint32_t active); -void 
xtensa_rearm_ccompare_timer(CPUXtensaState *env); int cpu_xtensa_signal_handler(int host_signum, void *pinfo, void *puc); void xtensa_cpu_list(FILE *f, fprintf_function cpu_fprintf); void xtensa_sync_window_from_phys(CPUXtensaState *env); @@ -460,7 +491,18 @@ int xtensa_get_physical_addr(CPUXtensaState *env, bool update_tlb, void reset_mmu(CPUXtensaState *env); void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUXtensaState *env); void debug_exception_env(CPUXtensaState *new_env, uint32_t cause); +static inline MemoryRegion *xtensa_get_er_region(CPUXtensaState *env) +{ + return env->system_er; +} +static inline void xtensa_select_static_vectors(CPUXtensaState *env, + unsigned n) +{ + assert(n < 2); + env->static_vectors = n; +} +void xtensa_runstall(CPUXtensaState *env, bool runstall); #define XTENSA_OPTION_BIT(opt) (((uint64_t)1) << (opt)) #define XTENSA_OPTION_ALL (~(uint64_t)0) @@ -539,6 +581,7 @@ static inline int cpu_mmu_index(CPUXtensaState *env, bool ifetch) #define XTENSA_TBFLAG_EXCEPTION 0x4000 #define XTENSA_TBFLAG_WINDOW_MASK 0x18000 #define XTENSA_TBFLAG_WINDOW_SHIFT 15 +#define XTENSA_TBFLAG_YIELD 0x20000 static inline void cpu_get_tb_cpu_state(CPUXtensaState *env, target_ulong *pc, target_ulong *cs_base, uint32_t *flags) @@ -580,6 +623,9 @@ static inline void cpu_get_tb_cpu_state(CPUXtensaState *env, target_ulong *pc, } else { *flags |= 3 << XTENSA_TBFLAG_WINDOW_SHIFT; } + if (env->yield_needed) { + *flags |= XTENSA_TBFLAG_YIELD; + } } #include "exec/cpu-all.h" diff --git a/target/xtensa/helper.c b/target/xtensa/helper.c index 768b32c417..c67d715c4b 100644 --- a/target/xtensa/helper.c +++ b/target/xtensa/helper.c @@ -728,3 +728,16 @@ void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUXtensaState *env) cpu_fprintf(f, "No TLB for this CPU core\n"); } } + +void xtensa_runstall(CPUXtensaState *env, bool runstall) +{ + CPUState *cpu = CPU(xtensa_env_get_cpu(env)); + + env->runstall = runstall; + cpu->halted = runstall; + if (runstall) { + cpu_interrupt(cpu, CPU_INTERRUPT_HALT); + } else { + cpu_reset_interrupt(cpu, CPU_INTERRUPT_HALT); + } +} diff --git a/target/xtensa/helper.h b/target/xtensa/helper.h index 0c8adae9d4..cc751c98fb 100644 --- a/target/xtensa/helper.h +++ b/target/xtensa/helper.h @@ -16,10 +16,12 @@ DEF_HELPER_1(simcall, void, env) DEF_HELPER_1(dump_state, void, env) DEF_HELPER_3(waiti, void, env, i32, i32) -DEF_HELPER_3(timer_irq, void, env, i32, i32) -DEF_HELPER_2(advance_ccount, void, env, i32) +DEF_HELPER_1(update_ccount, void, env) +DEF_HELPER_2(wsr_ccount, void, env, i32) +DEF_HELPER_2(update_ccompare, void, env, i32) DEF_HELPER_1(check_interrupts, void, env) DEF_HELPER_3(check_atomctl, void, env, i32, i32) +DEF_HELPER_2(wsr_memctl, void, env, i32) DEF_HELPER_2(itlb_hit_test, void, env, i32) DEF_HELPER_2(wsr_rasid, void, env, i32) @@ -54,3 +56,6 @@ DEF_HELPER_4(olt_s, void, env, i32, f32, f32) DEF_HELPER_4(ult_s, void, env, i32, f32, f32) DEF_HELPER_4(ole_s, void, env, i32, f32, f32) DEF_HELPER_4(ule_s, void, env, i32, f32, f32) + +DEF_HELPER_2(rer, i32, env, i32) +DEF_HELPER_3(wer, void, env, i32, i32) diff --git a/target/xtensa/op_helper.c b/target/xtensa/op_helper.c index dc0dd351bb..af2723445d 100644 --- a/target/xtensa/op_helper.c +++ b/target/xtensa/op_helper.c @@ -105,6 +105,9 @@ void HELPER(exception)(CPUXtensaState *env, uint32_t excp) CPUState *cs = CPU(xtensa_env_get_cpu(env)); cs->exception_index = excp; + if (excp == EXCP_YIELD) { + env->yield_needed = 0; + } if (excp == EXCP_DEBUG) { env->exception_taken = 0; } @@ -385,22 +388,40 @@ void 
HELPER(waiti)(CPUXtensaState *env, uint32_t pc, uint32_t intlevel) } cpu = CPU(xtensa_env_get_cpu(env)); - env->halt_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); cpu->halted = 1; - if (xtensa_option_enabled(env->config, XTENSA_OPTION_TIMER_INTERRUPT)) { - xtensa_rearm_ccompare_timer(env); - } HELPER(exception)(env, EXCP_HLT); } -void HELPER(timer_irq)(CPUXtensaState *env, uint32_t id, uint32_t active) +void HELPER(update_ccount)(CPUXtensaState *env) +{ + uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + + env->ccount_time = now; + env->sregs[CCOUNT] = env->ccount_base + + (uint32_t)((now - env->time_base) * + env->config->clock_freq_khz / 1000000); +} + +void HELPER(wsr_ccount)(CPUXtensaState *env, uint32_t v) { - xtensa_timer_irq(env, id, active); + int i; + + HELPER(update_ccount)(env); + env->ccount_base += v - env->sregs[CCOUNT]; + for (i = 0; i < env->config->nccompare; ++i) { + HELPER(update_ccompare)(env, i); + } } -void HELPER(advance_ccount)(CPUXtensaState *env, uint32_t d) +void HELPER(update_ccompare)(CPUXtensaState *env, uint32_t i) { - xtensa_advance_ccount(env, d); + uint64_t dcc; + + HELPER(update_ccount)(env); + dcc = (uint64_t)(env->sregs[CCOMPARE + i] - env->sregs[CCOUNT] - 1) + 1; + timer_mod(env->ccompare[i].timer, + env->ccount_time + (dcc * 1000000) / env->config->clock_freq_khz); + env->yield_needed = 1; } void HELPER(check_interrupts)(CPUXtensaState *env) @@ -472,6 +493,30 @@ void HELPER(check_atomctl)(CPUXtensaState *env, uint32_t pc, uint32_t vaddr) } } +void HELPER(wsr_memctl)(CPUXtensaState *env, uint32_t v) +{ + if (xtensa_option_enabled(env->config, XTENSA_OPTION_ICACHE)) { + if (extract32(v, MEMCTL_IUSEWAYS_SHIFT, MEMCTL_IUSEWAYS_LEN) > + env->config->icache_ways) { + deposit32(v, MEMCTL_IUSEWAYS_SHIFT, MEMCTL_IUSEWAYS_LEN, + env->config->icache_ways); + } + } + if (xtensa_option_enabled(env->config, XTENSA_OPTION_DCACHE)) { + if (extract32(v, MEMCTL_DUSEWAYS_SHIFT, MEMCTL_DUSEWAYS_LEN) > + env->config->dcache_ways) { + deposit32(v, MEMCTL_DUSEWAYS_SHIFT, MEMCTL_DUSEWAYS_LEN, + env->config->dcache_ways); + } + if (extract32(v, MEMCTL_DALLOCWAYS_SHIFT, MEMCTL_DALLOCWAYS_LEN) > + env->config->dcache_ways) { + deposit32(v, MEMCTL_DALLOCWAYS_SHIFT, MEMCTL_DALLOCWAYS_LEN, + env->config->dcache_ways); + } + } + env->sregs[MEMCTL] = v & env->config->memctl_mask; +} + void HELPER(wsr_rasid)(CPUXtensaState *env, uint32_t v) { XtensaCPU *cpu = xtensa_env_get_cpu(env); @@ -969,3 +1014,15 @@ void HELPER(ule_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b) int v = float32_compare_quiet(a, b, &env->fp_status); set_br(env, v != float_relation_greater, br); } + +uint32_t HELPER(rer)(CPUXtensaState *env, uint32_t addr) +{ + return address_space_ldl(env->address_space_er, addr, + (MemTxAttrs){0}, NULL); +} + +void HELPER(wer)(CPUXtensaState *env, uint32_t data, uint32_t addr) +{ + address_space_stl(env->address_space_er, addr, data, + (MemTxAttrs){0}, NULL); +} diff --git a/target/xtensa/overlay_tool.h b/target/xtensa/overlay_tool.h index e8a7fda3d8..38e9be9ff5 100644 --- a/target/xtensa/overlay_tool.h +++ b/target/xtensa/overlay_tool.h @@ -47,10 +47,26 @@ #define XCHAL_VECBASE_RESET_VADDR 0 #endif +#ifndef XCHAL_RESET_VECTOR0_VADDR +#define XCHAL_RESET_VECTOR0_VADDR XCHAL_RESET_VECTOR_VADDR +#endif + +#ifndef XCHAL_RESET_VECTOR1_VADDR +#define XCHAL_RESET_VECTOR1_VADDR XCHAL_RESET_VECTOR_VADDR +#endif + #ifndef XCHAL_HW_MIN_VERSION #define XCHAL_HW_MIN_VERSION 0 #endif +#ifndef XCHAL_LOOP_BUFFER_SIZE +#define XCHAL_LOOP_BUFFER_SIZE 0 +#endif + +#ifndef 
XCHAL_HAVE_EXTERN_REGS +#define XCHAL_HAVE_EXTERN_REGS 0 +#endif + #define XCHAL_OPTION(xchal, qemu) ((xchal) ? XTENSA_OPTION_BIT(qemu) : 0) #define XTENSA_OPTIONS ( \ @@ -84,10 +100,10 @@ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT) | \ XCHAL_OPTION(XCHAL_HAVE_CCOUNT, XTENSA_OPTION_TIMER_INTERRUPT) | \ /* Local memory, TODO */ \ - XCHAL_OPTION(XCHAL_ICACHE_WAYS, XTENSA_OPTION_ICACHE) | \ + XCHAL_OPTION(XCHAL_ICACHE_SIZE, XTENSA_OPTION_ICACHE) | \ XCHAL_OPTION(XCHAL_ICACHE_LINE_LOCKABLE, \ XTENSA_OPTION_ICACHE_INDEX_LOCK) | \ - XCHAL_OPTION(XCHAL_DCACHE_WAYS, XTENSA_OPTION_DCACHE) | \ + XCHAL_OPTION(XCHAL_DCACHE_SIZE, XTENSA_OPTION_DCACHE) | \ XCHAL_OPTION(XCHAL_DCACHE_LINE_LOCKABLE, \ XTENSA_OPTION_DCACHE_INDEX_LOCK) | \ XCHAL_OPTION(XCHAL_UNALIGNED_LOAD_HW, XTENSA_OPTION_HW_ALIGNMENT) | \ @@ -103,7 +119,8 @@ XCHAL_OPTION(XCHAL_HAVE_DEBUG, XTENSA_OPTION_DEBUG) |\ XCHAL_OPTION(XCHAL_NUM_MISC_REGS > 0, XTENSA_OPTION_MISC_SR) | \ XCHAL_OPTION(XCHAL_HAVE_THREADPTR, XTENSA_OPTION_THREAD_POINTER) | \ - XCHAL_OPTION(XCHAL_HAVE_PRID, XTENSA_OPTION_PROCESSOR_ID)) + XCHAL_OPTION(XCHAL_HAVE_PRID, XTENSA_OPTION_PROCESSOR_ID) | \ + XCHAL_OPTION(XCHAL_HAVE_EXTERN_REGS, XTENSA_OPTION_EXTERN_REGS)) #ifndef XCHAL_WINDOW_OF4_VECOFS #define XCHAL_WINDOW_OF4_VECOFS 0x00000000 @@ -133,7 +150,8 @@ #endif #define EXCEPTION_VECTORS { \ - [EXC_RESET] = XCHAL_RESET_VECTOR_VADDR, \ + [EXC_RESET0] = XCHAL_RESET_VECTOR0_VADDR, \ + [EXC_RESET1] = XCHAL_RESET_VECTOR1_VADDR, \ WINDOW_VECTORS \ [EXC_KERNEL] = XCHAL_KERNEL_VECTOR_VADDR, \ [EXC_USER] = XCHAL_USER_VECTOR_VADDR, \ @@ -334,6 +352,16 @@ .nibreak = XCHAL_NUM_IBREAK, \ .ndbreak = XCHAL_NUM_DBREAK +#define CACHE_SECTION \ + .icache_ways = XCHAL_ICACHE_WAYS, \ + .dcache_ways = XCHAL_DCACHE_WAYS, \ + .memctl_mask = \ + (XCHAL_ICACHE_SIZE ? MEMCTL_IUSEWAYS_MASK : 0) | \ + (XCHAL_DCACHE_SIZE ? \ + MEMCTL_DALLOCWAYS_MASK | MEMCTL_DUSEWAYS_MASK : 0) | \ + MEMCTL_ISNP | MEMCTL_DSNP | \ + (XCHAL_HAVE_LOOPS && XCHAL_LOOP_BUFFER_SIZE ? 
MEMCTL_IL0EN : 0) + #define CONFIG_SECTION \ .configid = { \ XCHAL_HW_CONFIGID0, \ @@ -348,6 +376,7 @@ INTERRUPTS_SECTION, \ TLB_SECTION, \ DEBUG_SECTION, \ + CACHE_SECTION, \ CONFIG_SECTION diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c index 5a93705fac..263002486c 100644 --- a/target/xtensa/translate.c +++ b/target/xtensa/translate.c @@ -64,7 +64,6 @@ typedef struct DisasContext { bool sar_m32_allocated; TCGv_i32 sar_m32; - uint32_t ccount_delta; unsigned window; bool debug; @@ -134,6 +133,7 @@ static const XtensaReg sregnames[256] = { [ITLBCFG] = XTENSA_REG("ITLBCFG", XTENSA_OPTION_MMU), [DTLBCFG] = XTENSA_REG("DTLBCFG", XTENSA_OPTION_MMU), [IBREAKENABLE] = XTENSA_REG("IBREAKENABLE", XTENSA_OPTION_DEBUG), + [MEMCTL] = XTENSA_REG_BITS("MEMCTL", XTENSA_OPTION_ALL), [CACHEATTR] = XTENSA_REG("CACHEATTR", XTENSA_OPTION_CACHEATTR), [ATOMCTL] = XTENSA_REG("ATOMCTL", XTENSA_OPTION_ATOMCTL), [IBREAKA] = XTENSA_REG("IBREAKA0", XTENSA_OPTION_DEBUG), @@ -314,20 +314,9 @@ static void gen_left_shift_sar(DisasContext *dc, TCGv_i32 sa) tcg_temp_free(tmp); } -static void gen_advance_ccount(DisasContext *dc) -{ - if (dc->ccount_delta > 0) { - TCGv_i32 tmp = tcg_const_i32(dc->ccount_delta); - gen_helper_advance_ccount(cpu_env, tmp); - tcg_temp_free(tmp); - } - dc->ccount_delta = 0; -} - static void gen_exception(DisasContext *dc, int excp) { TCGv_i32 tmp = tcg_const_i32(excp); - gen_advance_ccount(dc); gen_helper_exception(cpu_env, tmp); tcg_temp_free(tmp); } @@ -336,7 +325,6 @@ static void gen_exception_cause(DisasContext *dc, uint32_t cause) { TCGv_i32 tpc = tcg_const_i32(dc->pc); TCGv_i32 tcause = tcg_const_i32(cause); - gen_advance_ccount(dc); gen_helper_exception_cause(cpu_env, tpc, tcause); tcg_temp_free(tpc); tcg_temp_free(tcause); @@ -351,7 +339,6 @@ static void gen_exception_cause_vaddr(DisasContext *dc, uint32_t cause, { TCGv_i32 tpc = tcg_const_i32(dc->pc); TCGv_i32 tcause = tcg_const_i32(cause); - gen_advance_ccount(dc); gen_helper_exception_cause_vaddr(cpu_env, tpc, tcause, vaddr); tcg_temp_free(tpc); tcg_temp_free(tcause); @@ -361,7 +348,6 @@ static void gen_debug_exception(DisasContext *dc, uint32_t cause) { TCGv_i32 tpc = tcg_const_i32(dc->pc); TCGv_i32 tcause = tcg_const_i32(cause); - gen_advance_ccount(dc); gen_helper_debug_exception(cpu_env, tpc, tcause); tcg_temp_free(tpc); tcg_temp_free(tcause); @@ -394,7 +380,6 @@ static bool gen_check_cpenable(DisasContext *dc, unsigned cp) static void gen_jump_slot(DisasContext *dc, TCGv dest, int slot) { tcg_gen_mov_i32(cpu_pc, dest); - gen_advance_ccount(dc); if (dc->icount) { tcg_gen_mov_i32(cpu_SR[ICOUNT], dc->next_icount); } @@ -465,7 +450,6 @@ static bool gen_check_loop_end(DisasContext *dc, int slot) dc->next_pc == dc->lend) { TCGLabel *label = gen_new_label(); - gen_advance_ccount(dc); tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_SR[LCOUNT], 0, label); tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_SR[LCOUNT], 1); gen_jumpi(dc, dc->lbeg, slot); @@ -488,7 +472,6 @@ static void gen_brcond(DisasContext *dc, TCGCond cond, { TCGLabel *label = gen_new_label(); - gen_advance_ccount(dc); tcg_gen_brcond_i32(cond, t0, t1, label); gen_jumpi_check_loop_end(dc, 0); gen_set_label(label); @@ -528,47 +511,60 @@ static bool gen_check_sr(DisasContext *dc, uint32_t sr, unsigned access) return true; } -static void gen_rsr_ccount(DisasContext *dc, TCGv_i32 d, uint32_t sr) +static bool gen_rsr_ccount(DisasContext *dc, TCGv_i32 d, uint32_t sr) { - gen_advance_ccount(dc); + if (dc->tb->cflags & CF_USE_ICOUNT) { + gen_io_start(); + } + 
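    /*
     * Editorial note, not part of the patch: the helper called below
     * recomputes CCOUNT from the virtual clock as
     *   CCOUNT = ccount_base + (now - time_base) * clock_freq_khz / 1000000
     * with 'now' in nanoseconds.  At a (hypothetical) clock_freq_khz of
     * 10000, i.e. 10 MHz, one CCOUNT tick is 100 ns and 1 ms of virtual
     * time advances CCOUNT by 10000.  update_ccompare() converts back
     * the other way: a CCOMPARE value 2500 ticks ahead arms the
     * QEMUTimer 2500 * 1000000 / 10000 = 250000 ns in the future, and
     * the "(uint64_t)(ccompare - ccount - 1) + 1" form makes an exact
     * match count as a full 2^32-tick wrap rather than zero.
     */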
gen_helper_update_ccount(cpu_env); tcg_gen_mov_i32(d, cpu_SR[sr]); + if (dc->tb->cflags & CF_USE_ICOUNT) { + gen_io_end(); + return true; + } + return false; } -static void gen_rsr_ptevaddr(DisasContext *dc, TCGv_i32 d, uint32_t sr) +static bool gen_rsr_ptevaddr(DisasContext *dc, TCGv_i32 d, uint32_t sr) { tcg_gen_shri_i32(d, cpu_SR[EXCVADDR], 10); tcg_gen_or_i32(d, d, cpu_SR[sr]); tcg_gen_andi_i32(d, d, 0xfffffffc); + return false; } -static void gen_rsr(DisasContext *dc, TCGv_i32 d, uint32_t sr) +static bool gen_rsr(DisasContext *dc, TCGv_i32 d, uint32_t sr) { - static void (* const rsr_handler[256])(DisasContext *dc, + static bool (* const rsr_handler[256])(DisasContext *dc, TCGv_i32 d, uint32_t sr) = { [CCOUNT] = gen_rsr_ccount, + [INTSET] = gen_rsr_ccount, [PTEVADDR] = gen_rsr_ptevaddr, }; if (rsr_handler[sr]) { - rsr_handler[sr](dc, d, sr); + return rsr_handler[sr](dc, d, sr); } else { tcg_gen_mov_i32(d, cpu_SR[sr]); + return false; } } -static void gen_wsr_lbeg(DisasContext *dc, uint32_t sr, TCGv_i32 s) +static bool gen_wsr_lbeg(DisasContext *dc, uint32_t sr, TCGv_i32 s) { gen_helper_wsr_lbeg(cpu_env, s); gen_jumpi_check_loop_end(dc, 0); + return false; } -static void gen_wsr_lend(DisasContext *dc, uint32_t sr, TCGv_i32 s) +static bool gen_wsr_lend(DisasContext *dc, uint32_t sr, TCGv_i32 s) { gen_helper_wsr_lend(cpu_env, s); gen_jumpi_check_loop_end(dc, 0); + return false; } -static void gen_wsr_sar(DisasContext *dc, uint32_t sr, TCGv_i32 s) +static bool gen_wsr_sar(DisasContext *dc, uint32_t sr, TCGv_i32 s) { tcg_gen_andi_i32(cpu_SR[sr], s, 0x3f); if (dc->sar_m32_5bit) { @@ -576,68 +572,85 @@ static void gen_wsr_sar(DisasContext *dc, uint32_t sr, TCGv_i32 s) } dc->sar_5bit = false; dc->sar_m32_5bit = false; + return false; } -static void gen_wsr_br(DisasContext *dc, uint32_t sr, TCGv_i32 s) +static bool gen_wsr_br(DisasContext *dc, uint32_t sr, TCGv_i32 s) { tcg_gen_andi_i32(cpu_SR[sr], s, 0xffff); + return false; } -static void gen_wsr_litbase(DisasContext *dc, uint32_t sr, TCGv_i32 s) +static bool gen_wsr_litbase(DisasContext *dc, uint32_t sr, TCGv_i32 s) { tcg_gen_andi_i32(cpu_SR[sr], s, 0xfffff001); /* This can change tb->flags, so exit tb */ gen_jumpi_check_loop_end(dc, -1); + return true; } -static void gen_wsr_acchi(DisasContext *dc, uint32_t sr, TCGv_i32 s) +static bool gen_wsr_acchi(DisasContext *dc, uint32_t sr, TCGv_i32 s) { tcg_gen_ext8s_i32(cpu_SR[sr], s); + return false; } -static void gen_wsr_windowbase(DisasContext *dc, uint32_t sr, TCGv_i32 v) +static bool gen_wsr_windowbase(DisasContext *dc, uint32_t sr, TCGv_i32 v) { gen_helper_wsr_windowbase(cpu_env, v); /* This can change tb->flags, so exit tb */ gen_jumpi_check_loop_end(dc, -1); + return true; } -static void gen_wsr_windowstart(DisasContext *dc, uint32_t sr, TCGv_i32 v) +static bool gen_wsr_windowstart(DisasContext *dc, uint32_t sr, TCGv_i32 v) { tcg_gen_andi_i32(cpu_SR[sr], v, (1 << dc->config->nareg / 4) - 1); /* This can change tb->flags, so exit tb */ gen_jumpi_check_loop_end(dc, -1); + return true; } -static void gen_wsr_ptevaddr(DisasContext *dc, uint32_t sr, TCGv_i32 v) +static bool gen_wsr_ptevaddr(DisasContext *dc, uint32_t sr, TCGv_i32 v) { tcg_gen_andi_i32(cpu_SR[sr], v, 0xffc00000); + return false; } -static void gen_wsr_rasid(DisasContext *dc, uint32_t sr, TCGv_i32 v) +static bool gen_wsr_rasid(DisasContext *dc, uint32_t sr, TCGv_i32 v) { gen_helper_wsr_rasid(cpu_env, v); /* This can change tb->flags, so exit tb */ gen_jumpi_check_loop_end(dc, -1); + return true; } -static void 
gen_wsr_tlbcfg(DisasContext *dc, uint32_t sr, TCGv_i32 v) +static bool gen_wsr_tlbcfg(DisasContext *dc, uint32_t sr, TCGv_i32 v) { tcg_gen_andi_i32(cpu_SR[sr], v, 0x01130000); + return false; } -static void gen_wsr_ibreakenable(DisasContext *dc, uint32_t sr, TCGv_i32 v) +static bool gen_wsr_ibreakenable(DisasContext *dc, uint32_t sr, TCGv_i32 v) { gen_helper_wsr_ibreakenable(cpu_env, v); gen_jumpi_check_loop_end(dc, 0); + return true; +} + +static bool gen_wsr_memctl(DisasContext *dc, uint32_t sr, TCGv_i32 v) +{ + gen_helper_wsr_memctl(cpu_env, v); + return false; } -static void gen_wsr_atomctl(DisasContext *dc, uint32_t sr, TCGv_i32 v) +static bool gen_wsr_atomctl(DisasContext *dc, uint32_t sr, TCGv_i32 v) { tcg_gen_andi_i32(cpu_SR[sr], v, 0x3f); + return false; } -static void gen_wsr_ibreaka(DisasContext *dc, uint32_t sr, TCGv_i32 v) +static bool gen_wsr_ibreaka(DisasContext *dc, uint32_t sr, TCGv_i32 v) { unsigned id = sr - IBREAKA; @@ -646,10 +659,12 @@ static void gen_wsr_ibreaka(DisasContext *dc, uint32_t sr, TCGv_i32 v) gen_helper_wsr_ibreaka(cpu_env, tmp, v); tcg_temp_free(tmp); gen_jumpi_check_loop_end(dc, 0); + return true; } + return false; } -static void gen_wsr_dbreaka(DisasContext *dc, uint32_t sr, TCGv_i32 v) +static bool gen_wsr_dbreaka(DisasContext *dc, uint32_t sr, TCGv_i32 v) { unsigned id = sr - DBREAKA; @@ -658,9 +673,10 @@ static void gen_wsr_dbreaka(DisasContext *dc, uint32_t sr, TCGv_i32 v) gen_helper_wsr_dbreaka(cpu_env, tmp, v); tcg_temp_free(tmp); } + return false; } -static void gen_wsr_dbreakc(DisasContext *dc, uint32_t sr, TCGv_i32 v) +static bool gen_wsr_dbreakc(DisasContext *dc, uint32_t sr, TCGv_i32 v) { unsigned id = sr - DBREAKC; @@ -669,24 +685,38 @@ static void gen_wsr_dbreakc(DisasContext *dc, uint32_t sr, TCGv_i32 v) gen_helper_wsr_dbreakc(cpu_env, tmp, v); tcg_temp_free(tmp); } + return false; } -static void gen_wsr_cpenable(DisasContext *dc, uint32_t sr, TCGv_i32 v) +static bool gen_wsr_cpenable(DisasContext *dc, uint32_t sr, TCGv_i32 v) { tcg_gen_andi_i32(cpu_SR[sr], v, 0xff); /* This can change tb->flags, so exit tb */ gen_jumpi_check_loop_end(dc, -1); + return true; } -static void gen_wsr_intset(DisasContext *dc, uint32_t sr, TCGv_i32 v) +static void gen_check_interrupts(DisasContext *dc) +{ + if (dc->tb->cflags & CF_USE_ICOUNT) { + gen_io_start(); + } + gen_helper_check_interrupts(cpu_env); + if (dc->tb->cflags & CF_USE_ICOUNT) { + gen_io_end(); + } +} + +static bool gen_wsr_intset(DisasContext *dc, uint32_t sr, TCGv_i32 v) { tcg_gen_andi_i32(cpu_SR[sr], v, dc->config->inttype_mask[INTTYPE_SOFTWARE]); - gen_helper_check_interrupts(cpu_env); + gen_check_interrupts(dc); gen_jumpi_check_loop_end(dc, 0); + return true; } -static void gen_wsr_intclear(DisasContext *dc, uint32_t sr, TCGv_i32 v) +static bool gen_wsr_intclear(DisasContext *dc, uint32_t sr, TCGv_i32 v) { TCGv_i32 tmp = tcg_temp_new_i32(); @@ -696,17 +726,20 @@ static void gen_wsr_intclear(DisasContext *dc, uint32_t sr, TCGv_i32 v) dc->config->inttype_mask[INTTYPE_SOFTWARE]); tcg_gen_andc_i32(cpu_SR[INTSET], cpu_SR[INTSET], tmp); tcg_temp_free(tmp); - gen_helper_check_interrupts(cpu_env); + gen_check_interrupts(dc); + gen_jumpi_check_loop_end(dc, 0); + return true; } -static void gen_wsr_intenable(DisasContext *dc, uint32_t sr, TCGv_i32 v) +static bool gen_wsr_intenable(DisasContext *dc, uint32_t sr, TCGv_i32 v) { tcg_gen_mov_i32(cpu_SR[sr], v); - gen_helper_check_interrupts(cpu_env); + gen_check_interrupts(dc); gen_jumpi_check_loop_end(dc, 0); + return true; } -static void 
gen_wsr_ps(DisasContext *dc, uint32_t sr, TCGv_i32 v) +static bool gen_wsr_ps(DisasContext *dc, uint32_t sr, TCGv_i32 v) { uint32_t mask = PS_WOE | PS_CALLINC | PS_OWB | PS_UM | PS_EXCM | PS_INTLEVEL; @@ -715,42 +748,72 @@ static void gen_wsr_ps(DisasContext *dc, uint32_t sr, TCGv_i32 v) mask |= PS_RING; } tcg_gen_andi_i32(cpu_SR[sr], v, mask); - gen_helper_check_interrupts(cpu_env); + gen_check_interrupts(dc); /* This can change mmu index and tb->flags, so exit tb */ gen_jumpi_check_loop_end(dc, -1); + return true; +} + +static bool gen_wsr_ccount(DisasContext *dc, uint32_t sr, TCGv_i32 v) +{ + if (dc->tb->cflags & CF_USE_ICOUNT) { + gen_io_start(); + } + gen_helper_wsr_ccount(cpu_env, v); + if (dc->tb->cflags & CF_USE_ICOUNT) { + gen_io_end(); + gen_jumpi_check_loop_end(dc, 0); + return true; + } + return false; } -static void gen_wsr_icount(DisasContext *dc, uint32_t sr, TCGv_i32 v) +static bool gen_wsr_icount(DisasContext *dc, uint32_t sr, TCGv_i32 v) { if (dc->icount) { tcg_gen_mov_i32(dc->next_icount, v); } else { tcg_gen_mov_i32(cpu_SR[sr], v); } + return false; } -static void gen_wsr_icountlevel(DisasContext *dc, uint32_t sr, TCGv_i32 v) +static bool gen_wsr_icountlevel(DisasContext *dc, uint32_t sr, TCGv_i32 v) { tcg_gen_andi_i32(cpu_SR[sr], v, 0xf); /* This can change tb->flags, so exit tb */ gen_jumpi_check_loop_end(dc, -1); + return true; } -static void gen_wsr_ccompare(DisasContext *dc, uint32_t sr, TCGv_i32 v) +static bool gen_wsr_ccompare(DisasContext *dc, uint32_t sr, TCGv_i32 v) { uint32_t id = sr - CCOMPARE; + bool ret = false; + if (id < dc->config->nccompare) { uint32_t int_bit = 1 << dc->config->timerint[id]; - gen_advance_ccount(dc); + TCGv_i32 tmp = tcg_const_i32(id); + tcg_gen_mov_i32(cpu_SR[sr], v); tcg_gen_andi_i32(cpu_SR[INTSET], cpu_SR[INTSET], ~int_bit); - gen_helper_check_interrupts(cpu_env); + if (dc->tb->cflags & CF_USE_ICOUNT) { + gen_io_start(); + } + gen_helper_update_ccompare(cpu_env, tmp); + if (dc->tb->cflags & CF_USE_ICOUNT) { + gen_io_end(); + gen_jumpi_check_loop_end(dc, 0); + ret = true; + } + tcg_temp_free(tmp); } + return ret; } -static void gen_wsr(DisasContext *dc, uint32_t sr, TCGv_i32 s) +static bool gen_wsr(DisasContext *dc, uint32_t sr, TCGv_i32 s) { - static void (* const wsr_handler[256])(DisasContext *dc, + static bool (* const wsr_handler[256])(DisasContext *dc, uint32_t sr, TCGv_i32 v) = { [LBEG] = gen_wsr_lbeg, [LEND] = gen_wsr_lend, @@ -765,6 +828,7 @@ static void gen_wsr(DisasContext *dc, uint32_t sr, TCGv_i32 s) [ITLBCFG] = gen_wsr_tlbcfg, [DTLBCFG] = gen_wsr_tlbcfg, [IBREAKENABLE] = gen_wsr_ibreakenable, + [MEMCTL] = gen_wsr_memctl, [ATOMCTL] = gen_wsr_atomctl, [IBREAKA] = gen_wsr_ibreaka, [IBREAKA + 1] = gen_wsr_ibreaka, @@ -777,6 +841,7 @@ static void gen_wsr(DisasContext *dc, uint32_t sr, TCGv_i32 s) [INTCLEAR] = gen_wsr_intclear, [INTENABLE] = gen_wsr_intenable, [PS] = gen_wsr_ps, + [CCOUNT] = gen_wsr_ccount, [ICOUNT] = gen_wsr_icount, [ICOUNTLEVEL] = gen_wsr_icountlevel, [CCOMPARE] = gen_wsr_ccompare, @@ -785,9 +850,10 @@ static void gen_wsr(DisasContext *dc, uint32_t sr, TCGv_i32 s) }; if (wsr_handler[sr]) { - wsr_handler[sr](dc, sr, s); + return wsr_handler[sr](dc, sr, s); } else { tcg_gen_mov_i32(cpu_SR[sr], s); + return false; } } @@ -829,10 +895,17 @@ static void gen_waiti(DisasContext *dc, uint32_t imm4) { TCGv_i32 pc = tcg_const_i32(dc->next_pc); TCGv_i32 intlevel = tcg_const_i32(imm4); - gen_advance_ccount(dc); + + if (dc->tb->cflags & CF_USE_ICOUNT) { + gen_io_start(); + } gen_helper_waiti(cpu_env, pc, intlevel); + 
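    /*
     * Editorial note, not part of the patch: waiti, like the other
     * helpers in this series that read or re-arm the virtual clock
     * (update_ccount, wsr_ccount, update_ccompare, check_interrupts),
     * is bracketed with gen_io_start()/gen_io_end() when icount is in
     * use, so the deterministic instruction counter is up to date when
     * the clock is sampled; that is also why most gen_rsr_* and
     * gen_wsr_* handlers now return true, telling the caller to end
     * the TB right after the access.
     */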
if (dc->tb->cflags & CF_USE_ICOUNT) { + gen_io_end(); + } tcg_temp_free(pc); tcg_temp_free(intlevel); + gen_jumpi_check_loop_end(dc, 0); } static bool gen_window_check1(DisasContext *dc, unsigned r1) @@ -841,7 +914,6 @@ static bool gen_window_check1(DisasContext *dc, unsigned r1) TCGv_i32 pc = tcg_const_i32(dc->pc); TCGv_i32 w = tcg_const_i32(r1 / 4); - gen_advance_ccount(dc); gen_helper_window_check(cpu_env, pc, w); dc->is_jmp = DISAS_UPDATE; return false; @@ -1037,7 +1109,6 @@ static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc) HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); { TCGv_i32 tmp = tcg_const_i32(dc->pc); - gen_advance_ccount(dc); gen_helper_retw(tmp, cpu_env, tmp); gen_jump(dc, tmp); tcg_temp_free(tmp); @@ -1086,7 +1157,6 @@ static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc) HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); if (gen_window_check2(dc, RRR_T, RRR_S)) { TCGv_i32 pc = tcg_const_i32(dc->pc); - gen_advance_ccount(dc); gen_helper_movsp(cpu_env, pc); tcg_gen_mov_i32(cpu_R[RRR_T], cpu_R[RRR_S]); tcg_temp_free(pc); @@ -1134,7 +1204,7 @@ static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc) case 0: /*RFEx*/ if (gen_check_privilege(dc)) { tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_EXCM); - gen_helper_check_interrupts(cpu_env); + gen_check_interrupts(dc); gen_jump(dc, cpu_SR[EPC1]); } break; @@ -1169,7 +1239,7 @@ static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc) } gen_helper_restore_owb(cpu_env); - gen_helper_check_interrupts(cpu_env); + gen_check_interrupts(dc); gen_jump(dc, cpu_SR[EPC1]); tcg_temp_free(tmp); @@ -1188,7 +1258,7 @@ static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc) if (gen_check_privilege(dc)) { tcg_gen_mov_i32(cpu_SR[PS], cpu_SR[EPS2 + RRR_S - 2]); - gen_helper_check_interrupts(cpu_env); + gen_check_interrupts(dc); gen_jump(dc, cpu_SR[EPC1 + RRR_S - 1]); } } else { @@ -1246,7 +1316,7 @@ static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc) tcg_gen_mov_i32(cpu_R[RRR_T], cpu_SR[PS]); tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_INTLEVEL); tcg_gen_ori_i32(cpu_SR[PS], cpu_SR[PS], RRR_S); - gen_helper_check_interrupts(cpu_env); + gen_check_interrupts(dc); gen_jumpi_check_loop_end(dc, 0); } break; @@ -1350,11 +1420,19 @@ static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc) break; case 6: /*RER*/ - TBD(); + HAS_OPTION(XTENSA_OPTION_EXTERN_REGS); + if (gen_check_privilege(dc) && + gen_window_check2(dc, RRR_S, RRR_T)) { + gen_helper_rer(cpu_R[RRR_T], cpu_env, cpu_R[RRR_S]); + } break; case 7: /*WER*/ - TBD(); + HAS_OPTION(XTENSA_OPTION_EXTERN_REGS); + if (gen_check_privilege(dc) && + gen_window_check2(dc, RRR_S, RRR_T)) { + gen_helper_wer(cpu_env, cpu_R[RRR_T], cpu_R[RRR_S]); + } break; case 8: /*ROTWw*/ @@ -1534,11 +1612,15 @@ static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc) (RSR_SR < 64 || gen_check_privilege(dc)) && gen_window_check1(dc, RRR_T)) { TCGv_i32 tmp = tcg_temp_new_i32(); + bool rsr_end, wsr_end; tcg_gen_mov_i32(tmp, cpu_R[RRR_T]); - gen_rsr(dc, cpu_R[RRR_T], RSR_SR); - gen_wsr(dc, RSR_SR, tmp); + rsr_end = gen_rsr(dc, cpu_R[RRR_T], RSR_SR); + wsr_end = gen_wsr(dc, RSR_SR, tmp); tcg_temp_free(tmp); + if (rsr_end && !wsr_end) { + gen_jumpi_check_loop_end(dc, 0); + } } break; @@ -1759,7 +1841,9 @@ static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc) if (gen_check_sr(dc, RSR_SR, SR_R) && (RSR_SR < 64 || gen_check_privilege(dc)) && gen_window_check1(dc, RRR_T)) { - gen_rsr(dc, cpu_R[RRR_T], RSR_SR); + if 
(gen_rsr(dc, cpu_R[RRR_T], RSR_SR)) { + gen_jumpi_check_loop_end(dc, 0); + } } break; @@ -2517,7 +2601,6 @@ static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc) tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2); gen_load_store_alignment(dc, 2, addr, true); - gen_advance_ccount(dc); tpc = tcg_const_i32(dc->pc); gen_helper_check_atomctl(cpu_env, tpc, addr); tcg_gen_qemu_ld32u(cpu_R[RRI8_T], addr, dc->cring); @@ -2747,7 +2830,6 @@ static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc) TCGv_i32 pc = tcg_const_i32(dc->pc); TCGv_i32 s = tcg_const_i32(BRI12_S); TCGv_i32 imm = tcg_const_i32(BRI12_IMM12); - gen_advance_ccount(dc); gen_helper_entry(cpu_env, pc, s, imm); tcg_temp_free(imm); tcg_temp_free(s); @@ -2966,7 +3048,6 @@ static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc) HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); { TCGv_i32 tmp = tcg_const_i32(dc->pc); - gen_advance_ccount(dc); gen_helper_retw(tmp, cpu_env, tmp); gen_jump(dc, tmp); tcg_temp_free(tmp); @@ -3063,7 +3144,6 @@ void gen_intermediate_code(CPUXtensaState *env, TranslationBlock *tb) dc.lbeg = env->sregs[LBEG]; dc.lend = env->sregs[LEND]; dc.is_jmp = DISAS_NEXT; - dc.ccount_delta = 0; dc.debug = tb->flags & XTENSA_TBFLAG_DEBUG; dc.icount = tb->flags & XTENSA_TBFLAG_ICOUNT; dc.cpenable = (tb->flags & XTENSA_TBFLAG_CPENABLE_MASK) >> @@ -3079,17 +3159,26 @@ void gen_intermediate_code(CPUXtensaState *env, TranslationBlock *tb) gen_tb_start(tb); + if ((tb->cflags & CF_USE_ICOUNT) && + (tb->flags & XTENSA_TBFLAG_YIELD)) { + tcg_gen_insn_start(dc.pc); + ++insn_count; + gen_exception(&dc, EXCP_YIELD); + dc.is_jmp = DISAS_UPDATE; + goto done; + } if (tb->flags & XTENSA_TBFLAG_EXCEPTION) { - tcg_gen_movi_i32(cpu_pc, dc.pc); + tcg_gen_insn_start(dc.pc); + ++insn_count; gen_exception(&dc, EXCP_DEBUG); + dc.is_jmp = DISAS_UPDATE; + goto done; } do { tcg_gen_insn_start(dc.pc); ++insn_count; - ++dc.ccount_delta; - if (unlikely(cpu_breakpoint_test(cs, dc.pc, BP_ANY))) { tcg_gen_movi_i32(cpu_pc, dc.pc); gen_exception(&dc, EXCP_DEBUG); @@ -3136,7 +3225,7 @@ void gen_intermediate_code(CPUXtensaState *env, TranslationBlock *tb) dc.pc < next_page_start && dc.pc + xtensa_insn_len(env, &dc) <= next_page_start && !tcg_op_buf_full()); - +done: reset_litbase(&dc); reset_sar_tracker(&dc); if (dc.icount) { diff --git a/tests/tcg/xtensa/Makefile b/tests/tcg/xtensa/Makefile index 7f9f2d96c3..2882c431e4 100644 --- a/tests/tcg/xtensa/Makefile +++ b/tests/tcg/xtensa/Makefile @@ -5,7 +5,7 @@ CROSS=xtensa-$(CORE)-elf- ifndef XT SIM = ../../../xtensa-softmmu/qemu-system-xtensa -SIMFLAGS = -M sim -cpu $(CORE) -nographic -semihosting $(EXTFLAGS) -kernel +SIMFLAGS = -M sim -cpu $(CORE) -nographic -semihosting -icount 7 $(EXTFLAGS) -kernel SIMDEBUG = -s -S else SIM = xt-run diff --git a/tests/tcg/xtensa/test_interrupt.S b/tests/tcg/xtensa/test_interrupt.S index 334ddab287..876683518e 100644 --- a/tests/tcg/xtensa/test_interrupt.S +++ b/tests/tcg/xtensa/test_interrupt.S @@ -1,5 +1,7 @@ #include "macros.inc" +#define LSBIT(v) ((v) ^ ((v) & ((v) - 1))) + test_suite interrupt .macro clear_interrupts @@ -46,14 +48,17 @@ test soft_disabled set_vector kernel, 1f clear_interrupts - movi a2, 0x80 + movi a2, LSBIT(XCHAL_INTTYPE_MASK_SOFTWARE) wsr a2, intset esync rsr a3, interrupt + movi a4, ~XCHAL_INTTYPE_MASK_TIMER + and a3, a3, a4 assert eq, a2, a3 wsr a2, intclear esync rsr a3, interrupt + and a3, a3, a4 assert eqi, a3, 0 j 2f 1: @@ -65,10 +70,12 @@ test soft_intenable set_vector kernel, 1f clear_interrupts - movi a2, 0x80 + 
movi a2, LSBIT(XCHAL_INTTYPE_MASK_SOFTWARE) wsr a2, intset esync rsr a3, interrupt + movi a4, ~XCHAL_INTTYPE_MASK_TIMER + and a3, a3, a4 assert eq, a2, a3 rsil a3, 0 wsr a2, intenable @@ -82,10 +89,12 @@ test soft_rsil set_vector kernel, 1f clear_interrupts - movi a2, 0x80 + movi a2, LSBIT(XCHAL_INTTYPE_MASK_SOFTWARE) wsr a2, intset esync rsr a3, interrupt + movi a4, ~XCHAL_INTTYPE_MASK_TIMER + and a3, a3, a4 assert eq, a2, a3 wsr a2, intenable rsil a3, 0 @@ -99,10 +108,12 @@ test soft_waiti set_vector kernel, 1f clear_interrupts - movi a2, 0x80 + movi a2, LSBIT(XCHAL_INTTYPE_MASK_SOFTWARE) wsr a2, intset esync rsr a3, interrupt + movi a4, ~XCHAL_INTTYPE_MASK_TIMER + and a3, a3, a4 assert eq, a2, a3 wsr a2, intenable waiti 0 @@ -116,10 +127,12 @@ test soft_user set_vector user, 2f clear_interrupts - movi a2, 0x80 + movi a2, LSBIT(XCHAL_INTTYPE_MASK_SOFTWARE) wsr a2, intset esync rsr a3, interrupt + movi a4, ~XCHAL_INTTYPE_MASK_TIMER + and a3, a3, a4 assert eq, a2, a3 wsr a2, intenable @@ -139,7 +152,7 @@ test soft_priority set_vector level3, 2f clear_interrupts - movi a2, 0x880 + movi a2, XCHAL_INTTYPE_MASK_SOFTWARE wsr a2, intenable rsil a3, 0 esync @@ -161,7 +174,7 @@ test eps_epc_rfi clear_interrupts reset_ps - movi a2, 0x880 + movi a2, XCHAL_INTTYPE_MASK_SOFTWARE wsr a2, intenable rsil a3, 0 rsr a3, ps diff --git a/tests/tcg/xtensa/test_sr.S b/tests/tcg/xtensa/test_sr.S index 4fac46e80f..42e3e5e386 100644 --- a/tests/tcg/xtensa/test_sr.S +++ b/tests/tcg/xtensa/test_sr.S @@ -44,6 +44,7 @@ test_end test_sr acchi, 1 test_sr acclo, 1 +test_sr /*memctl*/97, 0 test_sr_mask /*atomctl*/99, 0, 0 test_sr_mask /*br*/4, 0, 0 test_sr_mask /*cacheattr*/98, 0, 0 diff --git a/tests/tcg/xtensa/test_timer.S b/tests/tcg/xtensa/test_timer.S index f8c6f7423a..6cda71adbb 100644 --- a/tests/tcg/xtensa/test_timer.S +++ b/tests/tcg/xtensa/test_timer.S @@ -1,12 +1,56 @@ #include "macros.inc" +#define CCOUNT_SHIFT 4 +#define WAIT_LOOPS 20 + +.macro make_ccount_delta target, delta + rsr \delta, ccount + rsr \target, ccount + sub \delta, \target, \delta + slli \delta, \delta, CCOUNT_SHIFT + add \target, \target, \delta +.endm + test_suite timer test ccount rsr a3, ccount rsr a4, ccount - sub a3, a4, a3 - assert eqi, a3, 1 + assert ne, a3, a4 +test_end + +test ccount_write + rsr a3, ccount + rsr a4, ccount + sub a4, a4, a3 + movi a2, 0x12345678 + wsr a2, ccount + esync + rsr a3, ccount + sub a3, a3, a2 + slli a4, a4, 2 + assert ltu, a3, a4 +test_end + +test ccount_update_deadline + movi a2, 0 + wsr a2, intenable + rsr a2, interrupt + wsr a2, intclear + movi a2, 0 + wsr a2, ccompare1 + wsr a2, ccompare2 + movi a2, 0x12345678 + wsr a2, ccompare0 + rsr a3, interrupt + assert eqi, a3, 0 + movi a2, 0x12345677 + wsr a2, ccount + esync + nop + rsr a2, interrupt + movi a3, 1 << XCHAL_TIMER0_INTERRUPT + assert eq, a2, a3 test_end test ccompare @@ -18,18 +62,18 @@ test ccompare wsr a2, ccompare1 wsr a2, ccompare2 - movi a3, 20 - rsr a2, ccount - addi a2, a2, 20 + make_ccount_delta a2, a15 wsr a2, ccompare0 - rsr a2, interrupt - assert eqi, a2, 0 - loop a3, 1f - rsr a3, interrupt - bnez a3, 2f 1: - test_fail + rsr a3, interrupt + rsr a4, ccount + rsr a5, interrupt + sub a4, a4, a2 + bgez a4, 2f + assert eqi, a3, 0 + j 1b 2: + assert nei, a5, 0 test_end test ccompare0_interrupt @@ -42,15 +86,14 @@ test ccompare0_interrupt wsr a2, ccompare1 wsr a2, ccompare2 - movi a3, 20 - rsr a2, ccount - addi a2, a2, 20 + movi a3, WAIT_LOOPS + make_ccount_delta a2, a15 wsr a2, ccompare0 rsync rsr a2, interrupt assert eqi, a2, 0 - movi a2, 
0x40 + movi a2, 1 << XCHAL_TIMER0_INTERRUPT wsr a2, intenable rsil a2, 0 loop a3, 1f @@ -72,14 +115,13 @@ test ccompare1_interrupt wsr a2, ccompare0 wsr a2, ccompare2 - movi a3, 20 - rsr a2, ccount - addi a2, a2, 20 + movi a3, WAIT_LOOPS + make_ccount_delta a2, a15 wsr a2, ccompare1 rsync rsr a2, interrupt assert eqi, a2, 0 - movi a2, 0x400 + movi a2, 1 << XCHAL_TIMER1_INTERRUPT wsr a2, intenable rsil a2, 2 loop a3, 1f @@ -99,14 +141,13 @@ test ccompare2_interrupt wsr a2, ccompare0 wsr a2, ccompare1 - movi a3, 20 - rsr a2, ccount - addi a2, a2, 20 + movi a3, WAIT_LOOPS + make_ccount_delta a2, a15 wsr a2, ccompare2 rsync rsr a2, interrupt assert eqi, a2, 0 - movi a2, 0x2000 + movi a2, 1 << XCHAL_TIMER2_INTERRUPT wsr a2, intenable rsil a2, 4 loop a3, 1f @@ -125,17 +166,16 @@ test ccompare_interrupt_masked movi a2, 0 wsr a2, ccompare2 - movi a3, 40 - rsr a2, ccount - addi a2, a2, 20 + movi a3, 2 * WAIT_LOOPS + make_ccount_delta a2, a15 wsr a2, ccompare1 - addi a2, a2, 20 + add a2, a2, a15 wsr a2, ccompare0 rsync rsr a2, interrupt assert eqi, a2, 0 - movi a2, 0x40 + movi a2, 1 << XCHAL_TIMER0_INTERRUPT wsr a2, intenable rsil a2, 0 loop a3, 1f @@ -156,17 +196,16 @@ test ccompare_interrupt_masked_waiti movi a2, 0 wsr a2, ccompare2 - movi a3, 40 - rsr a2, ccount - addi a2, a2, 20 + movi a3, 2 * WAIT_LOOPS + make_ccount_delta a2, a15 wsr a2, ccompare1 - addi a2, a2, 20 + add a2, a2, a15 wsr a2, ccompare0 rsync rsr a2, interrupt assert eqi, a2, 0 - movi a2, 0x40 + movi a2, 1 << XCHAL_TIMER0_INTERRUPT wsr a2, intenable waiti 0 test_fail diff --git a/tests/test-vmstate.c b/tests/test-vmstate.c index d2f529b831..9d87faf12b 100644 --- a/tests/test-vmstate.c +++ b/tests/test-vmstate.c @@ -544,6 +544,150 @@ static void test_arr_ptr_str_no0_load(void) } } +/* test QTAILQ migration */ +typedef struct TestQtailqElement TestQtailqElement; + +struct TestQtailqElement { + bool b; + uint8_t u8; + QTAILQ_ENTRY(TestQtailqElement) next; +}; + +typedef struct TestQtailq { + int16_t i16; + QTAILQ_HEAD(TestQtailqHead, TestQtailqElement) q; + int32_t i32; +} TestQtailq; + +static const VMStateDescription vmstate_q_element = { + .name = "test/queue-element", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_BOOL(b, TestQtailqElement), + VMSTATE_UINT8(u8, TestQtailqElement), + VMSTATE_END_OF_LIST() + }, +}; + +static const VMStateDescription vmstate_q = { + .name = "test/queue", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_INT16(i16, TestQtailq), + VMSTATE_QTAILQ_V(q, TestQtailq, 1, vmstate_q_element, TestQtailqElement, + next), + VMSTATE_INT32(i32, TestQtailq), + VMSTATE_END_OF_LIST() + } +}; + +uint8_t wire_q[] = { + /* i16 */ 0xfe, 0x0, + /* start of element 0 of q */ 0x01, + /* .b */ 0x01, + /* .u8 */ 0x82, + /* start of element 1 of q */ 0x01, + /* b */ 0x00, + /* u8 */ 0x41, + /* end of q */ 0x00, + /* i32 */ 0x00, 0x01, 0x11, 0x70, + QEMU_VM_EOF, /* just to ensure we won't get EOF reported prematurely */ +}; + +static void test_save_q(void) +{ + TestQtailq obj_q = { + .i16 = -512, + .i32 = 70000, + }; + + TestQtailqElement obj_qe1 = { + .b = true, + .u8 = 130, + }; + + TestQtailqElement obj_qe2 = { + .b = false, + .u8 = 65, + }; + + QTAILQ_INIT(&obj_q.q); + QTAILQ_INSERT_TAIL(&obj_q.q, &obj_qe1, next); + QTAILQ_INSERT_TAIL(&obj_q.q, &obj_qe2, next); + + save_vmstate(&vmstate_q, &obj_q); + compare_vmstate(wire_q, sizeof(wire_q)); +} + +static void test_load_q(void) +{ + TestQtailq obj_q = { + .i16 = -512, + .i32 = 70000, + }; + + 
TestQtailqElement obj_qe1 = { + .b = true, + .u8 = 130, + }; + + TestQtailqElement obj_qe2 = { + .b = false, + .u8 = 65, + }; + + QTAILQ_INIT(&obj_q.q); + QTAILQ_INSERT_TAIL(&obj_q.q, &obj_qe1, next); + QTAILQ_INSERT_TAIL(&obj_q.q, &obj_qe2, next); + + QEMUFile *fsave = open_test_file(true); + + qemu_put_buffer(fsave, wire_q, sizeof(wire_q)); + g_assert(!qemu_file_get_error(fsave)); + qemu_fclose(fsave); + + QEMUFile *fload = open_test_file(false); + TestQtailq tgt; + + QTAILQ_INIT(&tgt.q); + vmstate_load_state(fload, &vmstate_q, &tgt, 1); + char eof = qemu_get_byte(fload); + g_assert(!qemu_file_get_error(fload)); + g_assert_cmpint(tgt.i16, ==, obj_q.i16); + g_assert_cmpint(tgt.i32, ==, obj_q.i32); + g_assert_cmpint(eof, ==, QEMU_VM_EOF); + + TestQtailqElement *qele_from = QTAILQ_FIRST(&obj_q.q); + TestQtailqElement *qlast_from = QTAILQ_LAST(&obj_q.q, TestQtailqHead); + TestQtailqElement *qele_to = QTAILQ_FIRST(&tgt.q); + TestQtailqElement *qlast_to = QTAILQ_LAST(&tgt.q, TestQtailqHead); + + while (1) { + g_assert_cmpint(qele_to->b, ==, qele_from->b); + g_assert_cmpint(qele_to->u8, ==, qele_from->u8); + if ((qele_from == qlast_from) || (qele_to == qlast_to)) { + break; + } + qele_from = QTAILQ_NEXT(qele_from, next); + qele_to = QTAILQ_NEXT(qele_to, next); + } + + g_assert_cmpint((uintptr_t) qele_from, ==, (uintptr_t) qlast_from); + g_assert_cmpint((uintptr_t) qele_to, ==, (uintptr_t) qlast_to); + + /* clean up */ + TestQtailqElement *qele; + while (!QTAILQ_EMPTY(&tgt.q)) { + qele = QTAILQ_LAST(&tgt.q, TestQtailqHead); + QTAILQ_REMOVE(&tgt.q, qele, next); + free(qele); + qele = NULL; + } + qemu_fclose(fload); +} + int main(int argc, char **argv) { temp_fd = mkstemp(temp_file); @@ -562,6 +706,9 @@ int main(int argc, char **argv) test_arr_ptr_str_no0_save); g_test_add_func("/vmstate/array/ptr/str/no0/load", test_arr_ptr_str_no0_load); + g_test_add_func("/vmstate/qtailq/save/saveq", test_save_q); + g_test_add_func("/vmstate/qtailq/load/loadq", test_load_q); + g_test_run(); close(temp_fd); diff --git a/util/mmap-alloc.c b/util/mmap-alloc.c index 5a85aa3c89..2f55f5e94f 100644 --- a/util/mmap-alloc.c +++ b/util/mmap-alloc.c @@ -12,6 +12,7 @@ #include "qemu/osdep.h" #include "qemu/mmap-alloc.h" +#include "qemu/host-utils.h" #define HUGETLBFS_MAGIC 0x958458f6 @@ -61,18 +62,18 @@ void *qemu_ram_mmap(int fd, size_t size, size_t align, bool shared) #else void *ptr = mmap(0, total, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); #endif - size_t offset = QEMU_ALIGN_UP((uintptr_t)ptr, align) - (uintptr_t)ptr; + size_t offset; void *ptr1; if (ptr == MAP_FAILED) { return MAP_FAILED; } - /* Make sure align is a power of 2 */ - assert(!(align & (align - 1))); + assert(is_power_of_2(align)); /* Always align to host page size */ assert(align >= getpagesize()); + offset = QEMU_ALIGN_UP((uintptr_t)ptr, align) - (uintptr_t)ptr; ptr1 = mmap(ptr + offset, size, PROT_READ | PROT_WRITE, MAP_FIXED | (fd == -1 ? MAP_ANONYMOUS : 0) | @@ -83,22 +84,20 @@ void *qemu_ram_mmap(int fd, size_t size, size_t align, bool shared) return MAP_FAILED; } - ptr += offset; - total -= offset; - if (offset > 0) { - munmap(ptr - offset, offset); + munmap(ptr, offset); } /* * Leave a single PROT_NONE page allocated after the RAM block, to serve as * a guard page guarding against potential buffer overflows. 
*/ + total -= offset; if (total > size + getpagesize()) { - munmap(ptr + size + getpagesize(), total - size - getpagesize()); + munmap(ptr1 + size + getpagesize(), total - size - getpagesize()); } - return ptr; + return ptr1; } void qemu_ram_munmap(void *ptr, size_t size) diff --git a/util/oslib-win32.c b/util/oslib-win32.c index d09863cc9d..0b1890fd33 100644 --- a/util/oslib-win32.c +++ b/util/oslib-win32.c @@ -327,6 +327,7 @@ char *qemu_get_exec_dir(void) return g_strdup(exec_dir); } +#if !GLIB_CHECK_VERSION(2, 50, 0) /* * The original implementation of g_poll from glib has a problem on Windows * when using timeouts < 10 ms. @@ -530,6 +531,7 @@ gint g_poll(GPollFD *fds, guint nfds, gint timeout) return retval; } +#endif int getpagesize(void) { diff --git a/util/uri.c b/util/uri.c index 70a9cbcbd2..21b1828170 100644 --- a/util/uri.c +++ b/util/uri.c @@ -342,7 +342,7 @@ rfc3986_parse_port(URI *uri, const char **str) * @uri: pointer to an URI structure * @str: the string to analyze * - * Parse an user informations part and fills in the appropriate fields + * Parse a user information part and fill in the appropriate fields * of the @uri structure * * userinfo = *( unreserved / pct-encoded / sub-delims / ":" ) @@ -508,7 +508,7 @@ rfc3986_parse_authority(URI *uri, const char **str) cur = *str; /* - * try to parse an userinfo and check for the trailing @ + * try to parse a userinfo and check for the trailing @ */ ret = rfc3986_parse_user_info(uri, &cur); if ((ret != 0) || (*cur != '@')) diff --git a/vl.c b/vl.c index abb0900fe4..68e8c003d1 100644 --- a/vl.c +++ b/vl.c @@ -182,6 +182,7 @@ bool boot_strict; uint8_t *boot_splash_filedata; size_t boot_splash_filedata_size; uint8_t qemu_extra_params_fw[2]; +int only_migratable; /* turn it off unless user states otherwise */ int icount_align_option; @@ -3884,6 +3885,9 @@ int main(int argc, char **argv, char **envp) } incoming = optarg; break; + case QEMU_OPTION_only_migratable: + only_migratable = 1; + break; case QEMU_OPTION_nodefaults: has_defaults = 0; break; |
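
Note (not part of the diff above): the new tests/test-vmstate.c cases exercise the VMSTATE_QTAILQ_V macro for migrating a QTAILQ. Below is a minimal sketch of how that macro would typically be used from a device's VMStateDescription, mirroring the TestQtailq/TestQtailqElement pattern in the test; the DummyDevice/DummyRequest names and fields are hypothetical, and only the macro's argument order (field, owner type, version, element vmsd, element type, entry link) is taken from the test code.

/* Hypothetical example, assuming the usual QEMU headers for the 2.8/2.9 era. */
#include "qemu/osdep.h"
#include "qemu/queue.h"
#include "migration/vmstate.h"

/* A queue of outstanding requests that must survive migration. */
typedef struct DummyRequest {
    uint32_t tag;
    QTAILQ_ENTRY(DummyRequest) next;
} DummyRequest;

typedef struct DummyDevice {
    QTAILQ_HEAD(DummyRequestHead, DummyRequest) reqs;
} DummyDevice;

/* vmsd describing one queue element, like vmstate_q_element in the test. */
static const VMStateDescription vmstate_dummy_request = {
    .name = "dummy-device/request",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(tag, DummyRequest),
        VMSTATE_END_OF_LIST()
    },
};

/* vmsd describing the owner, like vmstate_q in the test. */
static const VMStateDescription vmstate_dummy_device = {
    .name = "dummy-device",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        /* field, owner type, version, element vmsd, element type, link */
        VMSTATE_QTAILQ_V(reqs, DummyDevice, 1, vmstate_dummy_request,
                         DummyRequest, next),
        VMSTATE_END_OF_LIST()
    },
};

As the wire_q[] array in the test shows, each queue element is preceded on the wire by a 0x01 marker byte and the queue is terminated by a 0x00 byte, so no explicit element count needs to be stored; on load the destination queue only has to be QTAILQ_INIT'd before vmstate_load_state() repopulates it.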