| author | Stefan Hajnoczi <stefanha@redhat.com> | 2022-09-20 16:22:26 -0400 |
|---|---|---|
| committer | Stefan Hajnoczi <stefanha@redhat.com> | 2022-09-20 16:22:26 -0400 |
| commit | 1b3a3383df62e32485161712405c2e3dd8b478b0 (patch) | |
| tree | 43165a4e34586751ca3375e584740824915843a3 /tests | |
| parent | 832e9e33bc51f52fc3ea667d48912e95af3e28f3 (diff) | |
| parent | df22fbb751dc72f321218c3fb192730a47ad59a9 (diff) | |
Merge tag 'for-upstream' of https://gitlab.com/bonzini/qemu into staging
* add help option for -audio and -audiodev
* another missing memory barrier for dirty pages
* target/i386: Raise #GP on unaligned m128 accesses
* coverity fixes + improvements to components
* add MMX and 3DNow! tests
* SSE4a fixes
* target/i386: TCG translation cleanups
* update qboot submodule

\# -----BEGIN PGP SIGNATURE-----
\#
\# iQFIBAABCAAyFiEE8TM4V0tmI4mGbHaCv/vSX3jHroMFAmMocZcUHHBib256aW5p
\# QHJlZGhhdC5jb20ACgkQv/vSX3jHroNV7Qf+NEoB8R0ug+ClMRe1Qqt8FXEd0eXE
\# nT19q4rOWfmW4/L+wI6gpxhbxrxOuLwoZ8YvD8c6rQAdexMoHoeTvA1PAca4zZTo
\# ISmW3bXsoHN2uGLPz4CKhjKBLCANtDkh3EWCwRFkLSRCLSRDhKPrG1Ue3fOgQ6GO
\# riROcxbyYzvU/4uefSW+xG/Im9gftNF6occZZ59LrK7Xd8kwlb+E+EdsmzFw5f8O
\# Q9irVQ8pX9ZM4BK2KiT16nZ0uSRwJqSJKbLI670nUEsj1jQCIgU3srgZHjAIvoir
\# yivDs6oktgS/HkPD5CQoTX+fVDgEDM1TTF6P8r7uJopPXpzz+AHswfSJmg==
\# =RVCS
\# -----END PGP SIGNATURE-----
\# gpg: Signature made Mon 19 Sep 2022 09:41:43 EDT
\# gpg: using RSA key F13338574B662389866C7682BFFBD25F78C7AE83
\# gpg: issuer "pbonzini@redhat.com"
\# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>" [full]
\# gpg: aka "Paolo Bonzini <pbonzini@redhat.com>" [full]
\# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4 E2F7 7E15 100C CD36 69B1
\# Subkey fingerprint: F133 3857 4B66 2389 866C 7682 BFFB D25F 78C7 AE83

* tag 'for-upstream' of https://gitlab.com/bonzini/qemu: (21 commits)
  qboot: update to latest submodule
  build: remove extra parentheses causing missing rebuilds
  target/i386: introduce insn_get_addr
  target/i386: REPZ and REPNZ are mutually exclusive
  target/i386: fix INSERTQ implementation
  target/i386: correctly mask SSE4a bit indices in register operands
  audio: add help option for -audio and -audiodev
  tests/tcg: remove old SSE tests
  tests/tcg: refine MMX support in SSE tests
  tests/tcg: i386: add MMX and 3DNow! tests
  tests/tcg: i386: fix typos in 3DNow! instructions
  tests: unit: add NULL-pointer check
  tests: test-qga: close socket on failure to connect
  tests: unit: simplify test-visitor-serialization list tests
  smbios: sanitize type from external type before checking have_fields_bitmap
  coverity: put NUBus under m68k component
  coverity: add new RISC-V component
  spapr_pci: fix leak in spapr_phb_vfio_get_loc_code
  kvm: fix memory leak on failure to read stats descriptors
  target/i386: Raise #GP on unaligned m128 accesses when required.
  ...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
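The tests/ portion of this pull request replaces the old hand-written SSE checks in test-i386.c with generated MMX and 3DNow! tests: the new tests/tcg/i386/test-mmx.py reads the x86.csv instruction table, keeps only rows whose CPUID column matches the requested extensions (3DNOW for test-3dnow.h, MMX/SSE/SSE2/SSE3/SSSE3 for test-mmx.h), and emits one TEST(...) line per instruction form for the C harness to include. Below is a condensed sketch of that loop; the `generate` helper name is illustrative and the operand expansion done by the real InsnGenerator class is elided.

```python
#!/usr/bin/env python3
# Condensed sketch of the generator loop added in tests/tcg/i386/test-mmx.py.
# Only the CSV filtering and TEST(...) emission are shown; the real script
# also expands each operand into register/memory/imm8 variants.
import csv
import sys

def strip_comments(lines):
    # x86.csv uses '#' for comment lines
    for l in lines:
        if l != '' and l[0] != '#':
            yield l

def generate(csv_path, out_path, archs):
    n = 0
    with open(csv_path, newline='') as csvfile, open(out_path, 'w') as outf:
        outf.write("// Generated by test-mmx.py. Do not edit.\n")
        for row in csv.reader(strip_comments(csvfile)):
            insn = row[0].replace(',', '').split()  # e.g. ['PFADD', 'mm1', 'mm2/m64']
            cpuid = row[6]                          # CPUID/extension column of x86.csv
            if cpuid in archs:
                # this sketch emits the mnemonic as-is instead of expanding operands
                outf.write('TEST(%d, "%s", I)\n' % (n, ' '.join(insn)))
                n += 1
        outf.write("#undef TEST\n")

if __name__ == "__main__":
    # mirrors the new Makefile rule, e.g.:
    #   $(PYTHON) $(I386_SRC)/test-mmx.py $(I386_SRC)/x86.csv $@ 3DNOW
    generate(sys.argv[1], sys.argv[2], sys.argv[3:])
```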
Diffstat (limited to 'tests')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | tests/tcg/i386/Makefile.target | 24 |
| -rw-r--r-- | tests/tcg/i386/test-3dnow.c | 3 |
| -rwxr-xr-x | tests/tcg/i386/test-avx.py | 33 |
| -rw-r--r-- | tests/tcg/i386/test-i386.c | 573 |
| -rw-r--r-- | tests/tcg/i386/test-mmx.c | 315 |
| -rwxr-xr-x | tests/tcg/i386/test-mmx.py | 244 |
| -rw-r--r-- | tests/tcg/i386/x86.csv | 4 |
| -rw-r--r-- | tests/tcg/x86_64/Makefile.target | 1 |
| -rw-r--r-- | tests/unit/check-block-qdict.c | 2 |
| -rw-r--r-- | tests/unit/test-qga.c | 1 |
| -rw-r--r-- | tests/unit/test-visitor-serialization.c | 157 |
11 files changed, 682 insertions, 675 deletions
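One detail of the new generator worth noting before the diff itself: immediate operands are not enumerated over all 256 values. test-mmx.py assigns each instruction a mask of meaningful imm8 bits (e.g. 0x07 for PEXTRW, 0x3f for the shift-by-immediate forms) and its vals() method yields only immediates whose set bits fall inside that mask. The following is a standalone sketch of that enumeration; the function name `imm8_values` is illustrative, the logic follows vals() from the diff.

```python
# Sketch of the imm8 enumeration used by ArgImm8u.vals() in test-mmx.py:
# count upward from 0 and, whenever a value sets a bit outside the
# per-instruction mask, jump forward until those stray bits clear, so only
# "interesting" immediates end up in the generated TEST(...) lines.
def imm8_values(mask):
    yield 0
    n = 0
    while n != mask:
        n += 1
        # adding the stray bits carries them out of the low positions,
        # landing on the next value that is a subset of the mask
        while n & ~mask:
            n += n & ~mask
        yield n

# PEXTRW uses mask 0x07, so only selector values 0..7 are generated
print(list(imm8_values(0x07)))   # [0, 1, 2, 3, 4, 5, 6, 7]
# a sparse mask keeps only subsets of its bits
print(list(imm8_values(0x05)))   # [0, 1, 4, 5]
```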
diff --git a/tests/tcg/i386/Makefile.target b/tests/tcg/i386/Makefile.target index be21b81b96..599f192529 100644 --- a/tests/tcg/i386/Makefile.target +++ b/tests/tcg/i386/Makefile.target @@ -7,8 +7,8 @@ VPATH += $(I386_SRC) I386_SRCS=$(notdir $(wildcard $(I386_SRC)/*.c)) ALL_X86_TESTS=$(I386_SRCS:.c=) -SKIP_I386_TESTS=test-i386-ssse3 test-avx -X86_64_TESTS:=$(filter test-i386-bmi2 test-i386-ssse3 test-avx, $(ALL_X86_TESTS)) +SKIP_I386_TESTS=test-i386-ssse3 test-avx test-3dnow test-mmx +X86_64_TESTS:=$(filter test-i386-bmi2 $(SKIP_I386_TESTS), $(ALL_X86_TESTS)) test-i386-sse-exceptions: CFLAGS += -msse4.1 -mfpmath=sse run-test-i386-sse-exceptions: QEMU_OPTS += -cpu max @@ -82,9 +82,27 @@ run-plugin-sha512-sse-with-%: QEMU_OPTS+=-cpu max TESTS+=sha512-sse -CLEANFILES += test-avx.h +CLEANFILES += test-avx.h test-mmx.h test-3dnow.h +test-3dnow.h: test-mmx.py x86.csv + $(PYTHON) $(I386_SRC)/test-mmx.py $(I386_SRC)/x86.csv $@ 3DNOW + +test-mmx.h: test-mmx.py x86.csv + $(PYTHON) $(I386_SRC)/test-mmx.py $(I386_SRC)/x86.csv $@ MMX SSE SSE2 SSE3 SSSE3 + test-avx.h: test-avx.py x86.csv $(PYTHON) $(I386_SRC)/test-avx.py $(I386_SRC)/x86.csv $@ +test-3dnow: CFLAGS += -masm=intel -O -I. +run-test-3dnow: QEMU_OPTS += -cpu max +run-plugin-test-3dnow: QEMU_OPTS += -cpu max +test-3dnow: test-3dnow.h + +test-mmx: CFLAGS += -masm=intel -O -I. +run-test-mmx: QEMU_OPTS += -cpu max +run-plugin-test-mmx: QEMU_OPTS += -cpu max +test-mmx: test-mmx.h + test-avx: CFLAGS += -masm=intel -O -I. +run-test-avx: QEMU_OPTS += -cpu max +run-plugin-test-avx: QEMU_OPTS += -cpu max test-avx: test-avx.h diff --git a/tests/tcg/i386/test-3dnow.c b/tests/tcg/i386/test-3dnow.c new file mode 100644 index 0000000000..67abc68677 --- /dev/null +++ b/tests/tcg/i386/test-3dnow.c @@ -0,0 +1,3 @@ +#define EMMS "femms" +#define TEST_FILE "test-3dnow.h" +#include "test-mmx.c" diff --git a/tests/tcg/i386/test-avx.py b/tests/tcg/i386/test-avx.py index 6eb455a8b4..e16a3d8bee 100755 --- a/tests/tcg/i386/test-avx.py +++ b/tests/tcg/i386/test-avx.py @@ -7,7 +7,6 @@ import sys from fnmatch import fnmatch archs = [ - # TODO: MMX? 
"SSE", "SSE2", "SSE3", "SSSE3", "SSE4_1", "SSE4_2", ] @@ -104,7 +103,11 @@ class XMMArg(): class MMArg(): isxmm = True - ismem = False # TODO + def __init__(self, mw): + if mw not in [0, 32, 64]: + raise Exception("Bad mem width: %s" % mw) + self.mw = mw + self.ismem = mw != 0 def regstr(self, n): return "mm%d" % (n & 7) @@ -170,6 +173,9 @@ class ArgMem(): def regstr(self, n): return mem_w(self.w) +class SkipInstruction(Exception): + pass + def ArgGenerator(arg, op): if arg[:3] == 'xmm' or arg[:3] == "ymm": if "/" in arg: @@ -180,7 +186,13 @@ def ArgGenerator(arg, op): else: return XMMArg(arg[0], 0); elif arg[:2] == 'mm': - return MMArg(); + if "/" in arg: + r, m = arg.split('/') + if (m[0] != 'm'): + raise Exception("Expected /m: %s", arg) + return MMArg(int(m[1:])); + else: + return MMArg(0); elif arg[:4] == 'imm8': return ArgImm8u(op); elif arg == '<XMM0>': @@ -218,8 +230,12 @@ class InsnGenerator: try: self.args = list(ArgGenerator(a, op) for a in args) + if not any((x.isxmm for x in self.args)): + raise SkipInstruction if len(self.args) > 0 and self.args[-1] is None: self.args = self.args[:-1] + except SkipInstruction: + raise except Exception as e: raise Exception("Bad arg %s: %s" % (op, e)) @@ -340,10 +356,13 @@ def main(): continue cpuid = row[6] if cpuid in archs: - g = InsnGenerator(insn[0], insn[1:]) - for insn in g.gen(): - outf.write('TEST(%d, "%s", %s)\n' % (n, insn, g.optype)) - n += 1 + try: + g = InsnGenerator(insn[0], insn[1:]) + for insn in g.gen(): + outf.write('TEST(%d, "%s", %s)\n' % (n, insn, g.optype)) + n += 1 + except SkipInstruction: + pass outf.write("#undef TEST\n") csvfile.close() diff --git a/tests/tcg/i386/test-i386.c b/tests/tcg/i386/test-i386.c index e6b308a2c0..864c4e620d 100644 --- a/tests/tcg/i386/test-i386.c +++ b/tests/tcg/i386/test-i386.c @@ -34,15 +34,8 @@ #endif //#define LINUX_VM86_IOPL_FIX //#define TEST_P4_FLAGS -#ifdef __SSE__ -#define TEST_SSE #define TEST_CMOV 1 #define TEST_FCOMI 1 -#else -#undef TEST_SSE -#define TEST_CMOV 1 -#define TEST_FCOMI 1 -#endif #if defined(__x86_64__) #define FMT64X "%016lx" @@ -2104,568 +2097,6 @@ static void test_enter(void) TEST_ENTER("w", uint16_t, 31); } -#ifdef TEST_SSE - -typedef int __m64 __attribute__ ((vector_size(8))); -typedef float __m128 __attribute__ ((vector_size(16))); - -typedef union { - double d[2]; - float s[4]; - uint32_t l[4]; - uint64_t q[2]; - __m128 dq; -} XMMReg; - -static uint64_t __attribute__((aligned(16))) test_values[4][2] = { - { 0x456723c698694873, 0xdc515cff944a58ec }, - { 0x1f297ccd58bad7ab, 0x41f21efba9e3e146 }, - { 0x007c62c2085427f8, 0x231be9e8cde7438d }, - { 0x0f76255a085427f8, 0xc233e9e8c4c9439a }, -}; - -#define SSE_OP(op)\ -{\ - asm volatile (#op " %2, %0" : "=x" (r.dq) : "0" (a.dq), "x" (b.dq));\ - printf("%-9s: a=" FMT64X "" FMT64X " b=" FMT64X "" FMT64X " r=" FMT64X "" FMT64X "\n",\ - #op,\ - a.q[1], a.q[0],\ - b.q[1], b.q[0],\ - r.q[1], r.q[0]);\ -} - -#define SSE_OP2(op)\ -{\ - int i;\ - for(i=0;i<2;i++) {\ - a.q[0] = test_values[2*i][0];\ - a.q[1] = test_values[2*i][1];\ - b.q[0] = test_values[2*i+1][0];\ - b.q[1] = test_values[2*i+1][1];\ - SSE_OP(op);\ - }\ -} - -#define MMX_OP2(op)\ -{\ - int i;\ - for(i=0;i<2;i++) {\ - a.q[0] = test_values[2*i][0];\ - b.q[0] = test_values[2*i+1][0];\ - asm volatile (#op " %2, %0" : "=y" (r.q[0]) : "0" (a.q[0]), "y" (b.q[0]));\ - printf("%-9s: a=" FMT64X " b=" FMT64X " r=" FMT64X "\n",\ - #op,\ - a.q[0],\ - b.q[0],\ - r.q[0]);\ - }\ - SSE_OP2(op);\ -} - -#define SHUF_OP(op, ib)\ -{\ - a.q[0] = test_values[0][0];\ - a.q[1] = 
test_values[0][1];\ - b.q[0] = test_values[1][0];\ - b.q[1] = test_values[1][1];\ - asm volatile (#op " $" #ib ", %2, %0" : "=x" (r.dq) : "0" (a.dq), "x" (b.dq));\ - printf("%-9s: a=" FMT64X "" FMT64X " b=" FMT64X "" FMT64X " ib=%02x r=" FMT64X "" FMT64X "\n",\ - #op,\ - a.q[1], a.q[0],\ - b.q[1], b.q[0],\ - ib,\ - r.q[1], r.q[0]);\ -} - -#define PSHUF_OP(op, ib)\ -{\ - int i;\ - for(i=0;i<2;i++) {\ - a.q[0] = test_values[2*i][0];\ - a.q[1] = test_values[2*i][1];\ - asm volatile (#op " $" #ib ", %1, %0" : "=x" (r.dq) : "x" (a.dq));\ - printf("%-9s: a=" FMT64X "" FMT64X " ib=%02x r=" FMT64X "" FMT64X "\n",\ - #op,\ - a.q[1], a.q[0],\ - ib,\ - r.q[1], r.q[0]);\ - }\ -} - -#define SHIFT_IM(op, ib)\ -{\ - int i;\ - for(i=0;i<2;i++) {\ - a.q[0] = test_values[2*i][0];\ - a.q[1] = test_values[2*i][1];\ - asm volatile (#op " $" #ib ", %0" : "=x" (r.dq) : "0" (a.dq));\ - printf("%-9s: a=" FMT64X "" FMT64X " ib=%02x r=" FMT64X "" FMT64X "\n",\ - #op,\ - a.q[1], a.q[0],\ - ib,\ - r.q[1], r.q[0]);\ - }\ -} - -#define SHIFT_OP(op, ib)\ -{\ - int i;\ - SHIFT_IM(op, ib);\ - for(i=0;i<2;i++) {\ - a.q[0] = test_values[2*i][0];\ - a.q[1] = test_values[2*i][1];\ - b.q[0] = ib;\ - b.q[1] = 0;\ - asm volatile (#op " %2, %0" : "=x" (r.dq) : "0" (a.dq), "x" (b.dq));\ - printf("%-9s: a=" FMT64X "" FMT64X " b=" FMT64X "" FMT64X " r=" FMT64X "" FMT64X "\n",\ - #op,\ - a.q[1], a.q[0],\ - b.q[1], b.q[0],\ - r.q[1], r.q[0]);\ - }\ -} - -#define MOVMSK(op)\ -{\ - int i, reg;\ - for(i=0;i<2;i++) {\ - a.q[0] = test_values[2*i][0];\ - a.q[1] = test_values[2*i][1];\ - asm volatile (#op " %1, %0" : "=r" (reg) : "x" (a.dq));\ - printf("%-9s: a=" FMT64X "" FMT64X " r=%08x\n",\ - #op,\ - a.q[1], a.q[0],\ - reg);\ - }\ -} - -#define SSE_OPS(a) \ -SSE_OP(a ## ps);\ -SSE_OP(a ## ss); - -#define SSE_OPD(a) \ -SSE_OP(a ## pd);\ -SSE_OP(a ## sd); - -#define SSE_COMI(op, field)\ -{\ - unsigned long eflags;\ - XMMReg a, b;\ - a.field[0] = a1;\ - b.field[0] = b1;\ - asm volatile (#op " %2, %1\n"\ - "pushf\n"\ - "pop %0\n"\ - : "=rm" (eflags)\ - : "x" (a.dq), "x" (b.dq));\ - printf("%-9s: a=%f b=%f cc=%04lx\n",\ - #op, a1, b1,\ - eflags & (CC_C | CC_P | CC_Z | CC_S | CC_O | CC_A));\ -} - -void test_sse_comi(double a1, double b1) -{ - SSE_COMI(ucomiss, s); - SSE_COMI(ucomisd, d); - SSE_COMI(comiss, s); - SSE_COMI(comisd, d); -} - -#define CVT_OP_XMM(op)\ -{\ - asm volatile (#op " %1, %0" : "=x" (r.dq) : "x" (a.dq));\ - printf("%-9s: a=" FMT64X "" FMT64X " r=" FMT64X "" FMT64X "\n",\ - #op,\ - a.q[1], a.q[0],\ - r.q[1], r.q[0]);\ -} - -/* Force %xmm0 usage to avoid the case where both register index are 0 - to test instruction decoding more extensively */ -#define CVT_OP_XMM2MMX(op)\ -{\ - asm volatile (#op " %1, %0" : "=y" (r.q[0]) : "x" (a.dq) \ - : "%xmm0"); \ - asm volatile("emms\n"); \ - printf("%-9s: a=" FMT64X "" FMT64X " r=" FMT64X "\n",\ - #op,\ - a.q[1], a.q[0],\ - r.q[0]);\ -} - -#define CVT_OP_MMX2XMM(op)\ -{\ - asm volatile (#op " %1, %0" : "=x" (r.dq) : "y" (a.q[0]));\ - asm volatile("emms\n"); \ - printf("%-9s: a=" FMT64X " r=" FMT64X "" FMT64X "\n",\ - #op,\ - a.q[0],\ - r.q[1], r.q[0]);\ -} - -#define CVT_OP_REG2XMM(op)\ -{\ - asm volatile (#op " %1, %0" : "=x" (r.dq) : "r" (a.l[0]));\ - printf("%-9s: a=%08x r=" FMT64X "" FMT64X "\n",\ - #op,\ - a.l[0],\ - r.q[1], r.q[0]);\ -} - -#define CVT_OP_XMM2REG(op)\ -{\ - asm volatile (#op " %1, %0" : "=r" (r.l[0]) : "x" (a.dq));\ - printf("%-9s: a=" FMT64X "" FMT64X " r=%08x\n",\ - #op,\ - a.q[1], a.q[0],\ - r.l[0]);\ -} - -struct fpxstate { - uint16_t fpuc; - uint16_t fpus; - 
uint16_t fptag; - uint16_t fop; - uint32_t fpuip; - uint16_t cs_sel; - uint16_t dummy0; - uint32_t fpudp; - uint16_t ds_sel; - uint16_t dummy1; - uint32_t mxcsr; - uint32_t mxcsr_mask; - uint8_t fpregs1[8 * 16]; - uint8_t xmm_regs[8 * 16]; - uint8_t dummy2[224]; -}; - -static struct fpxstate fpx_state __attribute__((aligned(16))); -static struct fpxstate fpx_state2 __attribute__((aligned(16))); - -void test_fxsave(void) -{ - struct fpxstate *fp = &fpx_state; - struct fpxstate *fp2 = &fpx_state2; - int i, nb_xmm; - XMMReg a, b; - a.q[0] = test_values[0][0]; - a.q[1] = test_values[0][1]; - b.q[0] = test_values[1][0]; - b.q[1] = test_values[1][1]; - - asm("movdqa %2, %%xmm0\n" - "movdqa %3, %%xmm7\n" -#if defined(__x86_64__) - "movdqa %2, %%xmm15\n" -#endif - " fld1\n" - " fldpi\n" - " fldln2\n" - " fxsave %0\n" - " fxrstor %0\n" - " fxsave %1\n" - " fninit\n" - : "=m" (*(uint32_t *)fp2), "=m" (*(uint32_t *)fp) - : "m" (a), "m" (b)); - printf("fpuc=%04x\n", fp->fpuc); - printf("fpus=%04x\n", fp->fpus); - printf("fptag=%04x\n", fp->fptag); - for(i = 0; i < 3; i++) { - printf("ST%d: " FMT64X " %04x\n", - i, - *(uint64_t *)&fp->fpregs1[i * 16], - *(uint16_t *)&fp->fpregs1[i * 16 + 8]); - } - printf("mxcsr=%08x\n", fp->mxcsr & 0x1f80); -#if defined(__x86_64__) - nb_xmm = 16; -#else - nb_xmm = 8; -#endif - for(i = 0; i < nb_xmm; i++) { - printf("xmm%d: " FMT64X "" FMT64X "\n", - i, - *(uint64_t *)&fp->xmm_regs[i * 16], - *(uint64_t *)&fp->xmm_regs[i * 16 + 8]); - } -} - -void test_sse(void) -{ - XMMReg r, a, b; - int i; - - MMX_OP2(punpcklbw); - MMX_OP2(punpcklwd); - MMX_OP2(punpckldq); - MMX_OP2(packsswb); - MMX_OP2(pcmpgtb); - MMX_OP2(pcmpgtw); - MMX_OP2(pcmpgtd); - MMX_OP2(packuswb); - MMX_OP2(punpckhbw); - MMX_OP2(punpckhwd); - MMX_OP2(punpckhdq); - MMX_OP2(packssdw); - MMX_OP2(pcmpeqb); - MMX_OP2(pcmpeqw); - MMX_OP2(pcmpeqd); - - MMX_OP2(paddq); - MMX_OP2(pmullw); - MMX_OP2(psubusb); - MMX_OP2(psubusw); - MMX_OP2(pminub); - MMX_OP2(pand); - MMX_OP2(paddusb); - MMX_OP2(paddusw); - MMX_OP2(pmaxub); - MMX_OP2(pandn); - - MMX_OP2(pmulhuw); - MMX_OP2(pmulhw); - - MMX_OP2(psubsb); - MMX_OP2(psubsw); - MMX_OP2(pminsw); - MMX_OP2(por); - MMX_OP2(paddsb); - MMX_OP2(paddsw); - MMX_OP2(pmaxsw); - MMX_OP2(pxor); - MMX_OP2(pmuludq); - MMX_OP2(pmaddwd); - MMX_OP2(psadbw); - MMX_OP2(psubb); - MMX_OP2(psubw); - MMX_OP2(psubd); - MMX_OP2(psubq); - MMX_OP2(paddb); - MMX_OP2(paddw); - MMX_OP2(paddd); - - MMX_OP2(pavgb); - MMX_OP2(pavgw); - - asm volatile ("pinsrw $1, %1, %0" : "=y" (r.q[0]) : "r" (0x12345678)); - printf("%-9s: r=" FMT64X "\n", "pinsrw", r.q[0]); - - asm volatile ("pinsrw $5, %1, %0" : "=x" (r.dq) : "r" (0x12345678)); - printf("%-9s: r=" FMT64X "" FMT64X "\n", "pinsrw", r.q[1], r.q[0]); - - a.q[0] = test_values[0][0]; - a.q[1] = test_values[0][1]; - asm volatile ("pextrw $1, %1, %0" : "=r" (r.l[0]) : "y" (a.q[0])); - printf("%-9s: r=%08x\n", "pextrw", r.l[0]); - - asm volatile ("pextrw $5, %1, %0" : "=r" (r.l[0]) : "x" (a.dq)); - printf("%-9s: r=%08x\n", "pextrw", r.l[0]); - - asm volatile ("pmovmskb %1, %0" : "=r" (r.l[0]) : "y" (a.q[0])); - printf("%-9s: r=%08x\n", "pmovmskb", r.l[0]); - - asm volatile ("pmovmskb %1, %0" : "=r" (r.l[0]) : "x" (a.dq)); - printf("%-9s: r=%08x\n", "pmovmskb", r.l[0]); - - { - r.q[0] = -1; - r.q[1] = -1; - - a.q[0] = test_values[0][0]; - a.q[1] = test_values[0][1]; - b.q[0] = test_values[1][0]; - b.q[1] = test_values[1][1]; - asm volatile("maskmovq %1, %0" : - : "y" (a.q[0]), "y" (b.q[0]), "D" (&r) - : "memory"); - printf("%-9s: r=" FMT64X " a=" FMT64X " b=" 
FMT64X "\n", - "maskmov", - r.q[0], - a.q[0], - b.q[0]); - asm volatile("maskmovdqu %1, %0" : - : "x" (a.dq), "x" (b.dq), "D" (&r) - : "memory"); - printf("%-9s: r=" FMT64X "" FMT64X " a=" FMT64X "" FMT64X " b=" FMT64X "" FMT64X "\n", - "maskmov", - r.q[1], r.q[0], - a.q[1], a.q[0], - b.q[1], b.q[0]); - } - - asm volatile ("emms"); - - SSE_OP2(punpcklqdq); - SSE_OP2(punpckhqdq); - SSE_OP2(andps); - SSE_OP2(andpd); - SSE_OP2(andnps); - SSE_OP2(andnpd); - SSE_OP2(orps); - SSE_OP2(orpd); - SSE_OP2(xorps); - SSE_OP2(xorpd); - - SSE_OP2(unpcklps); - SSE_OP2(unpcklpd); - SSE_OP2(unpckhps); - SSE_OP2(unpckhpd); - - SHUF_OP(shufps, 0x78); - SHUF_OP(shufpd, 0x02); - - PSHUF_OP(pshufd, 0x78); - PSHUF_OP(pshuflw, 0x78); - PSHUF_OP(pshufhw, 0x78); - - SHIFT_OP(psrlw, 7); - SHIFT_OP(psrlw, 16); - SHIFT_OP(psraw, 7); - SHIFT_OP(psraw, 16); - SHIFT_OP(psllw, 7); - SHIFT_OP(psllw, 16); - - SHIFT_OP(psrld, 7); - SHIFT_OP(psrld, 32); - SHIFT_OP(psrad, 7); - SHIFT_OP(psrad, 32); - SHIFT_OP(pslld, 7); - SHIFT_OP(pslld, 32); - - SHIFT_OP(psrlq, 7); - SHIFT_OP(psrlq, 32); - SHIFT_OP(psllq, 7); - SHIFT_OP(psllq, 32); - - SHIFT_IM(psrldq, 16); - SHIFT_IM(psrldq, 7); - SHIFT_IM(pslldq, 16); - SHIFT_IM(pslldq, 7); - - MOVMSK(movmskps); - MOVMSK(movmskpd); - - /* FPU specific ops */ - - { - uint32_t mxcsr; - asm volatile("stmxcsr %0" : "=m" (mxcsr)); - printf("mxcsr=%08x\n", mxcsr & 0x1f80); - asm volatile("ldmxcsr %0" : : "m" (mxcsr)); - } - - test_sse_comi(2, -1); - test_sse_comi(2, 2); - test_sse_comi(2, 3); - test_sse_comi(2, q_nan.d); - test_sse_comi(q_nan.d, -1); - - for(i = 0; i < 2; i++) { - a.s[0] = 2.7; - a.s[1] = 3.4; - a.s[2] = 4; - a.s[3] = -6.3; - b.s[0] = 45.7; - b.s[1] = 353.4; - b.s[2] = 4; - b.s[3] = 56.3; - if (i == 1) { - a.s[0] = q_nan.d; - b.s[3] = q_nan.d; - } - - SSE_OPS(add); - SSE_OPS(mul); - SSE_OPS(sub); - SSE_OPS(min); - SSE_OPS(div); - SSE_OPS(max); - SSE_OPS(sqrt); - SSE_OPS(cmpeq); - SSE_OPS(cmplt); - SSE_OPS(cmple); - SSE_OPS(cmpunord); - SSE_OPS(cmpneq); - SSE_OPS(cmpnlt); - SSE_OPS(cmpnle); - SSE_OPS(cmpord); - - - a.d[0] = 2.7; - a.d[1] = -3.4; - b.d[0] = 45.7; - b.d[1] = -53.4; - if (i == 1) { - a.d[0] = q_nan.d; - b.d[1] = q_nan.d; - } - SSE_OPD(add); - SSE_OPD(mul); - SSE_OPD(sub); - SSE_OPD(min); - SSE_OPD(div); - SSE_OPD(max); - SSE_OPD(sqrt); - SSE_OPD(cmpeq); - SSE_OPD(cmplt); - SSE_OPD(cmple); - SSE_OPD(cmpunord); - SSE_OPD(cmpneq); - SSE_OPD(cmpnlt); - SSE_OPD(cmpnle); - SSE_OPD(cmpord); - } - - /* float to float/int */ - a.s[0] = 2.7; - a.s[1] = 3.4; - a.s[2] = 4; - a.s[3] = -6.3; - CVT_OP_XMM(cvtps2pd); - CVT_OP_XMM(cvtss2sd); - CVT_OP_XMM2MMX(cvtps2pi); - CVT_OP_XMM2MMX(cvttps2pi); - CVT_OP_XMM2REG(cvtss2si); - CVT_OP_XMM2REG(cvttss2si); - CVT_OP_XMM(cvtps2dq); - CVT_OP_XMM(cvttps2dq); - - a.d[0] = 2.6; - a.d[1] = -3.4; - CVT_OP_XMM(cvtpd2ps); - CVT_OP_XMM(cvtsd2ss); - CVT_OP_XMM2MMX(cvtpd2pi); - CVT_OP_XMM2MMX(cvttpd2pi); - CVT_OP_XMM2REG(cvtsd2si); - CVT_OP_XMM2REG(cvttsd2si); - CVT_OP_XMM(cvtpd2dq); - CVT_OP_XMM(cvttpd2dq); - - /* sse/mmx moves */ - CVT_OP_XMM2MMX(movdq2q); - CVT_OP_MMX2XMM(movq2dq); - - /* int to float */ - a.l[0] = -6; - a.l[1] = 2; - a.l[2] = 100; - a.l[3] = -60000; - CVT_OP_MMX2XMM(cvtpi2ps); - CVT_OP_MMX2XMM(cvtpi2pd); - CVT_OP_REG2XMM(cvtsi2ss); - CVT_OP_REG2XMM(cvtsi2sd); - CVT_OP_XMM(cvtdq2ps); - CVT_OP_XMM(cvtdq2pd); - - /* XXX: test PNI insns */ -#if 0 - SSE_OP2(movshdup); -#endif - asm volatile ("emms"); -} - -#endif - #define TEST_CONV_RAX(op)\ {\ unsigned long a, r;\ @@ -2756,9 +2187,5 @@ int main(int argc, char **argv) #endif 
test_enter(); test_conv(); -#ifdef TEST_SSE - test_sse(); - test_fxsave(); -#endif return 0; } diff --git a/tests/tcg/i386/test-mmx.c b/tests/tcg/i386/test-mmx.c new file mode 100644 index 0000000000..09e5d583ff --- /dev/null +++ b/tests/tcg/i386/test-mmx.c @@ -0,0 +1,315 @@ +#include <stdio.h> +#include <stdint.h> +#include <stdlib.h> +#include <string.h> + +#ifndef TEST_FILE +#define TEST_FILE "test-mmx.h" +#endif +#ifndef EMMS +#define EMMS "emms" +#endif + +typedef void (*testfn)(void); + +typedef struct { + uint64_t q0, q1; +} __attribute__((aligned(16))) v2di; + +typedef struct { + uint64_t mm[8]; + v2di xmm[8]; + uint64_t r[16]; + uint64_t flags; + uint32_t ff; + uint64_t pad; + v2di mem[4]; + v2di mem0[4]; +} reg_state; + +typedef struct { + int n; + testfn fn; + const char *s; + reg_state *init; +} TestDef; + +reg_state initI; +reg_state initF32; +reg_state initF64; + +static void dump_mmx(int n, const uint64_t *r, int ff) +{ + if (ff == 32) { + float v[2]; + memcpy(v, r, sizeof(v)); + printf("MM%d = %016lx %8g %8g\n", n, *r, v[1], v[0]); + } else { + printf("MM%d = %016lx\n", n, *r); + } +} + +static void dump_xmm(const char *name, int n, const v2di *r, int ff) +{ + printf("%s%d = %016lx %016lx\n", + name, n, r->q1, r->q0); + if (ff == 32) { + float v[4]; + memcpy(v, r, sizeof(v)); + printf(" %8g %8g %8g %8g\n", + v[3], v[2], v[1], v[0]); + } +} + +static void dump_regs(reg_state *s, int ff) +{ + int i; + + for (i = 0; i < 8; i++) { + dump_mmx(i, &s->mm[i], ff); + } + for (i = 0; i < 4; i++) { + dump_xmm("mem", i, &s->mem0[i], 0); + } +} + +static void compare_state(const reg_state *a, const reg_state *b) +{ + int i; + for (i = 0; i < 8; i++) { + if (a->mm[i] != b->mm[i]) { + printf("MM%d = %016lx\n", i, b->mm[i]); + } + } + for (i = 0; i < 16; i++) { + if (a->r[i] != b->r[i]) { + printf("r%d = %016lx\n", i, b->r[i]); + } + } + for (i = 0; i < 8; i++) { + if (memcmp(&a->xmm[i], &b->xmm[i], 8)) { + dump_xmm("xmm", i, &b->xmm[i], a->ff); + } + } + for (i = 0; i < 4; i++) { + if (memcmp(&a->mem0[i], &a->mem[i], 16)) { + dump_xmm("mem", i, &a->mem[i], a->ff); + } + } + if (a->flags != b->flags) { + printf("FLAGS = %016lx\n", b->flags); + } +} + +#define LOADMM(r, o) "movq " #r ", " #o "[%0]\n\t" +#define LOADXMM(r, o) "movdqa " #r ", " #o "[%0]\n\t" +#define STOREMM(r, o) "movq " #o "[%1], " #r "\n\t" +#define STOREXMM(r, o) "movdqa " #o "[%1], " #r "\n\t" +#define MMREG(F) \ + F(mm0, 0x00) \ + F(mm1, 0x08) \ + F(mm2, 0x10) \ + F(mm3, 0x18) \ + F(mm4, 0x20) \ + F(mm5, 0x28) \ + F(mm6, 0x30) \ + F(mm7, 0x38) +#define XMMREG(F) \ + F(xmm0, 0x040) \ + F(xmm1, 0x050) \ + F(xmm2, 0x060) \ + F(xmm3, 0x070) \ + F(xmm4, 0x080) \ + F(xmm5, 0x090) \ + F(xmm6, 0x0a0) \ + F(xmm7, 0x0b0) +#define LOADREG(r, o) "mov " #r ", " #o "[rax]\n\t" +#define STOREREG(r, o) "mov " #o "[rax], " #r "\n\t" +#define REG(F) \ + F(rbx, 0xc8) \ + F(rcx, 0xd0) \ + F(rdx, 0xd8) \ + F(rsi, 0xe0) \ + F(rdi, 0xe8) \ + F(r8, 0x100) \ + F(r9, 0x108) \ + F(r10, 0x110) \ + F(r11, 0x118) \ + F(r12, 0x120) \ + F(r13, 0x128) \ + F(r14, 0x130) \ + F(r15, 0x138) \ + +static void run_test(const TestDef *t) +{ + reg_state result; + reg_state *init = t->init; + memcpy(init->mem, init->mem0, sizeof(init->mem)); + printf("%5d %s\n", t->n, t->s); + asm volatile( + MMREG(LOADMM) + XMMREG(LOADXMM) + "sub rsp, 128\n\t" + "push rax\n\t" + "push rbx\n\t" + "push rcx\n\t" + "push rdx\n\t" + "push %1\n\t" + "push %2\n\t" + "mov rax, %0\n\t" + "pushf\n\t" + "pop rbx\n\t" + "shr rbx, 8\n\t" + "shl rbx, 8\n\t" + "mov rcx, 0x140[rax]\n\t" + 
"and rcx, 0xff\n\t" + "or rbx, rcx\n\t" + "push rbx\n\t" + "popf\n\t" + REG(LOADREG) + "mov rax, 0xc0[rax]\n\t" + "call [rsp]\n\t" + "mov [rsp], rax\n\t" + "mov rax, 8[rsp]\n\t" + REG(STOREREG) + "mov rbx, [rsp]\n\t" + "mov 0xc0[rax], rbx\n\t" + "mov rbx, 0\n\t" + "mov 0xf0[rax], rbx\n\t" + "mov 0xf8[rax], rbx\n\t" + "pushf\n\t" + "pop rbx\n\t" + "and rbx, 0xff\n\t" + "mov 0x140[rax], rbx\n\t" + "add rsp, 16\n\t" + "pop rdx\n\t" + "pop rcx\n\t" + "pop rbx\n\t" + "pop rax\n\t" + "add rsp, 128\n\t" + MMREG(STOREMM) + EMMS "\n\t" + XMMREG(STOREXMM) + : : "r"(init), "r"(&result), "r"(t->fn) + : "memory", "cc", + "rsi", "rdi", + "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", + "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7", + "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", + "xmm6", "xmm7", "xmm8", "xmm9", "xmm10", "xmm11", + "xmm12", "xmm13", "xmm14", "xmm15" + ); + compare_state(init, &result); +} + +#define TEST(n, cmd, type) \ +static void __attribute__((naked)) test_##n(void) \ +{ \ + asm volatile(cmd); \ + asm volatile("ret"); \ +} +#include TEST_FILE + + +static const TestDef test_table[] = { +#define TEST(n, cmd, type) {n, test_##n, cmd, &init##type}, +#include TEST_FILE + {-1, NULL, "", NULL} +}; + +static void run_all(void) +{ + const TestDef *t; + for (t = test_table; t->fn; t++) { + run_test(t); + } +} + +#define ARRAY_LEN(x) (sizeof(x) / sizeof(x[0])) + +float val_f32[] = {2.0, -1.0, 4.8, 0.8, 3, -42.0, 5e6, 7.5, 8.3}; +uint64_t val_i64[] = { + 0x3d6b3b6a9e4118f2lu, 0x355ae76d2774d78clu, + 0xd851c54a56bf1f29lu, 0x4a84d1d50bf4c4fflu, + 0x5826475e2c5fd799lu, 0xfd32edc01243f5e9lu, +}; + +v2di deadbeef = {0xa5a5a5a5deadbeefull, 0xa5a5a5a5deadbeefull}; + +void init_f32reg(uint64_t *r) +{ + static int n; + float v[2]; + int i; + for (i = 0; i < 2; i++) { + v[i] = val_f32[n++]; + if (n == ARRAY_LEN(val_f32)) { + n = 0; + } + } + memcpy(r, v, sizeof(*r)); +} + +void init_intreg(uint64_t *r) +{ + static uint64_t mask; + static int n; + + *r = val_i64[n] ^ mask; + n++; + if (n == ARRAY_LEN(val_i64)) { + n = 0; + mask *= 0x104C11DB7; + } +} + +static void init_all(reg_state *s) +{ + int i; + + for (i = 0; i < 16; i++) { + init_intreg(&s->r[i]); + } + s->r[3] = (uint64_t)&s->mem[0]; /* rdx */ + s->r[5] = (uint64_t)&s->mem[2]; /* rdi */ + s->r[6] = 0; + s->r[7] = 0; + s->flags = 2; + for (i = 0; i < 8; i++) { + s->xmm[i] = deadbeef; + memcpy(&s->mm[i], &s->xmm[i], sizeof(s->mm[i])); + } + for (i = 0; i < 2; i++) { + s->mem0[i] = deadbeef; + } +} + +int main(int argc, char *argv[]) +{ + init_all(&initI); + init_intreg(&initI.mm[5]); + init_intreg(&initI.mm[6]); + init_intreg(&initI.mm[7]); + init_intreg(&initI.mem0[1].q0); + init_intreg(&initI.mem0[1].q1); + printf("Int:\n"); + dump_regs(&initI, 0); + + init_all(&initF32); + init_f32reg(&initF32.mm[5]); + init_f32reg(&initF32.mm[6]); + init_f32reg(&initF32.mm[7]); + init_f32reg(&initF32.mem0[1].q0); + init_f32reg(&initF32.mem0[1].q1); + initF32.ff = 32; + printf("F32:\n"); + dump_regs(&initF32, 32); + + if (argc > 1) { + int n = atoi(argv[1]); + run_test(&test_table[n]); + } else { + run_all(); + } + return 0; +} diff --git a/tests/tcg/i386/test-mmx.py b/tests/tcg/i386/test-mmx.py new file mode 100755 index 0000000000..392315e176 --- /dev/null +++ b/tests/tcg/i386/test-mmx.py @@ -0,0 +1,244 @@ +#! 
/usr/bin/env python3 + +# Generate test-avx.h from x86.csv + +import csv +import sys +from fnmatch import fnmatch + +ignore = set(["EMMS", "FEMMS", "FISTTP", + "LDMXCSR", "VLDMXCSR", "STMXCSR", "VSTMXCSR"]) + +imask = { + 'PALIGNR': 0x3f, + 'PEXTRB': 0x0f, + 'PEXTRW': 0x07, + 'PEXTRD': 0x03, + 'PEXTRQ': 0x01, + 'PINSRB': 0x0f, + 'PINSRW': 0x07, + 'PINSRD': 0x03, + 'PINSRQ': 0x01, + 'PSHUF[DW]': 0xff, + 'PSHUF[LH]W': 0xff, + 'PS[LR][AL][WDQ]': 0x3f, +} + +def strip_comments(x): + for l in x: + if l != '' and l[0] != '#': + yield l + +def reg_w(w): + if w == 8: + return 'al' + elif w == 16: + return 'ax' + elif w == 32: + return 'eax' + elif w == 64: + return 'rax' + raise Exception("bad reg_w %d" % w) + +def mem_w(w): + if w == 8: + t = "BYTE" + elif w == 16: + t = "WORD" + elif w == 32: + t = "DWORD" + elif w == 64: + t = "QWORD" + else: + raise Exception() + + return t + " PTR 32[rdx]" + +class MMArg(): + isxmm = True + + def __init__(self, mw): + if mw not in [0, 32, 64]: + raise Exception("Bad /m width: %s" % w) + self.mw = mw + self.ismem = mw != 0 + def regstr(self, n): + if n < 0: + return mem_w(self.mw) + else: + return "mm%d" % (n, ) + +def match(op, pattern): + return fnmatch(op, pattern) + +class ArgImm8u(): + isxmm = False + ismem = False + def __init__(self, op): + for k, v in imask.items(): + if match(op, k): + self.mask = imask[k]; + return + raise Exception("Unknown immediate") + def vals(self): + mask = self.mask + yield 0 + n = 0 + while n != mask: + n += 1 + while (n & ~mask) != 0: + n += (n & ~mask) + yield n + +class ArgRM(): + isxmm = False + def __init__(self, rw, mw): + if rw not in [8, 16, 32, 64]: + raise Exception("Bad r/w width: %s" % w) + if mw not in [0, 8, 16, 32, 64]: + raise Exception("Bad r/w width: %s" % w) + self.rw = rw + self.mw = mw + self.ismem = mw != 0 + def regstr(self, n): + if n < 0: + return mem_w(self.mw) + else: + return reg_w(self.rw) + +class ArgMem(): + isxmm = False + ismem = True + def __init__(self, w): + if w not in [8, 16, 32, 64, 128, 256]: + raise Exception("Bad mem width: %s" % w) + self.w = w + def regstr(self, n): + return mem_w(self.w) + +class SkipInstruction(Exception): + pass + +def ArgGenerator(arg, op): + if arg[:2] == 'mm': + if "/" in arg: + r, m = arg.split('/') + if (m[0] != 'm'): + raise Exception("Expected /m: %s", arg) + return MMArg(int(m[1:])); + else: + return MMArg(0); + elif arg[:4] == 'imm8': + return ArgImm8u(op); + elif arg[0] == 'r': + if '/m' in arg: + r, m = arg.split('/') + if (m[0] != 'm'): + raise Exception("Expected /m: %s", arg) + mw = int(m[1:]) + if r == 'r': + rw = mw + else: + rw = int(r[1:]) + return ArgRM(rw, mw) + + return ArgRM(int(arg[1:]), 0); + elif arg[0] == 'm': + return ArgMem(int(arg[1:])) + else: + raise SkipInstruction + +class InsnGenerator: + def __init__(self, op, args): + self.op = op + if op[0:2] == "PF": + self.optype = 'F32' + else: + self.optype = 'I' + + try: + self.args = list(ArgGenerator(a, op) for a in args) + if len(self.args) > 0 and self.args[-1] is None: + self.args = self.args[:-1] + except SkipInstruction: + raise + except Exception as e: + raise Exception("Bad arg %s: %s" % (op, e)) + + def gen(self): + regs = (5, 6, 7) + dest = 4 + + nreg = len(self.args) + if nreg == 0: + yield self.op + return + if isinstance(self.args[-1], ArgImm8u): + nreg -= 1 + immarg = self.args[-1] + else: + immarg = None + memarg = -1 + for n, arg in enumerate(self.args): + if arg.ismem: + memarg = n + + if nreg == 1: + regset = [(regs[0],)] + if memarg == 0: + regset += [(-1,)] + elif 
nreg == 2: + regset = [ + (regs[0], regs[1]), + (regs[0], regs[0]), + ] + if memarg == 0: + regset += [(-1, regs[0])] + elif memarg == 1: + regset += [(dest, -1)] + else: + raise Exception("Too many regs: %s(%d)" % (self.op, nreg)) + + for regv in regset: + argstr = [] + for i in range(nreg): + arg = self.args[i] + argstr.append(arg.regstr(regv[i])) + if immarg is None: + yield self.op + ' ' + ','.join(argstr) + else: + for immval in immarg.vals(): + yield self.op + ' ' + ','.join(argstr) + ',' + str(immval) + +def split0(s): + if s == '': + return [] + return s.split(',') + +def main(): + n = 0 + if len(sys.argv) <= 3: + print("Usage: test-mmx.py x86.csv test-mmx.h CPUID...") + exit(1) + csvfile = open(sys.argv[1], 'r', newline='') + archs = sys.argv[3:] + with open(sys.argv[2], "w") as outf: + outf.write("// Generated by test-mmx.py. Do not edit.\n") + for row in csv.reader(strip_comments(csvfile)): + insn = row[0].replace(',', '').split() + if insn[0] in ignore: + continue + cpuid = row[6] + if cpuid in archs: + try: + g = InsnGenerator(insn[0], insn[1:]) + for insn in g.gen(): + outf.write('TEST(%d, "%s", %s)\n' % (n, insn, g.optype)) + n += 1 + except SkipInstruction: + pass + outf.write("#undef TEST\n") + csvfile.close() + +if __name__ == "__main__": + main() diff --git a/tests/tcg/i386/x86.csv b/tests/tcg/i386/x86.csv index d5d0c17f1b..c43bf42dd3 100644 --- a/tests/tcg/i386/x86.csv +++ b/tests/tcg/i386/x86.csv @@ -1469,16 +1469,16 @@ "PFCMPEQ mm1, mm2/m64","PFCMPEQ mm2/m64, mm1","pfcmpeq mm2/m64, mm1","0F 0F B0 /r","V","V","3DNOW","amd","rw,r","","" "PFCMPGE mm1, mm2/m64","PFCMPGE mm2/m64, mm1","pfcmpge mm2/m64, mm1","0F 0F 90 /r","V","V","3DNOW","amd","rw,r","","" "PFCMPGT mm1, mm2/m64","PFCMPGT mm2/m64, mm1","pfcmpgt mm2/m64, mm1","0F 0F A0 /r","V","V","3DNOW","amd","rw,r","","" -"PFCPIT1 mm1, mm2/m64","PFCPIT1 mm2/m64, mm1","pfcpit1 mm2/m64, mm1","0F 0F A6 /r","V","V","3DNOW","amd","rw,r","","" "PFMAX mm1, mm2/m64","PFMAX mm2/m64, mm1","pfmax mm2/m64, mm1","0F 0F A4 /r","V","V","3DNOW","amd","rw,r","","" "PFMIN mm1, mm2/m64","PFMIN mm2/m64, mm1","pfmin mm2/m64, mm1","0F 0F 94 /r","V","V","3DNOW","amd","rw,r","","" "PFMUL mm1, mm2/m64","PFMUL mm2/m64, mm1","pfmul mm2/m64, mm1","0F 0F B4 /r","V","V","3DNOW","amd","rw,r","","" "PFNACC mm1, mm2/m64","PFNACC mm2/m64, mm1","pfnacc mm2/m64, mm1","0F 0F 8A /r","V","V","3DNOW","amd","rw,r","","" "PFPNACC mm1, mm2/m64","PFPNACC mm2/m64, mm1","pfpnacc mm2/m64, mm1","0F 0F 8E /r","V","V","3DNOW","amd","rw,r","","" "PFRCP mm1, mm2/m64","PFRCP mm2/m64, mm1","pfrcp mm2/m64, mm1","0F 0F 96 /r","V","V","3DNOW","amd","rw,r","","" +"PFRCPIT1 mm1, mm2/m64","PFRCPIT1 mm2/m64, mm1","pfrcpit1 mm2/m64, mm1","0F 0F A6 /r","V","V","3DNOW","amd","rw,r","","" "PFRCPIT2 mm1, mm2/m64","PFRCPIT2 mm2/m64, mm1","pfrcpit2 mm2/m64, mm1","0F 0F B6 /r","V","V","3DNOW","amd","rw,r","","" "PFRSQIT1 mm1, mm2/m64","PFRSQIT1 mm2/m64, mm1","pfrsqit1 mm2/m64, mm1","0F 0F A7 /r","V","V","3DNOW","amd","rw,r","","" -"PFSQRT mm1, mm2/m64","PFSQRT mm2/m64, mm1","pfsqrt mm2/m64, mm1","0F 0F 97 /r","V","V","3DNOW","amd","rw,r","","" +"PFRSQRT mm1, mm2/m64","PFRSQRT mm2/m64, mm1","pfrsqrt mm2/m64, mm1","0F 0F 97 /r","V","V","3DNOW","amd","rw,r","","" "PFSUB mm1, mm2/m64","PFSUB mm2/m64, mm1","pfsub mm2/m64, mm1","0F 0F 9A /r","V","V","3DNOW","amd","rw,r","","" "PFSUBR mm1, mm2/m64","PFSUBR mm2/m64, mm1","pfsubr mm2/m64, mm1","0F 0F AA /r","V","V","3DNOW","amd","rw,r","","" "PHADDD mm1, mm2/m64","PHADDD mm2/m64, mm1","phaddd mm2/m64, mm1","0F 38 02 
/r","V","V","SSSE3","","rw,r","","" diff --git a/tests/tcg/x86_64/Makefile.target b/tests/tcg/x86_64/Makefile.target index 861a0966f4..6895db1c43 100644 --- a/tests/tcg/x86_64/Makefile.target +++ b/tests/tcg/x86_64/Makefile.target @@ -17,7 +17,6 @@ TESTS=$(MULTIARCH_TESTS) endif run-test-i386-ssse3: QEMU_OPTS += -cpu max -run-test-avx: QEMU_OPTS += -cpu max run-plugin-test-i386-ssse3-%: QEMU_OPTS += -cpu max test-x86_64: LDFLAGS+=-lm -lc diff --git a/tests/unit/check-block-qdict.c b/tests/unit/check-block-qdict.c index 5a25825093..751c58e737 100644 --- a/tests/unit/check-block-qdict.c +++ b/tests/unit/check-block-qdict.c @@ -504,7 +504,7 @@ static void qdict_crumple_test_empty(void) src = qdict_new(); dst = qobject_to(QDict, qdict_crumple(src, &error_abort)); - + g_assert(dst); g_assert_cmpint(qdict_size(dst), ==, 0); qobject_unref(src); diff --git a/tests/unit/test-qga.c b/tests/unit/test-qga.c index a05a4628ed..d27ff94d13 100644 --- a/tests/unit/test-qga.c +++ b/tests/unit/test-qga.c @@ -32,6 +32,7 @@ static int connect_qga(char *path) g_usleep(G_USEC_PER_SEC); } if (i++ == 10) { + close(s); return -1; } } while (ret == -1); diff --git a/tests/unit/test-visitor-serialization.c b/tests/unit/test-visitor-serialization.c index 907263d030..667e8fed82 100644 --- a/tests/unit/test-visitor-serialization.c +++ b/tests/unit/test-visitor-serialization.c @@ -427,131 +427,117 @@ static void test_primitive_lists(gconstpointer opaque) ops->deserialize((void **)&pl_copy_ptr, serialize_data, visit_primitive_list, &error_abort); - i = 0; + + switch (pl_copy.type) { + case PTYPE_STRING: + cur_head = pl_copy.value.strings; + break; + case PTYPE_INTEGER: + cur_head = pl_copy.value.integers; + break; + case PTYPE_S8: + cur_head = pl_copy.value.s8_integers; + break; + case PTYPE_S16: + cur_head = pl_copy.value.s16_integers; + break; + case PTYPE_S32: + cur_head = pl_copy.value.s32_integers; + break; + case PTYPE_S64: + cur_head = pl_copy.value.s64_integers; + break; + case PTYPE_U8: + cur_head = pl_copy.value.u8_integers; + break; + case PTYPE_U16: + cur_head = pl_copy.value.u16_integers; + break; + case PTYPE_U32: + cur_head = pl_copy.value.u32_integers; + break; + case PTYPE_U64: + cur_head = pl_copy.value.u64_integers; + break; + case PTYPE_NUMBER: + cur_head = pl_copy.value.numbers; + break; + case PTYPE_BOOLEAN: + cur_head = pl_copy.value.booleans; + break; + default: + g_assert_not_reached(); + } /* compare our deserialized list of primitives to the original */ - do { + i = 0; + while (cur_head) { switch (pl_copy.type) { case PTYPE_STRING: { - strList *ptr; - if (cur_head) { - ptr = cur_head; - cur_head = ptr->next; - } else { - cur_head = ptr = pl_copy.value.strings; - } + strList *ptr = cur_head; + cur_head = ptr->next; g_assert_cmpstr(pt->value.string, ==, ptr->value); break; } case PTYPE_INTEGER: { - intList *ptr; - if (cur_head) { - ptr = cur_head; - cur_head = ptr->next; - } else { - cur_head = ptr = pl_copy.value.integers; - } + intList *ptr = cur_head; + cur_head = ptr->next; g_assert_cmpint(pt->value.integer, ==, ptr->value); break; } case PTYPE_S8: { - int8List *ptr; - if (cur_head) { - ptr = cur_head; - cur_head = ptr->next; - } else { - cur_head = ptr = pl_copy.value.s8_integers; - } + int8List *ptr = cur_head; + cur_head = ptr->next; g_assert_cmpint(pt->value.s8, ==, ptr->value); break; } case PTYPE_S16: { - int16List *ptr; - if (cur_head) { - ptr = cur_head; - cur_head = ptr->next; - } else { - cur_head = ptr = pl_copy.value.s16_integers; - } + int16List *ptr = cur_head; + cur_head = 
ptr->next; g_assert_cmpint(pt->value.s16, ==, ptr->value); break; } case PTYPE_S32: { - int32List *ptr; - if (cur_head) { - ptr = cur_head; - cur_head = ptr->next; - } else { - cur_head = ptr = pl_copy.value.s32_integers; - } + int32List *ptr = cur_head; + cur_head = ptr->next; g_assert_cmpint(pt->value.s32, ==, ptr->value); break; } case PTYPE_S64: { - int64List *ptr; - if (cur_head) { - ptr = cur_head; - cur_head = ptr->next; - } else { - cur_head = ptr = pl_copy.value.s64_integers; - } + int64List *ptr = cur_head; + cur_head = ptr->next; g_assert_cmpint(pt->value.s64, ==, ptr->value); break; } case PTYPE_U8: { - uint8List *ptr; - if (cur_head) { - ptr = cur_head; - cur_head = ptr->next; - } else { - cur_head = ptr = pl_copy.value.u8_integers; - } + uint8List *ptr = cur_head; + cur_head = ptr->next; g_assert_cmpint(pt->value.u8, ==, ptr->value); break; } case PTYPE_U16: { - uint16List *ptr; - if (cur_head) { - ptr = cur_head; - cur_head = ptr->next; - } else { - cur_head = ptr = pl_copy.value.u16_integers; - } + uint16List *ptr = cur_head; + cur_head = ptr->next; g_assert_cmpint(pt->value.u16, ==, ptr->value); break; } case PTYPE_U32: { - uint32List *ptr; - if (cur_head) { - ptr = cur_head; - cur_head = ptr->next; - } else { - cur_head = ptr = pl_copy.value.u32_integers; - } + uint32List *ptr = cur_head; + cur_head = ptr->next; g_assert_cmpint(pt->value.u32, ==, ptr->value); break; } case PTYPE_U64: { - uint64List *ptr; - if (cur_head) { - ptr = cur_head; - cur_head = ptr->next; - } else { - cur_head = ptr = pl_copy.value.u64_integers; - } + uint64List *ptr = cur_head; + cur_head = ptr->next; g_assert_cmpint(pt->value.u64, ==, ptr->value); break; } case PTYPE_NUMBER: { - numberList *ptr; GString *double_expected = g_string_new(""); GString *double_actual = g_string_new(""); - if (cur_head) { - ptr = cur_head; - cur_head = ptr->next; - } else { - cur_head = ptr = pl_copy.value.numbers; - } + numberList *ptr = cur_head; + cur_head = ptr->next; /* we serialize with %f for our reference visitors, so rather than * fuzzy floating math to test "equality", just compare the * formatted values @@ -564,13 +550,8 @@ static void test_primitive_lists(gconstpointer opaque) break; } case PTYPE_BOOLEAN: { - boolList *ptr; - if (cur_head) { - ptr = cur_head; - cur_head = ptr->next; - } else { - cur_head = ptr = pl_copy.value.booleans; - } + boolList *ptr = cur_head; + cur_head = ptr->next; g_assert_cmpint(!!pt->value.boolean, ==, !!ptr->value); break; } @@ -578,9 +559,9 @@ static void test_primitive_lists(gconstpointer opaque) g_assert_not_reached(); } i++; - } while (cur_head); + } - g_assert_cmpint(i, ==, 33); + g_assert_cmpint(i, ==, 32); ops->cleanup(serialize_data); dealloc_helper(&pl, visit_primitive_list, &error_abort); |