#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <errno.h>
#include "debug.h"
#include "box64context.h"
#include "dynarec.h"
#include "emu/x64emu_private.h"
#include "emu/x64run_private.h"
#include "x64run.h"
#include "x64emu.h"
#include "box64stack.h"
#include "callback.h"
#include "emu/x64run_private.h"
#include "x64trace.h"
#include "dynarec_native.h"
#include "my_cpuid.h"
#include "emu/x87emu_private.h"
#include "emu/x64shaext.h"
#include "arm64_printer.h"
#include "dynarec_arm64_private.h"
#include "dynarec_arm64_functions.h"
#include "dynarec_arm64_helper.h"
uintptr_t dynarec64_AVX_F2_0F38(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int ninst, vex_t vex, int* ok, int* need_epilog)
{
    (void)ip; (void)need_epilog;

    uint8_t opcode = F8;
    uint8_t nextop, u8;
    uint8_t gd, ed, vd;
    uint8_t wback, wb1, wb2;
    uint8_t eb1, eb2, gb1, gb2;
    int32_t i32, i32_;
    int cacheupd = 0;
    int v0, v1, v2;
    int q0, q1, q2;
    int d0, d1, d2;
    int s0;
    uint64_t tmp64u;
    int64_t j64;
    int64_t fixedaddress;
    int unscaled;
    MAYUSE(wb1);
    MAYUSE(wb2);
    MAYUSE(eb1);
    MAYUSE(eb2);
    MAYUSE(gb1);
    MAYUSE(gb2);
    MAYUSE(q0);
    MAYUSE(q1);
    MAYUSE(d0);
    MAYUSE(d1);
    MAYUSE(s0);
    MAYUSE(j64);
    MAYUSE(cacheupd);

    rex_t rex = vex.rex;

    switch(opcode) {
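
        // MULX Gd, Vd, Ed: unsigned multiply of RDX/EDX by Ed; the high half of
        // the product goes to Gd and the low half to Vd. No flags are modified (BMI2).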
        case 0xF6:
            INST_NAME("MULX Gd, Vd, Ed (,RDX)");
            nextop = F8;
            GETGD;
            GETED(0);
            GETVD;
            if(rex.w) {
                // 64bits mul: compute the high half into the scratch register first,
                // so xRDX and ed are still intact for the low-half multiply, and the
                // gd==vd aliasing case ends up holding the high half, as on x86
                UMULH(x3, xRDX, ed);
                MULx(vd, xRDX, ed);
                MOVx_REG(gd, x3);
            } else {
                // 32bits mul: full 64bits product in x3, then split into low/high 32bits
                UMULL(x3, xRDX, ed);
                MOVw_REG(vd, x3);
                LSRx(gd, x3, 32);
            }
            break;
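
        // SHRX Gd, Ed, Vd: logical shift right of Ed by the count held in Vd,
        // masked to 6 bits (64bits op) or 5 bits (32bits op); flags are untouched (BMI2).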
        case 0xF7:
            INST_NAME("SHRX Gd, Ed, Vd");
            nextop = F8;
            GETGD;
            GETED(0);
            GETVD;
            ANDx_mask(x3, vd, 1, 0, rex.w?5:4);    // mask 0x3f/0x1f
            LSRxw_REG(gd, ed, x3);
            break;

        default:
            DEFAULT;
    }
    return addr;
}