-rw-r--r--  miasm2/arch/x86/arch.py   107
-rw-r--r--  miasm2/arch/x86/sem.py    494
-rw-r--r--  test/arch/x86/arch.py     115
3 files changed, 622 insertions(+), 94 deletions(-)
diff --git a/miasm2/arch/x86/arch.py b/miasm2/arch/x86/arch.py
index 13c06ae6..72ed3309 100644
--- a/miasm2/arch/x86/arch.py
+++ b/miasm2/arch/x86/arch.py
@@ -4306,6 +4306,10 @@ addop("pmaxuw", [bs8(0x0f), bs8(0x38), bs8(0x3e), pref_66] +
 addop("pmaxud", [bs8(0x0f), bs8(0x38), bs8(0x3f), pref_66] +
       rmmod(xmm_reg, rm_arg_xmm))
 
+addop("pmaxsw", [bs8(0x0f), bs8(0xee), no_xmm_pref] +
+      rmmod(mm_reg, rm_arg_mm_m64))
+addop("pmaxsw", [bs8(0x0f), bs8(0xee), pref_66] +
+      rmmod(xmm_reg, rm_arg_xmm_m128))
 
 addop("pminub", [bs8(0x0f), bs8(0xda), no_xmm_pref] +
       rmmod(mm_reg, rm_arg_mm))
@@ -4339,6 +4343,11 @@ addop("pcmpgtb", [bs8(0x0f), bs8(0x64), no_xmm_pref] +
 addop("pcmpgtb", [bs8(0x0f), bs8(0x64), pref_66] +
       rmmod(xmm_reg, rm_arg_xmm))
 
+addop("pcmpgtw", [bs8(0x0f), bs8(0x65), no_xmm_pref] +
+      rmmod(mm_reg, rm_arg_mm))
+addop("pcmpgtw", [bs8(0x0f), bs8(0x65), pref_66] +
+      rmmod(xmm_reg, rm_arg_xmm))
+
 addop("pcmpgtd", [bs8(0x0f), bs8(0x66), no_xmm_pref] +
       rmmod(mm_reg, rm_arg_mm))
 addop("pcmpgtd", [bs8(0x0f), bs8(0x66), pref_66] +
@@ -4423,9 +4432,9 @@ addop("pextrq", [bs8(0x0f), bs8(0x3a), bs8(0x16), pref_66] +
 addop("pextrw", [bs8(0x0f), bs8(0x3a), bs8(0x15), pref_66] +
       rmmod(xmm_reg, rm_arg_reg_m16) + [u08], [rm_arg_reg_m16, xmm_reg, u08])
 addop("pextrw", [bs8(0x0f), bs8(0xc5), no_xmm_pref] +
-      rmmod(mm_reg, rm_arg_reg_m16) + [u08], [rm_arg_reg_m16, mm_reg, u08])
+      rmmod(rmreg, rm_arg_mm) + [u08], [rmreg, rm_arg_mm, u08])
 addop("pextrw", [bs8(0x0f), bs8(0xc5), pref_66] +
-      rmmod(xmm_reg, rm_arg_reg_m16) + [u08], [rm_arg_reg_m16, xmm_reg, u08])
+      rmmod(rmreg, rm_arg_xmm) + [u08], [rmreg, rm_arg_xmm, u08])
 
 
 addop("sqrtpd", [bs8(0x0f), bs8(0x51), pref_66] +
@@ -4453,6 +4462,100 @@ addop("aesdec", [bs8(0x0f), bs8(0x38), bs8(0xde), pref_66] + rmmod(xmm_reg, rm_a
 addop("aesenclast", [bs8(0x0f), bs8(0x38), bs8(0xdd), pref_66] + rmmod(xmm_reg, rm_arg_xmm))
 addop("aesdeclast", [bs8(0x0f), bs8(0x38), bs8(0xdf), pref_66] + rmmod(xmm_reg, rm_arg_xmm))
 
+addop("packsswb", [bs8(0x0f), bs8(0x63), no_xmm_pref] +
+      rmmod(mm_reg, rm_arg_mm_m64))
+addop("packsswb", [bs8(0x0f), bs8(0x63), pref_66] +
+      rmmod(xmm_reg, rm_arg_xmm_m128))
+addop("packssdw", [bs8(0x0f), bs8(0x6b), no_xmm_pref] +
+      rmmod(mm_reg, rm_arg_mm_m64))
+addop("packssdw", [bs8(0x0f), bs8(0x6b), pref_66] +
+      rmmod(xmm_reg, rm_arg_xmm_m128))
+
+addop("packuswb", [bs8(0x0f), bs8(0x67), no_xmm_pref] +
+      rmmod(mm_reg, rm_arg_mm_m64))
+addop("packuswb", [bs8(0x0f), bs8(0x67), pref_66] +
+      rmmod(xmm_reg, rm_arg_xmm_m128))
+
+addop("pmullw", [bs8(0x0f), bs8(0xd5), no_xmm_pref] +
+      rmmod(mm_reg, rm_arg_mm_m64))
+addop("pmullw", [bs8(0x0f), bs8(0xd5), pref_66] +
+      rmmod(xmm_reg, rm_arg_xmm_m128))
+addop("pmulhuw", [bs8(0x0f), bs8(0xe4), no_xmm_pref] +
+      rmmod(mm_reg, rm_arg_mm_m64))
+addop("pmulhuw", [bs8(0x0f), bs8(0xe4), pref_66] +
+      rmmod(xmm_reg, rm_arg_xmm_m128))
+addop("pmulhw", [bs8(0x0f), bs8(0xe5), no_xmm_pref] +
+      rmmod(mm_reg, rm_arg_mm_m64))
+addop("pmulhw", [bs8(0x0f), bs8(0xe5), pref_66] +
+      rmmod(xmm_reg, rm_arg_xmm_m128))
+addop("pmuludq", [bs8(0x0f), bs8(0xf4), no_xmm_pref] +
+      rmmod(mm_reg, rm_arg_mm_m64))
+addop("pmuludq", [bs8(0x0f), bs8(0xf4), pref_66] +
+      rmmod(xmm_reg, rm_arg_xmm_m128))
+
+
+addop("psubusb", [bs8(0x0f), bs8(0xd8), no_xmm_pref] +
+      rmmod(mm_reg, rm_arg_mm_m64))
+addop("psubusb", [bs8(0x0f), bs8(0xd8), pref_66] +
+      rmmod(xmm_reg, rm_arg_xmm_m128))
+addop("psubusw", [bs8(0x0f), bs8(0xd9), no_xmm_pref] +
+      rmmod(mm_reg, rm_arg_mm_m64))
+addop("psubusw", [bs8(0x0f), bs8(0xd9), pref_66] +
+      rmmod(xmm_reg, rm_arg_xmm_m128))
+addop("psubsb", [bs8(0x0f), bs8(0xe8), no_xmm_pref] +
+      rmmod(mm_reg, rm_arg_mm_m64))
+addop("psubsb", [bs8(0x0f), bs8(0xe8), pref_66] +
+      rmmod(xmm_reg, rm_arg_xmm_m128))
+addop("psubsw", [bs8(0x0f), bs8(0xe9), no_xmm_pref] +
+      rmmod(mm_reg, rm_arg_mm_m64))
+addop("psubsw", [bs8(0x0f), bs8(0xe9), pref_66] +
+      rmmod(xmm_reg, rm_arg_xmm_m128))
+
+
+addop("paddusb", [bs8(0x0f), bs8(0xdc), no_xmm_pref] +
+      rmmod(mm_reg, rm_arg_mm_m64))
+addop("paddusb", [bs8(0x0f), bs8(0xdc), pref_66] +
+      rmmod(xmm_reg, rm_arg_xmm_m128))
+addop("paddusw", [bs8(0x0f), bs8(0xdd), no_xmm_pref] +
+      rmmod(mm_reg, rm_arg_mm_m64))
+addop("paddusw", [bs8(0x0f), bs8(0xdd), pref_66] +
+      rmmod(xmm_reg, rm_arg_xmm_m128))
+addop("paddsb", [bs8(0x0f), bs8(0xec), no_xmm_pref] +
+      rmmod(mm_reg, rm_arg_mm_m64))
+addop("paddsb", [bs8(0x0f), bs8(0xec), pref_66] +
+      rmmod(xmm_reg, rm_arg_xmm_m128))
+addop("paddsw", [bs8(0x0f), bs8(0xed), no_xmm_pref] +
+      rmmod(mm_reg, rm_arg_mm_m64))
+addop("paddsw", [bs8(0x0f), bs8(0xed), pref_66] +
+      rmmod(xmm_reg, rm_arg_xmm_m128))
+
+addop("pmaddwd", [bs8(0x0f), bs8(0xf5), no_xmm_pref] +
+      rmmod(mm_reg, rm_arg_mm_m64))
+addop("pmaddwd", [bs8(0x0f), bs8(0xf5), pref_66] +
+      rmmod(xmm_reg, rm_arg_xmm_m128))
+
+addop("psadbw", [bs8(0x0f), bs8(0xf6), no_xmm_pref] +
+      rmmod(mm_reg, rm_arg_mm_m64))
+addop("psadbw", [bs8(0x0f), bs8(0xf6), pref_66] +
+      rmmod(xmm_reg, rm_arg_xmm_m128))
+
+addop("pavgb", [bs8(0x0f), bs8(0xe0), no_xmm_pref] +
+      rmmod(mm_reg, rm_arg_mm_m64))
+addop("pavgb", [bs8(0x0f), bs8(0xe0), pref_66] +
+      rmmod(xmm_reg, rm_arg_xmm_m128))
+addop("pavgw", [bs8(0x0f), bs8(0xe3), no_xmm_pref] +
+      rmmod(mm_reg, rm_arg_mm_m64))
+addop("pavgw", [bs8(0x0f), bs8(0xe3), pref_66] +
+      rmmod(xmm_reg, rm_arg_xmm_m128))
+
+addop("maskmovq", [bs8(0x0f), bs8(0xf7), no_xmm_pref] +
+      rmmod(mm_reg, rm_arg_mm_reg))
+addop("maskmovdqu", [bs8(0x0f), bs8(0xf7), pref_66] +
+      rmmod(xmm_reg, rm_arg_xmm_reg))
+
+addop("emms", [bs8(0x0f), bs8(0x77)])
+
+
 mn_x86.bintree = factor_one_bit(mn_x86.bintree)
 # mn_x86.bintree = factor_fields_all(mn_x86.bintree)
 """
diff --git a/miasm2/arch/x86/sem.py b/miasm2/arch/x86/sem.py
index 3cbf5526..589c2eb9 100644
--- a/miasm2/arch/x86/sem.py
+++ b/miasm2/arch/x86/sem.py
@@ -3319,62 +3319,104 @@ def vec_op_clip(op, size):
 # Generic vertical operation
 
 
-def vec_vertical_sem(op, elt_size, reg_size, dst, src):
+def vec_vertical_sem(op, elt_size, reg_size, dst, src, apply_on_output):
     assert reg_size % elt_size == 0
     n = reg_size / elt_size
     if op == '-':
         ops = [
-            (dst[i * elt_size:(i + 1) * elt_size]
-             - src[i * elt_size:(i + 1) * elt_size]) for i in xrange(0, n)]
+            apply_on_output((dst[i * elt_size:(i + 1) * elt_size]
+                             - src[i * elt_size:(i + 1) * elt_size]))
+            for i in xrange(0, n)
+        ]
     else:
-        ops = [m2_expr.ExprOp(op, dst[i * elt_size:(i + 1) * elt_size],
-                              src[i * elt_size:(i + 1) * elt_size]) for i in xrange(0, n)]
+        ops = [
+            apply_on_output(m2_expr.ExprOp(op, dst[i * elt_size:(i + 1) * elt_size],
+                                           src[i * elt_size:(i + 1) * elt_size]))
+            for i in xrange(0, n)
+        ]
 
     return m2_expr.ExprCompose(*ops)
 
 
-def float_vec_vertical_sem(op, elt_size, reg_size, dst, src):
+def float_vec_vertical_sem(op, elt_size, reg_size, dst, src, apply_on_output):
     assert reg_size % elt_size == 0
     n = reg_size / elt_size
 
     x_to_int, int_to_x = {32: ('float_to_int_%d', 'int_%d_to_float'),
                           64: ('double_to_int_%d', 'int_%d_to_double')}[elt_size]
     if op == '-':
-        ops = [m2_expr.ExprOp(x_to_int % elt_size,
-                              m2_expr.ExprOp(int_to_x % elt_size, dst[i * elt_size:(i + 1) * elt_size]) -
-                              m2_expr.ExprOp(
-                                  int_to_x % elt_size, src[i * elt_size:(
-                                      i + 1) * elt_size])) for i in xrange(0, n)]
+        ops = [
+            apply_on_output(m2_expr.ExprOp(
+                x_to_int % elt_size,
+                m2_expr.ExprOp(int_to_x % elt_size, dst[i * elt_size:(i + 1) * elt_size]) -
+                m2_expr.ExprOp(
+                    int_to_x % elt_size, src[i * elt_size:(
+                        i + 1) * elt_size])))
+            for i in xrange(0, n)
+        ]
     else:
-        ops = [m2_expr.ExprOp(x_to_int % elt_size,
-                              m2_expr.ExprOp(op,
-                                             m2_expr.ExprOp(
-                                                 int_to_x % elt_size, dst[i * elt_size:(
-                                                     i + 1) * elt_size]),
-                                             m2_expr.ExprOp(
-                                                 int_to_x % elt_size, src[i * elt_size:(
-                                                     i + 1) * elt_size]))) for i in xrange(0, n)]
+        ops = [
+            apply_on_output(m2_expr.ExprOp(
+                x_to_int % elt_size,
+                m2_expr.ExprOp(op,
+                               m2_expr.ExprOp(
+                                   int_to_x % elt_size, dst[i * elt_size:(
+                                       i + 1) * elt_size]),
+                               m2_expr.ExprOp(
+                                   int_to_x % elt_size, src[i * elt_size:(
+                                       i + 1) * elt_size]))))
+            for i in xrange(0, n)]
 
     return m2_expr.ExprCompose(*ops)
 
 
-def __vec_vertical_instr_gen(op, elt_size, sem):
+def __vec_vertical_instr_gen(op, elt_size, sem, apply_on_output):
     def vec_instr(ir, instr, dst, src):
         e = []
         if isinstance(src, m2_expr.ExprMem):
             src = ir.ExprMem(src.arg, dst.size)
         reg_size = dst.size
-        e.append(m2_expr.ExprAff(dst, sem(op, elt_size, reg_size, dst, src)))
+        e.append(m2_expr.ExprAff(dst, sem(op, elt_size, reg_size, dst, src,
+                                          apply_on_output)))
         return e, []
     return vec_instr
 
 
-def vec_vertical_instr(op, elt_size):
-    return __vec_vertical_instr_gen(op, elt_size, vec_vertical_sem)
+def vec_vertical_instr(op, elt_size, apply_on_output=lambda x: x):
+    return __vec_vertical_instr_gen(op, elt_size, vec_vertical_sem,
+                                    apply_on_output)
+
 
+def float_vec_vertical_instr(op, elt_size, apply_on_output=lambda x: x):
+    return __vec_vertical_instr_gen(op, elt_size, float_vec_vertical_sem,
+                                    apply_on_output)
 
-def float_vec_vertical_instr(op, elt_size):
-    return __vec_vertical_instr_gen(op, elt_size, float_vec_vertical_sem)
+
+def _keep_mul_high(expr, signed=False):
+    assert expr.is_op("*") and len(expr.args) == 2
+
+    if signed:
+        arg1 = expr.args[0].signExtend(expr.size * 2)
+        arg2 = expr.args[1].signExtend(expr.size * 2)
+    else:
+        arg1 = expr.args[0].zeroExtend(expr.size * 2)
+        arg2 = expr.args[1].zeroExtend(expr.size * 2)
+    return m2_expr.ExprOp("*", arg1, arg2)[expr.size:]
+
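As a sanity check of what the high half keeps, a pure-Python model of one element (illustrative only, not part of the patch; 16-bit elements assumed):

    def mul_high(a, b, size=16, signed=False):
        # Extend both operands to 2*size bits, multiply, keep the upper half
        if signed:
            to_signed = lambda v: v - (1 << size) if v & (1 << (size - 1)) else v
            a, b = to_signed(a), to_signed(b)
        return ((a * b) >> size) & ((1 << size) - 1)

    assert mul_high(0xffff, 0xffff) == 0xfffe          # 0xffff * 0xffff = 0xfffe0001
    assert mul_high(0xffff, 0xffff, signed=True) == 0  # (-1) * (-1) = 1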
+# Op, signed => associated comparison
+_min_max_func = {
+    ("min", False): m2_expr.expr_is_unsigned_lower,
+    ("min", True): m2_expr.expr_is_signed_lower,
+    ("max", False): m2_expr.expr_is_unsigned_greater,
+    ("max", True): m2_expr.expr_is_signed_greater,
+}
+def _min_max(expr, signed):
+    assert (expr.is_op("min") or expr.is_op("max")) and len(expr.args) == 2
+    return m2_expr.ExprCond(
+        _min_max_func[(expr.op, signed)](expr.args[1], expr.args[0]),
+        expr.args[1],
+        expr.args[0],
+    )
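For instance, with two 8-bit identifiers, the unsigned min above lowers to a single conditional (a sketch, assuming miasm2 is importable):

    import miasm2.expression.expression as m2_expr

    a = m2_expr.ExprId("a", 8)
    b = m2_expr.ExprId("b", 8)
    # _min_max builds: min(a, b) == b if b <u a else a
    print m2_expr.ExprCond(m2_expr.expr_is_unsigned_lower(b, a), b, a)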
 
 
 # Integer arithmetic
@@ -3398,6 +3440,109 @@ psubw = vec_vertical_instr('-', 16)
 psubd = vec_vertical_instr('-', 32)
 psubq = vec_vertical_instr('-', 64)
 
+# Multiplications
+#
+
+# SSE
+pmullb = vec_vertical_instr('*', 8)
+pmullw = vec_vertical_instr('*', 16)
+pmulld = vec_vertical_instr('*', 32)
+pmullq = vec_vertical_instr('*', 64)
+pmulhub = vec_vertical_instr('*', 8, _keep_mul_high)
+pmulhuw = vec_vertical_instr('*', 16, _keep_mul_high)
+pmulhud = vec_vertical_instr('*', 32, _keep_mul_high)
+pmulhuq = vec_vertical_instr('*', 64, _keep_mul_high)
+pmulhb = vec_vertical_instr('*', 8, lambda x: _keep_mul_high(x, signed=True))
+pmulhw = vec_vertical_instr('*', 16, lambda x: _keep_mul_high(x, signed=True))
+pmulhd = vec_vertical_instr('*', 32, lambda x: _keep_mul_high(x, signed=True))
+pmulhq = vec_vertical_instr('*', 64, lambda x: _keep_mul_high(x, signed=True))
+
+def pmuludq(ir, instr, dst, src):
+    e = []
+    if dst.size == 64:
+        e.append(m2_expr.ExprAff(
+            dst,
+            src[:32].zeroExtend(64) * dst[:32].zeroExtend(64)
+        ))
+    elif dst.size == 128:
+        e.append(m2_expr.ExprAff(
+            dst[:64],
+            src[:32].zeroExtend(64) * dst[:32].zeroExtend(64)
+        ))
+        e.append(m2_expr.ExprAff(
+            dst[64:],
+            src[64:96].zeroExtend(64) * dst[64:96].zeroExtend(64)
+        ))
+    else:
+        raise RuntimeError("Unsupported size %d" % dst.size)
+    return e, []
+
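Worked on concrete values, the MMX (64-bit) case of this widening multiply behaves as follows (pure-Python model, not part of the patch):

    # PMULUDQ mm: dst = zext64(dst[31:0]) * zext64(src[31:0])
    dst, src = 0x00000002ffffffff, 0x0000000380000000
    low_dst, low_src = dst & 0xffffffff, src & 0xffffffff
    assert low_dst * low_src == 0x7fffffff80000000  # full 64-bit product kept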
+# Mix
+#
+
+# SSE
+def pmaddwd(ir, instr, dst, src):
+    sizedst = 32
+    sizesrc = 16
+    out = []
+    for start in xrange(0, dst.size, sizedst):
+        base = start
+        mul1 = src[base: base + sizesrc].signExtend(sizedst) * dst[base: base + sizesrc].signExtend(sizedst)
+        base += sizesrc
+        mul2 = src[base: base + sizesrc].signExtend(sizedst) * dst[base: base + sizesrc].signExtend(sizedst)
+        out.append(mul1 + mul2)
+    return [m2_expr.ExprAff(dst, m2_expr.ExprCompose(*out))], []
+
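Each 32-bit lane thus receives the sum of two signed 16x16->32 products; on concrete values (pure-Python model of one lane, not part of the patch):

    def madd_pair(d0, d1, s0, s1):
        # Signed 16x16 -> 32 multiplies, summed per 32-bit lane
        s16 = lambda v: v - 0x10000 if v & 0x8000 else v
        return (s16(d0) * s16(s0) + s16(d1) * s16(s1)) & 0xffffffff

    assert madd_pair(2, 3, 0xffff, 4) == 10  # 2*(-1) + 3*4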
+
+def _absolute(expr):
+    """Return abs(@expr)"""
+    signed = expr.msb()
+    value_unsigned = (expr ^ expr.mask) + m2_expr.ExprInt(1, expr.size)
+    return m2_expr.ExprCond(signed, value_unsigned, expr)
+
+
+def psadbw(ir, instr, dst, src):
+    sizedst = 16
+    sizesrc = 8
+    out_dst = []
+    for start in xrange(0, dst.size, 64):
+        out = []
+        for src_start in xrange(0, 64, sizesrc):
+            beg = start + src_start
+            end = beg + sizesrc
+            # The equations in the doc are ambiguous, but its text states that
+            # src and dst are "8 unsigned byte integers", hence the zeroExtend
+            out.append(_absolute(dst[beg: end].zeroExtend(sizedst) - src[beg: end].zeroExtend(sizedst)))
+        out_dst.append(m2_expr.ExprOp("+", *out))
+        out_dst.append(m2_expr.ExprInt(0, 64 - sizedst))
+
+    return [m2_expr.ExprAff(dst, m2_expr.ExprCompose(*out_dst))], []
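Per 64-bit lane, the result is the 16-bit sum of eight absolute byte differences, zero-padded to 64 bits; a pure-Python model of one lane (not part of the patch):

    def sad64(dst_bytes, src_bytes):
        # Sum of absolute differences over 8 unsigned bytes (fits in 16 bits)
        return sum(abs(a - b) for a, b in zip(dst_bytes, src_bytes))

    assert sad64([0, 255, 1, 1, 1, 1, 1, 1],
                 [255, 0, 0, 0, 0, 0, 0, 0]) == 516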
+
+def _average(expr):
+    assert expr.is_op("avg") and len(expr.args) == 2
+
+    arg1 = expr.args[0].zeroExtend(expr.size * 2)
+    arg2 = expr.args[1].zeroExtend(expr.size * 2)
+    one = m2_expr.ExprInt(1, arg1.size)
+    # avg(unsigned) = (a + b + 1) >> 1, the addition using at least one extra bit
+    return ((arg1 + arg2 + one) >> one)[:expr.size]
+
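The widened addition makes the +1 rounding safe even at the maximum value (pure-Python model of one element, not part of the patch):

    def pavg(a, b, size=8):
        # Unsigned average with rounding: (a + b + 1) >> 1 on size+1 bits
        return ((a + b + 1) >> 1) & ((1 << size) - 1)

    assert pavg(0, 1) == 1           # rounds up
    assert pavg(0xff, 0xff) == 0xff  # no wrap thanks to the wider addition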
+pavgb = vec_vertical_instr('avg', 8, _average)
+pavgw = vec_vertical_instr('avg', 16, _average)
+
+# Comparisons
+#
+
+# SSE
+pminsw = vec_vertical_instr('min', 16, lambda x: _min_max(x, signed=True))
+pminub = vec_vertical_instr('min', 8, lambda x: _min_max(x, signed=False))
+pminuw = vec_vertical_instr('min', 16, lambda x: _min_max(x, signed=False))
+pminud = vec_vertical_instr('min', 32, lambda x: _min_max(x, signed=False))
+pmaxub = vec_vertical_instr('max', 8, lambda x: _min_max(x, signed=False))
+pmaxuw = vec_vertical_instr('max', 16, lambda x: _min_max(x, signed=False))
+pmaxud = vec_vertical_instr('max', 32, lambda x: _min_max(x, signed=False))
+pmaxsw = vec_vertical_instr('max', 16, lambda x: _min_max(x, signed=True))
+
 # Floating-point arithmetic
 #
 
@@ -3448,12 +3593,6 @@ def por(_, instr, dst, src):
     return e, []
 
 
-def pminsw(_, instr, dst, src):
-    e = []
-    e.append(m2_expr.ExprAff(dst, m2_expr.ExprCond((dst - src).msb(), dst, src)))
-    return e, []
-
-
 def cvtdq2pd(_, instr, dst, src):
     e = []
     e.append(
@@ -3819,62 +3958,6 @@ def iret(ir, instr):
     return exprs, []
 
 
-def pmaxu(_, instr, dst, src, size):
-    e = []
-    for i in xrange(0, dst.size, size):
-        op1 = dst[i:i + size]
-        op2 = src[i:i + size]
-        res = op1 - op2
-        # Compote CF in @res = @op1 - @op2
-        ret = (((op1 ^ op2) ^ res) ^ ((op1 ^ res) & (op1 ^ op2))).msb()
-
-        e.append(m2_expr.ExprAff(dst[i:i + size],
-                                 m2_expr.ExprCond(ret,
-                                                  src[i:i + size],
-                                                  dst[i:i + size])))
-    return e, []
-
-
-def pmaxub(ir, instr, dst, src):
-    return pmaxu(ir, instr, dst, src, 8)
-
-
-def pmaxuw(ir, instr, dst, src):
-    return pmaxu(ir, instr, dst, src, 16)
-
-
-def pmaxud(ir, instr, dst, src):
-    return pmaxu(ir, instr, dst, src, 32)
-
-
-def pminu(_, instr, dst, src, size):
-    e = []
-    for i in xrange(0, dst.size, size):
-        op1 = dst[i:i + size]
-        op2 = src[i:i + size]
-        res = op1 - op2
-        # Compote CF in @res = @op1 - @op2
-        ret = (((op1 ^ op2) ^ res) ^ ((op1 ^ res) & (op1 ^ op2))).msb()
-
-        e.append(m2_expr.ExprAff(dst[i:i + size],
-                                 m2_expr.ExprCond(ret,
-                                                  dst[i:i + size],
-                                                  src[i:i + size])))
-    return e, []
-
-
-def pminub(ir, instr, dst, src):
-    return pminu(ir, instr, dst, src, 8)
-
-
-def pminuw(ir, instr, dst, src):
-    return pminu(ir, instr, dst, src, 16)
-
-
-def pminud(ir, instr, dst, src):
-    return pminu(ir, instr, dst, src, 32)
-
-
 def pcmpeq(_, instr, dst, src, size):
     e = []
     for i in xrange(0, dst.size, size):
@@ -4173,6 +4256,202 @@ def palignr(ir, instr, dst, src, imm):
     return [m2_expr.ExprAff(dst, result)], []
 
 
+def _signed_saturation(expr, dst_size):
+    """Saturate the expr @expr for @dst_size bit
+    Signed saturation return MAX_INT / MIN_INT or value depending on the value
+    """
+    assert expr.size > dst_size
+
+    median = 1 << (dst_size - 1)
+    min_int = m2_expr.ExprInt(- median, dst_size)
+    max_int = m2_expr.ExprInt(median - 1, dst_size)
+    signed = expr.msb()
+    value_unsigned = (expr ^ expr.mask) + m2_expr.ExprInt(1, expr.size)
+    # Re-use the sign bit
+    value = m2_expr.ExprCompose(expr[:dst_size - 1], signed)
+
+    # Bit hack: to avoid a double signed comparison, use a mask
+    # i.e., in unsigned, 0xXY > 0x0f iff X is nonzero
+
+    # if expr >s 0
+    #    if expr[dst_size - 1:] > 0: # bigger than max_int
+    #        -> max_int
+    #    else
+    #        -> value
+    # else # negative
+    #    if value_unsigned[dst_size - 1:] > 0: # smaller than min_int
+    #        -> min_int
+    #    else
+    #        -> value
+
+    return m2_expr.ExprCond(
+        signed,
+        m2_expr.ExprCond(value_unsigned[dst_size - 1:],
+                         min_int,
+                         value),
+        m2_expr.ExprCond(expr[dst_size - 1:],
+                         max_int,
+                         value),
+    )
+
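A reference model of the intended clamping, checked on a few values (pure Python, not part of the patch):

    def ssat(value, dst_size):
        # Clamp a signed value into [-2**(n-1), 2**(n-1) - 1]
        lo, hi = -(1 << (dst_size - 1)), (1 << (dst_size - 1)) - 1
        return max(lo, min(hi, value))

    assert ssat(0x1234, 8) == 0x7f    # too big -> MAX_INT
    assert ssat(-0x1234, 8) == -0x80  # too small -> MIN_INT
    assert ssat(-3, 8) == -3          # in range -> unchanged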
+
+def _unsigned_saturation(expr, dst_size):
+    """Saturate the expr @expr for @dst_size bit
+    Unsigned saturation return MAX_INT or value depending on the value
+    """
+    assert expr.size > dst_size
+
+    zero = m2_expr.ExprInt(0, dst_size)
+    max_int = m2_expr.ExprInt(-1, dst_size)
+    value = expr[:dst_size]
+    signed = expr.msb()
+
+
+    # Bit hack: to avoid a double signed comparison, use a mask
+    # i.e., in unsigned, 0xXY > 0x0f iff X is nonzero
+
+    return m2_expr.ExprCond(
+        signed,
+        zero,
+        m2_expr.ExprCond(expr[dst_size:],
+                         max_int,
+                         value),
+    )
+
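And the unsigned counterpart (pure-Python reference model, not part of the patch):

    def usat(value, dst_size):
        # Clamp into [0, 2**n - 1]
        return max(0, min((1 << dst_size) - 1, value))

    assert usat(0x1ff, 8) == 0xff  # overflow -> MAX_INT
    assert usat(-5, 8) == 0        # negative -> 0
    assert usat(0x42, 8) == 0x42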
+
+
+def packsswb(ir, instr, dst, src):
+    out = []
+    for source in [dst, src]:
+        for start in xrange(0, dst.size, 16):
+            out.append(_signed_saturation(source[start:start + 16], 8))
+    return [m2_expr.ExprAff(dst, m2_expr.ExprCompose(*out))], []
+
+
+def packssdw(ir, instr, dst, src):
+    out = []
+    for source in [dst, src]:
+        for start in xrange(0, dst.size, 32):
+            out.append(_signed_saturation(source[start:start + 32], 16))
+    return [m2_expr.ExprAff(dst, m2_expr.ExprCompose(*out))], []
+
+
+def packuswb(ir, instr, dst, src):
+    out = []
+    for source in [dst, src]:
+        for start in xrange(0, dst.size, 16):
+            out.append(_unsigned_saturation(source[start:start + 16], 8))
+    return [m2_expr.ExprAff(dst, m2_expr.ExprCompose(*out))], []
+
+
+def _saturation_sub_unsigned(expr):
+    assert expr.is_op("+") and len(expr.args) == 2 and expr.args[-1].is_op("-")
+
+    # Compute the subtraction on one more bit to be able to distinguish cases:
+    # 0x48 - 0xd7 in 8 bits should saturate (to 0)
+    arg1 = expr.args[0].zeroExtend(expr.size + 1)
+    arg2 = expr.args[1].args[0].zeroExtend(expr.size + 1)
+    return _unsigned_saturation(arg1 - arg2, expr.size)
+
+def _saturation_sub_signed(expr):
+    assert expr.is_op("+") and len(expr.args) == 2 and expr.args[-1].is_op("-")
+
+    # Compute the subtraction on two more bits, see _saturation_sub_unsigned
+    arg1 = expr.args[0].signExtend(expr.size + 2)
+    arg2 = expr.args[1].args[0].signExtend(expr.size + 2)
+    return _signed_saturation(arg1 - arg2, expr.size)
+
+def _saturation_add(expr):
+    assert expr.is_op("+") and len(expr.args) == 2
+
+    # Compute the addition on one more bit to be able to distinguish cases:
+    # 0x48 + 0xd7 in 8 bits should saturate (to 0xff)
+
+    arg1 = expr.args[0].zeroExtend(expr.size + 1)
+    arg2 = expr.args[1].zeroExtend(expr.size + 1)
+
+    # We could also use _unsigned_saturation with two additional bits (to
+    # distinguish the negative and overflow cases), but the resulting
+    # expression would be more complicated, with an impossible case
+    # (signed=True), so the rule is rewritten here
+
+    return m2_expr.ExprCond((arg1 + arg2).msb(), m2_expr.ExprInt(-1, expr.size),
+                            expr)
+
+def _saturation_add_signed(expr):
+    assert expr.is_op("+") and len(expr.args) == 2
+
+    # Compute the addition on two more bits, see _saturation_add
+
+    arg1 = expr.args[0].signExtend(expr.size + 2)
+    arg2 = expr.args[1].signExtend(expr.size + 2)
+
+    return _signed_saturation(arg1 + arg2, expr.size)
+
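Taken together, the helpers implement the usual saturating arithmetic; for instance for PSUBUSB / PADDUSB (pure-Python model of one byte, not part of the patch):

    def psubusb_byte(a, b):
        # Unsigned saturating subtract, as in _saturation_sub_unsigned
        return max(0, a - b)

    def paddusb_byte(a, b):
        # Unsigned saturating add, as in _saturation_add
        return min(0xff, a + b)

    assert psubusb_byte(0x48, 0xd7) == 0     # underflow -> 0
    assert paddusb_byte(0x48, 0xd7) == 0xff  # overflow -> 0xff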
+
+# Saturate SSE operations
+
+psubusb = vec_vertical_instr('-', 8, _saturation_sub_unsigned)
+psubusw = vec_vertical_instr('-', 16, _saturation_sub_unsigned)
+paddusb = vec_vertical_instr('+', 8, _saturation_add)
+paddusw = vec_vertical_instr('+', 16, _saturation_add)
+psubsb = vec_vertical_instr('-', 8, _saturation_sub_signed)
+psubsw = vec_vertical_instr('-', 16, _saturation_sub_signed)
+paddsb = vec_vertical_instr('+', 8, _saturation_add_signed)
+paddsw = vec_vertical_instr('+', 16, _saturation_add_signed)
+
+
+# Other SSE operations
+
+def maskmovq(ir, instr, src, mask):
+    lbl_next = m2_expr.ExprId(ir.get_next_label(instr), ir.IRDst.size)
+    blks = []
+
+    # For each mask byte, check whether a write is necessary
+    check_labels = [m2_expr.ExprId(ir.gen_label(), ir.IRDst.size)
+                    for _ in xrange(0, mask.size, 8)]
+    # If the write has to be done, do it (otherwise, nothing happens)
+    write_labels = [m2_expr.ExprId(ir.gen_label(), ir.IRDst.size)
+                    for _ in xrange(0, mask.size, 8)]
+
+    # Build check blocks
+    for i, start in enumerate(xrange(0, mask.size, 8)):
+        bit = mask[start + 7: start + 8]
+        cur_label = check_labels[i]
+        next_check_label = check_labels[i + 1] if (i + 1) < len(check_labels) else lbl_next
+        write_label = write_labels[i]
+        check = m2_expr.ExprAff(ir.IRDst,
+                                m2_expr.ExprCond(bit,
+                                                 write_label,
+                                                 next_check_label))
+        blks.append(IRBlock(cur_label.name, [AssignBlock([check], instr)]))
+
+    # Build write blocks
+    dst_addr = mRDI[instr.mode]
+    for i, start in enumerate(xrange(0, mask.size, 8)):
+        bit = mask[start + 7: start + 8]
+        cur_label = write_labels[i]
+        next_check_label = check_labels[i + 1] if (i + 1) < len(check_labels) else lbl_next
+        write_addr = dst_addr + m2_expr.ExprInt(i, dst_addr.size)
+
+        # @8[DI/EDI/RDI + i] = src[byte i]
+        write_mem = m2_expr.ExprAff(m2_expr.ExprMem(write_addr, 8),
+                                    src[start: start + 8])
+        jump = m2_expr.ExprAff(ir.IRDst, next_check_label)
+        blks.append(IRBlock(cur_label.name, [AssignBlock([write_mem, jump], instr)]))
+
+    # If the mask is zero, bypass every check
+    e = [m2_expr.ExprAff(ir.IRDst, m2_expr.ExprCond(mask,
+                                                    check_labels[0],
+                                                    lbl_next))]
+    return e, blks
+
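The generated blocks amount to a per-byte predicated store; a pure-Python model of the intended behaviour (not part of the patch):

    def maskmovq_model(mem, di, src_bytes, mask_bytes):
        # Store src byte i at [DI + i] iff the MSB of mask byte i is set
        for i, (s, m) in enumerate(zip(src_bytes, mask_bytes)):
            if m & 0x80:
                mem[di + i] = s

    mem = {}
    maskmovq_model(mem, 0x1000, [1, 2, 3, 4, 5, 6, 7, 8],
                   [0x80, 0, 0xff, 0, 0, 0, 0, 0x80])
    assert mem == {0x1000: 1, 0x1002: 3, 0x1007: 8}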
+
+def emms(ir, instr):
+    # Implemented as a NOP
+    return [], []
+
+
 mnemo_func = {'mov': mov,
               'xchg': xchg,
               'movzx': movzx,
@@ -4557,6 +4836,29 @@ mnemo_func = {'mov': mov,
               "psubd": psubd,
               "psubq": psubq,
 
+              # Multiplications
+              # SSE
+              "pmullb": pmullb,
+              "pmullw": pmullw,
+              "pmulld": pmulld,
+              "pmullq": pmullq,
+              "pmulhub": pmulhub,
+              "pmulhuw": pmulhuw,
+              "pmulhud": pmulhud,
+              "pmulhuq": pmulhuq,
+              "pmulhb": pmulhb,
+              "pmulhw": pmulhw,
+              "pmulhd": pmulhd,
+              "pmulhq": pmulhq,
+              "pmuludq": pmuludq,
+
+              # Mix
+              # SSE
+              "pmaddwd": pmaddwd,
+              "psadbw": psadbw,
+              "pavgb": pavgb,
+              "pavgw": pavgw,
+
               # Arithmetic (floating-point)
               #
 
@@ -4614,6 +4916,7 @@ mnemo_func = {'mov': mov,
               "pmaxub": pmaxub,
               "pmaxuw": pmaxuw,
               "pmaxud": pmaxud,
+              "pmaxsw": pmaxsw,
 
               "pminub": pminub,
               "pminuw": pminuw,
@@ -4670,8 +4973,23 @@ mnemo_func = {'mov': mov,
 
               "pmovmskb": pmovmskb,
 
-              "smsw": smsw,
+              "packsswb": packsswb,
+              "packssdw": packssdw,
+              "packuswb": packuswb,
+
+              "psubusb": psubusb,
+              "psubusw": psubusw,
+              "paddusb": paddusb,
+              "paddusw": paddusw,
+              "psubsb": psubsb,
+              "psubsw": psubsw,
+              "paddsb": paddsb,
+              "paddsw": paddsw,
 
+              "smsw": smsw,
+              "maskmovq": maskmovq,
+              "maskmovdqu": maskmovq,
+              "emms": emms,
               }
 
 
diff --git a/test/arch/x86/arch.py b/test/arch/x86/arch.py
index d3b2964c..2af90c8a 100644
--- a/test/arch/x86/arch.py
+++ b/test/arch/x86/arch.py
@@ -2902,11 +2902,11 @@ reg_tests = [
     (m32, "00000000    PEXTRW     WORD PTR [EDX], XMM2, 0x5",
     "660F3A151205"),
 
+    (m32, "00000000    PEXTRW     EAX, MM2, 0x5",
+    "0fc5c205"),
+    (m32, "00000000    PEXTRW     EAX, XMM2, 0x5",
+    "660fc5c205"),
 
-    (m32, "00000000    PEXTRW     WORD PTR [EDX], MM2, 0x5",
-    "0FC51205"),
-    (m32, "00000000    PEXTRW     WORD PTR [EDX], XMM2, 0x5",
-    "660FC51205"),
 
     (m32, "00000000    PEXTRD     DWORD PTR [EDX], XMM2, 0x5",
     "660F3A161205"),
@@ -2970,6 +2970,113 @@ reg_tests = [
     (m64, "00000000    BNDMOV     BND3, XMMWORD PTR [RSP + 0xB0]",
      "660f1a9c24b0000000"),
 
+    (m32, "00000000    PACKSSWB   MM7, MM0",
+     "0f63f8"),
+    (m32, "00000000    PACKSSWB   XMM0, XMM5",
+     "660f63c5"),
+
+    (m32, "00000000    PACKSSDW   MM2, MM0",
+     "0f6bd0"),
+    (m32, "00000000    PACKSSDW   XMM0, XMM7",
+     "660f6bc7"),
+
+    (m32, "00000000    PACKUSWB   MM1, MM7",
+     "0f67cf"),
+    (m32, "00000000    PACKUSWB   XMM0, XMM6",
+     "660f67c6"),
+
+    (m32, "00000000    PMULLW     MM4, MM2",
+     "0fd5e2"),
+    (m32, "00000000    PMULLW     XMM0, XMM3",
+     "660fd5c3"),
+
+    (m32, "00000000    PSUBUSB    MM5, MM3",
+     "0fd8eb"),
+    (m32, "00000000    PSUBUSB    XMM0, XMM5",
+     "660fd8c5"),
+
+    (m32, "00000000    PSUBUSW    MM5, MM3",
+     "0fd9eb"),
+    (m32, "00000000    PSUBUSW    XMM0, XMM5",
+     "660fd9c5"),
+
+    (m32, "00000000    PADDUSB    MM5, MM3",
+     "0fdceb"),
+    (m32, "00000000    PADDUSB    XMM0, XMM6",
+     "660fdcc6"),
+
+    (m32, "00000000    PADDUSW    MM7, MM5",
+     "0fddfd"),
+    (m32, "00000000    PADDUSW    XMM0, XMM1",
+     "660fddc1"),
+
+    (m32, "00000000    PMULHUW    MM6, MM4",
+     "0fe4f4"),
+    (m32, "00000000    PMULHUW    XMM0, XMM7",
+     "660fe4c7"),
+
+    (m32, "00000000    PMULHW     MM6, MM4",
+     "0fe5f4"),
+    (m32, "00000000    PMULHW     XMM0, XMM7",
+     "660fe5c7"),
+
+    (m32, "00000000    PSUBSB     MM2, MM0",
+     "0fe8d0"),
+    (m32, "00000000    PSUBSB     XMM0, XMM4",
+     "660fe8c4"),
+
+    (m32, "00000000    PSUBSW     MM3, MM1",
+     "0fe9d9"),
+    (m32, "00000000    PSUBSW     XMM0, XMM6",
+     "660fe9c6"),
+
+    (m32, "00000000    PADDSB     MM2, MM0",
+     "0fecd0"),
+    (m32, "00000000    PADDSB     XMM0, XMM4",
+     "660fecc4"),
+
+    (m32, "00000000    PADDSW     MM3, MM1",
+     "0fedd9"),
+    (m32, "00000000    PADDSW     XMM0, XMM6",
+     "660fedc6"),
+
+    (m32, "00000000    PMAXSW     MM3, MM1",
+     "0feed9"),
+    (m32, "00000000    PMAXSW     XMM0, XMM6",
+     "660feec6"),
+
+    (m32, "00000000    PMULUDQ    MM3, MM1",
+     "0ff4d9"),
+    (m32, "00000000    PMULUDQ    XMM0, XMM6",
+     "660ff4c6"),
+
+    (m32, "00000000    PMADDWD    MM3, MM1",
+     "0ff5d9"),
+    (m32, "00000000    PMADDWD    XMM0, XMM6",
+     "660ff5c6"),
+
+    (m32, "00000000    PSADBW     MM3, MM1",
+     "0ff6d9"),
+    (m32, "00000000    PSADBW     XMM0, XMM6",
+     "660ff6c6"),
+
+    (m32, "00000000    PAVGB      MM3, MM1",
+     "0fe0d9"),
+    (m32, "00000000    PAVGB      XMM0, XMM6",
+     "660fe0c6"),
+
+    (m32, "00000000    PAVGW      MM3, MM1",
+     "0fe3d9"),
+    (m32, "00000000    PAVGW      XMM0, XMM6",
+     "660fe3c6"),
+
+    (m32, "00000000    MASKMOVQ   MM2, MM3",
+     "0ff7d3"),
+    (m32, "00000000    MASKMOVDQU XMM4, XMM5",
+     "660ff7e5"),
+
+    (m32, "00000000    EMMS",
+     "0f77"),
 ]