about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--.travis.yml3
-rw-r--r--example/disasm/full.py10
-rw-r--r--example/ida/depgraph.py64
-rw-r--r--example/ida/graph_ir.py19
-rw-r--r--example/samples/x86_32_if_reg.S11
-rw-r--r--example/symbol_exec/depgraph.py81
-rw-r--r--miasm2/analysis/binary.py23
-rw-r--r--miasm2/analysis/depgraph.py910
-rw-r--r--miasm2/arch/x86/sem.py86
-rw-r--r--miasm2/core/asmbloc.py420
-rw-r--r--miasm2/core/bin_stream.py5
-rw-r--r--miasm2/core/graph.py28
-rw-r--r--miasm2/expression/expression_helper.py124
-rw-r--r--miasm2/ir/translators/C.py1
-rw-r--r--miasm2/ir/translators/__init__.py1
-rw-r--r--miasm2/ir/translators/smt2.py9
-rw-r--r--miasm2/ir/translators/z3_ir.py2
-rw-r--r--miasm2/jitter/jitcore.py42
-rw-r--r--miasm2/jitter/vm_mngr.c44
-rw-r--r--miasm2/jitter/vm_mngr.h3
-rw-r--r--miasm2/jitter/vm_mngr_py.c22
-rw-r--r--miasm2/os_dep/win_api_x86_32_seh.py80
-rw-r--r--test/analysis/depgraph.py1295
-rw-r--r--test/analysis/dg_check.py20
-rw-r--r--test/analysis/dg_test_00_expected.json1
-rw-r--r--test/analysis/dg_test_00_implicit_expected.json1
-rw-r--r--test/analysis/dg_test_01_expected.json1
-rw-r--r--test/analysis/dg_test_01_implicit_expected.json1
-rw-r--r--test/analysis/dg_test_02_expected.json1
-rw-r--r--test/analysis/dg_test_02_implicit_expected.json1
-rw-r--r--test/analysis/dg_test_03_expected.json1
-rw-r--r--test/analysis/dg_test_03_implicit_expected.json1
-rw-r--r--test/analysis/dg_test_04_expected.json1
-rw-r--r--test/analysis/dg_test_04_implicit_expected.json1
-rw-r--r--test/analysis/dg_test_05_expected.json1
-rw-r--r--test/analysis/dg_test_05_implicit_expected.json1
-rw-r--r--test/analysis/dg_test_06_expected.json1
-rw-r--r--test/analysis/dg_test_06_implicit_expected.json1
-rw-r--r--test/analysis/dg_test_07_expected.json1
-rw-r--r--test/analysis/dg_test_07_implicit_expected.json1
-rw-r--r--test/analysis/dg_test_08_expected.json1
-rw-r--r--test/analysis/dg_test_08_implicit_expected.json1
-rw-r--r--test/analysis/dg_test_09_expected.json2
-rw-r--r--test/analysis/dg_test_09_implicit_expected.json1
-rw-r--r--test/analysis/dg_test_10_expected.json1
-rw-r--r--test/analysis/dg_test_10_implicit_expected.json1
-rw-r--r--test/arch/x86/unit/asm_test.py59
-rw-r--r--test/arch/x86/unit/mn_daa.py4
-rw-r--r--test/arch/x86/unit/mn_das.py4
-rw-r--r--test/arch/x86/unit/mn_float.py4
-rw-r--r--test/arch/x86/unit/mn_int.py4
-rw-r--r--test/arch/x86/unit/mn_pcmpeq.py8
-rw-r--r--test/arch/x86/unit/mn_pextr.py4
-rw-r--r--test/arch/x86/unit/mn_pinsr.py4
-rw-r--r--test/arch/x86/unit/mn_pmaxu.py4
-rw-r--r--test/arch/x86/unit/mn_pminu.py4
-rw-r--r--test/arch/x86/unit/mn_pmovmskb.py4
-rw-r--r--test/arch/x86/unit/mn_pshufb.py4
-rw-r--r--test/arch/x86/unit/mn_psrl_psll.py6
-rw-r--r--test/arch/x86/unit/mn_punpck.py14
-rw-r--r--test/arch/x86/unit/mn_pushpop.py125
-rw-r--r--test/arch/x86/unit/mn_stack.py4
-rw-r--r--test/arch/x86/unit/mn_strings.py6
-rw-r--r--test/core/asmbloc.py30
-rw-r--r--test/expression/expression.py40
-rw-r--r--test/samples/x86_32/dg_test_00.S9
-rw-r--r--test/samples/x86_32/dg_test_01.S9
-rw-r--r--test/samples/x86_32/dg_test_02.S12
-rw-r--r--test/samples/x86_32/dg_test_03.S10
-rw-r--r--test/samples/x86_32/dg_test_04.S10
-rw-r--r--test/samples/x86_32/dg_test_05.S11
-rw-r--r--test/samples/x86_32/dg_test_06.S12
-rw-r--r--test/samples/x86_32/dg_test_07.S10
-rw-r--r--test/samples/x86_32/dg_test_08.S10
-rw-r--r--test/samples/x86_32/dg_test_09.S13
-rw-r--r--test/samples/x86_32/dg_test_10.S20
-rw-r--r--test/test_all.py105
-rw-r--r--test/utils/monothread.py20
-rw-r--r--test/utils/multithread.py24
-rw-r--r--test/utils/screendisplay.py115
-rw-r--r--test/utils/testset.py12
81 files changed, 2082 insertions, 1978 deletions
diff --git a/.travis.yml b/.travis.yml
index bc203658..c021d97f 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -7,7 +7,6 @@ addons:
         packages:
             - make
             - gcc
-            - libpython2.7-dev
             - python-virtualenv
             - unzip
 before_script:
@@ -34,4 +33,4 @@ before_script:
 - "cd miasm;"
 - "python setup.py build build_ext -I$(pwd)/../virtualenv/include -L$(pwd)/../virtualenv/tinycc"
 - "python setup.py install"
-script: "cd test && python test_all.py -m"
+script: "cd test && python test_all.py"
diff --git a/example/disasm/full.py b/example/disasm/full.py
index ee0b88dd..7ff60d3b 100644
--- a/example/disasm/full.py
+++ b/example/disasm/full.py
@@ -78,7 +78,7 @@ mn, dis_engine = machine.mn, machine.dis_engine
 ira, ir = machine.ira, machine.ir
 log.info('ok')
 
-mdis = dis_engine(bs)
+mdis = dis_engine(bs, symbol_pool=cont.symbol_pool)
 # configure disasm engine
 mdis.dontdis_retcall = args.dontdis_retcall
 mdis.blocs_wd = args.blockwatchdog
@@ -86,7 +86,13 @@ mdis.dont_dis_nulstart_bloc = not args.dis_nulstart_block
 mdis.follow_call = args.followcall
 
 todo = []
-addrs = [int(a, 0) for a in args.address]
+addrs = []
+for addr in args.address:
+    try:
+        addrs.append(int(addr, 0))
+    except ValueError:
+        # Second chance, try with symbol
+        addrs.append(mdis.symbol_pool.getby_name(addr).offset)
 
 if len(addrs) == 0 and default_addr is not None:
     addrs.append(default_addr)
diff --git a/example/ida/depgraph.py b/example/ida/depgraph.py
index 406f7200..3c57e51b 100644
--- a/example/ida/depgraph.py
+++ b/example/ida/depgraph.py
@@ -11,11 +11,15 @@ from miasm2.analysis.depgraph import DependencyGraph
 from utils import guess_machine
 
 
+
 class depGraphSettingsForm(Form):
 
     def __init__(self, ira):
 
         self.ira = ira
+        self.stk_args = {'ARG%d' % i:i for i in xrange(10)}
+        self.stk_unalias_force = False
+
         self.address = ScreenEA()
         cur_bloc = list(ira.getby_offset(self.address))[0]
         for line_nb, l in enumerate(cur_bloc.lines):
@@ -24,6 +28,7 @@ class depGraphSettingsForm(Form):
         cur_label = str(cur_bloc.label)
         labels = sorted(map(str, ira.blocs.keys()))
         regs = sorted(ir_arch.arch.regs.all_regs_ids_byname.keys())
+        regs += self.stk_args.keys()
         reg_default = regs[0]
         for i in xrange(10):
             opnd = GetOpnd(self.address, i).upper()
@@ -45,10 +50,13 @@ Track the element:
 <Register to track:{cbReg}>
 <Line number:{iLineNb}>
 
+INFO: To track stack argument number n, use "ARGn"
+
 Method to use:
 <Follow Memory:{rNoMem}>
 <Follow Call:{rNoCall}>
-<Implicit dependencies:{rImplicit}>{cMethod}>
+<Implicit dependencies:{rImplicit}>
+<Unalias stack:{rUnaliasStack}>{cMethod}>
 
 <Highlight color:{cColor}>
 """, {
@@ -58,7 +66,8 @@ Method to use:
                     selval=reg_default),
             'cMode': Form.RadGroupControl(("rBeforeLine", "rAfterLine",
                                            "rEndBlock")),
-            'cMethod': Form.ChkGroupControl(("rNoMem", "rNoCall", "rImplicit")),
+            'cMethod': Form.ChkGroupControl(("rNoMem", "rNoCall", "rImplicit",
+                                             "rUnaliasStack")),
             'iLineNb': Form.NumericInput(tp=Form.FT_RAWHEX,
                                          value=line_nb),
             'cbBBL': Form.DropdownListControl(
@@ -92,7 +101,21 @@ Method to use:
     @property
     def elements(self):
         value = self.cbReg.value
-        return set([ir_arch.arch.regs.all_regs_ids_byname[value]])
+        if value in self.stk_args:
+            line = self.ira.blocs[self.label].lines[self.line_nb]
+            arg_num = self.stk_args[value]
+            stk_high = m2_expr.ExprInt(GetSpd(line.offset), ir_arch.sp.size)
+            stk_off = m2_expr.ExprInt(self.ira.sp.size/8 * arg_num, ir_arch.sp.size)
+            element =  m2_expr.ExprMem(mn.regs.regs_init[ir_arch.sp] + stk_high + stk_off, self.ira.sp.size)
+            element = expr_simp(element)
+            # Force stack unaliasing
+            self.stk_unalias_force = True
+        elif value:
+            element = ir_arch.arch.regs.all_regs_ids_byname.get(value, None)
+
+        else:
+            raise ValueError("Unknown element '%s'!" % value)
+        return set([element])
 
     @property
     def depgraph(self):
@@ -103,6 +126,10 @@ Method to use:
                                follow_call=value & 2)
 
     @property
+    def unalias_stack(self):
+        return self.cMethod.value & 8 or self.stk_unalias_force
+
+    @property
     def color(self):
         return self.cColor.value
 
@@ -130,19 +157,31 @@ blocs = mdis.dis_multibloc(func.startEA)
 for bloc in blocs:
     ir_arch.add_bloc(bloc)
 
-# Simplify affectations
-for irb in ir_arch.blocs.values():
-    for irs in irb.irs:
-        for i, expr in enumerate(irs):
-            irs[i] = m2_expr.ExprAff(expr_simp(expr.dst), expr_simp(expr.src))
-
 # Get settings
 settings = depGraphSettingsForm(ir_arch)
 settings.Execute()
 
+label, elements, line_nb = settings.label, settings.elements, settings.line_nb
+# Simplify affectations
+for irb in ir_arch.blocs.values():
+    fix_stack = irb.label.offset is not None and settings.unalias_stack
+    for i, assignblk in enumerate(irb.irs):
+        if fix_stack:
+            stk_high = m2_expr.ExprInt_from(ir_arch.sp, GetSpd(irb.lines[i].offset))
+            fix_dct = {ir_arch.sp: mn.regs.regs_init[ir_arch.sp] + stk_high}
+
+        for dst, src in assignblk.items():
+            del(assignblk[dst])
+            if fix_stack:
+                src = src.replace_expr(fix_dct)
+                if dst != ir_arch.sp:
+                    dst = dst.replace_expr(fix_dct)
+            dst, src = expr_simp(dst), expr_simp(src)
+            assignblk[dst] = src
+
 # Get dependency graphs
 dg = settings.depgraph
-graphs = dg.get(settings.label, settings.elements, settings.line_nb,
+graphs = dg.get(label, elements, line_nb,
                 set([ir_arch.symbol_pool.getby_offset(func.startEA)]))
 
 # Display the result
@@ -183,7 +222,10 @@ def treat_element():
         comments[offset] = comments.get(offset, []) + [node.element]
         SetColor(offset, CIC_ITEM, settings.color)
 
-    print "Possible value: %s" % graph.emul().values()[0]
+    if graph.has_loop:
+        print 'Graph has dependency loop: symbolic execution is inexact'
+    else:
+        print "Possible value: %s" % graph.emul().values()[0]
 
     for offset, elements in comments.iteritems():
         MakeComm(offset, ", ".join(map(str, elements)))
diff --git a/example/ida/graph_ir.py b/example/ida/graph_ir.py
index 4447cadd..188c8fa6 100644
--- a/example/ida/graph_ir.py
+++ b/example/ida/graph_ir.py
@@ -19,10 +19,11 @@ def color_irbloc(irbloc):
     lbl = '%s' % irbloc.label
     lbl = idaapi.COLSTR(lbl, idaapi.SCOLOR_INSN)
     o.append(lbl)
-    for i, expr in enumerate(irbloc.irs):
-        for e in expr:
-            s = expr2colorstr(ir_arch.arch.regs.all_regs_ids, e)
-            s = idaapi.COLSTR(s, idaapi.SCOLOR_INSN)
+    for assignblk in irbloc.irs:
+        for dst, src in sorted(assignblk.iteritems()):
+            dst_f = expr2colorstr(ir_arch.arch.regs.all_regs_ids, dst)
+            src_f = expr2colorstr(ir_arch.arch.regs.all_regs_ids, src)
+            s = idaapi.COLSTR("%s = %s" % (dst_f, src_f), idaapi.SCOLOR_INSN)
             o.append('    %s' % s)
         o.append("")
     o.pop()
@@ -119,7 +120,7 @@ print hex(ad)
 ab = mdis.dis_multibloc(ad)
 
 print "generating graph"
-open('asm_flow.dot', 'w').write(ab.graph.dot(label=True))
+open('asm_flow.dot', 'w').write(ab.dot())
 
 
 print "generating IR... %x" % ad
@@ -133,9 +134,11 @@ for b in ab:
 print "IR ok... %x" % ad
 
 for irb in ir_arch.blocs.values():
-    for irs in irb.irs:
-        for i, expr in enumerate(irs):
-            irs[i] = ExprAff(expr_simp(expr.dst), expr_simp(expr.src))
+    for assignblk in irb.irs:
+        for dst, src in assignblk.items():
+            del(assignblk[dst])
+            dst, src = expr_simp(dst), expr_simp(src)
+            assignblk[dst] = src
 
 out = ir_arch.graph.dot()
 open(os.path.join(tempfile.gettempdir(), 'graph.dot'), 'wb').write(out)
diff --git a/example/samples/x86_32_if_reg.S b/example/samples/x86_32_if_reg.S
new file mode 100644
index 00000000..f519f8f7
--- /dev/null
+++ b/example/samples/x86_32_if_reg.S
@@ -0,0 +1,11 @@
+main:
+	MOV   EAX, 0x0
+	CMP   EBX, 0
+	JZ    skip1
+	OR    EAX, 0x11220000
+skip1:
+	CMP   EBX, 0
+	JZ    skip2
+	OR    EAX, 0x3344
+skip2:
+	RET
diff --git a/example/symbol_exec/depgraph.py b/example/symbol_exec/depgraph.py
index a870b275..b4f793c0 100644
--- a/example/symbol_exec/depgraph.py
+++ b/example/symbol_exec/depgraph.py
@@ -1,5 +1,6 @@
 from argparse import ArgumentParser
 from pdb import pm
+import json
 
 from miasm2.analysis.machine import Machine
 from miasm2.analysis.binary import Container
@@ -12,18 +13,21 @@ parser.add_argument("func_addr", help="Function address")
 parser.add_argument("target_addr", help="Address to start")
 parser.add_argument("element", nargs="+", help="Elements to track")
 parser.add_argument("-m", "--architecture",
-		    help="Architecture (%s)" % Machine.available_machine())
+                    help="Architecture (%s)" % Machine.available_machine())
 parser.add_argument("-i", "--implicit", help="Use implicit tracking",
-		    action="store_true")
+                    action="store_true")
 parser.add_argument("--unfollow-mem", help="Stop on memory statements",
-		    action="store_true")
+                    action="store_true")
 parser.add_argument("--unfollow-call", help="Stop on call statements",
-		    action="store_true")
+                    action="store_true")
 parser.add_argument("--do-not-simplify", help="Do not simplify expressions",
-		    action="store_true")
+                    action="store_true")
 parser.add_argument("--rename-args",
                     help="Rename common arguments (@32[ESP_init] -> Arg1)",
-		    action="store_true")
+                    action="store_true")
+parser.add_argument("--json",
+                    help="Output solution in JSON",
+                    action="store_true")
 args = parser.parse_args()
 
 # Get architecture
@@ -38,9 +42,9 @@ elements = set()
 regs = machine.mn.regs.all_regs_ids_byname
 for element in args.element:
     try:
-	elements.add(regs[element.upper()])
+        elements.add(regs[element])
     except KeyError:
-	raise ValueError("Unknown element '%s'" % element)
+        raise ValueError("Unknown element '%s'" % element)
 
 mdis = machine.dis_engine(cont.bin_stream, dont_dis_nulstart_bloc=True)
 ir_arch = machine.ira(mdis.symbol_pool)
@@ -63,9 +67,9 @@ for block in blocks:
 
 # Get the instance
 dg = DependencyGraph(ir_arch, implicit=args.implicit,
-		     apply_simp=not(args.do_not_simplify),
-		     follow_mem=not(args.unfollow_mem),
-		     follow_call=not(args.unfollow_call))
+                     apply_simp=not args.do_not_simplify,
+                     follow_mem=not args.unfollow_mem,
+                     follow_call=not args.unfollow_call)
 
 # Build information
 target_addr = int(args.target_addr, 0)
@@ -73,23 +77,46 @@ current_block = list(ir_arch.getby_offset(target_addr))[0]
 line_nb = 0
 for line_nb, line in enumerate(current_block.lines):
     if line.offset == target_addr:
-	break
+        break
 
 # Enumerate solutions
+json_solutions = []
 for sol_nb, sol in enumerate(dg.get(current_block.label, elements, line_nb, set())):
-	fname = "sol_%d.dot" % sol_nb
-	with open(fname, "w") as fdesc:
-		fdesc.write(sol.graph.dot())
-	result = ", ".join("%s: %s" % (k, v)
-			   for k, v in sol.emul(ctx=init_ctx).iteritems())
-	print "Solution %d: %s -> %s" % (sol_nb,
-					 result,
-					 fname)
-        if args.implicit:
-            sat = sol.is_satisfiable
-            constraints = ""
-            if sat:
-                constraints = {}
-                for element in sol.constraints:
-                    constraints[element] = hex(sol.constraints[element].as_long())
+    fname = "sol_%d.dot" % sol_nb
+    with open(fname, "w") as fdesc:
+            fdesc.write(sol.graph.dot())
+
+    results = sol.emul(ctx=init_ctx)
+    tokens = {str(k): str(v) for k, v in results.iteritems()}
+    if not args.json:
+        result = ", ".join("=".join(x) for x in tokens.iteritems())
+        print "Solution %d: %s -> %s" % (sol_nb,
+                                         result,
+                                         fname)
+        if sol.has_loop:
+            print '\tLoop involved'
+
+    if args.implicit:
+        sat = sol.is_satisfiable
+        constraints = {}
+        if sat:
+            for element in sol.constraints:
+                try:
+                    result = hex(sol.constraints[element].as_long())
+                except AttributeError:
+                    result = str(sol.constraints[element])
+                constraints[element] = result
+        if args.json:
+            tokens["satisfiability"] = sat
+            tokens["constraints"] = {str(k): str(v)
+                                     for k, v in constraints.iteritems()}
+        else:
             print "\tSatisfiability: %s %s" % (sat, constraints)
+
+    if args.json:
+        tokens["has_loop"] = sol.has_loop
+        json_solutions.append(tokens)
+
+
+if args.json:
+    print json.dumps(json_solutions)
diff --git a/miasm2/analysis/binary.py b/miasm2/analysis/binary.py
index 900d76ab..d47ca884 100644
--- a/miasm2/analysis/binary.py
+++ b/miasm2/analysis/binary.py
@@ -2,6 +2,7 @@ import logging
 
 from miasm2.core.bin_stream import bin_stream_str, bin_stream_elf, bin_stream_pe
 from miasm2.jitter.csts import PAGE_READ
+from miasm2.core.asmbloc import asm_symbol_pool
 
 
 log = logging.getLogger("binary")
@@ -93,6 +94,7 @@ class Container(object):
         self._bin_stream = None
         self._entry_point = None
         self._arch = None
+        self._symbol_pool = asm_symbol_pool()
 
         # Launch parsing
         self.parse(*args, **kwargs)
@@ -117,6 +119,11 @@ class Container(object):
         "Return the guessed architecture"
         return self._arch
 
+    @property
+    def symbol_pool(self):
+        "asm_symbol_pool instance preloaded with container symbols (if any)"
+        return self._symbol_pool
+
 
 ## Format dependent classes
 class ContainerPE(Container):
@@ -186,6 +193,22 @@ class ContainerELF(Container):
         except Exception, error:
             raise ContainerParsingException('Cannot read ELF: %s' % error)
 
+        # Add known symbols
+        symtab = self._executable.getsectionbyname(".symtab")
+        if symtab is not None:
+            for name, symb in symtab.symbols.iteritems():
+                offset = symb.value
+                if offset != 0:
+                    try:
+                        self._symbol_pool.add_label(name, offset)
+                    except ValueError:
+                        # Two symbols point to the same offset
+                        log.warning("Same offset (%s) for %s and %s", (hex(offset),
+                                                                       name,
+                                                                       self._symbol_pool.getby_offset(offset)))
+                        continue
+
+
 
 class ContainerUnknown(Container):
     "Container abstraction for unknown format"
diff --git a/miasm2/analysis/depgraph.py b/miasm2/analysis/depgraph.py
index 897de77b..b574e421 100644
--- a/miasm2/analysis/depgraph.py
+++ b/miasm2/analysis/depgraph.py
@@ -1,20 +1,18 @@
 """Provide dependency graph"""
-import itertools
-from collections import deque
-from UserDict import IterableUserDict
-
-try:
-    import z3
-except ImportError:
-    pass
 
 import miasm2.expression.expression as m2_expr
 from miasm2.core.graph import DiGraph
-from miasm2.core.asmbloc import asm_label, expr_is_label
+from miasm2.core.asmbloc import asm_label, expr_is_int_or_label, expr_is_label
 from miasm2.expression.simplifications import expr_simp
 from miasm2.ir.symbexec import symbexec
-from miasm2.ir.ir import irbloc
+from miasm2.ir.ir import irbloc, AssignBlock
 from miasm2.ir.translators import Translator
+from miasm2.expression.expression_helper import possible_values
+
+try:
+    import z3
+except ImportError:
+    pass
 
 
 class DependencyNode(object):
@@ -26,24 +24,21 @@ class DependencyNode(object):
     line.
     """
 
-    __slots__ = ["_label", "_element", "_line_nb", "_modifier",
+    __slots__ = ["_label", "_element", "_line_nb",
                  "_step", "_nostep_repr", "_hash"]
 
-    def __init__(self, label, element, line_nb, step, modifier=False):
+    def __init__(self, label, element, line_nb):
         """Create a dependency node with:
         @label: asm_label instance
         @element: Expr instance
         @line_nb: int
-        @modifier: bool
         """
         self._label = label
         self._element = element
         self._line_nb = line_nb
-        self._modifier = modifier
-        self._step = step
         self._nostep_repr = (self._label, self._line_nb, self._element)
         self._hash = hash(
-            (self._label, self._element, self._line_nb, self._step))
+            (self._label, self._element, self._line_nb))
 
     def __hash__(self):
         """Returns a hash of @self to uniquely identify @self"""
@@ -57,8 +52,7 @@ class DependencyNode(object):
             return False
         return (self.label == depnode.label and
                 self.element == depnode.element and
-                self.line_nb == depnode.line_nb and
-                self.step == depnode.step)
+                self.line_nb == depnode.line_nb)
 
     def __cmp__(self, node):
         """Compares @self with @node. The step attribute is not taken into
@@ -72,10 +66,9 @@ class DependencyNode(object):
 
     def __str__(self):
         """Returns a string representation of DependencyNode"""
-        return "<%s %s %s %s M:%s S:%s>" % (self.__class__.__name__,
-                                            self.label.name, self.element,
-                                            self.line_nb, self.modifier,
-                                            self.step)
+        return "<%s %s %s %s>" % (self.__class__.__name__,
+                                  self.label.name, self.element,
+                                  self.line_nb)
 
     def __repr__(self):
         """Returns a string representation of DependencyNode"""
@@ -101,473 +94,219 @@ class DependencyNode(object):
         "Line in the current IRBlock"
         return self._line_nb
 
-    @property
-    def step(self):
-        "Step of the current node"
-        return self._step
-
-    @property
-    def modifier(self):
-        """Evaluating the current line involves a modification of tracked
-        dependencies"""
-        return self._modifier
-
-    @modifier.setter
-    def modifier(self, value):
-        """Evaluating the current line involves a modification of tracked
-        dependencies if @value.
-        @value: boolean"""
-        self._modifier = value
-
-
-class CacheWrapper(IterableUserDict):
-
-    """Wrapper class for cache dictionary"""
-
-    def __init__(self, dct=None):
-        """Create a CacheWrapper with value @dct."""
-        IterableUserDict.__init__(self, dct)
-        self._nostep_cache = None
-        self._nostep_keys = None
-
-    def __eq__(self, cache):
-        """Returns True if the nostep caches are equals"""
-        if self.nostep_keys != cache.nostep_keys:
-            return False
-        return self.nostep_cache == cache.nostep_cache
-
-    @property
-    def nostep_keys(self):
-        """List of dictonnary keys without the step attribute.
-        The list is generated once when the method is called and not updated
-        afterward.
-        """
-        if self._nostep_keys is None:
-            self._nostep_keys = set(key.nostep_repr for key in self.data)
-        return self._nostep_keys
-
-    @property
-    def nostep_cache(self):
-        """Dictionary of DependencyNode and their dependencies,
-        without the step attribute.
-        The dictionary is generated once when the method is called for the
-        first time and not updated afterward.
-        """
-        if self._nostep_cache is None:
-            self._nostep_cache = {}
-            for (node, values) in self.data.iteritems():
-                self._nostep_cache.setdefault(node.nostep_repr, set()).update(
-                    set(val.nostep_repr for val in values))
-        return self._nostep_cache
-
-
-class DependencyDict(object):
-
-    """Internal structure for the DependencyGraph algorithm"""
-    __slots__ = ["_label", "_history", "_pending", "_cache"]
-
-    def __init__(self, label, history):
-        """Create a DependencyDict
-        @label: asm_label, current IRblock label
-        @history: list of DependencyDict
-        """
-        self._label = label
-        self._history = history
-        self._pending = set()
 
-        # DepNode -> set(DepNode)
-        self._cache = CacheWrapper()
+class DependencyState(object):
 
-    def __eq__(self, depdict):
-        if not isinstance(depdict, self.__class__):
-            return False
-        return (self._label == depdict.label and
-                self.cache == depdict.cache)
-
-    def __cmp__(self, depdict):
-        if not isinstance(depdict, self.__class__):
-            raise ValueError("Compare error %s != %s" % (self.__class__,
-                                                         depdict.__class__))
-        return cmp((self._label, self._cache, self._pending),
-                   (depdict.label, depdict.cache, depdict.pending))
-
-    def is_head(self, depnode):
-        """Return True iff @depnode is at the head of the current block
-        @depnode: DependencyNode instance"""
-        return (self.label == depnode.label and
-                depnode.line_nb == 0)
-
-    def copy(self):
-        "Return a copy of itself"
+    """
+    Store intermediate depnode states during DependencyGraph analysis
+    """
 
-        # Initialize
-        new_history = list(self.history)
-        depdict = DependencyDict(self.label, new_history)
+    def __init__(self, label, inputs, pending, line_nb=None):
+        self.label = label
+        self.inputs = inputs
+        self.history = [label]
+        self.pending = {k: set(v) for k, v in pending.iteritems()}
+        self.line_nb = line_nb
+        self.links = set()
 
-        # Copy values
-        for key, values in self.cache.iteritems():
-            depdict.cache[key] = set(values)
-        depdict.pending.update(self.pending)
+        # Init lazy elements
+        self._graph = None
 
-        return depdict
+    def __repr__(self):
+        return "<State: %r (%r) (%r)>" % (self.label,
+                                          self.pending,
+                                          self.links)
 
     def extend(self, label):
-        """Return a copy of itself, with itself in history and pending clean
-        @label: asm_label instance for the new DependencyDict's label
+        """Return a copy of itself, with itself in history
+        @label: asm_label instance for the new DependencyState's label
         """
-        depdict = DependencyDict(label, list(self.history) + [self])
-        for key, values in self.cache.iteritems():
-            depdict.cache[key] = set(values)
-        return depdict
+        new_state = self.__class__(label, self.inputs, self.pending)
+        new_state.links = set(self.links)
+        new_state.history = self.history + [label]
+        return new_state
 
-    def heads(self):
-        """Return an iterator on the list of heads as defined in 'is_head'"""
-        for key in self.cache:
-            if self.is_head(key):
-                yield key
+    def get_done_state(self):
+        """Returns immutable object representing current state"""
+        return (self.label, frozenset(self.links))
 
-    @property
-    def label(self):
-        "Label of the current block"
-        return self._label
-
-    @property
-    def history(self):
-        """List of DependencyDict needed to reach the current DependencyDict
-        The first is the oldest"""
-        return self._history
-
-    @property
-    def cache(self):
-        "Dictionary of DependencyNode and their dependencies"
-        return self._cache
-
-    @property
-    def pending(self):
-        """Dictionary of DependencyNode and their dependencies, waiting for
-        resolution"""
-        return self._pending
-
-    def _get_modifiers_in_cache(self, nodes_heads):
-        """Find modifier nodes in cache starting from @nodes_heads.
-        Returns new cache"""
-        # 'worklist_depnode' order is needed (depth first)
-        worklist_depnodes = list(nodes_heads)
-        # Temporary cache
-        cache = {}
-        # Partially resolved 'cache' elements
-        worklist = []
-
-        # Build worklist and cache for non modifiers
-        while worklist_depnodes:
-            depnode = worklist_depnodes.pop()
-            # Resolve node dependencies
-            if depnode in cache:
-                # Depnode previously resolved
-                continue
-
-            if depnode not in self._cache:
-                # Final node
-                if not depnode.modifier:
-                    cache[depnode] = []
-                continue
-
-            # Propagate to son
-            dependencies = self._cache[depnode]
-            for son in dependencies:
-                worklist_depnodes.append(son)
-            # Save partially resolved dependency
-            worklist.append((depnode, dependencies))
-
-        # Convert worklist to cache
-        while worklist:
-            depnode, dependencies = worklist.pop()
-            parallels = []
-            for node in dependencies:
-                if node.modifier:
-                    parallels.append([node])
-                else:
-                    parallels.append(cache[node])
-            out = set()
-            for parallel in itertools.product(*[p for p in parallels if p]):
-                out.update(parallel)
-            cache[depnode] = out
-
-        return cache
-
-    def clean_modifiers_in_cache(self, node_heads):
-        """Remove intermediary states (non modifier depnodes) in the internal
-        cache values"""
-
-        self._cache = CacheWrapper(self._get_modifiers_in_cache(node_heads))
-
-    def _build_depgraph(self, depnode):
-        """Recursively build the final list of DiGraph, and clean up unmodifier
-        nodes
-        @depnode: starting node
-        """
-
-        if depnode not in self._cache or \
-                not self._cache[depnode]:
-            # There is no dependency
-            graph = DiGraph()
-            graph.add_node(depnode)
-            return graph
-
-        # Recursion
-        dependencies = list(self._cache[depnode])
-
-        graphs = []
-        for sub_depnode in dependencies:
-            graphs.append(self._build_depgraph(sub_depnode))
-
-        # head(graphs[i]) == dependencies[i]
+    def as_graph(self):
+        """Generates a Digraph of dependencies"""
         graph = DiGraph()
-        graph.add_node(depnode)
-        for head in dependencies:
-            graph.add_uniq_edge(head, depnode)
-
-        for subgraphs in itertools.product(graphs):
-            for sourcegraph in subgraphs:
-                for node in sourcegraph.nodes():
-                    graph.add_node(node)
-                for edge in sourcegraph.edges():
-                    graph.add_uniq_edge(*edge)
-
-        # Update the running queue
+        for node_a, node_b in self.links:
+            if not node_b:
+                graph.add_node(node_a)
+            else:
+                graph.add_edge(node_a, node_b)
+        for parent, sons in self.pending.iteritems():
+            for son in sons:
+                graph.add_edge(parent, son)
         return graph
 
-    def as_graph(self, starting_nodes):
-        """Return a DiGraph corresponding to computed dependencies, with
-        @starting_nodes as leafs
-        @starting_nodes: set of DependencyNode instance
-        """
-
-        # Build subgraph for each starting_node
-        subgraphs = []
-        for starting_node in starting_nodes:
-            subgraphs.append(self._build_depgraph(starting_node))
-
-        # Merge subgraphs into a final DiGraph
-        graph = DiGraph()
-        for sourcegraph in subgraphs:
-            for node in sourcegraph.nodes():
-                graph.add_node(node)
-            for edge in sourcegraph.edges():
-                graph.add_uniq_edge(*edge)
-        return graph
+    @property
+    def graph(self):
+        """Returns a DiGraph instance representing the DependencyGraph"""
+        if self._graph is None:
+            self._graph = self.as_graph()
+        return self._graph
 
-    def filter_used_nodes(self, node_heads):
-        """Keep only depnodes which are in the path of @node_heads in the
-        internal cache
-        @node_heads: set of DependencyNode instance
+    def remove_pendings(self, nodes):
+        """Remove resolved @nodes"""
+        for node in nodes:
+            del self.pending[node]
+
+    def add_pendings(self, future_pending):
+        """Add @future_pending to the state"""
+        for node, depnodes in future_pending.iteritems():
+            if node not in self.pending:
+                self.pending[node] = depnodes
+            else:
+                self.pending[node].update(depnodes)
+
+    def link_element(self, element, line_nb):
+        """Link element to its dependencies
+        @element: the element to link
+        @line_nb: the element's line
         """
-        # Init
-        todo = set(node_heads)
-        used_nodes = set()
-
-        # Map
-        while todo:
-            node = todo.pop()
-            if node in used_nodes:
-                continue
-            used_nodes.add(node)
-            if not node in self._cache:
-                continue
-            for sub_node in self._cache[node]:
-                todo.add(sub_node)
 
-        # Remove unused elements
-        for key in list(self._cache.keys()):
-            if key not in used_nodes:
-                del self._cache[key]
-
-    def filter_unmodifier_loops(self, implicit, irdst):
-        """
-        Remove unmodifier node creating dependency loops over
-        pending elements in cache.
-        @implicit: boolean
-        @irdst: ExprId instance of IRDst register
+        depnode = DependencyNode(self.label, element, line_nb)
+        if not self.pending[element]:
+            # Create start node
+            self.links.add((depnode, None))
+        else:
+            # Link element to its known dependencies
+            for node_son in self.pending[element]:
+                self.links.add((depnode, node_son))
+
+    def link_dependencies(self, element, line_nb, dependencies,
+                          future_pending):
+        """Link unfollowed dependencies and create remaining pending elements.
+        @element: the element to link
+        @line_nb: the element's line
+        @dependencies: the element's dependencies
+        @future_pending: the future dependencies
         """
 
-        previous_dict = None
-        # Get pending nodes of last time the label was handled
-        for hist_dict in reversed(self.history):
-            if hist_dict.label == self.label:
-                previous_dict = hist_dict
-                break
-
-        if not previous_dict:
-            return
-
-        nostep_pending = [node.nostep_repr for node in self.pending]
-
-        to_remove = set()
-        for depnode in previous_dict.pending:
-            if (depnode.nostep_repr not in nostep_pending or
-                    implicit and depnode.element == irdst):
-                continue
-
-            to_remove.update(self._non_modifier_in_loop(depnode))
-
-            # Replace unused keys by previous ones
-            for key in to_remove:
-                if depnode.nostep_repr == key.nostep_repr:
-                    self._cache[depnode] = self._cache.get(key, set()).copy()
-                    self.pending.discard(key)
-                    self.pending.add(depnode)
-
-                    # Replace occurences of key to remove
-                    for dependencies in self._cache.itervalues():
-                        if key in dependencies:
-                            dependencies.remove(key)
-                            dependencies.add(depnode)
+        depnode = DependencyNode(self.label, element, line_nb)
 
-                if self._cache.has_key(key):
-                    del self._cache[key]
-
-    def _non_modifier_in_loop(self, depnode):
-        """
-        Walk from @depnode until a node with the same nostep_repr is
-        encountered.
-        Returns a set of unmodifier nodes met in the path if no modifier was
-        found.
-        Returns set() if there exist a modifier node on the path.
-        """
-        if not self.cache.has_key(depnode):
-            return set()
-        # Init
-        todo = set(self.cache[depnode])
-        unmodifier_nodes = []
-
-        # Map
-        while todo:
-            node = todo.pop()
-            if node in unmodifier_nodes:
+        # Update pending, add link to unfollowed nodes
+        for dependency in dependencies:
+            if not dependency.follow:
+                # Add non followed dependencies to the dependency graph
+                parent = DependencyNode(
+                    self.label, dependency.element, line_nb)
+                self.links.add((parent, depnode))
                 continue
-            if node.modifier:
-                return set()
-            unmodifier_nodes.append(node)
-            if not node in self._cache:
-                continue
-            if node.nostep_repr == depnode.nostep_repr:
-                unmodifier_nodes.append(node)
-                break
-
-            for sub_node in self._cache[node]:
-                todo.add(sub_node)
-
-        return unmodifier_nodes
+            # Create future pending between new dependency and the current
+            # element
+            future_pending.setdefault(dependency.element, set()).add(depnode)
 
 
-class DependencyResult(object):
+class DependencyResult(DependencyState):
 
     """Container and methods for DependencyGraph results"""
 
-    def __init__(self, ira, final_depdict, input_depnodes):
-        """Instance a DependencyResult
-        @ira: IRAnalysis instance
-        @final_depdict: DependencyDict instance
-        @input_depnodes: set of DependencyNode instance
-        """
-        # Store arguments
+    def __init__(self, state, ira):
+        self.label = state.label
+        self.inputs = state.inputs
+        self.history = state.history
+        self.pending = state.pending
+        self.line_nb = state.line_nb
+        self.links = state.links
         self._ira = ira
-        self._depdict = final_depdict
-        self._input_depnodes = input_depnodes
 
         # Init lazy elements
         self._graph = None
         self._has_loop = None
 
     @property
-    def graph(self):
-        """Returns a DiGraph instance representing the DependencyGraph"""
-        if self._graph is None:
-            self._graph = self._depdict.as_graph(self._input_depnodes)
-        return self._graph
-
-    @property
-    def history(self):
-        """List of depdict corresponding to the blocks encountered in the
-        analysis"""
-        return list(self._depdict.history) + [self._depdict]
-
-    @property
     def unresolved(self):
         """Set of nodes whose dependencies weren't found"""
-        return set(node.nostep_repr for node in self._depdict.pending
-                   if node.element != self._ira.IRDst)
+        return set(element for element in self.pending
+                   if element != self._ira.IRDst)
 
     @property
     def relevant_nodes(self):
-        """Set of nodes directly and indirectly influencing
-        @self.input_depnodes"""
+        """Set of nodes directly and indirectly influencing inputs"""
         output = set()
-        for depnodes in self._depdict.cache.values():
-            output.update(depnodes)
+        for node_a, node_b in self.links:
+            output.add(node_a)
+            if node_b is not None:
+                output.add(node_b)
         return output
 
     @property
     def relevant_labels(self):
-        """List of labels containing nodes influencing @self.input_depnodes.
-        The history order is preserved.
-        """
+        """List of labels containing nodes influencing inputs.
+        The history order is preserved."""
         # Get used labels
         used_labels = set(depnode.label for depnode in self.relevant_nodes)
 
         # Keep history order
         output = []
-        for label in [depdict.label for depdict in self.history]:
+        for label in self.history:
             if label in used_labels:
                 output.append(label)
 
         return output
 
     @property
-    def input(self):
-        """Set of DependencyGraph start nodes"""
-        return self._input_depnodes
-
-    @property
     def has_loop(self):
-        """True if current dictionary has a loop"""
+        """True iff there is at least one data dependency cycle (regarding
+        the associated depgraph)"""
         if self._has_loop is None:
-            self._has_loop = (len(self.relevant_labels) !=
-                              len(set(self.relevant_labels)))
+            self._has_loop = self.graph.has_loop()
         return self._has_loop
 
+    def irblock_slice(self, irb):
+        """Slice of the dependency nodes on the irblock @irb
+        @irb: irbloc instance
+        """
+
+        assignblks = []
+        line2elements = {}
+        for depnode in self.relevant_nodes:
+            if depnode.label != irb.label:
+                continue
+            line2elements.setdefault(depnode.line_nb,
+                                     set()).add(depnode.element)
+
+        for line_nb, elements in sorted(line2elements.iteritems()):
+            assignblk = AssignBlock()
+            for element in elements:
+                if element in irb.irs[line_nb]:
+                    # constants, label, ... are not in destination
+                    assignblk[element] = irb.irs[line_nb][element]
+            assignblks.append(assignblk)
+
+        return irbloc(irb.label, assignblks)
+
     def emul(self, ctx=None, step=False):
         """Symbolic execution of relevant nodes according to the history
-        Return the values of input nodes' elements
-        @ctx: (optional) Initial context as dictionary
+        Return the values of input nodes' elements
+        @ctx: (optional) Initial context as dictionary
         @step: (optional) Verbose execution
-
-        Warning: The emulation is not sound if the input nodes depend on loop
+        Warning: The emulation is not sound if the input nodes depend on loop
         variant.
         """
         # Init
         ctx_init = self._ira.arch.regs.regs_init
         if ctx is not None:
             ctx_init.update(ctx)
-        depnodes = self.relevant_nodes
-        affects = []
+        assignblks = []
 
         # Build a single affectation block according to history
         for label in self.relevant_labels[::-1]:
-            affected_lines = set(depnode.line_nb for depnode in depnodes
-                                 if depnode.label == label)
-            irs = self._ira.blocs[label].irs
-            for line_nb in sorted(affected_lines):
-                affects.append(irs[line_nb])
+            assignblks += self.irblock_slice(self._ira.blocs[label]).irs
 
         # Eval the block
         temp_label = asm_label("Temp")
         symb_exec = symbexec(self._ira, ctx_init)
-        symb_exec.emulbloc(irbloc(temp_label, affects), step=step)
+        symb_exec.emulbloc(irbloc(temp_label, assignblks), step=step)
 
         # Return only inputs values (others could be wrongs)
-        return {depnode.element: symb_exec.symbols[depnode.element]
-                for depnode in self.input}
+        return {element: symb_exec.symbols[element]
+                for element in self.inputs}
 
 
 class DependencyResultImplicit(DependencyResult):
@@ -575,57 +314,81 @@ class DependencyResultImplicit(DependencyResult):
     """Stand for a result of a DependencyGraph with implicit option
 
     Provide path constraints using the z3 solver"""
-    __slots__ = ["_ira", "_depdict", "_input_depnodes", "_graph",
-                 "_has_loop", "_solver"]
-
     # Z3 Solver instance
     _solver = None
 
+    unsat_expr = m2_expr.ExprAff(m2_expr.ExprInt(0, 1),
+                                 m2_expr.ExprInt(1, 1))
+
+    def _gen_path_constraints(self, translator, expr, expected):
+        """Generate path constraint from @expr. Handle special case with
+        generated labels
+        """
+        out = []
+        expected_is_label = expr_is_label(expected)
+        for consval in possible_values(expr):
+            if (expected_is_label and
+                    consval.value != expected):
+                continue
+            if (not expected_is_label and
+                    expr_is_label(consval.value)):
+                continue
+
+            conds = z3.And(*[translator.from_expr(cond.to_constraint())
+                             for cond in consval.constraints])
+            if expected != consval.value:
+                conds = z3.And(conds,
+                               translator.from_expr(
+                                   m2_expr.ExprAff(consval.value,
+                                                   expected)))
+            out.append(conds)
+
+        if out:
+            conds = z3.Or(*out)
+        else:
+            # Ex: expr: lblgen1, expected: 0x1234
+            # -> Avoid inconsistent solution lblgen1 = 0x1234
+            conds = translator.from_expr(self.unsat_expr)
+        return conds
+
     def emul(self, ctx=None, step=False):
         # Init
         ctx_init = self._ira.arch.regs.regs_init
         if ctx is not None:
             ctx_init.update(ctx)
-        depnodes = self.relevant_nodes
         solver = z3.Solver()
         symb_exec = symbexec(self._ira, ctx_init)
-        temp_label = asm_label("Temp")
-        history = self.relevant_labels[::-1]
+        history = self.history[::-1]
         history_size = len(history)
+        translator = Translator.to_language("z3")
+        size = self._ira.IRDst.size
 
         for hist_nb, label in enumerate(history):
-            # Build block with relevant lines only
-            affected_lines = set(depnode.line_nb for depnode in depnodes
-                                 if depnode.label == label)
-            irs = self._ira.blocs[label].irs
-            affects = []
-
-            for line_nb in sorted(affected_lines):
-                affects.append(irs[line_nb])
+            irb = self.irblock_slice(self._ira.blocs[label])
 
             # Emul the block and get back destination
-            dst = symb_exec.emulbloc(irbloc(temp_label, affects), step=step)
+            dst = symb_exec.emulbloc(irb, step=step)
 
             # Add constraint
             if hist_nb + 1 < history_size:
                 next_label = history[hist_nb + 1]
-                expected = symb_exec.eval_expr(m2_expr.ExprId(next_label, 32))
-                constraint = m2_expr.ExprAff(dst, expected)
-                solver.add(Translator.to_language("z3").from_expr(constraint))
-
+                expected = symb_exec.eval_expr(m2_expr.ExprId(next_label,
+                                                              size))
+                solver.add(
+                    self._gen_path_constraints(translator, dst, expected))
         # Save the solver
         self._solver = solver
 
         # Return only inputs values (others could be wrongs)
-        return {depnode.element: symb_exec.symbols[depnode.element]
-                for depnode in self.input}
+        return {element: symb_exec.eval_expr(element)
+                for element in self.inputs}
 
     @property
     def is_satisfiable(self):
         """Return True iff the solution path admits at least one solution
         PRE: 'emul'
         """
-        return self._solver.check().r > 0
+        return self._solver.check() == z3.sat
 
     @property
     def constraints(self):
@@ -644,24 +407,23 @@ class FollowExpr(object):
         self.follow = follow
         self.element = element
 
+    def __repr__(self):
+        return '%s(%r, %r)' % (self.__class__.__name__, self.follow, self.element)
+
     @staticmethod
-    def to_depnodes(follow_exprs, label, line, modifier, step):
+    def to_depnodes(follow_exprs, label, line):
         """Build a set of FollowExpr(DependencyNode) from the @follow_exprs set
         of FollowExpr
         @follow_exprs: set of FollowExpr
         @label: asm_label instance
         @line: integer
-        @modifier: boolean
-        @step: integer
         """
         dependencies = set()
         for follow_expr in follow_exprs:
             dependencies.add(FollowExpr(follow_expr.follow,
                                         DependencyNode(label,
                                                        follow_expr.element,
-                                                       line,
-                                                       step,
-                                                       modifier=modifier)))
+                                                       line)))
         return dependencies
 
     @staticmethod
@@ -686,9 +448,10 @@ class DependencyGraph(object):
     def __init__(self, ira, implicit=False, apply_simp=True, follow_mem=True,
                  follow_call=True):
         """Create a DependencyGraph linked to @ira
+        The IRA graph must have been computed
 
         @ira: IRAnalysis instance
-        @implicit: (optional) Imply implicit dependencies
+        @implicit: (optional) Track IRDst for each block in the resulting path
 
         Following arguments define filters used to generate dependencies
         @apply_simp: (optional) Apply expr_simp
@@ -698,8 +461,6 @@ class DependencyGraph(object):
         # Init
         self._ira = ira
         self._implicit = implicit
-        self._step_counter = itertools.count()
-        self._current_step = next(self._step_counter)
 
         # Create callback filters. The order is relevant.
         self._cb_follow = []
@@ -710,21 +471,6 @@ class DependencyGraph(object):
                                                                 follow_call))
         self._cb_follow.append(self._follow_nolabel)
 
-    @property
-    def step_counter(self):
-        "Iteration counter"
-        return self._step_counter
-
-    @property
-    def current_step(self):
-        "Current value of iteration counter"
-        return self._current_step
-
-    def inc_step(self):
-        "Increment and return the current step"
-        self._current_step = next(self._step_counter)
-        return self._current_step
-
     @staticmethod
     def _follow_simp_expr(exprs):
         """Simplify expression so avoid tracking useless elements,
@@ -748,10 +494,12 @@ class DependencyGraph(object):
             follow.add(expr)
         elif isinstance(expr, m2_expr.ExprInt):
             nofollow.add(expr)
+        elif isinstance(expr, m2_expr.ExprMem):
+            follow.add(expr)
         return expr
 
     @staticmethod
-    def follow_expr(expr, follow, nofollow, follow_mem=False, follow_call=False):
+    def follow_expr(expr, _, nofollow, follow_mem=False, follow_call=False):
         """Returns True if we must visit sub expressions.
         @expr: expression to browse
         @follow: set of nodes to follow
@@ -785,7 +533,7 @@ class DependencyGraph(object):
         """Do not follow labels"""
         follow = set()
         for expr in exprs:
-            if not expr_is_label(expr):
+            if not expr_is_int_or_label(expr):
                 follow.add(expr)
 
         return follow, set()
@@ -804,167 +552,38 @@ class DependencyGraph(object):
         out.update(set(FollowExpr(False, expr) for expr in nofollow))
         return out
 
-    def _get_irs(self, label):
-        "Return the irs associated to @label"
-        return self._ira.blocs[label].irs
-
-    def _get_affblock(self, depnode):
-        """Return the list of ExprAff associtiated to @depnode.
-        LINE_NB must be > 0"""
-        return self._get_irs(depnode.label)[depnode.line_nb - 1]
-
-    def _direct_depnode_dependencies(self, depnode):
-        """Compute and return the dependencies involved by @depnode,
-        over the instruction @depnode.line_,.
-        Return a set of FollowExpr"""
-
-        if isinstance(depnode.element, m2_expr.ExprInt):
-            # A constant does not have any dependency
-            output = set()
-
-        elif depnode.line_nb == 0:
-            # Beginning of a block, inter-block resolving is not done here
-            output = set()
-
-        else:
-            # Intra-block resolving
-            # Get dependencies
-            read = set()
-            modifier = False
-
-            assignblk = self._get_affblock(depnode)
-            if depnode.element in assignblk:
-                elements = self._follow_apply_cb(assignblk[depnode.element])
-                read.update(elements)
-                modifier = True
-
-            # If it's not a modifier affblock, reinject current element
-            if not modifier:
-                read = set([FollowExpr(True, depnode.element)])
-
-            # Build output
-            output = FollowExpr.to_depnodes(read, depnode.label,
-                                            depnode.line_nb - 1, modifier,
-                                            self.current_step)
-        return output
-
-    def _resolve_intrablock_dep(self, depdict):
-        """Resolve the dependencies of nodes in @depdict.pending inside
-        @depdict.label until a fixed point is reached.
-        @depdict: DependencyDict to update"""
-
-        # Prepare the work list
-        todo = set(depdict.pending)
-
-        # Pending states will be handled
-        depdict.pending.clear()
-
-        while todo:
-            depnode = todo.pop()
-            if isinstance(depnode.element, m2_expr.ExprInt):
-                # A constant does not have any dependency
+    def _track_exprs(self, state, assignblk, line_nb):
+        """Track pending expression in an assignblock"""
+        future_pending = {}
+        node_resolved = set()
+        for dst, src in assignblk.iteritems():
+            # Only track pending
+            if dst not in state.pending:
                 continue
-
-            if depdict.is_head(depnode):
-                depdict.pending.add(depnode)
-                # A head cannot have dependencies inside the current IRblock
+            # Track IRDst in implicit mode only
+            if dst == self._ira.IRDst and not self._implicit:
                 continue
+            assert dst not in node_resolved
+            node_resolved.add(dst)
+            dependencies = self._follow_apply_cb(src)
 
-            # Find dependency of the current depnode
-            sub_depnodes = self._direct_depnode_dependencies(depnode)
-            depdict.cache[depnode] = FollowExpr.extract_depnodes(sub_depnodes)
+            state.link_element(dst, line_nb)
+            state.link_dependencies(dst, line_nb,
+                                    dependencies, future_pending)
 
-            # Add to the worklist its dependencies
-            todo.update(FollowExpr.extract_depnodes(sub_depnodes,
-                                                    only_follow=True))
+        # Update pending nodes
+        state.remove_pendings(node_resolved)
+        state.add_pendings(future_pending)
 
-        # Pending states will be overriden in cache
-        for depnode in depdict.pending:
-            try:
-                del depdict.cache[depnode]
-            except KeyError:
-                continue
+    def _compute_intrablock(self, state):
+        """Follow dependencies tracked in @state in the current irbloc
+        @state: instance of DependencyState"""
 
-    def _get_previousblocks(self, label):
-        """Return an iterator on predecessors blocks of @label, with their
-        lengths"""
-        preds = self._ira.graph.predecessors_iter(label)
-        for pred_label in preds:
-            length = len(self._get_irs(pred_label))
-            yield (pred_label, length)
-
-    def _compute_interblock_dep(self, depnodes, heads):
-        """Create a DependencyDict from @depnodes, and propagate
-        DependencyDicts through all blocs
-        """
-        # Create a DependencyDict which will only contain our depnodes
-        current_depdict = DependencyDict(list(depnodes)[0].label, [])
-        current_depdict.pending.update(depnodes)
+        irb = self._ira.blocs[state.label]
+        line_nb = len(irb.irs) if state.line_nb is None else state.line_nb
 
-        # Init the work list
-        done = {}
-        todo = deque([current_depdict])
-
-        while todo:
-            depdict = todo.popleft()
-
-            # Update the dependencydict until fixed point is reached
-            self._resolve_intrablock_dep(depdict)
-            self.inc_step()
-
-            # Clean irrelevant path
-            depdict.filter_unmodifier_loops(self._implicit, self._ira.IRDst)
-
-            # Avoid infinite loops
-            label = depdict.label
-            if depdict in done.get(label, []):
-                continue
-            done.setdefault(label, []).append(depdict)
-
-            # No more dependencies
-            if len(depdict.pending) == 0:
-                yield depdict.copy()
-                continue
-
-            # Has a predecessor ?
-            is_final = True
-
-            # Propagate the DependencyDict to all parents
-            for label, irb_len in self._get_previousblocks(depdict.label):
-                is_final = False
-
-                # Duplicate the DependencyDict
-                new_depdict = depdict.extend(label)
-
-                if self._implicit:
-                    # Implicit dependencies: IRDst will be link with heads
-                    implicit_depnode = DependencyNode(label, self._ira.IRDst,
-                                                      irb_len,
-                                                      self.current_step,
-                                                      modifier=False)
-
-                # Create links between DependencyDict
-                for depnode_head in depdict.pending:
-                    # Follow the head element in the parent
-                    new_depnode = DependencyNode(label, depnode_head.element,
-                                                 irb_len,
-                                                 self.current_step)
-                    # The new node has to be analysed
-                    new_depdict.cache[depnode_head] = set([new_depnode])
-                    new_depdict.pending.add(new_depnode)
-
-                    # Handle implicit dependencies
-                    if self._implicit:
-                        new_depdict.cache[depnode_head].add(implicit_depnode)
-                        new_depdict.pending.add(implicit_depnode)
-
-                # Manage the new element
-                todo.append(new_depdict)
-
-            # Return the node if it's a final one, ie. it's a head (in graph
-            # or defined by caller)
-            if is_final or depdict.label in heads:
-                yield depdict.copy()
+        for cur_line_nb, assignblk in reversed(list(enumerate(irb.irs[:line_nb]))):
+            self._track_exprs(state, assignblk, cur_line_nb)
 
     def get(self, label, elements, line_nb, heads):
         """Compute the dependencies of @elements at line number @line_nb in
@@ -976,32 +595,34 @@ class DependencyGraph(object):
         @heads: set of asm_label instances
         Return an iterator on DiGraph(DependencyNode)
         """
-
         # Init the algorithm
-        input_depnodes = set()
-        for element in elements:
-            input_depnodes.add(DependencyNode(label, element, line_nb,
-                                              self.current_step))
+        pending = {element: set() for element in elements}
+        state = DependencyState(label, elements, pending, line_nb)
+        todo = set([state])
+        done = set()
+        dpResultcls = DependencyResultImplicit if self._implicit else DependencyResult
 
-        # Compute final depdicts
-        depdicts = self._compute_interblock_dep(input_depnodes, heads)
-
-        # Unify solutions
-        unified = []
-        cls_res = DependencyResultImplicit if self._implicit else \
-            DependencyResult
-
-        for final_depdict in depdicts:
-            # Keep only relevant nodes
-            final_depdict.clean_modifiers_in_cache(input_depnodes)
-            final_depdict.filter_used_nodes(input_depnodes)
-
-            # Remove duplicate solutions
-            if final_depdict not in unified:
-                unified.append(final_depdict)
-
-                # Return solutions as DiGraph
-                yield cls_res(self._ira, final_depdict, input_depnodes)
+        while todo:
+            state = todo.pop()
+            self._compute_intrablock(state)
+            done_state = state.get_done_state()
+            if done_state in done:
+                continue
+            done.add(done_state)
+            if (not state.pending or
+                    state.label in heads or
+                    not self._ira.graph.predecessors(state.label)):
+                yield dpResultcls(state, self._ira)
+                if not state.pending:
+                    continue
+
+            if self._implicit:
+                # Force IRDst to be tracked, except in the input block
+                state.pending[self._ira.IRDst] = set()
+
+            # Propagate state to parents
+            for pred in self._ira.graph.predecessors_iter(state.label):
+                todo.add(state.extend(pred))
 
     def get_from_depnodes(self, depnodes, heads):
         """Alias for the get() method. Use the attributes of @depnodes as
@@ -1013,12 +634,3 @@ class DependencyGraph(object):
         lead = list(depnodes)[0]
         elements = set(depnode.element for depnode in depnodes)
         return self.get(lead.label, elements, lead.line_nb, heads)
-
-    def get_from_end(self, label, elements, heads):
-        """Alias for the get() method. Consider that the dependency is asked at
-        the end of the block named @label.
-        @label: asm_label instance
-        @elements: set of Expr instances
-        @heads: set of asm_label instances
-        """
-        return self.get(label, elements, len(self._get_irs(label)), heads)
diff --git a/miasm2/arch/x86/sem.py b/miasm2/arch/x86/sem.py
index cd456f7b..f66570a7 100644
--- a/miasm2/arch/x86/sem.py
+++ b/miasm2/arch/x86/sem.py
@@ -597,22 +597,6 @@ def shrd(ir, instr, a, b, c):
     return _shift_tpl(">>>", ir, instr, a, b, c, "<<<")
 
 
-def sal(ir, instr, a, b):
-    e = []
-    shifter = get_shift(a, b)
-    c = m2_expr.ExprOp('a<<', a, shifter)
-    new_cf = (a >> (m2_expr.ExprInt_from(a, a.size) - shifter))[:1]
-    e.append(m2_expr.ExprAff(cf, m2_expr.ExprCond(shifter,
-                                                  new_cf,
-                                                  cf)
-                             )
-             )
-    e += update_flag_znp(c)
-    e.append(m2_expr.ExprAff(of, c.msb() ^ new_cf))
-    e.append(m2_expr.ExprAff(a, c))
-    return e, []
-
-
 def shl(ir, instr, a, b):
     return _shift_tpl("<<", ir, instr, a, b, left=True)
 
@@ -1080,51 +1064,45 @@ def popfw(ir, instr):
         m2_expr.ExprAff(mRSP[instr.mode], mRSP[instr.mode] + m2_expr.ExprInt(2, mRSP[instr.mode].size)))
     return e, []
 
+pa_regs = [
+    mRAX, mRCX,
+    mRDX, mRBX,
+    mRSP, mRBP,
+    mRSI, mRDI
+]
 
-def pushad(ir, instr):
+def pusha_gen(ir, instr, size):
     e = []
-    s = instr.v_opmode()
-    opmode, admode = s, instr.v_admode()
-    if not s in [16, 32, 64]:
-        raise ValueError('bad size stacker!')
-
-    regs = [
-        mRAX[instr.mode][:s], mRCX[instr.mode][
-            :s], mRDX[instr.mode][:s], mRBX[instr.mode][:s],
-        mRSP[instr.mode][:s], mRBP[instr.mode][:s],
-        mRSI[instr.mode][:s], mRDI[instr.mode][:s]]
-
-    for i in xrange(len(regs)):
-        c = mRSP[instr.mode][:s] + m2_expr.ExprInt(-(s / 8) * (i + 1), s)
-        e.append(m2_expr.ExprAff(m2_expr.ExprMem(c, s), regs[i]))
-    e.append(m2_expr.ExprAff(mRSP[instr.mode][:s], c))
+    for i, reg in enumerate(pa_regs):
+        stk_ptr = mRSP[instr.mode] + m2_expr.ExprInt(-(reg[size].size / 8) * (i + 1), instr.mode)
+        e.append(m2_expr.ExprAff(m2_expr.ExprMem(stk_ptr, reg[size].size), reg[size]))
+    e.append(m2_expr.ExprAff(mRSP[instr.mode], stk_ptr))
     return e, []
 
+def pusha(ir, instr):
+    return pusha_gen(ir, instr, 16)
 
-def popad(ir, instr):
+def pushad(ir, instr):
+    return pusha_gen(ir, instr, 32)
+
+def popa_gen(ir, instr, size):
     e = []
-    s = instr.v_opmode()
-    opmode, admode = s, instr.v_admode()
-    if not s in [16, 32, 64]:
-        raise ValueError('bad size stacker!')
-    regs = [
-        mRAX[instr.mode][:s], mRCX[instr.mode][
-            :s], mRDX[instr.mode][:s], mRBX[instr.mode][:s],
-        mRSP[instr.mode][:s], mRBP[instr.mode][:s],
-        mRSI[instr.mode][:s], mRDI[instr.mode][:s]]
-    myesp = mRSP[instr.mode][:s]
-    regs.reverse()
-    for i in xrange(len(regs)):
-        if regs[i] == myesp:
+    for i, reg in enumerate(reversed(pa_regs)):
+        if reg == mRSP:
             continue
-        c = myesp + m2_expr.ExprInt_from(myesp, ((s / 8) * i))
-        e.append(m2_expr.ExprAff(regs[i], m2_expr.ExprMem(c, s)))
+        stk_ptr = mRSP[instr.mode] + m2_expr.ExprInt((reg[size].size / 8) * i, instr.mode)
+        e.append(m2_expr.ExprAff(reg[size], m2_expr.ExprMem(stk_ptr, instr.mode)))
 
-    c = myesp + m2_expr.ExprInt_from(myesp, ((s / 8) * (i + 1)))
-    e.append(m2_expr.ExprAff(myesp, c))
+    stk_ptr = mRSP[instr.mode] + m2_expr.ExprInt((instr.mode / 8) * (i + 1), instr.mode)
+    e.append(m2_expr.ExprAff(mRSP[instr.mode], stk_ptr))
 
     return e, []
 
+def popa(ir, instr):
+    return popa_gen(ir, instr, 16)
+
+def popad(ir, instr):
+    return popa_gen(ir, instr, 32)
 
 def call(ir, instr, dst):
     e = []
@@ -3623,7 +3601,7 @@ def ps_rl_ll(ir, instr, a, b, op, size):
     e_do = []
     slices = []
     for i in xrange(0, a.size, size):
-        slices.append((m2_expr.ExprOp(op,a[i:i + size], count[:size]),
+        slices.append((m2_expr.ExprOp(op, a[i:i + size], count[:size]),
                        i, i + size))
     e.append(m2_expr.ExprAff(a[0:a.size], m2_expr.ExprCompose(slices)))
     e_do.append(m2_expr.ExprAff(ir.IRDst, lbl_next))
@@ -3995,7 +3973,7 @@ mnemo_func = {'mov': mov,
               'rcr': rcr,
               'sar': sar,
               'shr': shr,
-              'sal': sal,
+              'sal': shl,
               'shl': shl,
               'shld': shld,
               'cmc': cmc,
@@ -4059,10 +4037,10 @@ mnemo_func = {'mov': mov,
               'popfd': popfd,
               'popfq': popfd,
               'popfw': popfw,
+              'pusha': pusha,
               'pushad': pushad,
-              'pusha': pushad,
               'popad': popad,
-              'popa': popad,
+              'popa': popa,
               'call': call,
               'ret': ret,
               'retf': retf,
diff --git a/miasm2/core/asmbloc.py b/miasm2/core/asmbloc.py
index e4f1db68..54cd51cf 100644
--- a/miasm2/core/asmbloc.py
+++ b/miasm2/core/asmbloc.py
@@ -410,183 +410,6 @@ class asm_symbol_pool:
         return label
 
 
-def dis_bloc(mnemo, pool_bin, label, offset, job_done, symbol_pool,
-             dont_dis=None, split_dis=None, follow_call=False,
-             dontdis_retcall=False, lines_wd=None, dis_bloc_callback=None,
-             dont_dis_nulstart_bloc=False, attrib=None):
-    # pool_bin.offset = offset
-    if dont_dis is None:
-        dont_dis = []
-    if split_dis is None:
-        split_dis = []
-    if attrib is None:
-        attrib = {}
-    lines_cpt = 0
-    in_delayslot = False
-    delayslot_count = mnemo.delayslot
-    offsets_to_dis = set()
-    add_next_offset = False
-    cur_block = asm_bloc(label)
-    log_asmbloc.debug("dis at %X", int(offset))
-    while not in_delayslot or delayslot_count > 0:
-        if in_delayslot:
-            delayslot_count -= 1
-
-        if offset in dont_dis:
-            if not cur_block.lines:
-                job_done.add(offset)
-                # Block is empty -> bad block
-                cur_block = asm_block_bad(label, errno=2)
-            else:
-                # Block is not empty, stop the desassembly pass and add a
-                # constraint to the next block
-                cur_block.add_cst(offset, asm_constraint.c_next, symbol_pool)
-            break
-
-        if lines_cpt > 0 and offset in split_dis:
-            cur_block.add_cst(offset, asm_constraint.c_next, symbol_pool)
-            offsets_to_dis.add(offset)
-            break
-
-        lines_cpt += 1
-        if lines_wd is not None and lines_cpt > lines_wd:
-            # log_asmbloc.warning( "lines watchdog reached at %X"%int(offset))
-            break
-
-        if offset in job_done:
-            cur_block.add_cst(offset, asm_constraint.c_next, symbol_pool)
-            break
-
-        off_i = offset
-        try:
-            instr = mnemo.dis(pool_bin, attrib, offset)
-        except (Disasm_Exception, IOError), e:
-            log_asmbloc.warning(e)
-            instr = None
-
-        if instr is None:
-            log_asmbloc.warning("cannot disasm at %X", int(off_i))
-            if not cur_block.lines:
-                job_done.add(offset)
-                # Block is empty -> bad block
-                cur_block = asm_block_bad(label, errno=0)
-            else:
-                # Block is not empty, stop the desassembly pass and add a
-                # constraint to the next block
-                cur_block.add_cst(off_i, asm_constraint.c_next, symbol_pool)
-            break
-
-        # XXX TODO nul start block option
-        if dont_dis_nulstart_bloc and instr.b.count('\x00') == instr.l:
-            log_asmbloc.warning("reach nul instr at %X", int(off_i))
-            if not cur_block.lines:
-                # Block is empty -> bad block
-                cur_block = asm_block_bad(label, errno=1)
-            else:
-                # Block is not empty, stop the desassembly pass and add a
-                # constraint to the next block
-                cur_block.add_cst(off_i, asm_constraint.c_next, symbol_pool)
-            break
-
-        # special case: flow graph modificator in delayslot
-        if in_delayslot and instr and (instr.splitflow() or instr.breakflow()):
-            add_next_offset = True
-            break
-
-        job_done.add(offset)
-        log_asmbloc.debug("dis at %X", int(offset))
-
-        offset += instr.l
-        log_asmbloc.debug(instr)
-        log_asmbloc.debug(instr.args)
-
-        cur_block.addline(instr)
-        if not instr.breakflow():
-            continue
-        # test split
-        if instr.splitflow() and not (instr.is_subcall() and dontdis_retcall):
-            add_next_offset = True
-            # cur_bloc.add_cst(n, asm_constraint.c_next, symbol_pool)
-            pass
-        if instr.dstflow():
-            instr.dstflow2label(symbol_pool)
-            dst = instr.getdstflow(symbol_pool)
-            dstn = []
-            for d in dst:
-                if isinstance(d, m2_expr.ExprId) and \
-                        isinstance(d.name, asm_label):
-                    dstn.append(d.name)
-            dst = dstn
-            if (not instr.is_subcall()) or follow_call:
-                cur_block.bto.update(
-                    [asm_constraint(x, asm_constraint.c_to) for x in dst])
-
-        # get in delayslot mode
-        in_delayslot = True
-        delayslot_count = instr.delayslot
-
-    for c in cur_block.bto:
-        offsets_to_dis.add(c.label.offset)
-
-    if add_next_offset:
-        cur_block.add_cst(offset, asm_constraint.c_next, symbol_pool)
-        offsets_to_dis.add(offset)
-
-    # Fix multiple constraints
-    cur_block.fix_constraints()
-
-    if dis_bloc_callback is not None:
-        dis_bloc_callback(mn=mnemo, attrib=attrib, pool_bin=pool_bin,
-                          cur_bloc=cur_block, offsets_to_dis=offsets_to_dis,
-                          symbol_pool=symbol_pool)
-    # print 'dst', [hex(x) for x in offsets_to_dis]
-    return cur_block, offsets_to_dis
-
-
-def dis_bloc_all(mnemo, pool_bin, offset, job_done, symbol_pool, dont_dis=None,
-                 split_dis=None, follow_call=False, dontdis_retcall=False,
-                 blocs_wd=None, lines_wd=None, blocs=None,
-                 dis_bloc_callback=None, dont_dis_nulstart_bloc=False,
-                 attrib=None):
-    log_asmbloc.info("dis bloc all")
-    if dont_dis is None:
-        dont_dis = []
-    if split_dis is None:
-        split_dis = []
-    if attrib is None:
-        attrib = {}
-    if blocs is None:
-        blocs = AsmCFG()
-    todo = [offset]
-
-    bloc_cpt = 0
-    while len(todo):
-        bloc_cpt += 1
-        if blocs_wd is not None and bloc_cpt > blocs_wd:
-            log_asmbloc.debug("blocs watchdog reached at %X", int(offset))
-            break
-
-        n = int(todo.pop(0))
-        if n is None:
-            continue
-        if n in job_done:
-            continue
-        label = symbol_pool.getby_offset_create(n)
-        cur_block, nexts = dis_bloc(mnemo, pool_bin, label, n, job_done,
-                                    symbol_pool, dont_dis, split_dis,
-                                    follow_call, dontdis_retcall,
-                                    dis_bloc_callback=dis_bloc_callback,
-                                    lines_wd=lines_wd,
-                                    dont_dis_nulstart_bloc=dont_dis_nulstart_bloc,
-                                    attrib=attrib)
-        todo += nexts
-        blocs.add_node(cur_block)
-
-    blocs.apply_splitting(symbol_pool, dis_block_callback=dis_bloc_callback,
-                          mn=mnemo, attrib=attrib, pool_bin=pool_bin)
-    return blocs
-
-
 class AsmCFG(DiGraph):
 
     """Directed graph standing for a ASM Control Flow Graph with:
@@ -946,6 +769,14 @@ class AsmCFG(DiGraph):
                     log_asmbloc.error("Cannot split %x!!", off)
                     continue
 
+                # Remove pending from cur_block
+                # Links from new_b will be generated in rebuild_edges
+                for dst in new_b.bto:
+                    if dst.label not in self.pendings:
+                        continue
+                    self.pendings[dst.label] = set(pending for pending in self.pendings[dst.label]
+                                                   if pending.waiter != cur_block)
+
                 # The new block destinations may need to be disassembled
                 if dis_block_callback:
                     offsets_to_dis = set(constraint.label.offset
@@ -1425,11 +1256,50 @@ def asm_resolve_final(mnemo, blocks, symbol_pool, dst_interval=None):
 
 class disasmEngine(object):
 
-    def __init__(self, arch, attrib, bs=None, **kwargs):
+    """Disassembly engine, taking care of disassembler options and multi-block
+    strategy.
+
+    Engine options:
+
+    + Object supporting membership test (offset in ..)
+     - dont_dis: stop the current disassembly branch if reached
+     - split_dis: force a basic block end if reached,
+                  with a next constraint on its successor
+
+    + On/Off
+     - follow_call: recursively disassemble CALL destinations
+     - dontdis_retcall: stop on CALL return addresses
+     - dont_dis_nulstart_bloc: stop if a block begins with a few \x00
+
+    + Number
+     - lines_wd: maximum block's size (in number of instruction)
+     - blocs_wd: maximum number of distinct disassembled block
+
+    + callback(arch, attrib, pool_bin, cur_bloc, offsets_to_dis,
+               symbol_pool)
+     - dis_bloc_callback: callback after each new disassembled block
+
+    The engine also tracks already handled blocks, for performance and to avoid
+    infinite cycling.
+    Addresses of disassembled blocks are in the attribute `job_done`.
+    To force a new disassembly, the targeted offset must first be removed from
+    this structure.
+    """
+
+    def __init__(self, arch, attrib, bin_stream, **kwargs):
+        """Instantiate a new disassembly engine
+        @arch: targeted architecture
+        @attrib: architecture attribute
+        @bin_stream: bytes source
+        @kwargs: (optional) custom options
+        """
         self.arch = arch
         self.attrib = attrib
-        self.bs = bs
+        self.bin_stream = bin_stream
         self.symbol_pool = asm_symbol_pool()
+        self.job_done = set()
+
+        # Setup options
         self.dont_dis = []
         self.split_dis = []
         self.follow_call = False
@@ -1438,33 +1308,179 @@ class disasmEngine(object):
         self.blocs_wd = None
         self.dis_bloc_callback = None
         self.dont_dis_nulstart_bloc = False
-        self.job_done = set()
+
+        # Override options if needed
         self.__dict__.update(kwargs)
 
-    def dis_bloc(self, offset):
+    def _dis_bloc(self, offset):
+        """Disassemble the block at offset @offset
+        Return the created asm_bloc and future offsets to disassemble
+        """
+
+        lines_cpt = 0
+        in_delayslot = False
+        delayslot_count = self.arch.delayslot
+        offsets_to_dis = set()
+        add_next_offset = False
         label = self.symbol_pool.getby_offset_create(offset)
-        current_block, _ = dis_bloc(self.arch, self.bs, label, offset,
-                                    self.job_done, self.symbol_pool,
-                                    dont_dis=self.dont_dis,
-                                    split_dis=self.split_dis,
-                                    follow_call=self.follow_call,
-                                    dontdis_retcall=self.dontdis_retcall,
-                                    lines_wd=self.lines_wd,
-                                    dis_bloc_callback=self.dis_bloc_callback,
-                                    dont_dis_nulstart_bloc=self.dont_dis_nulstart_bloc,
-                                    attrib=self.attrib)
+        cur_block = asm_bloc(label)
+        log_asmbloc.debug("dis at %X", int(offset))
+        while not in_delayslot or delayslot_count > 0:
+            if in_delayslot:
+                delayslot_count -= 1
+
+            if offset in self.dont_dis:
+                if not cur_block.lines:
+                    self.job_done.add(offset)
+                    # Block is empty -> bad block
+                    cur_block = asm_block_bad(label, errno=2)
+                else:
+                    # Block is not empty, stop the disassembly pass and add a
+                    # constraint to the next block
+                    cur_block.add_cst(offset, asm_constraint.c_next,
+                                      self.symbol_pool)
+                break
+
+            if lines_cpt > 0 and offset in self.split_dis:
+                cur_block.add_cst(offset, asm_constraint.c_next,
+                                  self.symbol_pool)
+                offsets_to_dis.add(offset)
+                break
+
+            lines_cpt += 1
+            if self.lines_wd is not None and lines_cpt > self.lines_wd:
+                log_asmbloc.debug("lines watchdog reached at %X", int(offset))
+                break
+
+            if offset in self.job_done:
+                cur_block.add_cst(offset, asm_constraint.c_next,
+                                  self.symbol_pool)
+                break
+
+            off_i = offset
+            try:
+                instr = self.arch.dis(self.bin_stream, self.attrib, offset)
+            except (Disasm_Exception, IOError), e:
+                log_asmbloc.warning(e)
+                instr = None
+
+            if instr is None:
+                log_asmbloc.warning("cannot disasm at %X", int(off_i))
+                if not cur_block.lines:
+                    self.job_done.add(offset)
+                    # Block is empty -> bad block
+                    cur_block = asm_block_bad(label, errno=0)
+                else:
+                    # Block is not empty, stop the disassembly pass and add a
+                    # constraint to the next block
+                    cur_block.add_cst(off_i, asm_constraint.c_next,
+                                      self.symbol_pool)
+                break
+
+            # XXX TODO nul start block option
+            if self.dont_dis_nulstart_bloc and instr.b.count('\x00') == instr.l:
+                log_asmbloc.warning("reach nul instr at %X", int(off_i))
+                if not cur_block.lines:
+                    # Block is empty -> bad block
+                    cur_block = asm_block_bad(label, errno=1)
+                else:
+                    # Block is not empty, stop the disassembly pass and add a
+                    # constraint to the next block
+                    cur_block.add_cst(off_i, asm_constraint.c_next,
+                                      self.symbol_pool)
+                break
+
+            # special case: flow graph modificator in delayslot
+            if in_delayslot and instr and (instr.splitflow() or instr.breakflow()):
+                add_next_offset = True
+                break
+
+            self.job_done.add(offset)
+            log_asmbloc.debug("dis at %X", int(offset))
+
+            offset += instr.l
+            log_asmbloc.debug(instr)
+            log_asmbloc.debug(instr.args)
+
+            cur_block.addline(instr)
+            if not instr.breakflow():
+                continue
+            # test split
+            if instr.splitflow() and not (instr.is_subcall() and self.dontdis_retcall):
+                add_next_offset = True
+                pass
+            if instr.dstflow():
+                instr.dstflow2label(self.symbol_pool)
+                dst = instr.getdstflow(self.symbol_pool)
+                dstn = []
+                for d in dst:
+                    if isinstance(d, m2_expr.ExprId) and \
+                            isinstance(d.name, asm_label):
+                        dstn.append(d.name)
+                dst = dstn
+                if (not instr.is_subcall()) or self.follow_call:
+                    cur_block.bto.update(
+                        [asm_constraint(x, asm_constraint.c_to) for x in dst])
+
+            # get in delayslot mode
+            in_delayslot = True
+            delayslot_count = instr.delayslot
+
+        for c in cur_block.bto:
+            offsets_to_dis.add(c.label.offset)
+
+        if add_next_offset:
+            cur_block.add_cst(offset, asm_constraint.c_next, self.symbol_pool)
+            offsets_to_dis.add(offset)
+
+        # Fix multiple constraints
+        cur_block.fix_constraints()
+
+        if self.dis_bloc_callback is not None:
+            self.dis_bloc_callback(mn=self.arch, attrib=self.attrib,
+                                   pool_bin=self.bin_stream, cur_bloc=cur_block,
+                                   offsets_to_dis=offsets_to_dis,
+                                   symbol_pool=self.symbol_pool)
+        return cur_block, offsets_to_dis
+
+    def dis_bloc(self, offset):
+        """Disassemble the block at offset @offset and return the created
+        asm_bloc
+        @offset: targeted offset to disassemble
+        """
+        current_block, _ = self._dis_bloc(offset)
         return current_block
 
     def dis_multibloc(self, offset, blocs=None):
-        blocs = dis_bloc_all(self.arch, self.bs, offset, self.job_done,
-                             self.symbol_pool,
-                             dont_dis=self.dont_dis, split_dis=self.split_dis,
-                             follow_call=self.follow_call,
-                             dontdis_retcall=self.dontdis_retcall,
-                             blocs_wd=self.blocs_wd,
-                             lines_wd=self.lines_wd,
-                             blocs=blocs,
-                             dis_bloc_callback=self.dis_bloc_callback,
-                             dont_dis_nulstart_bloc=self.dont_dis_nulstart_bloc,
-                             attrib=self.attrib)
+        """Disassemble every block reachable from @offset regarding
+        specific disasmEngine conditions
+        Return an AsmCFG instance containing disassembled blocks
+        @offset: starting offset
+        @blocs: (optional) AsmCFG instance of already disassembled blocks to
+                merge with
+        """
+        log_asmbloc.info("dis bloc all")
+        if blocs is None:
+            blocs = AsmCFG()
+        todo = [offset]
+
+        bloc_cpt = 0
+        while len(todo):
+            bloc_cpt += 1
+            if self.blocs_wd is not None and bloc_cpt > self.blocs_wd:
+                log_asmbloc.debug("blocs watchdog reached at %X", int(offset))
+                break
+
+            target_offset = int(todo.pop(0))
+            if (target_offset is None or
+                    target_offset in self.job_done):
+                continue
+            cur_block, nexts = self._dis_bloc(target_offset)
+            todo += nexts
+            blocs.add_node(cur_block)
+
+        blocs.apply_splitting(self.symbol_pool,
+                              dis_block_callback=self.dis_bloc_callback,
+                              mn=self.arch, attrib=self.attrib,
+                              pool_bin=self.bin_stream)
         return blocs
diff --git a/miasm2/core/bin_stream.py b/miasm2/core/bin_stream.py
index cfcdf8a5..67a67de8 100644
--- a/miasm2/core/bin_stream.py
+++ b/miasm2/core/bin_stream.py
@@ -15,7 +15,6 @@
 # with this program; if not, write to the Free Software Foundation, Inc.,
 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 #
-import math
 
 
 class bin_stream(object):
@@ -78,7 +77,9 @@ class bin_stream(object):
         # Get initial bytes
         if n > self.getlen() * 8:
             raise IOError('not enough bits %r %r' % (n, len(self.bin) * 8))
-        temp = self.getbytes(start / 8, int(math.ceil(n / 8.)))
+        byte_start = start / 8
+        byte_stop = (start + n + 7) / 8
+        temp = self.getbytes(byte_start, byte_stop - byte_start)
         if not temp:
             raise IOError('cannot get bytes')
 
diff --git a/miasm2/core/graph.py b/miasm2/core/graph.py
index c98bf8a9..d97ca8be 100644
--- a/miasm2/core/graph.py
+++ b/miasm2/core/graph.py
@@ -478,6 +478,34 @@ class DiGraph(object):
         """Performs a depth first search on the reversed graph from @head"""
         return self._walk_generic_first(head, -1, self.predecessors_iter)
 
+    def has_loop(self):
+        """Return True if the graph contains at least a cycle"""
+        todo = list(self.nodes())
+        # tested nodes
+        done = set()
+        # current DFS nodes
+        current = set()
+        while todo:
+            node = todo.pop()
+            if node in done:
+                continue
+
+            if node in current:
+                # DFS branch end
+                for succ in self.successors_iter(node):
+                    if succ in current:
+                        return True
+                # A node cannot be in current AND in done
+                current.remove(node)
+                done.add(node)
+            else:
+                # Launch DFS from node
+                todo.append(node)
+                current.add(node)
+                todo += self.successors(node)
+
+        return False
+
     def compute_natural_loops(self, head):
         """
         Computes all natural loops in the graph.
diff --git a/miasm2/expression/expression_helper.py b/miasm2/expression/expression_helper.py
index 7c398105..44b4b5af 100644
--- a/miasm2/expression/expression_helper.py
+++ b/miasm2/expression/expression_helper.py
@@ -550,3 +550,127 @@ def expr_cmps(arg1, arg2):
     * 0 otherwise.
     """
     return _expr_cmp_gen(arg1, arg2).msb()
+
+
+class CondConstraint(object):
+
+    """Stand for a constraint on an Expr"""
+
+    # str of the associated operator
+    operator = ""
+
+    def __init__(self, expr):
+        self.expr = expr
+
+    def __repr__(self):
+        return "<%s %s 0>" % (self.expr, self.operator)
+
+    def to_constraint(self):
+        """Transform itself into a constraint using Expr"""
+        raise NotImplementedError("Abstract method")
+
+
+class CondConstraintZero(CondConstraint):
+
+    """Stand for a constraint like 'A == 0'"""
+    operator = "=="
+
+    def to_constraint(self):
+        return m2_expr.ExprAff(self.expr, m2_expr.ExprInt(0, self.expr.size))
+
+
+class CondConstraintNotZero(CondConstraint):
+
+    """Stand for a constraint like 'A != 0'"""
+    operator = "!="
+
+    def to_constraint(self):
+        cst1, cst2 = m2_expr.ExprInt1(0), m2_expr.ExprInt1(1)
+        return m2_expr.ExprAff(cst1, m2_expr.ExprCond(self.expr, cst1, cst2))
+
+
+ConstrainedValue = collections.namedtuple("ConstrainedValue",
+                                          ["constraints", "value"])
+
+
+class ConstrainedValues(set):
+
+    """Set of ConstrainedValue"""
+
+    def __str__(self):
+        out = []
+        for sol in self:
+            out.append("%s with constraints:" % sol.value)
+            for constraint in sol.constraints:
+                out.append("\t%s" % constraint)
+        return "\n".join(out)
+
+
+def possible_values(expr):
+    """Return possible values for expression @expr, associated with their
+    condition constraint as a ConstrainedValues instance
+    @expr: Expr instance
+    """
+
+    consvals = ConstrainedValues()
+
+    # Terminal expression
+    if (isinstance(expr, m2_expr.ExprInt) or
+            isinstance(expr, m2_expr.ExprId)):
+        consvals.add(ConstrainedValue(frozenset(), expr))
+    # Unary expression
+    elif isinstance(expr, m2_expr.ExprSlice):
+        consvals.update(ConstrainedValue(consval.constraints,
+                                         consval.value[expr.start:expr.stop])
+                        for consval in possible_values(expr.arg))
+    elif isinstance(expr, m2_expr.ExprMem):
+        consvals.update(ConstrainedValue(consval.constraints,
+                                         m2_expr.ExprMem(consval.value,
+                                                         expr.size))
+                        for consval in possible_values(expr.arg))
+    elif isinstance(expr, m2_expr.ExprAff):
+        consvals.update(possible_values(expr.src))
+    # Special case: constraint insertion
+    elif isinstance(expr, m2_expr.ExprCond):
+        to_ret = set()
+        src1cond = CondConstraintNotZero(expr.cond)
+        src2cond = CondConstraintZero(expr.cond)
+        consvals.update(ConstrainedValue(consval.constraints.union([src1cond]),
+                                         consval.value)
+                        for consval in possible_values(expr.src1))
+        consvals.update(ConstrainedValue(consval.constraints.union([src2cond]),
+                                         consval.value)
+                        for consval in possible_values(expr.src2))
+    # N-ary expression
+    elif isinstance(expr, m2_expr.ExprOp):
+        # For details, see ExprCompose
+        consvals_args = [possible_values(arg) for arg in expr.args]
+        for consvals_possibility in itertools.product(*consvals_args):
+            args_value = [consval.value for consval in consvals_possibility]
+            args_constraint = itertools.chain(*[consval.constraints
+                                                for consval in consvals_possibility])
+            consvals.add(ConstrainedValue(frozenset(args_constraint),
+                                          m2_expr.ExprOp(expr.op, *args_value)))
+    elif isinstance(expr, m2_expr.ExprCompose):
+        # Generate each possibility for sub-argument, associated with the start
+        # and stop bit
+        consvals_args = [map(lambda x: (x, arg[1], arg[2]),
+                             possible_values(arg[0]))
+                         for arg in expr.args]
+        for consvals_possibility in itertools.product(*consvals_args):
+            # Merge constraint of each sub-element
+            args_constraint = itertools.chain(*[consval[0].constraints
+                                                for consval in consvals_possibility])
+            # Gen the corresponding constraints / ExprCompose
+            consvals.add(
+                ConstrainedValue(frozenset(args_constraint),
+                                 m2_expr.ExprCompose(
+                                     [(consval[0].value,
+                                       consval[1],
+                                       consval[2])
+                                      for consval in consvals_possibility]
+                )))
+    else:
+        raise RuntimeError("Unsupported type for expr: %s" % type(expr))
+
+    return consvals
diff --git a/miasm2/ir/translators/C.py b/miasm2/ir/translators/C.py
index a7ba1a20..2c44d9e1 100644
--- a/miasm2/ir/translators/C.py
+++ b/miasm2/ir/translators/C.py
@@ -13,7 +13,6 @@ class TranslatorC(Translator):
     dct_shift = {'a>>': "right_arith",
                  '>>': "right_logic",
                  '<<': "left_logic",
-                 'a<<': "left_logic",
                  }
     dct_rot = {'<<<': 'rot_left',
                '>>>': 'rot_right',
diff --git a/miasm2/ir/translators/__init__.py b/miasm2/ir/translators/__init__.py
index 22e00d98..d3678ffc 100644
--- a/miasm2/ir/translators/__init__.py
+++ b/miasm2/ir/translators/__init__.py
@@ -3,6 +3,7 @@ from miasm2.ir.translators.translator import Translator
 import miasm2.ir.translators.C
 import miasm2.ir.translators.python
 import miasm2.ir.translators.miasm
+import miasm2.ir.translators.smt2
 try:
     import miasm2.ir.translators.z3_ir
 except ImportError:
diff --git a/miasm2/ir/translators/smt2.py b/miasm2/ir/translators/smt2.py
index e832d3b8..5bffd7f2 100644
--- a/miasm2/ir/translators/smt2.py
+++ b/miasm2/ir/translators/smt2.py
@@ -220,8 +220,6 @@ class TranslatorSMT2(Translator):
                     res = bvshl(res, arg)
                 elif expr.op == ">>":
                     res = bvlshr(res, arg)
-                elif expr.op == "a<<":
-                    res = bvshl(res, arg)
                 elif expr.op == "a>>":
                     res = bvashr(res, arg)
                 elif expr.op == "<<<":
@@ -286,13 +284,14 @@ class TranslatorSMT2(Translator):
         dst = self.from_expr(expr.dst)
         return smt2_assert(smt2_eq(src, dst))
 
-    def to_smt2(self, exprs, logic="QF_ABV"):
+    def to_smt2(self, exprs, logic="QF_ABV", model=False):
         """
         Converts a valid SMT2 file for a given list of
         SMT2 expressions.
 
         :param exprs: list of SMT2 expressions
         :param logic: SMT2 logic
+        :param model: model generation flag
         :return: String of the SMT2 file
         """
         ret = ""
@@ -315,6 +314,10 @@ class TranslatorSMT2(Translator):
         # define action
         ret += "(check-sat)\n"
 
+        # enable model generation
+        if model:
+            ret += "(get-model)\n"
+
         return ret
 
 
diff --git a/miasm2/ir/translators/z3_ir.py b/miasm2/ir/translators/z3_ir.py
index a1b90ae8..79099520 100644
--- a/miasm2/ir/translators/z3_ir.py
+++ b/miasm2/ir/translators/z3_ir.py
@@ -165,8 +165,6 @@ class TranslatorZ3(Translator):
                     res = z3.LShR(res, arg)
                 elif expr.op == "a>>":
                     res = res >> arg
-                elif expr.op == "a<<":
-                    res = res << arg
                 elif expr.op == "<<<":
                     res = z3.RotateLeft(res, arg)
                 elif expr.op == ">>>":
diff --git a/miasm2/jitter/jitcore.py b/miasm2/jitter/jitcore.py
index 59e7b752..74c438a7 100644
--- a/miasm2/jitter/jitcore.py
+++ b/miasm2/jitter/jitcore.py
@@ -56,6 +56,15 @@ class JitCore(object):
         self.options = {"jit_maxline": 50  # Maximum number of line jitted
                         }
 
+        self.mdis = asmbloc.disasmEngine(ir_arch.arch, ir_arch.attrib, bs,
+                                         lines_wd=self.options["jit_maxline"],
+                                         symbol_pool=ir_arch.symbol_pool,
+                                         follow_call=False,
+                                         dontdis_retcall=False,
+                                         split_dis=self.split_dis,
+                                         dis_bloc_callback=self.disasm_cb)
+
+
     def set_options(self, **kwargs):
         "Set options relative to the backend"
 
@@ -76,9 +85,8 @@ class JitCore(object):
         """The disassembly engine will no longer stop on address in args"""
         self.split_dis.difference_update(set(args))
 
-    def load(self, arch, attrib):
-        "Initialise the Jitter according to arch and attrib"
-
+    def load(self):
+        "Initialise the Jitter"
         raise NotImplementedError("Abstract class")
 
     def get_bloc_min_max(self, cur_bloc):
@@ -114,26 +122,24 @@ class JitCore(object):
         b.irblocs = irblocs
         self.jitirblocs(b.label, irblocs)
 
-    def disbloc(self, addr, cpu, vm):
-        "Disassemble a new bloc and JiT it"
+    def disbloc(self, addr, vm):
+        """Disassemble a new bloc and JiT it
+        @addr: address of the block to disassemble (asm_label or int)
+        @vm: VmMngr instance
+        """
 
         # Get the bloc
         if isinstance(addr, asmbloc.asm_label):
             addr = addr.offset
 
-        label = self.ir_arch.symbol_pool.getby_offset_create(addr)
+        # Prepare disassembler
+        self.mdis.job_done.clear()
+        self.mdis.lines_wd = self.options["jit_maxline"]
+        self.mdis.dis_bloc_callback = self.disasm_cb
 
         # Disassemble it
         try:
-            cur_bloc, _ = asmbloc.dis_bloc(self.ir_arch.arch, self.bs, label,
-                                           addr, set(),
-                                           self.ir_arch.symbol_pool, [],
-                                           follow_call=False,
-                                           dontdis_retcall=False,
-                                           lines_wd=self.options["jit_maxline"],
-                                           # max 10 asm lines
-                                           attrib=self.ir_arch.attrib,
-                                           split_dis=self.split_dis)
+            cur_bloc = self.mdis.dis_bloc(addr)
         except IOError:
             # vm_exception_flag is set
             cur_bloc = asmbloc.asm_bloc(label)
@@ -141,15 +147,13 @@ class JitCore(object):
         # Logging
         if self.log_newbloc:
             print cur_bloc
-        if self.disasm_cb is not None:
-            self.disasm_cb(cur_bloc)
 
         # Check for empty blocks
         if not cur_bloc.lines:
             raise ValueError("Cannot JIT a block without any assembly line")
 
         # Update label -> bloc
-        self.lbl2bloc[label] = cur_bloc
+        self.lbl2bloc[cur_bloc.label] = cur_bloc
 
         # Store min/max bloc address needed in jit automod code
         self.get_bloc_min_max(cur_bloc)
@@ -180,7 +184,7 @@ class JitCore(object):
 
         if not lbl in self.lbl2jitbloc:
             # Need to JiT the bloc
-            self.disbloc(lbl, cpu, vm)
+            self.disbloc(lbl, vm)
 
         # Run the bloc and update cpu/vmmngr state
         ret = self.jit_call(lbl, cpu, vm, breakpoints)
diff --git a/miasm2/jitter/vm_mngr.c b/miasm2/jitter/vm_mngr.c
index a0c2f5a3..1bb58a17 100644
--- a/miasm2/jitter/vm_mngr.c
+++ b/miasm2/jitter/vm_mngr.c
@@ -84,6 +84,9 @@ inline int midpoint(int imin, int imax)
 
 int find_page_node(struct memory_page_node * array, uint64_t key, int imin, int imax)
 {
+	if (imax < 1)
+		return -1;
+	imax--;
 	// continue searching while [imin,imax] is not empty
 	while (imin <= imax) {
 		// calculate the midpoint for roughly equal partition
@@ -103,7 +106,7 @@ int find_page_node(struct memory_page_node * array, uint64_t key, int imin, int
 	return -1;
 }
 
-struct memory_page_node * get_memory_page_from_address(vm_mngr_t* vm_mngr, uint64_t ad)
+struct memory_page_node * get_memory_page_from_address(vm_mngr_t* vm_mngr, uint64_t ad, int raise_exception)
 {
 	struct memory_page_node * mpn;
 	int i;
@@ -117,8 +120,10 @@ struct memory_page_node * get_memory_page_from_address(vm_mngr_t* vm_mngr, uint6
 		if ((mpn->ad <= ad) && (ad < mpn->ad + mpn->size))
 			return mpn;
 	}
-	fprintf(stderr, "WARNING: address 0x%"PRIX64" is not mapped in virtual memory:\n", ad);
-	vm_mngr->exception_flags |= EXCEPT_ACCESS_VIOL;
+	if (raise_exception) {
+		fprintf(stderr, "WARNING: address 0x%"PRIX64" is not mapped in virtual memory:\n", ad);
+		vm_mngr->exception_flags |= EXCEPT_ACCESS_VIOL;
+	}
 	return NULL;
 }
 
@@ -133,7 +138,7 @@ static uint64_t memory_page_read(vm_mngr_t* vm_mngr, unsigned int my_size, uint6
 	struct memory_breakpoint_info * b;
 
 
-	mpn = get_memory_page_from_address(vm_mngr, ad);
+	mpn = get_memory_page_from_address(vm_mngr, ad, 1);
 	if (!mpn)
 		return 0;
 
@@ -182,7 +187,7 @@ static uint64_t memory_page_read(vm_mngr_t* vm_mngr, unsigned int my_size, uint6
 		unsigned int new_size = my_size;
 		int index = 0;
 		while (new_size){
-			mpn = get_memory_page_from_address(vm_mngr, ad);
+			mpn = get_memory_page_from_address(vm_mngr, ad, 1);
 			if (!mpn)
 				return 0;
 			addr = &((unsigned char*)mpn->ad_hp)[ad - mpn->ad];
@@ -219,7 +224,7 @@ static void memory_page_write(vm_mngr_t* vm_mngr, unsigned int my_size,
 	unsigned char * addr;
 	struct memory_breakpoint_info * b;
 
-	mpn = get_memory_page_from_address(vm_mngr, ad);
+	mpn = get_memory_page_from_address(vm_mngr, ad, 1);
 	if (!mpn)
 		return;
 
@@ -283,7 +288,7 @@ static void memory_page_write(vm_mngr_t* vm_mngr, unsigned int my_size,
 			break;
 		}
 		while (my_size){
-			mpn = get_memory_page_from_address(vm_mngr, ad);
+			mpn = get_memory_page_from_address(vm_mngr, ad, 1);
 			if (!mpn)
 				return;
 
@@ -459,7 +464,7 @@ int vm_read_mem(vm_mngr_t* vm_mngr, uint64_t addr, char** buffer_ptr, uint64_t s
 
        /* read is multiple page wide */
        while (size){
-	      mpn = get_memory_page_from_address(vm_mngr, addr);
+	      mpn = get_memory_page_from_address(vm_mngr, addr, 1);
 	      if (!mpn){
 		      free(*buffer_ptr);
 		      PyErr_SetString(PyExc_RuntimeError, "Error: cannot find address");
@@ -485,7 +490,7 @@ int vm_write_mem(vm_mngr_t* vm_mngr, uint64_t addr, char *buffer, uint64_t size)
 
        /* write is multiple page wide */
        while (size){
-	      mpn = get_memory_page_from_address(vm_mngr, addr);
+	      mpn = get_memory_page_from_address(vm_mngr, addr, 1);
 	      if (!mpn){
 		      PyErr_SetString(PyExc_RuntimeError, "Error: cannot find address");
 		      return -1;
@@ -503,6 +508,27 @@ int vm_write_mem(vm_mngr_t* vm_mngr, uint64_t addr, char *buffer, uint64_t size)
 
 
 
+int is_mapped(vm_mngr_t* vm_mngr, uint64_t addr, uint64_t size)
+{
+       uint64_t len;
+       struct memory_page_node * mpn;
+
+       /* test multiple page wide */
+       while (size){
+	      mpn = get_memory_page_from_address(vm_mngr, addr, 0);
+	      if (!mpn)
+		      return 0;
+
+	      len = MIN(size, mpn->size - (addr - mpn->ad));
+	      addr += len;
+	      size -= len;
+       }
+
+       return 1;
+}
+
+
+
 unsigned int parity(unsigned int a)
 {
 #if defined(__builtin_parity)
diff --git a/miasm2/jitter/vm_mngr.h b/miasm2/jitter/vm_mngr.h
index acea4875..52d62551 100644
--- a/miasm2/jitter/vm_mngr.h
+++ b/miasm2/jitter/vm_mngr.h
@@ -178,6 +178,7 @@ int vm_write_mem(vm_mngr_t* vm_mngr, uint64_t addr, char *buffer, uint64_t size)
 unsigned int parity(unsigned int a);
 unsigned int my_imul08(unsigned int a, unsigned int b);
 
+int is_mapped(vm_mngr_t* vm_mngr, uint64_t addr, uint64_t size);
 void vm_throw(vm_mngr_t* vm_mngr, unsigned long flags);
 int shift_right_arith(unsigned int size, int a, unsigned int b);
 
@@ -312,7 +313,7 @@ void func_alloc(void);
 unsigned int get_memory_page_max_address_py(void);
 unsigned int get_memory_page_max_user_address_py(void);
 unsigned int get_memory_page_from_min_ad_py(unsigned int size);
-struct memory_page_node * get_memory_page_from_address(vm_mngr_t*, uint64_t ad);
+struct memory_page_node * get_memory_page_from_address(vm_mngr_t*, uint64_t ad, int raise_exception);
 void func_malloc_memory_page(void);
 void func_free_memory_page(void);
 void func_virtualalloc_memory_page(void);
diff --git a/miasm2/jitter/vm_mngr_py.c b/miasm2/jitter/vm_mngr_py.c
index fdadf7f1..215517ee 100644
--- a/miasm2/jitter/vm_mngr_py.c
+++ b/miasm2/jitter/vm_mngr_py.c
@@ -150,7 +150,7 @@ PyObject* vm_set_mem_access(VmMngr* self, PyObject* args)
 	PyGetInt(addr, page_addr);
 	PyGetInt(access, page_access);
 
-	mpn = get_memory_page_from_address(&self->vm_mngr, page_addr);
+	mpn = get_memory_page_from_address(&self->vm_mngr, page_addr, 1);
 	if (!mpn){
 		PyErr_SetString(PyExc_RuntimeError, "cannot find address");
 		return 0;
@@ -443,6 +443,24 @@ PyObject* vm_set_addr2obj(VmMngr* self, PyObject* args)
 }
 
 
+PyObject* vm_is_mapped(VmMngr* self, PyObject* args)
+{
+	PyObject *ad;
+	PyObject *size;
+	uint64_t b_ad;
+	uint64_t b_size;
+	int ret;
+
+	if (!PyArg_ParseTuple(args, "OO", &ad, &size))
+		return NULL;
+
+	PyGetInt(ad, b_ad);
+	PyGetInt(size, b_size);
+	ret = is_mapped(&self->vm_mngr, b_ad, b_size);
+	return PyLong_FromUnsignedLongLong((uint64_t)ret);
+}
+
+
 static PyObject *
 vm_set_big_endian(VmMngr *self, PyObject *value, void *closure)
 {
@@ -509,6 +527,8 @@ static PyMethodDef VmMngr_methods[] = {
 	 "X"},
 	{"set_addr2obj", (PyCFunction)vm_set_addr2obj, METH_VARARGS,
 	 "X"},
+	{"is_mapped", (PyCFunction)vm_is_mapped, METH_VARARGS,
+	 "X"},
 	{"add_code_bloc",(PyCFunction)vm_add_code_bloc, METH_VARARGS,
 	 "X"},
 	{"get_mem", (PyCFunction)vm_get_mem, METH_VARARGS,
diff --git a/miasm2/os_dep/win_api_x86_32_seh.py b/miasm2/os_dep/win_api_x86_32_seh.py
index f90198f9..6bf491bf 100644
--- a/miasm2/os_dep/win_api_x86_32_seh.py
+++ b/miasm2/os_dep/win_api_x86_32_seh.py
@@ -148,13 +148,15 @@ def build_ldr_data(jitter, modules_info):
     +0x00c InLoadOrderModuleList           : _LIST_ENTRY
     +0x014 InMemoryOrderModuleList         : _LIST_ENTRY
     +0x01C InInitializationOrderModuleList         : _LIST_ENTRY
+    # dummy dll base
+    +0x024 DllBase : Ptr32 Void
 
     @jitter: jitter instance
     @modules_info: LoadedModules instance
 
     """
     # ldr offset pad
-    offset = LDR_AD + peb_ldr_data_offset + 0xC
+    offset = peb_ldr_data_address + 0xC
 
     # get main pe info
     main_pe = modules_info.name2module.get(main_pe_name, None)
@@ -178,6 +180,11 @@ def build_ldr_data(jitter, modules_info):
         jitter.vm.add_memory_page(offset, PAGE_READ | PAGE_WRITE, data,
                                   "Loader struct")
 
+    # Add dummy dll base
+    jitter.vm.add_memory_page(peb_ldr_data_address + 0x24,
+                              PAGE_READ | PAGE_WRITE, pck32(0),
+                              "Loader struct dummy dllbase")
+
 
 class LoadedModules(object):
 
@@ -238,13 +245,8 @@ def create_modules_chain(jitter, name2module):
     offset_name = 0x500
     offset_path = 0x600
 
-    dummy_e = pe_init.PE()
-    dummy_e.NThdr.ImageBase = 0
-    dummy_e.Opthdr.AddressOfEntryPoint = 0
-    dummy_e.NThdr.sizeofimage = 0
-
     out = ""
-    for i, (fname, pe_obj) in enumerate([("", dummy_e)] + name2module.items()):
+    for i, (fname, pe_obj) in enumerate(name2module.items(), 1):
         if pe_obj is None:
             log.warning("Unknown module: ommited from link list (%r)",
                         fname)
@@ -291,9 +293,25 @@ def create_modules_chain(jitter, name2module):
     return modules_info
 
 
+def set_link_list_entry(jitter, loaded_modules, modules_info, offset):
+    for i, module in enumerate(loaded_modules):
+        cur_module_entry = modules_info.module2entry[module]
+        prev_module = loaded_modules[(i - 1) % len(loaded_modules)]
+        next_module = loaded_modules[(i + 1) % len(loaded_modules)]
+        prev_module_entry = modules_info.module2entry[prev_module]
+        next_module_entry = modules_info.module2entry[next_module]
+        if i == 0:
+            prev_module_entry = peb_ldr_data_address + 0xC
+        if i == len(loaded_modules) - 1:
+            next_module_entry = peb_ldr_data_address + 0xC
+        jitter.vm.set_mem(cur_module_entry + offset,
+                          (pck32(next_module_entry + offset) +
+                           pck32(prev_module_entry + offset)))
+
+
 def fix_InLoadOrderModuleList(jitter, modules_info):
     """Fix InLoadOrderModuleList double link list. First module is the main pe,
-    then ntdll, kernel32. dummy is last pe.
+    then ntdll, kernel32.
 
     @jitter: the jitter instance
     @modules_info: the LoadedModules instance
@@ -303,8 +321,7 @@ def fix_InLoadOrderModuleList(jitter, modules_info):
     main_pe = modules_info.name2module.get(main_pe_name, None)
     kernel32_pe = modules_info.name2module.get("kernel32.dll", None)
     ntdll_pe = modules_info.name2module.get("ntdll.dll", None)
-    dummy_pe = modules_info.name2module.get("", None)
-    special_modules = [main_pe, kernel32_pe, ntdll_pe, dummy_pe]
+    special_modules = [main_pe, kernel32_pe, ntdll_pe]
     if not all(special_modules):
         log.warn(
             'No main pe, ldr data will be unconsistant %r', special_modules)
@@ -315,22 +332,13 @@ def fix_InLoadOrderModuleList(jitter, modules_info):
         loaded_modules[0:0] = [main_pe]
         loaded_modules[1:1] = [ntdll_pe]
         loaded_modules[2:2] = [kernel32_pe]
-        loaded_modules.append(dummy_pe)
 
-    for i, module in enumerate(loaded_modules):
-        cur_module_entry = modules_info.module2entry[module]
-        prev_module = loaded_modules[(i - 1) % len(loaded_modules)]
-        next_module = loaded_modules[(i + 1) % len(loaded_modules)]
-        prev_module_entry = modules_info.module2entry[prev_module]
-        next_module_entry = modules_info.module2entry[next_module]
-        jitter.vm.set_mem(cur_module_entry,
-                          (pck32(next_module_entry) +
-                           pck32(prev_module_entry)))
+    set_link_list_entry(jitter, loaded_modules, modules_info, 0x0)
 
 
 def fix_InMemoryOrderModuleList(jitter, modules_info):
     """Fix InMemoryOrderLinks double link list. First module is the main pe,
-    then ntdll, kernel32. dummy is last pe.
+    then ntdll, kernel32.
 
     @jitter: the jitter instance
     @modules_info: the LoadedModules instance
@@ -340,8 +348,7 @@ def fix_InMemoryOrderModuleList(jitter, modules_info):
     main_pe = modules_info.name2module.get(main_pe_name, None)
     kernel32_pe = modules_info.name2module.get("kernel32.dll", None)
     ntdll_pe = modules_info.name2module.get("ntdll.dll", None)
-    dummy_pe = modules_info.name2module.get("", None)
-    special_modules = [main_pe, kernel32_pe, ntdll_pe, dummy_pe]
+    special_modules = [main_pe, kernel32_pe, ntdll_pe]
     if not all(special_modules):
         log.warn('No main pe, ldr data will be unconsistant')
         loaded_modules = modules_info.modules
@@ -351,22 +358,13 @@ def fix_InMemoryOrderModuleList(jitter, modules_info):
         loaded_modules[0:0] = [main_pe]
         loaded_modules[1:1] = [ntdll_pe]
         loaded_modules[2:2] = [kernel32_pe]
-        loaded_modules.append(dummy_pe)
 
-    for i, module in enumerate(loaded_modules):
-        cur_module_entry = modules_info.module2entry[module]
-        prev_module = loaded_modules[(i - 1) % len(loaded_modules)]
-        next_module = loaded_modules[(i + 1) % len(loaded_modules)]
-        prev_module_entry = modules_info.module2entry[prev_module]
-        next_module_entry = modules_info.module2entry[next_module]
-        jitter.vm.set_mem(cur_module_entry + 0x8,
-                          (pck32(next_module_entry + 0x8) +
-                           pck32(prev_module_entry + 0x8)))
+    set_link_list_entry(jitter, loaded_modules, modules_info, 0x8)
 
 
 def fix_InInitializationOrderModuleList(jitter, modules_info):
     """Fix InInitializationOrderModuleList double link list. First module is the
-    ntdll, then kernel32. dummy is last pe.
+    ntdll, then kernel32.
 
     @jitter: the jitter instance
     @modules_info: the LoadedModules instance
@@ -377,8 +375,7 @@ def fix_InInitializationOrderModuleList(jitter, modules_info):
     main_pe = modules_info.name2module.get(main_pe_name, None)
     kernel32_pe = modules_info.name2module.get("kernel32.dll", None)
     ntdll_pe = modules_info.name2module.get("ntdll.dll", None)
-    dummy_pe = modules_info.name2module.get("", None)
-    special_modules = [main_pe, kernel32_pe, ntdll_pe, dummy_pe]
+    special_modules = [main_pe, kernel32_pe, ntdll_pe]
     if not all(special_modules):
         log.warn('No main pe, ldr data will be unconsistant')
         loaded_modules = modules_info.modules
@@ -387,17 +384,8 @@ def fix_InInitializationOrderModuleList(jitter, modules_info):
                           if module not in special_modules]
         loaded_modules[0:0] = [ntdll_pe]
         loaded_modules[1:1] = [kernel32_pe]
-        loaded_modules.append(dummy_pe)
 
-    for i, module in enumerate(loaded_modules):
-        cur_module_entry = modules_info.module2entry[module]
-        prev_module = loaded_modules[(i - 1) % len(loaded_modules)]
-        next_module = loaded_modules[(i + 1) % len(loaded_modules)]
-        prev_module_entry = modules_info.module2entry[prev_module]
-        next_module_entry = modules_info.module2entry[next_module]
-        jitter.vm.set_mem(cur_module_entry + 0x10,
-                          (pck32(next_module_entry + 0x10) +
-                           pck32(prev_module_entry + 0x10)))
+    set_link_list_entry(jitter, loaded_modules, modules_info, 0x10)
 
 
 def add_process_env(jitter):
diff --git a/test/analysis/depgraph.py b/test/analysis/depgraph.py
index fafae1fb..f1d9151c 100644
--- a/test/analysis/depgraph.py
+++ b/test/analysis/depgraph.py
@@ -1,18 +1,20 @@
 """Regression test module for DependencyGraph"""
-from miasm2.expression.expression import ExprId, ExprInt32, ExprAff, ExprCond
+from miasm2.expression.expression import ExprId, ExprInt32, ExprAff, ExprCond, \
+    ExprInt
 from miasm2.core.asmbloc import asm_label
 from miasm2.ir.analysis import ira
 from miasm2.ir.ir import ir, irbloc, AssignBlock
 from miasm2.core.graph import DiGraph
-from miasm2.analysis.depgraph import DependencyNode, DependencyGraph,\
-    DependencyDict
+from miasm2.analysis.depgraph import DependencyNode, DependencyGraph
 from itertools import count
+from pdb import pm
+import re
 
-EMULATION=True
+EMULATION = True
 try:
     import z3
 except ImportError:
-    EMULATION=False
+    EMULATION = False
 
 STEP_COUNTER = count()
 A = ExprId("a")
@@ -30,9 +32,9 @@ PC = ExprId("pc")
 SP = ExprId("sp")
 
 CST0 = ExprInt32(0x0)
-CST1 = ExprInt32(0x11)
-CST2 = ExprInt32(0x12)
-CST3 = ExprInt32(0x13)
+CST1 = ExprInt32(0x1)
+CST2 = ExprInt32(0x2)
+CST3 = ExprInt32(0x3)
 CST22 = ExprInt32(0x22)
 CST23 = ExprInt32(0x23)
 CST24 = ExprInt32(0x24)
@@ -97,117 +99,138 @@ class IRATest(ira):
         return set([self.ret_reg, self.sp])
 
 
-class GraphTest(DiGraph):
+def bloc2graph(irgraph, label=False, lines=True):
+    """Render dot graph of @irgraph"""
 
-    """Fake graph representation class for test cases"""
+    escape_chars = re.compile('[' + re.escape('{}') + ']')
+    label_attr = 'colspan="2" align="center" bgcolor="grey"'
+    edge_attr = 'label = "%s" color="%s" style="bold"'
+    td_attr = 'align="left"'
+    block_attr = 'shape="Mrecord" fontname="Courier New"'
 
-    def __init__(self, pira):
-        self.ira = pira
-        super(GraphTest, self).__init__()
+    out = ["digraph asm_graph {"]
+    fix_chars = lambda x: '\\' + x.group()
 
-    def __eq__(self, graph):
-        if (len(self._nodes) != len(graph.nodes()) or
-                len(self._edges) != len(graph.edges())):
-            return False
-
-        if (set([n.nostep_repr for n in self._nodes]) !=
-                set([n.nostep_repr for n in graph.nodes()])):
-            return False
-        if (sorted([(src.nostep_repr, dst.nostep_repr)
-                    for (src, dst) in self._edges])
-        != sorted([(src.nostep_repr, dst.nostep_repr)
-                   for (src, dst) in graph.edges()])):
-            return False
-        return True
+    # Generate basic blocks
+    out_blocks = []
+    for label in irgraph.graph.nodes():
+        if isinstance(label, asm_label):
+            label_name = label.name
+        else:
+            label_name = str(label)
 
-    def node2str(self, node):
-        if isinstance(node, asm_label):
-            if node not in self.ira.blocs:
-                return str(node)
+        if hasattr(irgraph, 'blocs'):
+            irblock = irgraph.blocs[label]
+        else:
+            irblock = None
+        if isinstance(label, asm_label):
+            out_block = '%s [\n' % label.name
+        else:
+            out_block = '%s [\n' % label
+        out_block += "%s " % block_attr
+        out_block += 'label =<<table border="0" cellborder="0" cellpadding="3">'
+
+        block_label = '<tr><td %s>%s</td></tr>' % (
+            label_attr, label_name)
+        block_html_lines = []
+        if lines and irblock is not None:
+            for assignblk in irblock.irs:
+                for dst, src in assignblk.iteritems():
+                    if False:
+                        out_render = "%.8X</td><td %s> " % (0, td_attr)
+                    else:
+                        out_render = ""
+                    out_render += escape_chars.sub(fix_chars, "%s = %s" % (dst, src))
+                    block_html_lines.append(out_render)
+                block_html_lines.append(" ")
+            block_html_lines.pop()
+        block_html_lines = ('<tr><td %s>' % td_attr +
+                            ('</td></tr><tr><td %s>' % td_attr).join(block_html_lines) +
+                            '</td></tr>')
+        out_block += "%s " % block_label
+        out_block += block_html_lines + "</table>> ];"
+        out_blocks.append(out_block)
+
+    out += out_blocks
+    # Generate links
+    for src, dst in irgraph.graph.edges():
+            if isinstance(src, asm_label):
+                src_name = src.name
+            else:
+                src_name = str(src)
+            if isinstance(dst, asm_label):
+                dst_name = dst.name
             else:
-                return str(self.ira.blocs[node])
-
-        assert node.label in self.ira.blocs
-        out = "(%s, %s, %s)\\l" % (node.label.name,
-                                 node.element,
-                                 node.line_nb)
-        if not 0 <= node.line_nb < len(self.ira.blocs[node.label].irs):
-            return out
-        exprs = self.ira.blocs[node.label].irs[node.line_nb]
-        exprs_str = '\\l'.join([str(x) for x in exprs])
-        return "%s %s" % (out, exprs_str)
-
-# Test structures
-print "[+] Test structures"
-
-print "[+] Test DependencyDict"
-DD0 = DependencyDict(LBL0, [])
-DEPNODES_0 = [DependencyNode(LBL0, A, linenb, next(STEP_COUNTER))
-              for linenb in xrange(10)][::-1]
-
-# Heads
-assert list(DD0.heads()) == []
-assert DD0.is_head(DEPNODES_0[-1]) == True
-assert DD0.is_head(DEPNODES_0[0]) == False
-DD0.cache[DEPNODES_0[-1]] = set(DEPNODES_0[-1:])
-assert list(DD0.heads()) == [DEPNODES_0[-1]]
-
-# Extend
-DD1 = DD0.extend(LBL1)
-
-assert DD1.label == LBL1
-assert DD1.history == [DD0]
-assert DD1.cache == DD0.cache
-assert DD1.pending == set()
-assert DD1 != DD0
-
-DD1.cache[DEPNODES_0[4]] = set(DEPNODES_0[5:9])
-assert DD1.cache != DD0.cache
-
-DD2 = DD0.copy()
-assert DD2.label == LBL0
-assert DD2.history == []
-assert DD2.cache == DD0.cache
-assert DD2.pending == DD0.pending
-assert DD2 == DD0
-
-DD2.cache[DEPNODES_0[4]] = set(DEPNODES_0[5:9])
-assert DD2.cache != DD0.cache
-
-
-print "   [+] Test dictionary equality"
-DNA = DependencyNode(LBL2, A, 0, next(STEP_COUNTER))
-DNB = DependencyNode(LBL1, B, 1, next(STEP_COUNTER))
-DNC = DependencyNode(LBL1, C, 0, next(STEP_COUNTER), True)
-DNB2 = DependencyNode(LBL1, B, 1, next(STEP_COUNTER))
-DNC2 = DependencyNode(LBL1, C, 0, next(STEP_COUNTER), True)
-DNB3 = DependencyNode(LBL1, B, 1, next(STEP_COUNTER))
-DNC3 = DependencyNode(LBL1, C, 0, next(STEP_COUNTER), True)
-
-DDCT1 = DependencyDict(LBL1, [])
-DDCT1.cache.update({DNA: set([DNB]), DNB: set([DNC])})
-
-DDCT2 = DDCT1.extend(LBL1)
-DDCT2.cache.update({DNA: set([DNB]), DNB: set([DNC]),
-                    DNC: set([DNB2]), DNB2: set([DNC2])
-                    })
-
-DDCT3 = DDCT2.extend(LBL1)
-DDCT3.cache.update(
-    {DNA: set([DNB]), DNB: set([DNC]),
-     DNC: set([DNB2]), DNB2: set([DNC2]),
-     DNC2: set([DNB3]), DNB3: set([DNC3])})
-
-assert not DDCT1.__eq__(DDCT2)
-assert DDCT2.__eq__(DDCT3)
-
-print "[+] DependencyDict OK !"
-print "[+] Structures OK !"
+                dst_name = str(dst)
+
+            edge_color = "black"
+            out.append('%s -> %s' % (src_name,
+                                     dst_name) +
+                       '[' + edge_attr % ("", edge_color) + '];')
+
+    out.append("}")
+    return '\n'.join(out)
+
+
+def dg2graph(graph, label=False, lines=True):
+    """Render dot graph of @graph"""
+
+    escape_chars = re.compile('[' + re.escape('{}') + ']')
+    label_attr = 'colspan="2" align="center" bgcolor="grey"'
+    edge_attr = 'label = "%s" color="%s" style="bold"'
+    td_attr = 'align="left"'
+    block_attr = 'shape="Mrecord" fontname="Courier New"'
+
+    out = ["digraph asm_graph {"]
+    fix_chars = lambda x: '\\' + x.group()
+
+    # Generate basic blocks
+    out_blocks = []
+    for label in graph.nodes():
+        if isinstance(label, DependencyNode):
+            label_name = "%s %s %s" % (label.label.name,
+                                       label.element,
+                                       label.line_nb)
+        else:
+            label_name = str(label)
+        out_block = '%s [\n' % hash(label)
+        out_block += "%s " % block_attr
+        out_block += 'label =<<table border="0" cellborder="0" cellpadding="3">'
+
+        block_label = '<tr><td %s>%s</td></tr>' % (
+            label_attr, label_name)
+        block_html_lines = []
+        block_html_lines = ('<tr><td %s>' % td_attr +
+                            ('</td></tr><tr><td %s>' % td_attr).join(block_html_lines) +
+                            '</td></tr>')
+        out_block += "%s " % block_label
+        out_block += block_html_lines + "</table>> ];"
+        out_blocks.append(out_block)
+
+    out += out_blocks
+    # Generate links
+    for src, dst in graph.edges():
+            edge_color = "black"
+            out.append('%s -> %s ' % (hash(src),
+                                      hash(dst)) +
+                       '[' + edge_attr % ("", edge_color) + '];')
+
+    out.append("}")
+    return '\n'.join(out)
+
+
+print "   [+] Test dictionnary equality"
+DNA = DependencyNode(LBL2, A, 0)
+DNB = DependencyNode(LBL1, B, 1)
+DNC = DependencyNode(LBL1, C, 0)
+DNB2 = DependencyNode(LBL1, B, 1)
+DNC2 = DependencyNode(LBL1, C, 0)
+DNB3 = DependencyNode(LBL1, B, 1)
+DNC3 = DependencyNode(LBL1, C, 0)
 
 # graph 1
 
 G1_IRA = IRATest()
-G1_IRA.g = GraphTest(G1_IRA)
 
 G1_IRB0 = gen_irbloc(LBL0, [[ExprAff(C, CST1)]])
 G1_IRB1 = gen_irbloc(LBL1, [[ExprAff(B, C)]])
@@ -221,7 +244,6 @@ G1_IRA.blocs = dict([(irb.label, irb) for irb in [G1_IRB0, G1_IRB1, G1_IRB2]])
 # graph 2
 
 G2_IRA = IRATest()
-G2_IRA.g = GraphTest(G2_IRA)
 
 G2_IRB0 = gen_irbloc(LBL0, [[ExprAff(C, CST1)]])
 G2_IRB1 = gen_irbloc(LBL1, [[ExprAff(B, CST2)]])
@@ -236,7 +258,6 @@ G2_IRA.blocs = dict([(irb.label, irb) for irb in [G2_IRB0, G2_IRB1, G2_IRB2]])
 # graph 3
 
 G3_IRA = IRATest()
-G3_IRA.g = GraphTest(G3_IRA)
 
 G3_IRB0 = gen_irbloc(LBL0, [[ExprAff(C, CST1)]])
 G3_IRB1 = gen_irbloc(LBL1, [[ExprAff(B, CST2)]])
@@ -254,7 +275,6 @@ G3_IRA.blocs = dict([(irb.label, irb) for irb in [G3_IRB0, G3_IRB1,
 # graph 4
 
 G4_IRA = IRATest()
-G4_IRA.g = GraphTest(G4_IRA)
 
 G4_IRB0 = gen_irbloc(LBL0, [[ExprAff(C, CST1)]])
 G4_IRB1 = gen_irbloc(LBL1, [[ExprAff(C, C + CST2)],
@@ -274,7 +294,6 @@ G4_IRA.blocs = dict([(irb.label, irb) for irb in [G4_IRB0, G4_IRB1, G4_IRB2]])
 # graph 5
 
 G5_IRA = IRATest()
-G5_IRA.g = GraphTest(G5_IRA)
 
 G5_IRB0 = gen_irbloc(LBL0, [[ExprAff(B, CST1)]])
 G5_IRB1 = gen_irbloc(LBL1, [[ExprAff(B, B + CST2)],
@@ -293,7 +312,6 @@ G5_IRA.blocs = dict([(irb.label, irb) for irb in [G5_IRB0, G5_IRB1, G5_IRB2]])
 # graph 6
 
 G6_IRA = IRATest()
-G6_IRA.g = GraphTest(G6_IRA)
 
 G6_IRB0 = gen_irbloc(LBL0, [[ExprAff(B, CST1)]])
 G6_IRB1 = gen_irbloc(LBL1, [[ExprAff(A, B)]])
@@ -306,7 +324,6 @@ G6_IRA.blocs = dict([(irb.label, irb) for irb in [G6_IRB0, G6_IRB1]])
 # graph 7
 
 G7_IRA = IRATest()
-G7_IRA.g = GraphTest(G7_IRA)
 
 G7_IRB0 = gen_irbloc(LBL0, [[ExprAff(C, CST1)]])
 G7_IRB1 = gen_irbloc(LBL1, [[ExprAff(B, C)], [ExprAff(A, B)]])
@@ -321,7 +338,6 @@ G7_IRA.blocs = dict([(irb.label, irb) for irb in [G7_IRB0, G7_IRB1, G7_IRB2]])
 # graph 8
 
 G8_IRA = IRATest()
-G8_IRA.g = GraphTest(G8_IRA)
 
 G8_IRB0 = gen_irbloc(LBL0, [[ExprAff(C, CST1)]])
 G8_IRB1 = gen_irbloc(LBL1, [[ExprAff(B, C)], [ExprAff(C, D)]])
@@ -338,7 +354,6 @@ G8_IRA.blocs = dict([(irb.label, irb) for irb in [G8_IRB0, G8_IRB1, G8_IRB2]])
 # graph 10
 
 G10_IRA = IRATest()
-G10_IRA.g = GraphTest(G10_IRA)
 
 G10_IRB1 = gen_irbloc(LBL1, [[ExprAff(B, B + CST2)]])
 G10_IRB2 = gen_irbloc(LBL2, [[ExprAff(A, B)]])
@@ -351,7 +366,6 @@ G10_IRA.blocs = dict([(irb.label, irb) for irb in [G10_IRB1, G10_IRB2]])
 # graph 11
 
 G11_IRA = IRATest()
-G11_IRA.g = GraphTest(G11_IRA)
 
 G11_IRB0 = gen_irbloc(LBL0, [[ExprAff(A, CST1),
                               ExprAff(B, CST2)]])
@@ -368,7 +382,6 @@ G11_IRA.blocs = dict([(irb.label, irb)
 # graph 12
 
 G12_IRA = IRATest()
-G12_IRA.g = GraphTest(G12_IRA)
 
 G12_IRB0 = gen_irbloc(LBL0, [[ExprAff(B, CST1)]])
 G12_IRB1 = gen_irbloc(LBL1, [[ExprAff(A, B)], [ExprAff(B, B + CST2)]])
@@ -385,7 +398,6 @@ G12_IRA.blocs = dict([(irb.label, irb) for irb in [G12_IRB0, G12_IRB1,
 # graph 13
 
 G13_IRA = IRATest()
-G13_IRA.g = GraphTest(G13_IRA)
 
 G13_IRB0 = gen_irbloc(LBL0, [[ExprAff(A, CST1)],
                              #[ExprAff(B, A)],
@@ -414,7 +426,6 @@ G13_IRA.blocs = dict([(irb.label, irb) for irb in [G13_IRB0, G13_IRB1,
 # graph 14
 
 G14_IRA = IRATest()
-G14_IRA.g = GraphTest(G14_IRA)
 
 G14_IRB0 = gen_irbloc(LBL0, [[ExprAff(A, CST1)],
                              [ExprAff(G14_IRA.IRDst,
@@ -445,7 +456,6 @@ G14_IRA.blocs = dict([(irb.label, irb) for irb in [G14_IRB0, G14_IRB1,
 # graph 16
 
 G15_IRA = IRATest()
-G15_IRA.g = GraphTest(G15_IRA)
 
 G15_IRB0 = gen_irbloc(LBL0, [[ExprAff(A, CST1)]])
 G15_IRB1 = gen_irbloc(LBL1, [[ExprAff(D, A + B)],
@@ -463,7 +473,6 @@ G15_IRA.blocs = dict([(irb.label, irb) for irb in [G15_IRB0, G15_IRB1,
 # graph 16
 
 G16_IRA = IRATest()
-G16_IRA.g = GraphTest(G16_IRA)
 
 G16_IRB0 = gen_irbloc(LBL0, [[ExprAff(A, CST1)]])
 G16_IRB1 = gen_irbloc(LBL1, [[ExprAff(R, D)]])
@@ -488,7 +497,6 @@ G16_IRA.blocs = dict([(irb.label, irb) for irb in [G16_IRB0, G16_IRB1,
 # graph 17
 
 G17_IRA = IRATest()
-G17_IRA.g = GraphTest(G17_IRA)
 
 G17_IRB0 = gen_irbloc(LBL0, [[ExprAff(A, CST1),
                               ExprAff(D, CST2)]])
@@ -503,627 +511,518 @@ G17_IRA.blocs = dict([(irb.label, irb) for irb in [G17_IRB0, G17_IRB1,
                                                    G17_IRB2]])
 
 # Test graph 1
-
-G1_TEST1 = GraphTest(G1_IRA)
-
 G1_TEST1_DN1 = DependencyNode(
-    G1_IRB2.label, A, len(G1_IRB2.irs), next(STEP_COUNTER))
-G1_TEST1_DN2 = DependencyNode(G1_IRB2.label, B, 0, next(STEP_COUNTER))
-G1_TEST1_DN3 = DependencyNode(G1_IRB1.label, C, 0, next(STEP_COUNTER))
-G1_TEST1_DN4 = DependencyNode(G1_IRB0.label, CST1, 0, next(STEP_COUNTER))
-
-G1_TEST1.add_uniq_edge(G1_TEST1_DN4, G1_TEST1_DN3)
-G1_TEST1.add_uniq_edge(G1_TEST1_DN3, G1_TEST1_DN2)
-G1_TEST1.add_uniq_edge(G1_TEST1_DN2, G1_TEST1_DN1)
+    G1_IRB2.label, A, len(G1_IRB2.irs))
 
 G1_INPUT = (set([G1_TEST1_DN1]), set([G1_IRB0.label]))
 
-G1_OUTPUT = {"graph": [G1_TEST1],
-             "emul": [{A: CST1}],
-             "unresolved": [set()],
-             "has_loop": [False]}
-
 # Test graph 2
 
-G2_TEST1 = GraphTest(G2_IRA)
-
 G2_TEST1_DN1 = DependencyNode(
-    G2_IRB2.label, A, len(G2_IRB2.irs), next(STEP_COUNTER))
-G2_TEST1_DN2 = DependencyNode(G2_IRB2.label, B, 0, next(STEP_COUNTER))
-G2_TEST1_DN3 = DependencyNode(G2_IRB2.label, C, 0, next(STEP_COUNTER))
-G2_TEST1_DN4 = DependencyNode(G2_IRB1.label, CST2, 0, next(STEP_COUNTER))
-G2_TEST1_DN5 = DependencyNode(G2_IRB0.label, CST1, 0, next(STEP_COUNTER))
-
-G2_TEST1.add_uniq_edge(G2_TEST1_DN5, G2_TEST1_DN3)
-G2_TEST1.add_uniq_edge(G2_TEST1_DN4, G2_TEST1_DN2)
-G2_TEST1.add_uniq_edge(G2_TEST1_DN2, G2_TEST1_DN1)
-G2_TEST1.add_uniq_edge(G2_TEST1_DN3, G2_TEST1_DN1)
+    G2_IRB2.label, A, len(G2_IRB2.irs))
 
 G2_INPUT = (set([G2_TEST1_DN1]), set([G2_IRB0.label]))
-G2_OUTPUT = {"graph": [G2_TEST1],
-             "emul": [{A: ExprInt32(int(CST1.arg) + int(CST2.arg))}],
-             "unresolved": [set()],
-             "has_loop": [False]}
 
 # Test graph 3
 
-G3_TEST1_0 = GraphTest(G3_IRA)
-G3_TEST1_1 = GraphTest(G3_IRA)
-
 G3_TEST1_0_DN1 = DependencyNode(
-    G3_IRB3.label, A, len(G3_IRB3.irs), next(STEP_COUNTER))
-G3_TEST1_0_DN2 = DependencyNode(G3_IRB3.label, B, 0, next(STEP_COUNTER))
-G3_TEST1_0_DN3 = DependencyNode(G3_IRB3.label, C, 0, next(STEP_COUNTER))
-G3_TEST1_0_DN4 = DependencyNode(G3_IRB2.label, CST3, 0, next(STEP_COUNTER))
-G3_TEST1_0_DN5 = DependencyNode(G3_IRB0.label, CST1, 0, next(STEP_COUNTER))
-
-G3_TEST1_1_DN2 = DependencyNode(G3_IRB3.label, B, 0, next(STEP_COUNTER))
-G3_TEST1_1_DN3 = DependencyNode(G3_IRB3.label, C, 0, next(STEP_COUNTER))
-G3_TEST1_1_DN4 = DependencyNode(G3_IRB1.label, CST2, 0, next(STEP_COUNTER))
-G3_TEST1_1_DN5 = DependencyNode(G3_IRB0.label, CST1, 0, next(STEP_COUNTER))
-
-G3_TEST1_0.add_uniq_edge(G3_TEST1_0_DN5, G3_TEST1_0_DN3)
-G3_TEST1_0.add_uniq_edge(G3_TEST1_0_DN4, G3_TEST1_0_DN2)
-G3_TEST1_0.add_uniq_edge(G3_TEST1_0_DN2, G3_TEST1_0_DN1)
-G3_TEST1_0.add_uniq_edge(G3_TEST1_0_DN3, G3_TEST1_0_DN1)
-
-G3_TEST1_1.add_uniq_edge(G3_TEST1_1_DN5, G3_TEST1_1_DN3)
-G3_TEST1_1.add_uniq_edge(G3_TEST1_1_DN4, G3_TEST1_1_DN2)
-G3_TEST1_1.add_uniq_edge(G3_TEST1_1_DN2, G3_TEST1_0_DN1)
-G3_TEST1_1.add_uniq_edge(G3_TEST1_1_DN3, G3_TEST1_0_DN1)
+    G3_IRB3.label, A, len(G3_IRB3.irs))
 
 G3_INPUT = (set([G3_TEST1_0_DN1]), set([G3_IRB0.label]))
 
-G3_OUTPUT = {"graph": [G3_TEST1_0, G3_TEST1_1],
-             "emul": [{A: ExprInt32(int(CST1.arg) + int(CST3.arg))},
-                      {A: ExprInt32(int(CST1.arg) + int(CST2.arg))}],
-             "unresolved": [set(),
-                            set()],
-             "has_loop": [False, False]}
-
 # Test graph 4
 
-G4_TEST1 = GraphTest(G4_IRA)
-
 G4_TEST1_DN1 = DependencyNode(
-    G4_IRB2.label, A, len(G2_IRB0.irs), next(STEP_COUNTER))
-G4_TEST1_DN2 = DependencyNode(G4_IRB2.label, B, 0, next(STEP_COUNTER))
-G4_TEST1_DN3 = DependencyNode(G4_IRB0.label, B, 0, 10)
-G4_TEST1_DN4 = DependencyNode(G4_IRB0.label, G4_IRA.IRDst, 0, 0)
-
-G4_TEST1.add_uniq_edge(G4_TEST1_DN2, G4_TEST1_DN1)
+    G4_IRB2.label, A, len(G2_IRB0.irs))
 
 G4_INPUT = (set([G4_TEST1_DN1]), set([G4_IRB0.label]))
 
-G4_OUTPUT = {"graph": [G4_TEST1],
-             "emul": [{A: B_INIT}],
-             "unresolved": [set([G4_TEST1_DN3.nostep_repr])],
-             "has_loop": [False]}
-
 # Test graph 5
 
-G5_TEST1_0 = GraphTest(G5_IRA)
-G5_TEST1_1 = GraphTest(G5_IRA)
-
 G5_TEST1_0_DN1 = DependencyNode(
-    G5_IRB2.label, A, len(G5_IRB2.irs), next(STEP_COUNTER))
-G5_TEST1_0_DN2 = DependencyNode(G5_IRB2.label, B, 0, next(STEP_COUNTER))
-G5_TEST1_0_DN3 = DependencyNode(G5_IRB1.label, B, 0, next(STEP_COUNTER))
-G5_TEST1_0_DN4 = DependencyNode(G5_IRB0.label, CST1, 0, next(STEP_COUNTER))
-G5_TEST1_0_DN5 = DependencyNode(G5_IRB1.label, CST2, 0, next(STEP_COUNTER))
-
-G5_TEST1_0.add_uniq_edge(G5_TEST1_0_DN4, G5_TEST1_0_DN3)
-G5_TEST1_0.add_uniq_edge(G5_TEST1_0_DN3, G5_TEST1_0_DN2)
-G5_TEST1_0.add_uniq_edge(G5_TEST1_0_DN5, G5_TEST1_0_DN2)
-G5_TEST1_0.add_uniq_edge(G5_TEST1_0_DN2, G5_TEST1_0_DN1)
-
-G5_TEST1_1_DN3 = DependencyNode(G5_IRB1.label, B, 0, next(STEP_COUNTER))
-G5_TEST1_1_DN5 = DependencyNode(G5_IRB1.label, CST2, 0, next(STEP_COUNTER))
-
-G5_TEST1_1.add_uniq_edge(G5_TEST1_0_DN4, G5_TEST1_1_DN3)
-G5_TEST1_1.add_uniq_edge(G5_TEST1_1_DN3, G5_TEST1_0_DN3)
-G5_TEST1_1.add_uniq_edge(G5_TEST1_1_DN5, G5_TEST1_0_DN3)
-G5_TEST1_1.add_uniq_edge(G5_TEST1_0_DN3, G5_TEST1_0_DN2)
-G5_TEST1_1.add_uniq_edge(G5_TEST1_0_DN5, G5_TEST1_0_DN2)
-G5_TEST1_1.add_uniq_edge(G5_TEST1_0_DN2, G5_TEST1_0_DN1)
+    G5_IRB2.label, A, len(G5_IRB2.irs))
 
 G5_INPUT = (set([G5_TEST1_0_DN1]), set([G5_IRB0.label]))
 
-G5_OUTPUT = {"graph": [G5_TEST1_0, G5_TEST1_1],
-             "emul": [{A: CST35}, {A: CST23}],
-             "unresolved": [set(), set()],
-             "has_loop": [True, False]}
-
 # Test graph 6
 
-G6_TEST1_0 = GraphTest(G6_IRA)
-
 G6_TEST1_0_DN1 = DependencyNode(
-    G6_IRB1.label, A, len(G6_IRB1.irs), next(STEP_COUNTER))
-G6_TEST1_0_DN2 = DependencyNode(G6_IRB1.label, B, 0, next(STEP_COUNTER))
-G6_TEST1_0_DN3 = DependencyNode(G6_IRB0.label, CST1, 0, next(STEP_COUNTER))
-
-
-G6_TEST1_0.add_uniq_edge(G6_TEST1_0_DN3, G6_TEST1_0_DN2)
-G6_TEST1_0.add_uniq_edge(G6_TEST1_0_DN2, G6_TEST1_0_DN1)
+    G6_IRB1.label, A, len(G6_IRB1.irs))
 
 G6_INPUT = (set([G6_TEST1_0_DN1]), set([G6_IRB0.label]))
 
-G6_OUTPUT = {"graph": [G6_TEST1_0],
-             "emul": [{A: CST1}],
-             "unresolved": [set()],
-             "has_loop": [False]}
-
 # Test graph 7
 
-G7_TEST1_0 = GraphTest(G7_IRA)
-
 G7_TEST1_0_DN1 = DependencyNode(
-    G7_IRB2.label, A, len(G7_IRB2.irs), next(STEP_COUNTER))
-G7_TEST1_0_DN2 = DependencyNode(G7_IRB1.label, B, 1, next(STEP_COUNTER))
-G7_TEST1_0_DN3 = DependencyNode(G7_IRB1.label, C, 0, next(STEP_COUNTER))
-G7_TEST1_0_DN4 = DependencyNode(G7_IRB0.label, CST1, 0, next(STEP_COUNTER))
-
-
-G7_TEST1_0.add_uniq_edge(G7_TEST1_0_DN4, G7_TEST1_0_DN3)
-G7_TEST1_0.add_uniq_edge(G7_TEST1_0_DN3, G7_TEST1_0_DN2)
-G7_TEST1_0.add_uniq_edge(G7_TEST1_0_DN2, G7_TEST1_0_DN1)
+    G7_IRB2.label, D, len(G7_IRB2.irs))
 
 G7_INPUT = (set([G7_TEST1_0_DN1]), set([G7_IRB0.label]))
 
-G7_OUTPUT = {"graph": [G7_TEST1_0],
-             "emul": [{A: CST1}],
-             "unresolved": [set()],
-             "has_loop": [False]}
-
 # Test graph 8
 
-G8_TEST1_0 = GraphTest(G8_IRA)
-G8_TEST1_1 = GraphTest(G8_IRA)
-
 G8_TEST1_0_DN1 = DependencyNode(
-    G8_IRB2.label, A, len(G8_IRB2.irs), next(STEP_COUNTER))
-G8_TEST1_0_DN2 = DependencyNode(G8_IRB2.label, B, 0, next(STEP_COUNTER))
-G8_TEST1_0_DN3 = DependencyNode(G8_IRB1.label, C, 0, next(STEP_COUNTER))
-G8_TEST1_0_DN4 = DependencyNode(G8_IRB0.label, CST1, 0, next(STEP_COUNTER))
-
-G8_TEST1_1_DN1 = DependencyNode(
-    G8_IRB2.label, A, len(G8_IRB2.irs), next(STEP_COUNTER))
-G8_TEST1_1_DN2 = DependencyNode(G8_IRB2.label, B, 0, next(STEP_COUNTER))
-G8_TEST1_1_DN3 = DependencyNode(G8_IRB1.label, C, 0, next(STEP_COUNTER))
-G8_TEST1_1_DN4 = DependencyNode(G8_IRB1.label, D, 1, next(STEP_COUNTER))
-
-G8_TEST1_1_DN5 = DependencyNode(G8_IRB0.label, D, 0, next(STEP_COUNTER))
-
-
-G8_TEST1_0.add_uniq_edge(G8_TEST1_0_DN4, G8_TEST1_0_DN3)
-G8_TEST1_0.add_uniq_edge(G8_TEST1_0_DN3, G8_TEST1_0_DN2)
-G8_TEST1_0.add_uniq_edge(G8_TEST1_0_DN2, G8_TEST1_0_DN1)
-
-G8_TEST1_1.add_uniq_edge(G8_TEST1_1_DN4, G8_TEST1_1_DN3)
-G8_TEST1_1.add_uniq_edge(G8_TEST1_1_DN3, G8_TEST1_1_DN2)
-G8_TEST1_1.add_uniq_edge(G8_TEST1_1_DN2, G8_TEST1_1_DN1)
+    G8_IRB2.label, A, len(G8_IRB2.irs))
 
 G8_INPUT = (set([G8_TEST1_0_DN1]), set([G3_IRB0.label]))
 
-G8_OUTPUT = {"graph": [G8_TEST1_0, G8_TEST1_1],
-             "emul": [{A: D_INIT}, {A: CST1}],
-             "unresolved": [set([G8_TEST1_1_DN5.nostep_repr]), set()],
-             "has_loop": [True, False]}
-
 # Test 9: Multi elements
 
-G9_TEST1_0 = GraphTest(G8_IRA)
-G9_TEST1_1 = GraphTest(G8_IRA)
-
 G9_TEST1_0_DN1 = DependencyNode(
-    G8_IRB2.label, A, len(G8_IRB2.irs), next(STEP_COUNTER))
-G9_TEST1_0_DN2 = DependencyNode(G8_IRB2.label, B, 0, next(STEP_COUNTER))
-G9_TEST1_0_DN3 = DependencyNode(G8_IRB1.label, C, 0, next(STEP_COUNTER))
-G9_TEST1_0_DN4 = DependencyNode(G8_IRB0.label, CST1, 0, next(STEP_COUNTER))
+    G8_IRB2.label, A, len(G8_IRB2.irs))
 G9_TEST1_0_DN5 = DependencyNode(
-    G8_IRB2.label, C, len(G8_IRB2.irs), next(STEP_COUNTER))
-G9_TEST1_0_DN6 = DependencyNode(G8_IRB1.label, D, 1, next(STEP_COUNTER))
-
-G9_TEST1_1_DN1 = DependencyNode(
-    G8_IRB2.label, A, len(G8_IRB2.irs), next(STEP_COUNTER))
-G9_TEST1_1_DN2 = DependencyNode(G8_IRB2.label, B, 0, next(STEP_COUNTER))
-G9_TEST1_1_DN3 = DependencyNode(G8_IRB1.label, C, 0, next(STEP_COUNTER))
-G9_TEST1_1_DN4 = DependencyNode(G8_IRB1.label, D, 1, next(STEP_COUNTER))
-G9_TEST1_1_DN5 = DependencyNode(
-    G8_IRB2.label, C, len(G8_IRB2.irs), next(STEP_COUNTER))
-G9_TEST1_1_DN6 = DependencyNode(G8_IRB1.label, D, 1, next(STEP_COUNTER))
-
-
-G9_TEST1_0.add_uniq_edge(G9_TEST1_0_DN4, G9_TEST1_0_DN3)
-G9_TEST1_0.add_uniq_edge(G9_TEST1_0_DN3, G9_TEST1_0_DN2)
-G9_TEST1_0.add_uniq_edge(G9_TEST1_0_DN2, G9_TEST1_0_DN1)
-G9_TEST1_0.add_uniq_edge(G9_TEST1_0_DN6, G9_TEST1_0_DN5)
-
-G9_TEST1_1.add_uniq_edge(G9_TEST1_1_DN6, G9_TEST1_1_DN5)
-G9_TEST1_1.add_uniq_edge(G9_TEST1_1_DN4, G9_TEST1_1_DN3)
-G9_TEST1_1.add_uniq_edge(G9_TEST1_1_DN3, G9_TEST1_1_DN2)
-G9_TEST1_1.add_uniq_edge(G9_TEST1_1_DN2, G9_TEST1_1_DN1)
+    G8_IRB2.label, C, len(G8_IRB2.irs))
 
 G9_INPUT = (set([G9_TEST1_0_DN1, G9_TEST1_0_DN5]), set([G8_IRB0.label]))
 
-G9_OUTPUT = {"graph": [G9_TEST1_1, G9_TEST1_0],
-             "emul": [{A: D_INIT, C: D_INIT},
-                      {A: CST1, C: D_INIT}],
-             "unresolved": [set([G8_TEST1_1_DN5.nostep_repr]),
-                            set([G8_TEST1_1_DN5.nostep_repr])],
-             "has_loop": [True, False]}
-
 # Test 10: loop at beginning
 
-G10_TEST1_0 = GraphTest(G10_IRA)
-G10_TEST1_1 = GraphTest(G10_IRA)
-
 G10_TEST1_0_DN1 = DependencyNode(
-    G10_IRB2.label, A, len(G10_IRB2.irs), next(STEP_COUNTER))
-G10_TEST1_0_DN2 = DependencyNode(G10_IRB2.label, B, 0, next(STEP_COUNTER))
-G10_TEST1_0_DN3 = DependencyNode(G10_IRB1.label, B, 0, next(STEP_COUNTER))
-G10_TEST1_0_DN4 = DependencyNode(G10_IRB1.label, CST2, 0, next(STEP_COUNTER))
-
-G10_TEST1_0.add_uniq_edge(G10_TEST1_0_DN3, G10_TEST1_0_DN2)
-G10_TEST1_0.add_uniq_edge(G10_TEST1_0_DN4, G10_TEST1_0_DN2)
-G10_TEST1_0.add_uniq_edge(G10_TEST1_0_DN2, G10_TEST1_0_DN1)
-
-G10_TEST1_1_DN3 = DependencyNode(G10_IRB1.label, B, 0, next(STEP_COUNTER))
-G10_TEST1_1_DN4 = DependencyNode(G10_IRB1.label, CST2, 0, next(STEP_COUNTER))
-
-G10_TEST1_1.add_uniq_edge(G10_TEST1_1_DN3, G10_TEST1_0_DN3)
-G10_TEST1_1.add_uniq_edge(G10_TEST1_1_DN4, G10_TEST1_0_DN3)
-G10_TEST1_1.add_uniq_edge(G10_TEST1_0_DN3, G10_TEST1_0_DN2)
-G10_TEST1_1.add_uniq_edge(G10_TEST1_0_DN4, G10_TEST1_0_DN2)
-G10_TEST1_1.add_uniq_edge(G10_TEST1_0_DN2, G10_TEST1_0_DN1)
+    G10_IRB2.label, A, len(G10_IRB2.irs))
 
 G10_INPUT = (set([G10_TEST1_0_DN1]), set([G10_IRB1.label]))
 
-G10_OUTPUT = {"graph": [G10_TEST1_0, G10_TEST1_1],
-              "emul": [{A: B_INIT + CST24}, {A: B_INIT + CST2}],
-              "unresolved": [set([G10_TEST1_0_DN3.nostep_repr]),
-                             set([G10_TEST1_0_DN3.nostep_repr])],
-              "has_loop": [True, False]}
-
 
 # Test 11: no dual bloc emulation
-G11_TEST1 = GraphTest(G11_IRA)
 
 G11_TEST1_DN1 = DependencyNode(
-    G11_IRB2.label, A, len(G11_IRB2.irs), next(STEP_COUNTER))
-G11_TEST1_DN2 = DependencyNode(G11_IRB2.label, A, 0, next(STEP_COUNTER))
-G11_TEST1_DN3 = DependencyNode(G11_IRB2.label, B, 0, next(STEP_COUNTER))
-G11_TEST1_DN4 = DependencyNode(G11_IRB1.label, A, 0, next(STEP_COUNTER))
-G11_TEST1_DN5 = DependencyNode(G11_IRB1.label, B, 0, next(STEP_COUNTER))
-G11_TEST1_DN6 = DependencyNode(G11_IRB0.label, CST1, 0, next(STEP_COUNTER))
-G11_TEST1_DN7 = DependencyNode(G11_IRB0.label, CST2, 0, next(STEP_COUNTER))
-
-G11_TEST1.add_uniq_edge(G11_TEST1_DN7, G11_TEST1_DN5)
-G11_TEST1.add_uniq_edge(G11_TEST1_DN6, G11_TEST1_DN4)
-G11_TEST1.add_uniq_edge(G11_TEST1_DN5, G11_TEST1_DN2)
-G11_TEST1.add_uniq_edge(G11_TEST1_DN4, G11_TEST1_DN3)
-G11_TEST1.add_uniq_edge(G11_TEST1_DN3, G11_TEST1_DN1)
-G11_TEST1.add_uniq_edge(G11_TEST1_DN2, G11_TEST1_DN1)
+    G11_IRB2.label, A, len(G11_IRB2.irs))
 
 G11_INPUT = (set([G11_TEST1_DN1]), set([G11_IRB0.label]))
 
-G11_OUTPUT = {"graph": [G11_TEST1],
-              "emul": [{A: ExprInt32(0x1)}],
-              "unresolved": [set()],
-              "has_loop": [False]}
 # Test graph 12
 
-G12_TEST1_0 = GraphTest(G12_IRA)
-G12_TEST1_1 = GraphTest(G12_IRA)
-
-G12_TEST1_0_DN1 = DependencyNode(G12_IRB2.label, B, 1, next(STEP_COUNTER))
-G12_TEST1_0_DN2 = DependencyNode(G12_IRB2.label, A, 0, next(STEP_COUNTER))
-G12_TEST1_0_DN3 = DependencyNode(G12_IRB1.label, B, 0, next(STEP_COUNTER))
-G12_TEST1_0_DN4 = DependencyNode(G12_IRB0.label, CST1, 0, next(STEP_COUNTER))
-
-
-G12_TEST1_0.add_uniq_edge(G12_TEST1_0_DN2, G12_TEST1_0_DN1)
-G12_TEST1_0.add_uniq_edge(G12_TEST1_0_DN3, G12_TEST1_0_DN2)
-G12_TEST1_0.add_uniq_edge(G12_TEST1_0_DN4, G12_TEST1_0_DN3)
-
-G12_TEST1_1_DN3 = DependencyNode(G12_IRB1.label, B, 1, next(STEP_COUNTER))
-G12_TEST1_1_DN5 = DependencyNode(G12_IRB1.label, CST2, 1, next(STEP_COUNTER))
-
-G12_TEST1_1.add_uniq_edge(G12_TEST1_0_DN4, G12_TEST1_1_DN3)
-G12_TEST1_1.add_uniq_edge(G12_TEST1_1_DN5, G12_TEST1_0_DN3)
-G12_TEST1_1.add_uniq_edge(G12_TEST1_1_DN3, G12_TEST1_0_DN3)
-G12_TEST1_1.add_uniq_edge(G12_TEST1_0_DN3, G12_TEST1_0_DN2)
-G12_TEST1_1.add_uniq_edge(G12_TEST1_0_DN2, G12_TEST1_0_DN1)
-
+G12_TEST1_0_DN1 = DependencyNode(G12_IRB2.label, B, 1)
 
 G12_INPUT = (set([G12_TEST1_0_DN1]), set([]))
 
-G12_OUTPUT = {"graph": [G12_TEST1_0, G12_TEST1_1],
-              "emul": [{B: CST23}, {B: CST1}],
-              "unresolved": [set(), set()],
-              "has_loop": [True, False]}
-
 # Test graph 13:
 
 # All filters
-G13_TEST1_0 = GraphTest(G13_IRA)
-G13_TEST1_1 = GraphTest(G13_IRA)
-
-G13_TEST1_0_DN1 = DependencyNode(G13_IRB0.label, CST1, 0, next(STEP_COUNTER))
-G13_TEST1_0_DN2 = DependencyNode(G13_IRB1.label, A, 0, next(STEP_COUNTER))
-G13_TEST1_0_DN3 = DependencyNode(G13_IRB3.label, C, 0, next(STEP_COUNTER))
-G13_TEST1_0_DN4 = DependencyNode(G13_IRB3.label, R, 1, next(STEP_COUNTER))
 
-G13_TEST1_0.add_uniq_edge(G13_TEST1_0_DN3, G13_TEST1_0_DN4)
-G13_TEST1_0.add_uniq_edge(G13_TEST1_0_DN2, G13_TEST1_0_DN3)
-G13_TEST1_0.add_uniq_edge(G13_TEST1_0_DN1, G13_TEST1_0_DN2)
-
-G13_TEST1_1_DN5 = DependencyNode(G13_IRB2.label, A, 0, next(STEP_COUNTER))
-G13_TEST1_1_DN6 = DependencyNode(G13_IRB2.label, CST3, 0, next(STEP_COUNTER))
-G13_TEST1_1_DN7 = DependencyNode(G13_IRB2.label, B, 1, next(STEP_COUNTER))
-G13_TEST1_1_DN8 = DependencyNode(G13_IRB2.label, CST3, 1, next(STEP_COUNTER))
-
-G13_TEST1_1.add_uniq_edge(G13_TEST1_0_DN3, G13_TEST1_0_DN4)
-G13_TEST1_1.add_uniq_edge(G13_TEST1_0_DN2, G13_TEST1_0_DN3)
-
-G13_TEST1_1.add_uniq_edge(G13_TEST1_1_DN7, G13_TEST1_0_DN2)
-G13_TEST1_1.add_uniq_edge(G13_TEST1_1_DN8, G13_TEST1_0_DN2)
-G13_TEST1_1.add_uniq_edge(G13_TEST1_1_DN5, G13_TEST1_1_DN7)
-G13_TEST1_1.add_uniq_edge(G13_TEST1_1_DN6, G13_TEST1_1_DN7)
-
-G13_TEST1_1.add_uniq_edge(G13_TEST1_0_DN1, G13_TEST1_1_DN5)
-
-# Implicit dependencies
-
-G13_TEST2_0 = GraphTest(G13_IRA)
-G13_TEST2_1 = GraphTest(G13_IRA)
-
-G13_TEST2_0_DN1 = DependencyNode(G13_IRB0.label, CST1, 0, next(STEP_COUNTER))
-G13_TEST2_0_DN2 = DependencyNode(G13_IRB1.label, A, 0, next(STEP_COUNTER))
-G13_TEST2_0_DN3 = DependencyNode(G13_IRB3.label, C, 0, next(STEP_COUNTER))
-G13_TEST2_0_DN4 = DependencyNode(G13_IRB3.label, R, 1, next(STEP_COUNTER))
-G13_TEST2_0_DN5 = DependencyNode(G13_IRB1.label, R, 1, next(STEP_COUNTER))
-
-G13_TEST2_0.add_uniq_edge(G13_TEST2_0_DN3, G13_TEST2_0_DN4)
-G13_TEST2_0.add_uniq_edge(G13_TEST2_0_DN2, G13_TEST2_0_DN3)
-G13_TEST2_0.add_uniq_edge(G13_TEST2_0_DN1, G13_TEST2_0_DN2)
-G13_TEST2_0.add_uniq_edge(G13_TEST2_0_DN5, G13_TEST2_0_DN3)
-
-G13_TEST2_1_DN5 = DependencyNode(G13_IRB2.label, A, 0, next(STEP_COUNTER))
-G13_TEST2_1_DN6 = DependencyNode(G13_IRB2.label, CST3, 0, next(STEP_COUNTER))
-G13_TEST2_1_DN7 = DependencyNode(G13_IRB2.label, B, 1, next(STEP_COUNTER))
-G13_TEST2_1_DN8 = DependencyNode(G13_IRB2.label, CST3, 1, next(STEP_COUNTER))
-G13_TEST2_1_DN9 = DependencyNode(G13_IRB1.label, R, 1, next(STEP_COUNTER))
-
-G13_TEST2_1.add_uniq_edge(G13_TEST2_0_DN3, G13_TEST2_0_DN4)
-G13_TEST2_1.add_uniq_edge(G13_TEST2_0_DN2, G13_TEST2_0_DN3)
-G13_TEST2_1.add_uniq_edge(G13_TEST2_0_DN5, G13_TEST2_0_DN3)
-
-G13_TEST2_1.add_uniq_edge(G13_TEST2_1_DN7, G13_TEST2_0_DN2)
-G13_TEST2_1.add_uniq_edge(G13_TEST2_1_DN8, G13_TEST2_0_DN2)
-G13_TEST2_1.add_uniq_edge(G13_TEST2_1_DN5, G13_TEST2_1_DN7)
-G13_TEST2_1.add_uniq_edge(G13_TEST2_1_DN6, G13_TEST2_1_DN7)
-
-G13_TEST2_1.add_uniq_edge(G13_TEST2_0_DN1, G13_TEST2_1_DN5)
-G13_TEST2_1.add_uniq_edge(G13_TEST2_1_DN9, G13_TEST2_0_DN5)
-G13_TEST2_1.add_uniq_edge(G13_TEST2_1_DN9, G13_TEST2_1_DN5)
-
-
-DN13_UR_R = DependencyNode(G13_IRB0.label, R, 0, 0).nostep_repr
+G13_TEST1_0_DN4 = DependencyNode(G13_IRB3.label, R, 1)
 
 G13_INPUT = (set([G13_TEST1_0_DN4]), set([]))
 
-G13_OUTPUT = {"graph": [G13_TEST1_0, G13_TEST1_1],
-              "graph_implicit": [G13_TEST2_0, G13_TEST2_1],
-              "emul": [{R: CST37}, {R: CST1}],
-              "unresolved": [set(), set()],
-              "unresolved_implicit": [set([DN13_UR_R]), set([DN13_UR_R])],
-              "has_loop": [True, False]}
-
 # Test graph 14
 
 # All filters
-G14_TEST1_0 = GraphTest(G14_IRA)
-G14_TEST1_1 = GraphTest(G14_IRA)
-
-G14_TEST1_0_DN1 = DependencyNode(G14_IRB3.label, R, 1, next(STEP_COUNTER))
-G14_TEST1_0_DN2 = DependencyNode(G14_IRB3.label, D, 0, next(STEP_COUNTER))
-G14_TEST1_0_DN3 = DependencyNode(G14_IRB3.label, B, 0, next(STEP_COUNTER))
-G14_TEST1_0_DN4 = DependencyNode(G14_IRB1.label, A, 0, next(STEP_COUNTER))
-G14_TEST1_0_DN5 = DependencyNode(G14_IRB0.label, CST1, 0, next(STEP_COUNTER))
-
-G14_TEST1_0.add_uniq_edge(G14_TEST1_0_DN2, G14_TEST1_0_DN1)
-G14_TEST1_0.add_uniq_edge(G14_TEST1_0_DN3, G14_TEST1_0_DN1)
-
-G14_TEST1_0.add_uniq_edge(G14_TEST1_0_DN4, G14_TEST1_0_DN3)
-G14_TEST1_0.add_uniq_edge(G14_TEST1_0_DN5, G14_TEST1_0_DN4)
-
-G14_TEST1_1_DN5 = DependencyNode(G14_IRB2.label, D, 1, next(STEP_COUNTER))
-G14_TEST1_1_DN6 = DependencyNode(G14_IRB2.label, CST1, 1, next(STEP_COUNTER))
-G14_TEST1_1_DN7 = DependencyNode(G14_IRB2.label, A, 0, next(STEP_COUNTER))
-#G14_TEST1_1_DN8 = DependencyNode(
-#    G14_IRB2.label, A, 0, next(STEP_COUNTER) + 1)
-#G14_TEST1_1_DN9 = DependencyNode(
-#    G14_IRB0.label, CST1, 0, next(STEP_COUNTER) + 1)
-
-# 1 loop
-G14_TEST1_1.add_uniq_edge(G14_TEST1_0_DN2, G14_TEST1_0_DN1)
-G14_TEST1_1.add_uniq_edge(G14_TEST1_0_DN3, G14_TEST1_0_DN1)
-
-G14_TEST1_1.add_uniq_edge(G14_TEST1_0_DN4, G14_TEST1_0_DN3)
-G14_TEST1_1.add_uniq_edge(G14_TEST1_1_DN5, G14_TEST1_0_DN4)
-G14_TEST1_1.add_uniq_edge(G14_TEST1_1_DN6, G14_TEST1_0_DN4)
-G14_TEST1_1.add_uniq_edge(G14_TEST1_1_DN7, G14_TEST1_1_DN5)
-G14_TEST1_1.add_uniq_edge(G14_TEST1_0_DN5, G14_TEST1_1_DN7)
-
-G14_TEST1_1.add_uniq_edge(G14_TEST1_1_DN7, G14_TEST1_0_DN2)
-# G14_TEST1_1.add_uniq_edge(G14_TEST1_1_DN5, G14_TEST1_1_DN8)
-
-# Implicit dependencies
-G14_TEST2_0 = GraphTest(G14_IRA)
-G14_TEST2_1 = GraphTest(G14_IRA)
-
-G14_TEST2_0_DN6 = DependencyNode(G14_IRB1.label, C, 1, next(STEP_COUNTER))
-
-G14_TEST2_0.add_uniq_edge(G14_TEST1_0_DN2, G14_TEST1_0_DN1)
-G14_TEST2_0.add_uniq_edge(G14_TEST1_0_DN3, G14_TEST1_0_DN1)
-
-G14_TEST2_0.add_uniq_edge(G14_TEST1_0_DN4, G14_TEST1_0_DN3)
-G14_TEST2_0.add_uniq_edge(G14_TEST1_0_DN5, G14_TEST1_0_DN4)
-
-G14_TEST2_0.add_uniq_edge(G14_TEST2_0_DN6, G14_TEST1_0_DN3)
-G14_TEST2_0.add_uniq_edge(G14_TEST2_0_DN6, G14_TEST1_0_DN2)
-
-# 1 loop
-G14_TEST2_0_DN7 = DependencyNode(G14_IRB1.label, C, 1, next(STEP_COUNTER))
-
-G14_TEST2_1.add_uniq_edge(G14_TEST1_0_DN2, G14_TEST1_0_DN1)
-G14_TEST2_1.add_uniq_edge(G14_TEST1_0_DN3, G14_TEST1_0_DN1)
-
-G14_TEST2_1.add_uniq_edge(G14_TEST1_0_DN4, G14_TEST1_0_DN3)
-G14_TEST2_1.add_uniq_edge(G14_TEST1_1_DN5, G14_TEST1_0_DN4)
-G14_TEST2_1.add_uniq_edge(G14_TEST1_1_DN6, G14_TEST1_0_DN4)
-G14_TEST2_1.add_uniq_edge(G14_TEST1_1_DN7, G14_TEST1_1_DN5)
-G14_TEST2_1.add_uniq_edge(G14_TEST1_0_DN5, G14_TEST1_1_DN7)
-
-G14_TEST2_1.add_uniq_edge(G14_TEST1_1_DN7, G14_TEST1_0_DN2)
-
-G14_TEST2_1.add_uniq_edge(G14_TEST2_0_DN6, G14_TEST1_0_DN3)
-G14_TEST2_1.add_uniq_edge(G14_TEST2_0_DN6, G14_TEST1_0_DN2)
 
-DN14_UR_D = DependencyNode(G14_IRB0.label, D, 0, 0).nostep_repr
-DN14_UR_C = DependencyNode(G14_IRB0.label, C, 0, 0).nostep_repr
+G14_TEST1_0_DN1 = DependencyNode(G14_IRB3.label, R, 1)
 
 G14_INPUT = (set([G14_TEST1_0_DN1]), set([]))
 
-G14_OUTPUT = {"graph": [G14_TEST1_0, G14_TEST1_1],
-              "graph_implicit": [G14_TEST2_0, G14_TEST2_1],
-              "emul": [{R: CST33}, {R: D_INIT + CST1}],
-              "unresolved": [set(), set([DN14_UR_D])],
-              "unresolved_implicit": [set([DN14_UR_C]),
-                                      set([DN14_UR_D, DN14_UR_C])],
-              "has_loop": [True, False]}
-
 # Test graph 15
 
-G15_TEST1_0 = GraphTest(G15_IRA)
-G15_TEST1_1 = GraphTest(G15_IRA)
-
-G15_TEST1_0_DN1 = DependencyNode(G15_IRB2.label, R, 1, next(STEP_COUNTER))
-G15_TEST1_0_DN2 = DependencyNode(G15_IRB2.label, B, 0, next(STEP_COUNTER))
-G15_TEST1_0_DN3 = DependencyNode(G15_IRB1.label, C, 2, next(STEP_COUNTER))
-G15_TEST1_0_DN4 = DependencyNode(G15_IRB1.label, D, 1, next(STEP_COUNTER))
-G15_TEST1_0_DN5 = DependencyNode(G15_IRB1.label, B, 0, next(STEP_COUNTER))
-G15_TEST1_0_DN6 = DependencyNode(G15_IRB1.label, A, 0, next(STEP_COUNTER))
-G15_TEST1_0_DN7 = DependencyNode(G15_IRB0.label, CST1, 0, next(STEP_COUNTER))
-G15_TEST1_0_DN8 = DependencyNode(G15_IRB1.label, C, 2, next(STEP_COUNTER))
-G15_TEST1_0_DN9 = DependencyNode(G15_IRB1.label, D, 1, next(STEP_COUNTER))
-G15_TEST1_0_DN10 = DependencyNode(G15_IRB1.label, B, 0, next(STEP_COUNTER))
-
-
-# 1 loop
-G15_TEST1_0.add_uniq_edge(G15_TEST1_0_DN2, G15_TEST1_0_DN1)
-G15_TEST1_0.add_uniq_edge(G15_TEST1_0_DN3, G15_TEST1_0_DN2)
-G15_TEST1_0.add_uniq_edge(G15_TEST1_0_DN4, G15_TEST1_0_DN3)
-G15_TEST1_0.add_uniq_edge(G15_TEST1_0_DN5, G15_TEST1_0_DN4)
-G15_TEST1_0.add_uniq_edge(G15_TEST1_0_DN6, G15_TEST1_0_DN4)
-
-G15_TEST1_0.add_uniq_edge(G15_TEST1_0_DN7, G15_TEST1_0_DN6)
-
-G15_TEST1_0.add_uniq_edge(G15_TEST1_0_DN8, G15_TEST1_0_DN5)
-G15_TEST1_0.add_uniq_edge(G15_TEST1_0_DN9, G15_TEST1_0_DN8)
-G15_TEST1_0.add_uniq_edge(G15_TEST1_0_DN6, G15_TEST1_0_DN9)
-G15_TEST1_0.add_uniq_edge(G15_TEST1_0_DN10, G15_TEST1_0_DN9)
-
-# 0 loop
-
-G15_TEST1_1.add_uniq_edge(G15_TEST1_0_DN2, G15_TEST1_0_DN1)
-G15_TEST1_1.add_uniq_edge(G15_TEST1_0_DN3, G15_TEST1_0_DN2)
-G15_TEST1_1.add_uniq_edge(G15_TEST1_0_DN4, G15_TEST1_0_DN3)
-G15_TEST1_1.add_uniq_edge(G15_TEST1_0_DN5, G15_TEST1_0_DN4)
-G15_TEST1_1.add_uniq_edge(G15_TEST1_0_DN6, G15_TEST1_0_DN4)
-G15_TEST1_1.add_uniq_edge(G15_TEST1_0_DN7, G15_TEST1_0_DN6)
+G15_TEST1_0_DN1 = DependencyNode(G15_IRB2.label, R, 1)
 
 G15_INPUT = (set([G15_TEST1_0_DN1]), set([]))
 
-DN15_UNRESOLVED = DependencyNode(G15_IRB0.label, B, 0, 0).nostep_repr
-G15_OUTPUT = {"graph": [G15_TEST1_0, G15_TEST1_1],
-              "emul": [{R: B_INIT + CST22}, {R: B_INIT + CST1}],
-              "unresolved": [set([DN15_UNRESOLVED]), set([DN15_UNRESOLVED])],
-              "has_loop": [True, False]}
-
 # Test graph 16
-G16_TEST1_0_DN1 = DependencyNode(G16_IRB5.label, R, 1, next(STEP_COUNTER))
-G16_TEST1_0_DN2 = DependencyNode(G16_IRB5.label, A, 0, next(STEP_COUNTER))
-G16_TEST1_0_DN3 = DependencyNode(G16_IRB0.label, CST1, 0, next(STEP_COUNTER))
+G16_TEST1_0_DN1 = DependencyNode(G16_IRB5.label, R, 1)
 
-G16_TEST1_0 = GraphTest(G16_IRA)
+G16_INPUT = (set([G16_TEST1_0_DN1]), set([]))
 
-G16_TEST1_0.add_uniq_edge(G16_TEST1_0_DN3, G16_TEST1_0_DN2)
-G16_TEST1_0.add_uniq_edge(G16_TEST1_0_DN2, G16_TEST1_0_DN1)
+# Test graph 17
 
-G16_INPUT = (set([G16_TEST1_0_DN1]), set([]))
+G17_TEST1_DN1 = DependencyNode(G17_IRB2.label, A, 1)
 
-G16_OUTPUT = {"graph": [G16_TEST1_0],
-              "emul": [{R: CST1}],
-              "unresolved": [set()],
-              "has_loop": [False]}
+G17_INPUT = (set([G17_TEST1_DN1]), set([]))
 
-# Test graph 17
 
-G17_TEST1 = GraphTest(G17_IRA)
+FAILED = set()
 
-G17_TEST1_DN1 = DependencyNode(G17_IRB2.label, A, 1, next(STEP_COUNTER))
-G17_TEST1_DN2 = DependencyNode(G17_IRB2.label, B, 0, next(STEP_COUNTER))
-G17_TEST1_DN3 = DependencyNode(G17_IRB2.label, A, 0, next(STEP_COUNTER))
-G17_TEST1_DN4 = DependencyNode(G17_IRB1.label, D, 0, next(STEP_COUNTER))
-G17_TEST1_DN5 = DependencyNode(G17_IRB0.label, CST2, 0, next(STEP_COUNTER))
 
-G17_TEST1.add_uniq_edge(G17_TEST1_DN2, G17_TEST1_DN1)
-G17_TEST1.add_uniq_edge(G17_TEST1_DN3, G17_TEST1_DN1)
-G17_TEST1.add_uniq_edge(G17_TEST1_DN4, G17_TEST1_DN2)
-G17_TEST1.add_uniq_edge(G17_TEST1_DN4, G17_TEST1_DN3)
-G17_TEST1.add_uniq_edge(G17_TEST1_DN5, G17_TEST1_DN4)
+def flatNode(node):
+    if isinstance(node, DependencyNode):
+        if isinstance(node.element, ExprId):
+            element = node.element.name
+        elif isinstance(node.element, ExprInt):
+            element = int(node.element.arg.arg)
+        else:
+            raise RuntimeError("Unsupported type '%s'" % type(node.element))
+        return (node.label.name,
+                element,
+                node.line_nb)
+    else:
+        return str(node)
+
+
+def flatGraph(graph):
+    out_nodes, out_edges = set(), set()
+    for node in graph.nodes():
+        out_nodes.add(flatNode(node))
+    for nodeA, nodeB in graph.edges():
+        out_edges.add((flatNode(nodeA), flatNode(nodeB)))
+    out = (tuple(sorted(list(out_nodes))),
+           tuple(sorted(list(out_edges))))
+    return out
+
+
+def unflatGraph(flat_graph):
+    graph = DiGraph()
+    nodes, edges = flat_graph
+    for node in nodes:
+        graph.add_node(node)
+    for nodeA, nodeB in edges:
+        graph.add_edge(nodeA, nodeB)
+    return graph
+
+
+def get_node_noidx(node):
+    if isinstance(node, tuple):
+        return (node[0], node[1], node[2])
+    else:
+        return node
+
+
+def test_result(graphA, graphB, leaves):
+    """
+    Test graph equality without using node index
+    """
 
-G17_INPUT = (set([G17_TEST1_DN1]), set([]))
+    todo = set((leaf, leaf) for leaf in leaves)
+    done = set()
+    while todo:
+        nodeA, nodeB = todo.pop()
+        if (nodeA, nodeB) in done:
+            continue
+        done.add((nodeA, nodeB))
 
-G17_OUTPUT = {"graph": [G17_TEST1],
-              "emul": [{A: CST0}],
-              "unresolved": [set()],
-              "has_loop": [False]}
+        if get_node_noidx(nodeA) != get_node_noidx(nodeB):
+            return False
+        if nodeA not in graphA.nodes():
+            return False
+        if nodeB not in graphB.nodes():
+            return False
 
+        parentsA = graphA.predecessors(nodeA)
+        parentsB = graphB.predecessors(nodeB)
+        if len(parentsA) != len(parentsB):
+            return False
 
+        parentsA_noidx, parentsB_noidx = {}, {}
+        for parents, parents_noidx in ((parentsA, parentsA_noidx),
+                                       (parentsB, parentsB_noidx)):
+            for node in parents:
+                node_noidx = get_node_noidx(node)
+                assert(node_noidx not in parents_noidx)
+                parents_noidx[node_noidx] = node
 
-FAILED = set()
+        if set(parentsA_noidx.keys()) != set(parentsB_noidx.keys()):
+            return False
 
+        for node_noidx, nodeA in parentsA_noidx.iteritems():
+            nodeB = parentsB_noidx[node_noidx]
+            todo.add((nodeA, nodeB))
 
+    return True
 
+
+def match_results(resultsA, resultsB, nodes):
+    """
+    Match computed list of graph against test cases
+    """
+    out = []
+
+    if len(resultsA) != len(resultsB):
+        return False
+
+    for resultA in resultsA:
+        nodes = resultA.leaves()
+        for resultB in resultsB:
+            if test_result(resultA, resultB, nodes):
+                out.append((resultA, resultB))
+    return len(out) == len(resultsB)
+
+
+def get_flat_init_depnodes(depnodes):
+    out = []
+    for node in depnodes:
+        out.append((node.label.name,
+                    node.element.name,
+                    node.line_nb,
+                    0))
+    return out
+
+# TESTS
+flat_test_results = [[((('lbl0', 1, 0), ('lbl0', 'c', 0), ('lbl1', 'b', 0), ('lbl2', 'a', 0)),
+                       ((('lbl0', 1, 0), ('lbl0', 'c', 0)),
+                        (('lbl0', 'c', 0), ('lbl1', 'b', 0)),
+                        (('lbl1', 'b', 0), ('lbl2', 'a', 0))))],
+                     [((('lbl0', 1, 0),
+                        ('lbl0', 'c', 0),
+                        ('lbl1', 2, 0),
+                        ('lbl1', 'b', 0),
+                        ('lbl2', 'a', 0)),
+                       ((('lbl0', 1, 0), ('lbl0', 'c', 0)),
+                        (('lbl0', 'c', 0), ('lbl2', 'a', 0)),
+                        (('lbl1', 2, 0), ('lbl1', 'b', 0)),
+                        (('lbl1', 'b', 0), ('lbl2', 'a', 0))))],
+                     [((('lbl0', 1, 0),
+                        ('lbl0', 'c', 0),
+                        ('lbl1', 2, 0),
+                        ('lbl1', 'b', 0),
+                        ('lbl3', 'a', 0)),
+                       ((('lbl0', 1, 0), ('lbl0', 'c', 0)),
+                        (('lbl0', 'c', 0), ('lbl3', 'a', 0)),
+                        (('lbl1', 2, 0), ('lbl1', 'b', 0)),
+                        (('lbl1', 'b', 0), ('lbl3', 'a', 0)))),
+                      ((('lbl0', 1, 0),
+                        ('lbl0', 'c', 0),
+                        ('lbl2', 3, 0),
+                        ('lbl2', 'b', 0),
+                        ('lbl3', 'a', 0)),
+                       ((('lbl0', 1, 0), ('lbl0', 'c', 0)),
+                        (('lbl0', 'c', 0), ('lbl3', 'a', 0)),
+                        (('lbl2', 3, 0), ('lbl2', 'b', 0)),
+                        (('lbl2', 'b', 0), ('lbl3', 'a', 0))))],
+                     [(('b', ('lbl2', 'a', 0)), (('b', ('lbl2', 'a', 0)),))],
+                     [((('lbl0', 1, 0),
+                        ('lbl0', 'b', 0),
+                        ('lbl1', 2, 0),
+                        ('lbl1', 'b', 0),
+                        ('lbl2', 'a', 0)),
+                       ((('lbl0', 1, 0), ('lbl0', 'b', 0)),
+                        (('lbl0', 'b', 0), ('lbl1', 'b', 0)),
+                        (('lbl1', 2, 0), ('lbl1', 'b', 0)),
+                        (('lbl1', 'b', 0), ('lbl1', 'b', 0)),
+                        (('lbl1', 'b', 0), ('lbl2', 'a', 0)))),
+                      ((('lbl0', 1, 0),
+                        ('lbl0', 'b', 0),
+                        ('lbl1', 2, 0),
+                        ('lbl1', 'b', 0),
+                        ('lbl2', 'a', 0)),
+                       ((('lbl0', 1, 0), ('lbl0', 'b', 0)),
+                        (('lbl0', 'b', 0), ('lbl1', 'b', 0)),
+                        (('lbl1', 2, 0), ('lbl1', 'b', 0)),
+                        (('lbl1', 'b', 0), ('lbl2', 'a', 0))))],
+                     [((('lbl0', 1, 0), ('lbl0', 'b', 0), ('lbl1', 'a', 0)),
+                       ((('lbl0', 1, 0), ('lbl0', 'b', 0)),
+                        (('lbl0', 'b', 0), ('lbl1', 'a', 0))))],
+                     [((('lbl0', 1, 0),
+                        ('lbl0', 'c', 0),
+                        ('lbl1', 'a', 1),
+                        ('lbl1', 'b', 0),
+                        ('lbl2', 'd', 0)),
+                       ((('lbl0', 1, 0), ('lbl0', 'c', 0)),
+                        (('lbl0', 'c', 0), ('lbl1', 'b', 0)),
+                        (('lbl1', 'a', 1), ('lbl2', 'd', 0)),
+                        (('lbl1', 'b', 0), ('lbl1', 'a', 1))))],
+                     [(('d', ('lbl1', 'b', 0), ('lbl1', 'c', 1), ('lbl2', 'a', 0)),
+                       (('d', ('lbl1', 'c', 1)),
+                        (('lbl1', 'b', 0), ('lbl2', 'a', 0)),
+                        (('lbl1', 'c', 1), ('lbl1', 'b', 0)))),
+                      ((('lbl0', 1, 0), ('lbl0', 'c', 0), ('lbl1', 'b', 0), ('lbl2', 'a', 0)),
+                       ((('lbl0', 1, 0), ('lbl0', 'c', 0)),
+                        (('lbl0', 'c', 0), ('lbl1', 'b', 0)),
+                        (('lbl1', 'b', 0), ('lbl2', 'a', 0))))],
+                     [(('d',
+                        ('lbl0', 1, 0),
+                        ('lbl0', 'c', 0),
+                        ('lbl1', 'b', 0),
+                        ('lbl1', 'c', 1),
+                        ('lbl2', 'a', 0)),
+                       (('d', ('lbl1', 'c', 1)),
+                        (('lbl0', 1, 0), ('lbl0', 'c', 0)),
+                        (('lbl0', 'c', 0), ('lbl1', 'b', 0)),
+                        (('lbl1', 'b', 0), ('lbl2', 'a', 0)))),
+                      (('d', ('lbl1', 'b', 0), ('lbl1', 'c', 1), ('lbl2', 'a', 0)),
+                       (('d', ('lbl1', 'c', 1)),
+                        (('lbl1', 'b', 0), ('lbl2', 'a', 0)),
+                        (('lbl1', 'c', 1), ('lbl1', 'b', 0))))],
+                     [(('b', ('lbl1', 2, 0), ('lbl1', 'b', 0), ('lbl2', 'a', 0)),
+                       (('b', ('lbl1', 'b', 0)),
+                        (('lbl1', 2, 0), ('lbl1', 'b', 0)),
+                        (('lbl1', 'b', 0), ('lbl1', 'b', 0)),
+                        (('lbl1', 'b', 0), ('lbl2', 'a', 0)))),
+                      (('b', ('lbl1', 2, 0), ('lbl1', 'b', 0), ('lbl2', 'a', 0)),
+                       (('b', ('lbl1', 'b', 0)),
+                        (('lbl1', 2, 0), ('lbl1', 'b', 0)),
+                        (('lbl1', 'b', 0), ('lbl2', 'a', 0))))],
+                     [((('lbl0', 1, 0),
+                        ('lbl0', 2, 0),
+                        ('lbl0', 'a', 0),
+                        ('lbl0', 'b', 0),
+                        ('lbl1', 'a', 0),
+                        ('lbl1', 'b', 0),
+                        ('lbl2', 'a', 0)),
+                       ((('lbl0', 1, 0), ('lbl0', 'a', 0)),
+                        (('lbl0', 2, 0), ('lbl0', 'b', 0)),
+                        (('lbl0', 'a', 0), ('lbl1', 'b', 0)),
+                        (('lbl0', 'b', 0), ('lbl1', 'a', 0)),
+                        (('lbl1', 'a', 0), ('lbl2', 'a', 0)),
+                        (('lbl1', 'b', 0), ('lbl2', 'a', 0))))],
+                     [((('lbl0', 1, 0),
+                        ('lbl0', 'b', 0),
+                        ('lbl1', 2, 1),
+                        ('lbl1', 'a', 0),
+                        ('lbl1', 'b', 1),
+                        ('lbl2', 'b', 0)),
+                       ((('lbl0', 1, 0), ('lbl0', 'b', 0)),
+                        (('lbl0', 'b', 0), ('lbl1', 'b', 1)),
+                        (('lbl1', 2, 1), ('lbl1', 'b', 1)),
+                        (('lbl1', 'a', 0), ('lbl2', 'b', 0)),
+                        (('lbl1', 'b', 1), ('lbl1', 'a', 0)))),
+                      ((('lbl0', 1, 0),
+                        ('lbl0', 'b', 0),
+                        ('lbl1', 2, 1),
+                        ('lbl1', 'a', 0),
+                        ('lbl1', 'b', 1),
+                        ('lbl2', 'b', 0)),
+                       ((('lbl0', 1, 0), ('lbl0', 'b', 0)),
+                        (('lbl0', 'b', 0), ('lbl1', 'b', 1)),
+                        (('lbl1', 2, 1), ('lbl1', 'b', 1)),
+                        (('lbl1', 'a', 0), ('lbl2', 'b', 0)),
+                        (('lbl1', 'b', 1), ('lbl1', 'a', 0)),
+                        (('lbl1', 'b', 1), ('lbl1', 'b', 1)))),
+                      ((('lbl0', 1, 0), ('lbl0', 'b', 0), ('lbl1', 'a', 0), ('lbl2', 'b', 0)),
+                       ((('lbl0', 1, 0), ('lbl0', 'b', 0)),
+                        (('lbl0', 'b', 0), ('lbl1', 'a', 0)),
+                        (('lbl1', 'a', 0), ('lbl2', 'b', 0))))],
+                     [((('lbl0', 1, 0),
+                        ('lbl0', 'a', 0),
+                        ('lbl1', 'c', 0),
+                        ('lbl2', 3, 0),
+                        ('lbl2', 3, 1),
+                        ('lbl2', 'a', 1),
+                        ('lbl2', 'b', 0),
+                        ('lbl3', 'r', 0)),
+                       ((('lbl0', 1, 0), ('lbl0', 'a', 0)),
+                        (('lbl0', 'a', 0), ('lbl2', 'b', 0)),
+                        (('lbl1', 'c', 0), ('lbl3', 'r', 0)),
+                        (('lbl2', 3, 0), ('lbl2', 'b', 0)),
+                        (('lbl2', 3, 1), ('lbl2', 'a', 1)),
+                        (('lbl2', 'a', 1), ('lbl1', 'c', 0)),
+                        (('lbl2', 'a', 1), ('lbl2', 'b', 0)),
+                        (('lbl2', 'b', 0), ('lbl2', 'a', 1)))),
+                      ((('lbl0', 1, 0),
+                        ('lbl0', 'a', 0),
+                        ('lbl1', 'c', 0),
+                        ('lbl2', 3, 0),
+                        ('lbl2', 3, 1),
+                        ('lbl2', 'a', 1),
+                        ('lbl2', 'b', 0),
+                        ('lbl3', 'r', 0)),
+                       ((('lbl0', 1, 0), ('lbl0', 'a', 0)),
+                        (('lbl0', 'a', 0), ('lbl2', 'b', 0)),
+                        (('lbl1', 'c', 0), ('lbl3', 'r', 0)),
+                        (('lbl2', 3, 0), ('lbl2', 'b', 0)),
+                        (('lbl2', 3, 1), ('lbl2', 'a', 1)),
+                        (('lbl2', 'a', 1), ('lbl1', 'c', 0)),
+                        (('lbl2', 'b', 0), ('lbl2', 'a', 1)))),
+                      ((('lbl0', 1, 0), ('lbl0', 'a', 0), ('lbl1', 'c', 0), ('lbl3', 'r', 0)),
+                       ((('lbl0', 1, 0), ('lbl0', 'a', 0)),
+                        (('lbl0', 'a', 0), ('lbl1', 'c', 0)),
+                        (('lbl1', 'c', 0), ('lbl3', 'r', 0))))],
+                     [(('d',
+                        ('lbl0', 1, 0),
+                        ('lbl0', 'a', 0),
+                        ('lbl1', 'b', 0),
+                        ('lbl3', 'r', 0)),
+                       (('d', ('lbl3', 'r', 0)),
+                        (('lbl0', 1, 0), ('lbl0', 'a', 0)),
+                        (('lbl0', 'a', 0), ('lbl1', 'b', 0)),
+                        (('lbl1', 'b', 0), ('lbl3', 'r', 0)))),
+                      ((('lbl0', 1, 0),
+                        ('lbl0', 'a', 0),
+                        ('lbl1', 'b', 0),
+                        ('lbl2', 1, 1),
+                        ('lbl2', 'a', 1),
+                        ('lbl2', 'd', 0),
+                        ('lbl3', 'r', 0)),
+                       ((('lbl0', 1, 0), ('lbl0', 'a', 0)),
+                        (('lbl0', 'a', 0), ('lbl2', 'd', 0)),
+                        (('lbl1', 'b', 0), ('lbl3', 'r', 0)),
+                        (('lbl2', 1, 1), ('lbl2', 'a', 1)),
+                        (('lbl2', 'a', 1), ('lbl1', 'b', 0)),
+                        (('lbl2', 'a', 1), ('lbl2', 'd', 0)),
+                        (('lbl2', 'd', 0), ('lbl2', 'a', 1)),
+                        (('lbl2', 'd', 0), ('lbl3', 'r', 0)))),
+                      ((('lbl0', 1, 0),
+                        ('lbl0', 'a', 0),
+                        ('lbl1', 'b', 0),
+                        ('lbl2', 1, 1),
+                        ('lbl2', 'a', 1),
+                        ('lbl2', 'd', 0),
+                        ('lbl3', 'r', 0)),
+                       ((('lbl0', 1, 0), ('lbl0', 'a', 0)),
+                        (('lbl0', 'a', 0), ('lbl2', 'd', 0)),
+                        (('lbl1', 'b', 0), ('lbl3', 'r', 0)),
+                        (('lbl2', 1, 1), ('lbl2', 'a', 1)),
+                        (('lbl2', 'a', 1), ('lbl1', 'b', 0)),
+                        (('lbl2', 'd', 0), ('lbl2', 'a', 1)),
+                        (('lbl2', 'd', 0), ('lbl3', 'r', 0))))],
+                     [(('b',
+                        ('lbl0', 1, 0),
+                        ('lbl0', 'a', 0),
+                        ('lbl1', 'b', 2),
+                        ('lbl1', 'c', 1),
+                        ('lbl1', 'd', 0),
+                        ('lbl2', 'r', 0)),
+                       (('b', ('lbl1', 'd', 0)),
+                        (('lbl0', 1, 0), ('lbl0', 'a', 0)),
+                        (('lbl0', 'a', 0), ('lbl1', 'd', 0)),
+                        (('lbl1', 'b', 2), ('lbl1', 'd', 0)),
+                        (('lbl1', 'b', 2), ('lbl2', 'r', 0)),
+                        (('lbl1', 'c', 1), ('lbl1', 'b', 2)),
+                        (('lbl1', 'd', 0), ('lbl1', 'c', 1)))),
+                      (('b',
+                        ('lbl0', 1, 0),
+                        ('lbl0', 'a', 0),
+                        ('lbl1', 'b', 2),
+                        ('lbl1', 'c', 1),
+                        ('lbl1', 'd', 0),
+                        ('lbl2', 'r', 0)),
+                       (('b', ('lbl1', 'd', 0)),
+                        (('lbl0', 1, 0), ('lbl0', 'a', 0)),
+                        (('lbl0', 'a', 0), ('lbl1', 'd', 0)),
+                        (('lbl1', 'b', 2), ('lbl2', 'r', 0)),
+                        (('lbl1', 'c', 1), ('lbl1', 'b', 2)),
+                        (('lbl1', 'd', 0), ('lbl1', 'c', 1))))],
+                     [((('lbl0', 1, 0), ('lbl0', 'a', 0), ('lbl5', 'r', 0)),
+                       ((('lbl0', 1, 0), ('lbl0', 'a', 0)),
+                        (('lbl0', 'a', 0), ('lbl5', 'r', 0))))],
+                     [((('lbl0', 2, 0),
+                        ('lbl0', 'd', 0),
+                        ('lbl1', 'a', 0),
+                        ('lbl1', 'b', 0),
+                        ('lbl2', 'a', 0)),
+                       ((('lbl0', 2, 0), ('lbl0', 'd', 0)),
+                        (('lbl0', 'd', 0), ('lbl1', 'a', 0)),
+                        (('lbl0', 'd', 0), ('lbl1', 'b', 0)),
+                        (('lbl1', 'a', 0), ('lbl2', 'a', 0)),
+                        (('lbl1', 'b', 0), ('lbl2', 'a', 0))))]]
+
+test_results = [[unflatGraph(flat_result) for flat_result in flat_results]
+                for flat_results in flat_test_results]
+
+all_flats = []
 # Launch tests
-for test_nb, test in enumerate([(G1_IRA, G1_INPUT, G1_OUTPUT),
-                                (G2_IRA, G2_INPUT, G2_OUTPUT),
-                                (G3_IRA, G3_INPUT, G3_OUTPUT),
-                                (G4_IRA, G4_INPUT, G4_OUTPUT),
-                                (G5_IRA, G5_INPUT, G5_OUTPUT),
-                                (G6_IRA, G6_INPUT, G6_OUTPUT),
-                                (G7_IRA, G7_INPUT, G7_OUTPUT),
-                                (G8_IRA, G8_INPUT, G8_OUTPUT),
-                                (G8_IRA, G9_INPUT, G9_OUTPUT),
-                                (G10_IRA, G10_INPUT, G10_OUTPUT),
-                                (G11_IRA, G11_INPUT, G11_OUTPUT),
-                                (G12_IRA, G12_INPUT, G12_OUTPUT),
-                                (G13_IRA, G13_INPUT, G13_OUTPUT),
-                                (G14_IRA, G14_INPUT, G14_OUTPUT),
-                                (G15_IRA, G15_INPUT, G15_OUTPUT),
-                                (G16_IRA, G16_INPUT, G16_OUTPUT),
-                                (G17_IRA, G17_INPUT, G17_OUTPUT),
+for test_nb, test in enumerate([(G1_IRA, G1_INPUT),
+                                (G2_IRA, G2_INPUT),
+                                (G3_IRA, G3_INPUT),
+                                (G4_IRA, G4_INPUT),
+                                (G5_IRA, G5_INPUT),
+                                (G6_IRA, G6_INPUT),
+                                (G7_IRA, G7_INPUT),
+                                (G8_IRA, G8_INPUT),
+                                (G8_IRA, G9_INPUT),
+                                (G10_IRA, G10_INPUT),
+                                (G11_IRA, G11_INPUT),
+                                (G12_IRA, G12_INPUT),
+                                (G13_IRA, G13_INPUT),
+                                (G14_IRA, G14_INPUT),
+                                (G15_IRA, G15_INPUT),
+                                (G16_IRA, G16_INPUT),
+                                (G17_IRA, G17_INPUT),
                                 ]):
 
     # Extract test elements
     print "[+] Test", test_nb + 1
-    g_ira, (depnodes, heads), g_test_output = test
+    g_ira, (depnodes, heads) = test
 
     open("graph_%02d.dot" % (test_nb + 1), "w").write(g_ira.graph.dot())
+    open("graph_%02d.dot" % (test_nb + 1), "w").write(bloc2graph(g_ira))
 
     # Different options
     suffix_key_list = ["", "_nosimp", "_nomem", "_nocall",
@@ -1134,125 +1033,41 @@ for test_nb, test in enumerate([(G1_IRA, G1_INPUT, G1_OUTPUT),
                                    DependencyGraph(g_ira, follow_mem=False),
                                    DependencyGraph(g_ira, follow_mem=False,
                                                    follow_call=False),
-                                   DependencyGraph(g_ira, implicit=True),
+                                   # DependencyGraph(g_ira, implicit=True),
                                    ]):
-        if g_ind == 4:
-            # TODO: Implicit specifications
-            continue
+        # if g_ind == 4:
+        # TODO: Implicit specifications
+        #    continue
         print " - Class %s - %s" % (g_dep.__class__.__name__,
                                     suffix_key_list[g_ind])
         # Select the correct result key
         mode_suffix = suffix_key_list[g_ind]
         graph_test_key = "graph" + mode_suffix
-        if not g_test_output.has_key(graph_test_key):
-            graph_test_key = "graph"
-
-        expected_results = g_test_output[graph_test_key]
 
         # Test public APIs
-        for api_i, g_list in enumerate(
-            [g_dep.get_from_depnodes(depnodes, heads),
-             g_dep.get(list(depnodes)[0].label,
-                       [depnode.element for
-                        depnode in depnodes],
-                       list(depnodes)[0].line_nb,
-                       heads)
-             ]):
-            print " - - API %s" % ("get_from_depnodes"
-                                   if api_i == 0 else "get")
-
-            # Expand result iterator
-            g_list = list(g_list)
-
-            # Dump outputs graphs for debug means
-            for result_nb, result_graph in enumerate(g_list):
-                open("graph_test_%02d_%02d.dot" % (test_nb + 1, result_nb),
-                     "w").write(result_graph.graph.dot())
-
-            for result_nb, result_graph in enumerate(expected_results):
-                open("exp_graph_test_%02d_%02d.dot" % (test_nb + 1, result_nb),
-                     "w").write(result_graph.dot())
-
-            try:
-                # The number of results should be the same
-                print " - - - number of results %d/%d" % (len(g_list),
-                                                          len(expected_results))
-
-                error = 'len:' + \
-                        str(len(g_list)) + '/' + str(len(expected_results))
-                assert len(g_list) == len(expected_results)
-
-                # Check that every result appears in expected_results
-                for j, result in enumerate(g_list):
-                    print " - - - result %d" % j
-                    found = False
-                    for expected in expected_results:
-                        if expected.__eq__(result.graph):
-                            found = True
-                error = "found1"
-                assert found
-
-                # Check that every expected result appears in real results
-                for j, expected in enumerate(expected_results):
-                    print " - - - expected %d" % j
-                    found = False
-                    for result in g_list:
-                        if expected.__eq__(result.graph):
-                            found = True
-                error = "found2"
-                assert found
-
-                if not EMULATION:
-                    continue
-                # Test emulation results and other properties
-                unresolved_test_key = "unresolved" + mode_suffix
-                if not g_test_output.has_key(unresolved_test_key):
-                    unresolved_test_key = "unresolved"
-
-                # Check that every computed result was expected
-                for emul_nb, result in enumerate(g_list):
-                    print " - - - - emul %d" % emul_nb
-                    emul_result = result.emul()
-
-                    error = "emul"
-                    found = False
-                    for exp_nb in xrange(len(g_list)):
-                        if (emul_result == g_test_output["emul"][exp_nb] and
-                            getattr(result, "unresolved") ==
-                            g_test_output[unresolved_test_key][exp_nb] and
-                            g_test_output["has_loop"][exp_nb] ==
-                            getattr(result, "has_loop")
-                                ):
-                            found = True
-                            break
-                    assert found
-
-                # Check that every expected result has been computed
-                for exp_nb in xrange(len(g_list)):
-                    print " - - - - emul %d" % exp_nb
-
-                    error = "emul2"
-                    found = False
-                    for emul_nb, result in enumerate(g_list):
-                        emul_result = result.emul()
-                        if (emul_result == g_test_output["emul"][exp_nb] and
-                            getattr(result, "unresolved") ==
-                            g_test_output[unresolved_test_key][exp_nb] and
-                            g_test_output["has_loop"][exp_nb] ==
-                            getattr(result, "has_loop")
-                            ):
-                            found = True
-                            break
-                    assert found
-
-            except AssertionError:
-                FAILED.add((test_nb + 1, error))
-                continue
+        results = g_dep.get_from_depnodes(depnodes, heads)
+        print "RESULTS"
+        all_results = set()
+        all_flat = set()
+        for i, result in enumerate(results):
+            all_flat.add(flatGraph(result.graph))
+            all_results.add(unflatGraph(flatGraph(result.graph)))
+            open("graph_test_%02d_%02d.dot" % (test_nb + 1, i),
+                 "w").write(dg2graph(result.graph))
+        # print all_flat
+        if g_ind == 0:
+            all_flat = sorted(all_flat)
+            all_flats.append(all_flat)
+        flat_depnodes = get_flat_init_depnodes(depnodes)
+        if not match_results(all_results, test_results[test_nb], flat_depnodes):
+            FAILED.add(test_nb)
+            # fds
+        continue
 
 if FAILED:
     print "FAILED :", len(FAILED)
-    for i in sorted(FAILED, key=lambda (u, _): u):
-        print i,
+    for test_num in sorted(FAILED):
+        print test_num,
 else:
     print "SUCCESS"
 
diff --git a/test/analysis/dg_check.py b/test/analysis/dg_check.py
new file mode 100644
index 00000000..0d680b6f
--- /dev/null
+++ b/test/analysis/dg_check.py
@@ -0,0 +1,20 @@
+from pdb import pm
+import sys
+import subprocess
+import json
+
+
+expected_file = sys.argv[1]
+dg = subprocess.Popen(["python"] + sys.argv[2:], stdout=subprocess.PIPE)
+
+stdout, _ = dg.communicate()
+expected = json.load(open(expected_file))
+result = json.loads(stdout)
+
+
+expected.sort()
+result.sort()
+
+print expected
+print result
+assert expected == result
diff --git a/test/analysis/dg_test_00_expected.json b/test/analysis/dg_test_00_expected.json
new file mode 100644
index 00000000..cae8b582
--- /dev/null
+++ b/test/analysis/dg_test_00_expected.json
@@ -0,0 +1 @@
+[{"EAX": "0x1", "has_loop": false}]
diff --git a/test/analysis/dg_test_00_implicit_expected.json b/test/analysis/dg_test_00_implicit_expected.json
new file mode 100644
index 00000000..186704d0
--- /dev/null
+++ b/test/analysis/dg_test_00_implicit_expected.json
@@ -0,0 +1 @@
+[{"has_loop": false, "EAX": "0x1", "satisfiability": true, "constraints": {}}]
diff --git a/test/analysis/dg_test_01_expected.json b/test/analysis/dg_test_01_expected.json
new file mode 100644
index 00000000..724d74a3
--- /dev/null
+++ b/test/analysis/dg_test_01_expected.json
@@ -0,0 +1 @@
+[{"EAX": "0x3", "has_loop": false}]
diff --git a/test/analysis/dg_test_01_implicit_expected.json b/test/analysis/dg_test_01_implicit_expected.json
new file mode 100644
index 00000000..2edb5ede
--- /dev/null
+++ b/test/analysis/dg_test_01_implicit_expected.json
@@ -0,0 +1 @@
+[{"has_loop": false, "EAX": "0x3", "satisfiability": true, "constraints": {}}]
diff --git a/test/analysis/dg_test_02_expected.json b/test/analysis/dg_test_02_expected.json
new file mode 100644
index 00000000..16d409fb
--- /dev/null
+++ b/test/analysis/dg_test_02_expected.json
@@ -0,0 +1 @@
+[{"EAX": "0x3", "has_loop": false}, {"EAX": "0x4", "has_loop": false}]
diff --git a/test/analysis/dg_test_02_implicit_expected.json b/test/analysis/dg_test_02_implicit_expected.json
new file mode 100644
index 00000000..9394f01d
--- /dev/null
+++ b/test/analysis/dg_test_02_implicit_expected.json
@@ -0,0 +1 @@
+[{"has_loop": false, "EAX": "0x4", "satisfiability": true, "constraints": {"zf_init": "0x1"}}, {"has_loop": false, "EAX": "0x3", "satisfiability": true, "constraints": {"zf_init": "0x0"}}]
diff --git a/test/analysis/dg_test_03_expected.json b/test/analysis/dg_test_03_expected.json
new file mode 100644
index 00000000..9d0dfdbd
--- /dev/null
+++ b/test/analysis/dg_test_03_expected.json
@@ -0,0 +1 @@
+[{"EAX": "0x3", "has_loop": false}, {"EAX": "0x5", "has_loop": true}]
diff --git a/test/analysis/dg_test_03_implicit_expected.json b/test/analysis/dg_test_03_implicit_expected.json
new file mode 100644
index 00000000..9f7ddea0
--- /dev/null
+++ b/test/analysis/dg_test_03_implicit_expected.json
@@ -0,0 +1 @@
+[{"has_loop": false, "EAX": "0x3", "satisfiability": false, "constraints": {}}, {"has_loop": true, "EAX": "0x5", "satisfiability": false, "constraints": {}}]
diff --git a/test/analysis/dg_test_04_expected.json b/test/analysis/dg_test_04_expected.json
new file mode 100644
index 00000000..fb115835
--- /dev/null
+++ b/test/analysis/dg_test_04_expected.json
@@ -0,0 +1 @@
+[{"EAX": "EBX_init", "has_loop": false}]
diff --git a/test/analysis/dg_test_04_implicit_expected.json b/test/analysis/dg_test_04_implicit_expected.json
new file mode 100644
index 00000000..73e7209e
--- /dev/null
+++ b/test/analysis/dg_test_04_implicit_expected.json
@@ -0,0 +1 @@
+[{"has_loop": false, "EAX": "EBX_init", "satisfiability": true, "constraints": {}}, {"has_loop": true, "EAX": "EBX_init", "satisfiability": false, "constraints": {}}]
diff --git a/test/analysis/dg_test_05_expected.json b/test/analysis/dg_test_05_expected.json
new file mode 100644
index 00000000..724d74a3
--- /dev/null
+++ b/test/analysis/dg_test_05_expected.json
@@ -0,0 +1 @@
+[{"EAX": "0x3", "has_loop": false}]
diff --git a/test/analysis/dg_test_05_implicit_expected.json b/test/analysis/dg_test_05_implicit_expected.json
new file mode 100644
index 00000000..ef39008f
--- /dev/null
+++ b/test/analysis/dg_test_05_implicit_expected.json
@@ -0,0 +1 @@
+[{"has_loop": false, "EAX": "0x3", "satisfiability": true, "constraints": {}}, {"has_loop": true, "EAX": "0x3", "satisfiability": false, "constraints": {}}]
diff --git a/test/analysis/dg_test_06_expected.json b/test/analysis/dg_test_06_expected.json
new file mode 100644
index 00000000..c181cd2d
--- /dev/null
+++ b/test/analysis/dg_test_06_expected.json
@@ -0,0 +1 @@
+[{"EAX": "0x1", "has_loop": false}, {"EAX": "0x2", "has_loop": false}]
diff --git a/test/analysis/dg_test_06_implicit_expected.json b/test/analysis/dg_test_06_implicit_expected.json
new file mode 100644
index 00000000..bda75296
--- /dev/null
+++ b/test/analysis/dg_test_06_implicit_expected.json
@@ -0,0 +1 @@
+[{"has_loop": false, "EAX": "0x1", "satisfiability": true, "constraints": {"EAX_init": "0xffffffff"}}, {"has_loop": false, "EAX": "0x2", "satisfiability": false, "constraints": {}}]
diff --git a/test/analysis/dg_test_07_expected.json b/test/analysis/dg_test_07_expected.json
new file mode 100644
index 00000000..0ed0be95
--- /dev/null
+++ b/test/analysis/dg_test_07_expected.json
@@ -0,0 +1 @@
+[{"EAX": "0x1", "ECX": "0x2", "has_loop": false}]
diff --git a/test/analysis/dg_test_07_implicit_expected.json b/test/analysis/dg_test_07_implicit_expected.json
new file mode 100644
index 00000000..4f8c782a
--- /dev/null
+++ b/test/analysis/dg_test_07_implicit_expected.json
@@ -0,0 +1 @@
+[{"has_loop": false, "EAX": "0x1", "constraints": {}, "satisfiability": true, "ECX": "0x2"}]
diff --git a/test/analysis/dg_test_08_expected.json b/test/analysis/dg_test_08_expected.json
new file mode 100644
index 00000000..b4c379d1
--- /dev/null
+++ b/test/analysis/dg_test_08_expected.json
@@ -0,0 +1 @@
+[{"ECX": "0x2", "has_loop": false}]
diff --git a/test/analysis/dg_test_08_implicit_expected.json b/test/analysis/dg_test_08_implicit_expected.json
new file mode 100644
index 00000000..a0416a4e
--- /dev/null
+++ b/test/analysis/dg_test_08_implicit_expected.json
@@ -0,0 +1 @@
+[{"has_loop": false, "constraints": {}, "satisfiability": true, "ECX": "0x2"}]
diff --git a/test/analysis/dg_test_09_expected.json b/test/analysis/dg_test_09_expected.json
new file mode 100644
index 00000000..ba0e4673
--- /dev/null
+++ b/test/analysis/dg_test_09_expected.json
@@ -0,0 +1,2 @@
+[{"EAX": "0x1", "EBX": "0x2", "has_loop": false},
+ {"EAX": "0x3", "EBX": "0x4", "has_loop": false}]
diff --git a/test/analysis/dg_test_09_implicit_expected.json b/test/analysis/dg_test_09_implicit_expected.json
new file mode 100644
index 00000000..1ea2976f
--- /dev/null
+++ b/test/analysis/dg_test_09_implicit_expected.json
@@ -0,0 +1 @@
+[{"has_loop": false, "EAX": "0x1", "EBX": "0x2", "satisfiability": true, "constraints": {}}, {"has_loop": false, "EAX": "0x3", "EBX": "0x4", "satisfiability": true, "constraints": {}}]
diff --git a/test/analysis/dg_test_10_expected.json b/test/analysis/dg_test_10_expected.json
new file mode 100644
index 00000000..720b85e2
--- /dev/null
+++ b/test/analysis/dg_test_10_expected.json
@@ -0,0 +1 @@
+[{"EAX": "0x1", "EBX": "0x3", "has_loop": false}, {"EAX": "0x1", "EBX": "0x4", "has_loop": false}, {"EAX": "0x2", "EBX": "0x4", "has_loop": false}, {"EAX": "0x2", "EBX": "0x3", "has_loop": false}]
diff --git a/test/analysis/dg_test_10_implicit_expected.json b/test/analysis/dg_test_10_implicit_expected.json
new file mode 100644
index 00000000..05b34918
--- /dev/null
+++ b/test/analysis/dg_test_10_implicit_expected.json
@@ -0,0 +1 @@
+[{"has_loop": false, "EAX": "0x1", "EBX": "0x3", "satisfiability": true, "constraints": {"zf_init": "0x0"}}, {"has_loop": false, "EAX": "0x2", "EBX": "0x3", "satisfiability": false, "constraints": {}}, {"has_loop": false, "EAX": "0x1", "EBX": "0x4", "satisfiability": false, "constraints": {}}, {"has_loop": false, "EAX": "0x2", "EBX": "0x4", "satisfiability": true, "constraints": {"zf_init": "0x1"}}]
diff --git a/test/arch/x86/unit/asm_test.py b/test/arch/x86/unit/asm_test.py
index bf609aa5..118a57b4 100644
--- a/test/arch/x86/unit/asm_test.py
+++ b/test/arch/x86/unit/asm_test.py
@@ -21,22 +21,38 @@ if filename and os.path.isfile(filename):
 reg_and_id = dict(mn_x86.regs.all_regs_ids_byname)
 
 class Asm_Test(object):
+    run_addr = 0x0
+
     def __init__(self):
-        self.myjit = Machine("x86_32").jitter()
+        self.myjit = Machine(self.arch_name).jitter()
         self.myjit.init_stack()
 
         self.myjit.jit.log_regs = False
         self.myjit.jit.log_mn = False
 
+    def test_init(self):
+        pass
+
+    def prepare(self):
+        pass
 
     def __call__(self):
+        self.prepare()
         self.asm()
+        self.init_machine()
+        self.test_init()
         self.run()
         self.check()
 
+    def run(self):
+
+        self.myjit.init_run(self.run_addr)
+        self.myjit.continue_run()
+
+        assert(self.myjit.pc == self.ret_addr)
 
     def asm(self):
-        blocs, symbol_pool = parse_asm.parse_txt(mn_x86, 32, self.TXT,
+        blocs, symbol_pool = parse_asm.parse_txt(mn_x86, self.arch_attrib, self.TXT,
                                                  symbol_pool = self.myjit.ir_arch.symbol_pool)
         # fix shellcode addr
         symbol_pool.set_offset(symbol_pool.getby_name("main"), 0x0)
@@ -48,18 +64,37 @@ class Asm_Test(object):
         s = str(s)
         self.assembly = s
 
-    def run(self):
-        run_addr = 0
-        self.myjit.vm.add_memory_page(run_addr, PAGE_READ | PAGE_WRITE, self.assembly)
+    def check(self):
+        raise NotImplementedError('abstract method')
 
-        self.myjit.push_uint32_t(0x1337beef)
 
-        self.myjit.add_breakpoint(0x1337beef, lambda x:False)
+class Asm_Test_32(Asm_Test):
+    arch_name = "x86_32"
+    arch_attrib = 32
+    ret_addr = 0x1337beef
 
-        self.myjit.init_run(run_addr)
-        self.myjit.continue_run()
+    def init_machine(self):
+        self.myjit.vm.add_memory_page(self.run_addr, PAGE_READ | PAGE_WRITE, self.assembly)
+        self.myjit.push_uint32_t(self.ret_addr)
+        self.myjit.add_breakpoint(self.ret_addr, lambda x:False)
 
-        assert(self.myjit.pc == 0x1337beef)
 
-    def check(self):
-        raise NotImplementedError('abstract method')
+class Asm_Test_16(Asm_Test):
+    arch_name = "x86_16"
+    arch_attrib = 16
+    ret_addr = 0x1337
+
+    def __init__(self):
+        self.myjit = Machine(self.arch_name).jitter()
+        self.myjit.stack_base = 0x1000
+        self.myjit.stack_size = 0x1000
+        self.myjit.init_stack()
+
+        self.myjit.jit.log_regs = False
+        self.myjit.jit.log_mn = False
+
+
+    def init_machine(self):
+        self.myjit.vm.add_memory_page(self.run_addr, PAGE_READ | PAGE_WRITE, self.assembly)
+        self.myjit.push_uint16_t(self.ret_addr)
+        self.myjit.add_breakpoint(self.ret_addr, lambda x:False)
diff --git a/test/arch/x86/unit/mn_daa.py b/test/arch/x86/unit/mn_daa.py
index cb96a22b..7aadf582 100644
--- a/test/arch/x86/unit/mn_daa.py
+++ b/test/arch/x86/unit/mn_daa.py
@@ -1,8 +1,8 @@
 #! /usr/bin/env python
-from asm_test import Asm_Test
+from asm_test import Asm_Test_32
 
 
-class Test_DAA(Asm_Test):
+class Test_DAA(Asm_Test_32):
     TXT = '''
     main:
        MOV     EBP, ESP
diff --git a/test/arch/x86/unit/mn_das.py b/test/arch/x86/unit/mn_das.py
index ba84abdd..0828cafe 100644
--- a/test/arch/x86/unit/mn_das.py
+++ b/test/arch/x86/unit/mn_das.py
@@ -1,8 +1,8 @@
 #! /usr/bin/env python
-from asm_test import Asm_Test
+from asm_test import Asm_Test_32
 
 
-class Test_DAS(Asm_Test):
+class Test_DAS(Asm_Test_32):
     TXT = '''
     main:
        MOV     EBP, ESP
diff --git a/test/arch/x86/unit/mn_float.py b/test/arch/x86/unit/mn_float.py
index 863e86c3..81eb518b 100644
--- a/test/arch/x86/unit/mn_float.py
+++ b/test/arch/x86/unit/mn_float.py
@@ -1,8 +1,8 @@
 #! /usr/bin/env python
-from asm_test import Asm_Test
+from asm_test import Asm_Test_32
 
 
-class Test_FADD(Asm_Test):
+class Test_FADD(Asm_Test_32):
     TXT = '''
     main:
        ; test float
diff --git a/test/arch/x86/unit/mn_int.py b/test/arch/x86/unit/mn_int.py
index 119e5b08..0f4a5717 100644
--- a/test/arch/x86/unit/mn_int.py
+++ b/test/arch/x86/unit/mn_int.py
@@ -1,9 +1,9 @@
 #! /usr/bin/env python
 from miasm2.jitter.csts import EXCEPT_INT_XX
-from asm_test import Asm_Test
+from asm_test import Asm_Test_32
 
 
-class Test_INT(Asm_Test):
+class Test_INT(Asm_Test_32):
     TXT = '''
     main:
        INT 0x42
diff --git a/test/arch/x86/unit/mn_pcmpeq.py b/test/arch/x86/unit/mn_pcmpeq.py
index a8774cbc..06815e76 100644
--- a/test/arch/x86/unit/mn_pcmpeq.py
+++ b/test/arch/x86/unit/mn_pcmpeq.py
@@ -1,8 +1,8 @@
 #! /usr/bin/env python
-from asm_test import Asm_Test
+from asm_test import Asm_Test_32
 import sys
 
-class Test_PCMPEQB(Asm_Test):
+class Test_PCMPEQB(Asm_Test_32):
     TXT = '''
     main:
        CALL    next
@@ -21,7 +21,7 @@ class Test_PCMPEQB(Asm_Test):
         assert self.myjit.cpu.MM1 == 0xFF00000000FF0000
 
 
-class Test_PCMPEQW(Asm_Test):
+class Test_PCMPEQW(Asm_Test_32):
     TXT = '''
     main:
        CALL    next
@@ -41,7 +41,7 @@ class Test_PCMPEQW(Asm_Test):
 
 
 
-class Test_PCMPEQD(Asm_Test):
+class Test_PCMPEQD(Asm_Test_32):
     TXT = '''
     main:
        CALL    next
diff --git a/test/arch/x86/unit/mn_pextr.py b/test/arch/x86/unit/mn_pextr.py
index eb724cf9..0469eed7 100644
--- a/test/arch/x86/unit/mn_pextr.py
+++ b/test/arch/x86/unit/mn_pextr.py
@@ -1,8 +1,8 @@
 #! /usr/bin/env python
-from asm_test import Asm_Test
+from asm_test import Asm_Test_32
 import sys
 
-class Test_PEXTRB(Asm_Test):
+class Test_PEXTRB(Asm_Test_32):
     TXT = '''
     main:
        CALL      next
diff --git a/test/arch/x86/unit/mn_pinsr.py b/test/arch/x86/unit/mn_pinsr.py
index b7a86d2d..a10cd286 100644
--- a/test/arch/x86/unit/mn_pinsr.py
+++ b/test/arch/x86/unit/mn_pinsr.py
@@ -1,8 +1,8 @@
 #! /usr/bin/env python
-from asm_test import Asm_Test
+from asm_test import Asm_Test_32
 import sys
 
-class Test_PINSRB(Asm_Test):
+class Test_PINSRB(Asm_Test_32):
     TXT = '''
     main:
        CALL      next
diff --git a/test/arch/x86/unit/mn_pmaxu.py b/test/arch/x86/unit/mn_pmaxu.py
index 08e54c03..50cbff94 100644
--- a/test/arch/x86/unit/mn_pmaxu.py
+++ b/test/arch/x86/unit/mn_pmaxu.py
@@ -1,8 +1,8 @@
 #! /usr/bin/env python
-from asm_test import Asm_Test
+from asm_test import Asm_Test_32
 import sys
 
-class Test_PMAXU(Asm_Test):
+class Test_PMAXU(Asm_Test_32):
     TXT = '''
     main:
        CALL   next
diff --git a/test/arch/x86/unit/mn_pminu.py b/test/arch/x86/unit/mn_pminu.py
index 38a29787..27c9ad1e 100644
--- a/test/arch/x86/unit/mn_pminu.py
+++ b/test/arch/x86/unit/mn_pminu.py
@@ -1,8 +1,8 @@
 #! /usr/bin/env python
-from asm_test import Asm_Test
+from asm_test import Asm_Test_32
 import sys
 
-class Test_PMINU(Asm_Test):
+class Test_PMINU(Asm_Test_32):
     TXT = '''
     main:
        CALL   next
diff --git a/test/arch/x86/unit/mn_pmovmskb.py b/test/arch/x86/unit/mn_pmovmskb.py
index 97435794..796e977c 100644
--- a/test/arch/x86/unit/mn_pmovmskb.py
+++ b/test/arch/x86/unit/mn_pmovmskb.py
@@ -1,8 +1,8 @@
 #! /usr/bin/env python
-from asm_test import Asm_Test
+from asm_test import Asm_Test_32
 import sys
 
-class Test_PMOVMSKB(Asm_Test):
+class Test_PMOVMSKB(Asm_Test_32):
     TXT = '''
     main:
        CALL      next
diff --git a/test/arch/x86/unit/mn_pshufb.py b/test/arch/x86/unit/mn_pshufb.py
index 187b2f72..594b0870 100644
--- a/test/arch/x86/unit/mn_pshufb.py
+++ b/test/arch/x86/unit/mn_pshufb.py
@@ -1,8 +1,8 @@
 #! /usr/bin/env python
-from asm_test import Asm_Test
+from asm_test import Asm_Test_32
 import sys
 
-class Test_PSHUFB(Asm_Test):
+class Test_PSHUFB(Asm_Test_32):
     TXT = '''
     main:
        CALL   next
diff --git a/test/arch/x86/unit/mn_psrl_psll.py b/test/arch/x86/unit/mn_psrl_psll.py
index 93a356f7..79125612 100644
--- a/test/arch/x86/unit/mn_psrl_psll.py
+++ b/test/arch/x86/unit/mn_psrl_psll.py
@@ -1,8 +1,8 @@
 #! /usr/bin/env python
-from asm_test import Asm_Test
+from asm_test import Asm_Test_32
 import sys
 
-class Test_PSRL(Asm_Test):
+class Test_PSRL(Asm_Test_32):
     TXT = '''
     main:
        CALL   next
@@ -26,7 +26,7 @@ class Test_PSRL(Asm_Test):
         assert self.myjit.cpu.MM2 == 0x0112233405566778L
         assert self.myjit.cpu.MM3 == 0x0112233445566778L
 
-class Test_PSLL(Asm_Test):
+class Test_PSLL(Asm_Test_32):
     TXT = '''
     main:
        CALL   next
diff --git a/test/arch/x86/unit/mn_punpck.py b/test/arch/x86/unit/mn_punpck.py
index 84d86c32..8b655aa0 100644
--- a/test/arch/x86/unit/mn_punpck.py
+++ b/test/arch/x86/unit/mn_punpck.py
@@ -1,8 +1,8 @@
 #! /usr/bin/env python
-from asm_test import Asm_Test
+from asm_test import Asm_Test_32
 import sys
 
-class Test_PUNPCKHBW(Asm_Test):
+class Test_PUNPCKHBW(Asm_Test_32):
     TXT = '''
     main:
        CALL      next
@@ -21,7 +21,7 @@ class Test_PUNPCKHBW(Asm_Test):
         assert self.myjit.cpu.MM1 == 0xAA11BB22CC33DD44
 
 
-class Test_PUNPCKHWD(Asm_Test):
+class Test_PUNPCKHWD(Asm_Test_32):
     TXT = '''
     main:
        CALL      next
@@ -41,7 +41,7 @@ class Test_PUNPCKHWD(Asm_Test):
 
 
 
-class Test_PUNPCKHDQ(Asm_Test):
+class Test_PUNPCKHDQ(Asm_Test_32):
     TXT = '''
     main:
        CALL      next
@@ -62,7 +62,7 @@ class Test_PUNPCKHDQ(Asm_Test):
 
 
 
-class Test_PUNPCKLBW(Asm_Test):
+class Test_PUNPCKLBW(Asm_Test_32):
     TXT = '''
     main:
        CALL      next
@@ -81,7 +81,7 @@ class Test_PUNPCKLBW(Asm_Test):
         assert self.myjit.cpu.MM1 == 0xEE55FF6602770188
 
 
-class Test_PUNPCKLWD(Asm_Test):
+class Test_PUNPCKLWD(Asm_Test_32):
     TXT = '''
     main:
        CALL      next
@@ -101,7 +101,7 @@ class Test_PUNPCKLWD(Asm_Test):
 
 
 
-class Test_PUNPCKLDQ(Asm_Test):
+class Test_PUNPCKLDQ(Asm_Test_32):
     TXT = '''
     main:
        CALL      next
diff --git a/test/arch/x86/unit/mn_pushpop.py b/test/arch/x86/unit/mn_pushpop.py
new file mode 100644
index 00000000..d230a088
--- /dev/null
+++ b/test/arch/x86/unit/mn_pushpop.py
@@ -0,0 +1,125 @@
+#! /usr/bin/env python
+from asm_test import Asm_Test_16, Asm_Test_32
+from miasm2.core.utils import pck16, pck32
+
+
+def init_regs(test):
+    test.myjit.cpu.EAX = 0x11111111
+    test.myjit.cpu.EBX = 0x22222222
+    test.myjit.cpu.ECX = 0x33333333
+    test.myjit.cpu.EDX = 0x44444444
+    test.myjit.cpu.ESI = 0x55555555
+    test.myjit.cpu.EDI = 0x66666666
+    test.myjit.cpu.EBP = 0x77777777
+    test.stk_origin = test.myjit.cpu.ESP
+
+
+class Test_PUSHAD_32(Asm_Test_32):
+    MYSTRING = "test pushad 32"
+
+    def prepare(self):
+        self.myjit.ir_arch.symbol_pool.add_label("lbl_ret", self.ret_addr)
+
+    def test_init(self):
+        init_regs(self)
+        self.buf = ""
+        for reg_name in reversed(["EAX", "ECX",
+                                  "EDX", "EBX",
+                                  "ESP", "EBP",
+                                  "ESI", "EDI"]):
+            self.buf += pck32(getattr(self.myjit.cpu, reg_name))
+
+    TXT = '''
+    main:
+       PUSHAD
+       JMP lbl_ret
+    '''
+
+    def check(self):
+        buf = self.myjit.vm.get_mem(self.myjit.cpu.ESP, 0x4 * 8)
+        assert(buf == self.buf)
+
+
+class Test_PUSHA_32(Asm_Test_32):
+    MYSTRING = "test pusha 32"
+
+    def prepare(self):
+        self.myjit.ir_arch.symbol_pool.add_label("lbl_ret", self.ret_addr)
+
+    def test_init(self):
+        init_regs(self)
+        self.buf = ""
+        for reg_name in reversed(["AX", "CX",
+                                  "DX", "BX",
+                                  "SP", "BP",
+                                  "SI", "DI"]):
+            self.buf += pck16(getattr(self.myjit.cpu, reg_name))
+
+    TXT = '''
+    main:
+       PUSHA
+       JMP lbl_ret
+    '''
+
+    def check(self):
+        buf = self.myjit.vm.get_mem(self.myjit.cpu.ESP, 0x2 * 8)
+        assert(buf == self.buf)
+
+
+class Test_PUSHA_16(Asm_Test_16):
+    MYSTRING = "test pusha 16"
+
+    def prepare(self):
+        self.myjit.ir_arch.symbol_pool.add_label("lbl_ret", self.ret_addr)
+
+    def test_init(self):
+        init_regs(self)
+        self.buf = ""
+        for reg_name in reversed(["AX", "CX",
+                                  "DX", "BX",
+                                  "SP", "BP",
+                                  "SI", "DI"]):
+            self.buf += pck16(getattr(self.myjit.cpu, reg_name))
+
+    TXT = '''
+    main:
+       PUSHA
+       JMP lbl_ret
+    '''
+
+    def check(self):
+        buf = self.myjit.vm.get_mem(self.myjit.cpu.SP, 0x2 * 8)
+        assert(buf == self.buf)
+
+
+class Test_PUSHAD_16(Asm_Test_16):
+    MYSTRING = "test pushad 16"
+
+    def prepare(self):
+        self.myjit.ir_arch.symbol_pool.add_label("lbl_ret", self.ret_addr)
+
+    def test_init(self):
+        init_regs(self)
+        self.buf = ""
+        for reg_name in reversed(["EAX", "ECX",
+                                  "EDX", "EBX",
+                                  "ESP", "EBP",
+                                  "ESI", "EDI"]):
+            self.buf += pck32(getattr(self.myjit.cpu, reg_name))
+
+    TXT = '''
+    main:
+       PUSHAD
+       JMP lbl_ret
+    '''
+
+    def check(self):
+        buf = self.myjit.vm.get_mem(self.myjit.cpu.SP, 0x4 * 8)
+        assert(buf == self.buf)
+
+
+if __name__ == "__main__":
+    [test()() for test in [Test_PUSHA_16, Test_PUSHA_32,
+                           Test_PUSHAD_16, Test_PUSHAD_32
+                           ]
+     ]
diff --git a/test/arch/x86/unit/mn_stack.py b/test/arch/x86/unit/mn_stack.py
index dd349d54..6ae26d67 100644
--- a/test/arch/x86/unit/mn_stack.py
+++ b/test/arch/x86/unit/mn_stack.py
@@ -1,8 +1,8 @@
 #! /usr/bin/env python
-from asm_test import Asm_Test
+from asm_test import Asm_Test_32
 
 
-class Test_PUSHPOP(Asm_Test):
+class Test_PUSHPOP(Asm_Test_32):
     TXT = '''
     main:
        MOV     EBP, ESP
diff --git a/test/arch/x86/unit/mn_strings.py b/test/arch/x86/unit/mn_strings.py
index db52fa74..f8055665 100644
--- a/test/arch/x86/unit/mn_strings.py
+++ b/test/arch/x86/unit/mn_strings.py
@@ -1,7 +1,7 @@
 #! /usr/bin/env python
-from asm_test import Asm_Test
+from asm_test import Asm_Test_32
 
-class Test_SCAS(Asm_Test):
+class Test_SCAS(Asm_Test_32):
     MYSTRING = "test string"
     TXT = '''
     main:
@@ -22,7 +22,7 @@ class Test_SCAS(Asm_Test):
         assert(self.myjit.cpu.EDI == self.myjit.ir_arch.symbol_pool.getby_name('mystr').offset + len(self.MYSTRING)+1)
 
 
-class Test_MOVS(Asm_Test):
+class Test_MOVS(Asm_Test_32):
     MYSTRING = "test string"
     TXT = '''
     main:
diff --git a/test/core/asmbloc.py b/test/core/asmbloc.py
index 45f7f27f..5fbdca3e 100644
--- a/test/core/asmbloc.py
+++ b/test/core/asmbloc.py
@@ -5,10 +5,9 @@ from miasm2.analysis.binary import Container
 from miasm2.core.asmbloc import AsmCFG, asm_constraint, asm_bloc, \
     asm_label, asm_block_bad, asm_constraint_to, asm_constraint_next, \
     bbl_simplifier
-from miasm2.core.graph import DiGraphSimplifier
+from miasm2.core.graph import DiGraphSimplifier, MatchGraphJoker
 from miasm2.expression.expression import ExprId
 
-
 # Initial data: from 'samples/simple_test.bin'
 data = "5589e583ec10837d08007509c745fc01100000eb73837d08017709c745fc02100000eb64837d08057709c745fc03100000eb55837d080774138b450801c083f80e7509c745fc04100000eb3c8b450801c083f80e7509c745fc05100000eb298b450883e03085c07409c745fc06100000eb16837d08427509c745fc07100000eb07c745fc081000008b45fcc9c3".decode("hex")
 cont = Container.from_string(data)
@@ -280,3 +279,30 @@ assert entry_block in preds
 assert tob in preds
 assert blocks.edges2constraint[(entry_block, newb)] == asm_constraint.c_next
 assert blocks.edges2constraint[(tob, newb)] == asm_constraint.c_to
+
+
+# Check double block split
+data = "74097405b8020000007405b803000000b804000000c3".decode('hex')
+cont = Container.from_string(data)
+mdis = dis_x86_32(cont.bin_stream)
+blocks = mdis.dis_multibloc(0)
+## Check resulting disasm
+assert len(blocks.nodes()) == 6
+blocks.sanity_check()
+## Check graph structure
+bbl0 = MatchGraphJoker(name="0")
+bbl2 = MatchGraphJoker(name="2")
+bbl4 = MatchGraphJoker(name="4")
+bbl9 = MatchGraphJoker(name="9")
+bblB = MatchGraphJoker(name="B")
+bbl10 = MatchGraphJoker(name="10")
+
+matcher = bbl0 >> bbl2 >> bbl4 >> bbl9 >> bblB >> bbl10
+matcher += bbl2 >> bbl9 >> bbl10
+matcher += bbl0 >> bblB
+
+solutions = list(matcher.match(blocks))
+assert len(solutions) == 1
+solution = solutions.pop()
+for jbbl, block in solution.iteritems():
+    assert block.label.offset == int(jbbl._name, 16)
diff --git a/test/expression/expression.py b/test/expression/expression.py
index 1fdc7680..90236744 100644
--- a/test/expression/expression.py
+++ b/test/expression/expression.py
@@ -3,5 +3,45 @@
 #
 from pdb import pm
 from miasm2.expression.expression import *
+from miasm2.expression.expression_helper import *
 
 assert(ExprInt64(-1) != ExprInt64(-2))
+
+# Possible values
+#- Common constants
+A = ExprId("A")
+cond1 = ExprId("cond1", 1)
+cond2 = ExprId("cond2", 16)
+cst1 = ExprInt32(1)
+cst2 = ExprInt32(2)
+cst3 = ExprInt32(3)
+cst4 = ExprInt32(4)
+
+#- Launch tests
+for expr in [
+        cst1,
+        A,
+        ExprMem(cst1, 32),
+        ExprCond(cond1, cst1, cst2),
+        ExprMem(ExprCond(cond1, cst1, cst2), 16),
+        ExprCond(cond1,
+                 ExprCond(cond2, cst3, cst4),
+                 cst2),
+        A + cst1,
+        A + ExprCond(cond1, cst1, cst2),
+        ExprCond(cond1, cst1, cst2) + ExprCond(cond2, cst3, cst4),
+        ExprCompose([(A, 0, 32), (cst1, 32, 64)]),
+        ExprCompose([(ExprCond(cond1, cst1, cst2), 0, 32), (A, 32, 64)]),
+        ExprCompose([(ExprCond(cond1, cst1, cst2), 0, 32),
+                     (ExprCond(cond2, cst3, cst4), 32, 64)]),
+        ExprCond(ExprCond(cond1, cst1, cst2), cst3, cst4),
+]:
+    print "*" * 80
+    print expr
+    sol = possible_values(expr)
+    print sol
+    print "Resulting constraints:"
+    for consval in sol:
+        print "For value %s" % consval.value
+        for constraint in consval.constraints:
+            print "\t%s" % constraint.to_constraint()
diff --git a/test/samples/x86_32/dg_test_00.S b/test/samples/x86_32/dg_test_00.S
new file mode 100644
index 00000000..dc9ef665
--- /dev/null
+++ b/test/samples/x86_32/dg_test_00.S
@@ -0,0 +1,9 @@
+main:
+	MOV ECX, 0x1
+	JMP lbl1
+lbl1:
+	MOV EBX, ECX
+	JMP lbl2
+lbl2:
+	MOV EAX, EBX
+	RET
diff --git a/test/samples/x86_32/dg_test_01.S b/test/samples/x86_32/dg_test_01.S
new file mode 100644
index 00000000..4f4ff80d
--- /dev/null
+++ b/test/samples/x86_32/dg_test_01.S
@@ -0,0 +1,9 @@
+main:
+	MOV ECX, 0x1
+	JMP lbl1
+lbl1:
+	MOV EBX, 0x2
+	JMP lbl2
+lbl2:
+	LEA EAX, DWORD PTR [EBX + ECX]
+	RET
diff --git a/test/samples/x86_32/dg_test_02.S b/test/samples/x86_32/dg_test_02.S
new file mode 100644
index 00000000..164dd90a
--- /dev/null
+++ b/test/samples/x86_32/dg_test_02.S
@@ -0,0 +1,12 @@
+main:
+	MOV ECX, 0x1
+	JZ lbl2
+lbl1:
+	MOV EBX, 0x2
+	JMP end
+lbl2:
+	MOV EBX, 0x3
+	JMP end
+end:
+	LEA EAX, DWORD PTR [EBX + ECX]
+	RET
diff --git a/test/samples/x86_32/dg_test_03.S b/test/samples/x86_32/dg_test_03.S
new file mode 100644
index 00000000..ac0edc56
--- /dev/null
+++ b/test/samples/x86_32/dg_test_03.S
@@ -0,0 +1,10 @@
+main:
+	MOV EBX, 0x1
+	JMP lbl1
+lbl1:
+	ADD EBX, 0x2
+	CMP EBX, 0x0
+	JNZ lbl1
+end:
+	MOV EAX, EBX
+	RET
diff --git a/test/samples/x86_32/dg_test_04.S b/test/samples/x86_32/dg_test_04.S
new file mode 100644
index 00000000..393430ed
--- /dev/null
+++ b/test/samples/x86_32/dg_test_04.S
@@ -0,0 +1,10 @@
+main:
+	MOV ECX, 0x1
+	JMP lbl1
+lbl1:
+	ADD ECX, 0x2
+	CMP ECX, 0x0
+	JZ lbl1
+end:
+	MOV EAX, EBX
+	RET
diff --git a/test/samples/x86_32/dg_test_05.S b/test/samples/x86_32/dg_test_05.S
new file mode 100644
index 00000000..336ee3f5
--- /dev/null
+++ b/test/samples/x86_32/dg_test_05.S
@@ -0,0 +1,11 @@
+main:
+	MOV ECX, 0x1
+	MOV EBX, 0x3
+	JMP lbl1
+lbl1:
+	ADD ECX, 0x2
+	CMP ECX, 0x0
+	JZ lbl1
+end:
+	MOV EAX, EBX
+	RET
diff --git a/test/samples/x86_32/dg_test_06.S b/test/samples/x86_32/dg_test_06.S
new file mode 100644
index 00000000..e12766f2
--- /dev/null
+++ b/test/samples/x86_32/dg_test_06.S
@@ -0,0 +1,12 @@
+main:
+	MOV ECX, 0x1
+	MOV EDX, 0x2
+	JMP lbl1
+lbl1:
+	MOV EBX, ECX
+	MOV ECX, EDX
+	CMP EAX, 0x0
+	JZ lbl1
+end:
+	MOV EAX, EBX
+	RET
diff --git a/test/samples/x86_32/dg_test_07.S b/test/samples/x86_32/dg_test_07.S
new file mode 100644
index 00000000..a23d5b66
--- /dev/null
+++ b/test/samples/x86_32/dg_test_07.S
@@ -0,0 +1,10 @@
+main:
+	MOV ECX, 0x1
+	MOV EBX, 0x2
+	JMP lbl1
+lbl1:
+	XCHG EBX, ECX
+	JMP end
+end:
+	MOV EAX, EBX
+	RET
diff --git a/test/samples/x86_32/dg_test_08.S b/test/samples/x86_32/dg_test_08.S
new file mode 100644
index 00000000..a23d5b66
--- /dev/null
+++ b/test/samples/x86_32/dg_test_08.S
@@ -0,0 +1,10 @@
+main:
+	MOV ECX, 0x1
+	MOV EBX, 0x2
+	JMP lbl1
+lbl1:
+	XCHG EBX, ECX
+	JMP end
+end:
+	MOV EAX, EBX
+	RET
diff --git a/test/samples/x86_32/dg_test_09.S b/test/samples/x86_32/dg_test_09.S
new file mode 100644
index 00000000..d4d053f5
--- /dev/null
+++ b/test/samples/x86_32/dg_test_09.S
@@ -0,0 +1,13 @@
+main:
+	MOV ECX, 0x8
+	JZ lbl2
+lbl1:
+	MOV EAX, 0x1
+	MOV EBX, 0x2
+	JMP end
+lbl2:
+	MOV EAX, 0x3
+	MOV EBX, 0x4
+	JMP end
+end:
+	RET
diff --git a/test/samples/x86_32/dg_test_10.S b/test/samples/x86_32/dg_test_10.S
new file mode 100644
index 00000000..5825da67
--- /dev/null
+++ b/test/samples/x86_32/dg_test_10.S
@@ -0,0 +1,20 @@
+main:
+	MOV ECX, 0x8
+	JZ lbl2
+lbl1:
+	MOV EAX, 0x1
+	JMP end1
+lbl2:
+	MOV EAX, 0x2
+	JMP end1
+end1:
+	JZ lbl4
+lbl3:
+	MOV EBX, 0x3
+	JMP end
+lbl4:
+	MOV EBX, 0x4
+	JMP end
+
+end:
+	RET
diff --git a/test/test_all.py b/test/test_all.py
index adee5f2d..53e8d513 100644
--- a/test/test_all.py
+++ b/test/test_all.py
@@ -5,7 +5,8 @@ import tempfile
 
 from utils.test import Test
 from utils.testset import TestSet
-from utils import cosmetics, monothread, screendisplay
+from utils import cosmetics, multithread
+from multiprocessing import Queue
 
 testset = TestSet("../")
 TAGS = {"regression": "REGRESSION", # Regression tests
@@ -22,11 +23,19 @@ class RegressionTest(Test):
     - @base_dir: test/@base_dir
     - @tags: TAGS["regression"]"""
 
+    sample_dir = os.path.join("..", "samples")
+
     def __init__(self, *args, **kwargs):
         super(RegressionTest, self).__init__(*args, **kwargs)
         self.base_dir = os.path.join("test", self.base_dir)
         self.tags.append(TAGS["regression"])
 
+    @classmethod
+    def get_sample(cls, sample_name):
+        "Return the relative path of @sample_name"
+        return os.path.join(cls.sample_dir, sample_name)
+
+
 ## Architecture
 testset += RegressionTest(["x86/arch.py"], base_dir="arch",
                           products=["x86_speed_reg_test.bin",
@@ -49,6 +58,7 @@ for script in ["x86/sem.py",
                "x86/unit/mn_pinsr.py",
                "x86/unit/mn_pextr.py",
                "x86/unit/mn_pmovmskb.py",
+               "x86/unit/mn_pushpop.py",
                "arm/arch.py",
                "arm/sem.py",
                "aarch64/unit/mn_ubfm.py",
@@ -242,6 +252,56 @@ testset += RegressionTest(["depgraph.py"], base_dir="analysis",
                                                         (14, 1), (15, 1)))
                            for fname in fnames])
 
+## Depgraph
+class TestDepgraph(RegressionTest):
+    """Dependency graph test"""
+    example_depgraph = os.path.join("..", "..", "example", "symbol_exec",
+                                    "depgraph.py")
+    launcher = "dg_check.py"
+
+
+    def __init__(self, test_nb, implicit, base_addr, target_addr, elements,
+                 *args, **kwargs):
+        super(TestDepgraph, self).__init__([self.launcher],
+                                           *args, **kwargs)
+        self.base_dir = os.path.join(self.base_dir, "analysis")
+        if implicit:
+            expected_fname = "dg_test_%.2d_implicit_expected.json"
+            self.tags.append(TAGS["z3"])
+        else:
+            expected_fname = "dg_test_%.2d_expected.json"
+
+        self.command_line += [
+            expected_fname % test_nb,
+            self.example_depgraph,
+            "-m", "x86_32",
+            "--json",
+            self.get_sample(os.path.join("x86_32",
+                                         "dg_test_%.2d.bin" % test_nb)),
+            hex(base_addr),
+            hex(target_addr)] + elements
+        if implicit:
+            self.command_line.append("-i")
+
+# Depgraph emulation regression test
+test_args = [(0x401000, 0x40100d, ["EAX"]),
+             (0x401000, 0x401011, ["EAX"]),
+             (0x401000, 0x401018, ["EAX"]),
+             (0x401000, 0x401011, ["EAX"]),
+             (0x401000, 0x401011, ["EAX"]),
+             (0x401000, 0x401016, ["EAX"]),
+             (0x401000, 0x401017, ["EAX"]),
+             (0x401000, 0x401012, ["EAX", "ECX"]),
+             (0x401000, 0x401012, ["ECX"]),
+             (0x401000, 0x40101f, ["EAX", "EBX"]),
+             (0x401000, 0x401025, ["EAX", "EBX"]),
+]
+for i, test_args in enumerate(test_args):
+    test_dg = SemanticTestAsm("x86_32", "PE", ["dg_test_%.2d" % i])
+    testset += test_dg
+    testset += TestDepgraph(i, False, *test_args, depends=[test_dg])
+    testset += TestDepgraph(i, True, *test_args, depends=[test_dg])
+
 ## Jitter
 for script in ["jitload.py",
                ]:
@@ -322,6 +382,7 @@ test_mips32b = ExampleShellcode(["mips32b", "mips32.S", "mips32_sc_b.bin"])
 test_mips32l = ExampleShellcode(["mips32l", "mips32.S", "mips32_sc_l.bin"])
 test_x86_64 = ExampleShellcode(["x86_64", "x86_64.S", "demo_x86_64.bin",
                                 "--PE"])
+test_x86_32_if_reg = ExampleShellcode(['x86_32', 'x86_32_if_reg.S', "x86_32_if_reg.bin"])
 
 testset += test_armb
 testset += test_arml
@@ -335,6 +396,7 @@ testset += test_msp430
 testset += test_mips32b
 testset += test_mips32l
 testset += test_x86_64
+testset += test_x86_32_if_reg
 
 class ExampleDisassembler(Example):
     """Disassembler examples specificities:
@@ -387,6 +449,8 @@ testset += ExampleDisasmFull(["aarch64b", Example.get_sample("demo_aarch64_b.bin
                               "0"], depends=[test_aarch64b])
 testset += ExampleDisasmFull(["x86_32", Example.get_sample("x86_32_simple.bin"),
                               "0x401000"], depends=[test_box["simple"]])
+testset += ExampleDisasmFull(["x86_32", Example.get_sample("x86_32_if_reg.bin"),
+                              "0x0"], depends=[test_x86_32_if_reg])
 testset += ExampleDisasmFull(["msp430", Example.get_sample("msp430_sc.bin"),
                               "0"], depends=[test_msp430])
 testset += ExampleDisasmFull(["mips32l", Example.get_sample("mips32_sc_l.bin"),
@@ -397,6 +461,10 @@ testset += ExampleDisasmFull(["x86_64", Example.get_sample("demo_x86_64.bin"),
                               "0x401000"], depends=[test_x86_64])
 testset += ExampleDisasmFull(["aarch64l", Example.get_sample("md5_aarch64l"),
                               "0x400A00"], depends=[test_aarch64l])
+testset += ExampleDisasmFull(["x86_32", os.path.join("..", "..", "test",
+                                                     "arch", "x86", "qemu",
+                                                     "test-i386"),
+                              "func_iret"])
 
 
 ## Expression
@@ -441,13 +509,24 @@ class ExampleSymbolExec(Example):
 
 testset += ExampleSymbolExec(["single_instr.py"])
 for options, nb_sol, tag in [([], 8, []),
-                             (["-i", "--rename-args"], 10, [TAGS["z3"]])]:
+                             (["-i", "--rename-args"], 12, [TAGS["z3"]])]:
     testset += ExampleSymbolExec(["depgraph.py",
                                   Example.get_sample("simple_test.bin"),
                                   "-m", "x86_32", "0x0", "0x8b",
-                                  "eax"] + options,
+                                  "EAX"] + options,
+                                 products=["sol_%d.dot" % nb
+                                           for nb in xrange(nb_sol)],
+                                 tags=tag)
+
+for options, nb_sol, tag in [([], 4, []),
+                             (["-i", "--rename-args"], 4, [TAGS["z3"]])]:
+    testset += ExampleSymbolExec(["depgraph.py",
+                                  Example.get_sample("x86_32_if_reg.bin"),
+                                  "-m", "x86_32", "0x0", "0x19",
+                                  "EAX"] + options,
                                  products=["sol_%d.dot" % nb
                                            for nb in xrange(nb_sol)],
+                                 depends=[test_x86_32_if_reg],
                                  tags=tag)
 
 ## Jitter
@@ -590,16 +669,15 @@ By default, no tag is omitted." % ", ".join(TAGS.keys()), default="")
             "Z3 and its python binding are necessary for TranslatorZ3."
         if TAGS["z3"] not in exclude_tags:
             exclude_tags.append(TAGS["z3"])
+    test_ko = []
+    test_ok = []
 
     # Set callbacks
     if multiproc is False:
-        testset.set_callback(task_done=monothread.task_done,
-                             task_new=monothread.task_new)
         testset.set_cpu_numbers(1)
-    else:
-        screendisplay.init(testset.cpu_c)
-        testset.set_callback(task_done=screendisplay.task_done,
-                             task_new=screendisplay.task_new)
+    testset.set_callback(task_done=lambda test, error:multithread.task_done(test, error, test_ok, test_ko),
+                         task_new=multithread.task_new)
+
 
     # Filter testset according to tags
     testset.filter_tags(exclude_tags=exclude_tags)
@@ -609,6 +687,13 @@ By default, no tag is omitted." % ", ".join(TAGS.keys()), default="")
 
     # Finalize
     testset.end(clean=not args.do_not_clean)
-
+    print
+    print (cosmetics.colors["green"] +
+           "Result: %d/%d pass" % (len(test_ok), len(test_ok) + len(test_ko)) +
+           cosmetics.colors["end"])
+    for test, error in test_ko:
+        command_line = " ".join(test.command_line)
+        print cosmetics.colors["red"] + 'ERROR', cosmetics.colors["lightcyan"] + command_line + cosmetics.colors["end"]
+        print error
     # Exit with an error if at least a test failed
     exit(testset.tests_passed())
diff --git a/test/utils/monothread.py b/test/utils/monothread.py
deleted file mode 100644
index ae64f3c5..00000000
--- a/test/utils/monothread.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import sys
-import cosmetics
-
-
-def task_done(test, error):
-    s = "[%s] Running tests on %s ..." % (test.base_dir.upper(),
-                                          " ".join(test.command_line))
-    already_printed = len(s)
-    if error is not None:
-        cosmetics.write_colored("ERROR", "red", already_printed)
-        print error
-    else:
-        cosmetics.write_colored("OK", "green", already_printed)
-
-
-def task_new(test):
-    s = "[%s] Running tests on %s ..." % (test.base_dir.upper(),
-                                          " ".join(test.command_line))
-    sys.stdout.write(s)
-    sys.stdout.flush()
diff --git a/test/utils/multithread.py b/test/utils/multithread.py
new file mode 100644
index 00000000..287b5ebd
--- /dev/null
+++ b/test/utils/multithread.py
@@ -0,0 +1,24 @@
+import sys
+import cosmetics
+import time
+
+
+def task_done(test, error, test_ok, test_ko):
+    command_line = " ".join(test.command_line)
+    if error is not None:
+        print cosmetics.colors["red"] + 'ERROR',
+        print cosmetics.colors["lightcyan"] + command_line + cosmetics.colors["end"]
+        print error
+        test_ko.append((test, error))
+    else:
+        print cosmetics.colors["green"] + 'DONE',
+        print cosmetics.colors["lightcyan"] + command_line + cosmetics.colors["end"],
+        print "%ds" % (time.time() - test.start_time)
+        test_ok.append((test, error))
+
+
+def task_new(test):
+    command_line = " ".join(test.command_line)
+    print cosmetics.colors["lightcyan"],
+    print test.base_dir.upper(), command_line,
+    print cosmetics.colors["end"]
diff --git a/test/utils/screendisplay.py b/test/utils/screendisplay.py
deleted file mode 100644
index 7c7bfde1..00000000
--- a/test/utils/screendisplay.py
+++ /dev/null
@@ -1,115 +0,0 @@
-import time
-import signal
-from cosmetics import getTerminalSize, colors
-
-
-global_state = {"termSize": getTerminalSize(),
-                "message": "",
-                "pstate": []}
-
-
-def print_conf(conf, value):
-    "Print a configuration line"
-    return colors["green"] + conf + ": " + colors["end"] + str(value)
-
-
-def clr_screen():
-    "Update the screen to display some information"
-
-    # Header
-    to_print = []
-    to_print.append(" " * (global_state["termSize"][0] / 2 - 10) + colors[
-                    "blue"] + "Miasm2 Regression tests" + colors["end"])
-    to_print.append("")
-    to_print.append("=" * global_state["termSize"][0])
-    to_print.append("")
-    to_print.append(print_conf("Current mode", "Multiprocessing"))
-    to_print.append(print_conf("Nb CPU detected", global_state["cpu_c"]))
-    to_print.append("")
-    to_print.append("=" * global_state["termSize"][0])
-    to_print.append("")
-    test_done = 0
-    test_failed = 0
-    message = global_state["message"] + "\n"
-    for v in global_state["pstate"]:
-        if v["status"] != "running":
-            test_done += 1
-            if v["status"] != 0:
-                test_failed += 1
-                cmd_line = " ".join(v["test"].command_line)
-                message += colors["red"] + "FAIL:" + colors["end"] + cmd_line
-                message += "\n" + v["message"] + "\n"
-
-    to_print.append(print_conf("Success rate", "%d/%d" %
-                    (test_done - test_failed, test_done)))
-    printed_time = time.strftime(
-        "%M:%S", time.gmtime(time.time() - global_state["init_time"]))
-    to_print.append(print_conf("Cumulated time", printed_time))
-    to_print.append("")
-    to_print.append("=" * global_state["termSize"][0])
-
-    cur = "\n".join(to_print)
-    cur += "\n"
-
-    # Message
-    cur += message
-    print cur
-    already_printed = cur.count("\n")
-
-    # Current state
-    current_job = []
-    for process in global_state["pstate"]:
-        if process["status"] == "running":
-            current_job.append(process)
-    print "\n" * (global_state["termSize"][1] - already_printed - 3 - len(current_job))
-
-    for job in current_job:
-        command_line = " ".join(job["test"].command_line)
-        base_dir = job["test"].base_dir.upper()
-        s = "[" + colors["lightcyan"] + command_line + colors["end"]
-        s_end = base_dir
-        cur_time = time.strftime(
-            "%M:%Ss", time.gmtime(time.time() - job["init_time"]))
-        l = len(command_line) + len(s_end) + 4 + len(str(cur_time)) + 2
-        s_end += "    " + colors["blue"] + cur_time + colors["end"] + "]"
-        print "%s%s%s" % (s, " " * (global_state["termSize"][0] - l), s_end)
-
-
-def on_signal(sig1, sig2):
-    "Update view every second"
-    clr_screen()
-    signal.alarm(1)
-
-
-def init(cpu_c):
-    """Initialize global state
-    @cpu_c: number of cpu (for conf displaying)
-    """
-    # Init global_state
-    global_state["cpu_c"] = cpu_c
-    global_state["init_time"] = time.time()
-
-    # Launch view updater
-    signal.signal(signal.SIGALRM, on_signal)
-    signal.alarm(1)
-
-
-def task_done(test, error):
-    "Report a test done"
-    for task in global_state["pstate"]:
-        if task["test"] == test:
-            if error is not None:
-                task["status"] = -1
-                task["message"] = error
-            else:
-                task["status"] = 0
-            break
-    clr_screen()
-
-
-def task_new(test):
-    "Report a new test"
-    global_state["pstate"].append({"status": "running",
-                                   "test": test,
-                                   "init_time": time.time()})
-    clr_screen()
diff --git a/test/utils/testset.py b/test/utils/testset.py
index 54df732c..4336f4fa 100644
--- a/test/utils/testset.py
+++ b/test/utils/testset.py
@@ -1,34 +1,43 @@
 import os
 import subprocess
 import sys
+import time
 from multiprocessing import cpu_count, Queue, Process
+
 from test import Test
 
 
 class Message(object):
+
     "Message exchanged in the TestSet message queue"
     pass
 
 
 class MessageTaskNew(Message):
+
     "Stand for a new task"
+
     def __init__(self, task):
         self.task = task
 
 
 class MessageTaskDone(Message):
+
     "Stand for a task done"
+
     def __init__(self, task, error):
         self.task = task
         self.error = error
 
 
 class MessageClose(Message):
+
     "Close the channel"
     pass
 
 
 class TestSet(object):
+
     "Manage a set of test"
 
     def __init__(self, base_dir):
@@ -39,7 +48,7 @@ class TestSet(object):
         self.base_dir = base_dir
 
         # Init internals
-        self.task_done_cb = lambda tst, err: None # On task done callback
+        self.task_done_cb = lambda tst, err: None  # On task done callback
         self.task_new_cb = lambda tst: None       # On new task callback
         self.todo_queue = Queue()                 # Tasks to do
         self.message_queue = Queue()              # Messages with workers
@@ -136,6 +145,7 @@ class TestSet(object):
             test = todo_queue.get()
             if test is None:
                 break
+            test.start_time = time.time()
             message_queue.put(MessageTaskNew(test))
 
             # Go to the expected directory