-rw-r--r--  arch_init.c                        177
-rw-r--r--  hmp-commands.hx                     16
-rw-r--r--  hmp.c                               14
-rw-r--r--  hmp.h                                1
-rw-r--r--  hw/core/machine.c                   20
-rw-r--r--  hw/i386/pc_piix.c                    1
-rw-r--r--  hw/i386/pc_q35.c                     1
-rw-r--r--  include/hw/boards.h                  1
-rw-r--r--  include/migration/migration.h        2
-rw-r--r--  include/migration/qemu-file.h        2
-rw-r--r--  migration/migration.c               48
-rw-r--r--  migration/qemu-file.c                3
-rw-r--r--  migration/rdma.c                     2
-rw-r--r--  qapi-schema.json                    15
-rw-r--r--  qemu-options.hx                      3
-rw-r--r--  qmp-commands.hx                     31
-rw-r--r--  savevm.c                            36
-rw-r--r--  target-tricore/cpu.h                 7
-rw-r--r--  target-tricore/helper.h             12
-rw-r--r--  target-tricore/op_helper.c         436
-rw-r--r--  target-tricore/translate.c        1319
-rw-r--r--  target-tricore/tricore-opcodes.h    56
-rw-r--r--  tcg/optimize.c                       5
23 files changed, 2096 insertions(+), 112 deletions(-)
diff --git a/arch_init.c b/arch_init.c
index 691b5e2e91..c3f7d3f56d 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -305,34 +305,50 @@ uint64_t xbzrle_mig_pages_overflow(void)
     return acct_info.xbzrle_overflows;
 }
 
-static size_t save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
-                             int cont, int flag)
+/* This is the last block that we have visited searching for dirty pages
+ */
+static RAMBlock *last_seen_block;
+/* This is the last block from where we have sent data */
+static RAMBlock *last_sent_block;
+static ram_addr_t last_offset;
+static unsigned long *migration_bitmap;
+static uint64_t migration_dirty_pages;
+static uint32_t last_version;
+static bool ram_bulk_stage;
+
+/**
+ * save_page_header: Write page header to wire
+ *
+ * If the block differs from the one used last time, it also writes
+ * the block identification
+ *
+ * Returns: Number of bytes written
+ *
+ * @f: QEMUFile where to send the data
+ * @block: block that contains the page we want to send
+ * @offset: offset inside the block for the page
+ *          in the lower bits, it contains flags
+ */
+static size_t save_page_header(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
 {
     size_t size;
 
-    qemu_put_be64(f, offset | cont | flag);
+    if (block == last_sent_block) {
+        offset |= RAM_SAVE_FLAG_CONTINUE;
+    }
+
+    qemu_put_be64(f, offset);
     size = 8;
 
-    if (!cont) {
+    if (block != last_sent_block) {
         qemu_put_byte(f, strlen(block->idstr));
         qemu_put_buffer(f, (uint8_t *)block->idstr,
                         strlen(block->idstr));
         size += 1 + strlen(block->idstr);
+        last_sent_block = block;
     }
     return size;
 }
 
-/* This is the last block that we have visited serching for dirty pages
- */
-static RAMBlock *last_seen_block;
-/* This is the last block from where we have sent data */
-static RAMBlock *last_sent_block;
-static ram_addr_t last_offset;
-static unsigned long *migration_bitmap;
-static uint64_t migration_dirty_pages;
-static uint32_t last_version;
-static bool ram_bulk_stage;
-
 /* Update the xbzrle cache to reflect a page that's been sent as all 0.
  * The important thing is that a stale (not-yet-0'd) page be replaced
  * by the new data.
@@ -353,11 +369,27 @@ static void xbzrle_cache_zero_page(ram_addr_t current_addr)
 
 #define ENCODING_FLAG_XBZRLE 0x1
 
+/**
+ * save_xbzrle_page: compress and send current page
+ *
+ * Returns: 1 means that we wrote the page
+ *          0 means that the page is identical to the one already sent
+ *          -1 means that xbzrle would be longer than normal
+ *
+ * @f: QEMUFile where to send the data
+ * @current_data: pointer to the page contents; may be updated to point
+ *                at the cached copy of the page
+ * @current_addr: RAM address of the page
+ * @block: block that contains the page we want to send
+ * @offset: offset inside the block for the page
+ * @last_stage: if we are at the completion stage
+ * @bytes_transferred: increase it with the number of transferred bytes
+ */
 static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
                             ram_addr_t current_addr, RAMBlock *block,
-                            ram_addr_t offset, int cont, bool last_stage)
+                            ram_addr_t offset, bool last_stage,
+                            uint64_t *bytes_transferred)
 {
-    int encoded_len = 0, bytes_sent = -1;
+    int encoded_len = 0, bytes_xbzrle;
     uint8_t *prev_cached_page;
 
     if (!cache_is_cached(XBZRLE.cache, current_addr, bitmap_sync_count)) {
@@ -404,15 +436,16 @@ static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
     }
 
     /* Send XBZRLE based compressed page */
-    bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_XBZRLE);
+    bytes_xbzrle = save_page_header(f, block, offset | RAM_SAVE_FLAG_XBZRLE);
     qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
     qemu_put_be16(f, encoded_len);
     qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
-    bytes_sent += encoded_len + 1 + 2;
+    bytes_xbzrle += encoded_len + 1 + 2;
     acct_info.xbzrle_pages++;
-    acct_info.xbzrle_bytes += bytes_sent;
+    acct_info.xbzrle_bytes += bytes_xbzrle;
+    *bytes_transferred += bytes_xbzrle;
 
-    return bytes_sent;
+    return 1;
 }
 
 static inline
@@ -575,55 +608,64 @@ static void migration_bitmap_sync(void)
     }
 }
 
-/*
+/**
  * ram_save_page: Send the given page to the stream
  *
- * Returns: Number of bytes written.
+ * Returns: Number of pages written.
+ *
+ * @f: QEMUFile where to send the data
+ * @block: block that contains the page we want to send
+ * @offset: offset inside the block for the page
+ * @last_stage: if we are at the completion stage
+ * @bytes_transferred: increase it with the number of transferred bytes
  */
 static int ram_save_page(QEMUFile *f, RAMBlock* block, ram_addr_t offset,
-                         bool last_stage)
+                         bool last_stage, uint64_t *bytes_transferred)
 {
-    int bytes_sent;
-    int cont;
+    int pages = -1;
+    uint64_t bytes_xmit;
     ram_addr_t current_addr;
     MemoryRegion *mr = block->mr;
     uint8_t *p;
     int ret;
     bool send_async = true;
 
-    cont = (block == last_sent_block) ? RAM_SAVE_FLAG_CONTINUE : 0;
-
     p = memory_region_get_ram_ptr(mr) + offset;
 
     /* When in doubt, send the page as normal */
-    bytes_sent = -1;
+    bytes_xmit = 0;
     ret = ram_control_save_page(f, block->offset,
-                           offset, TARGET_PAGE_SIZE, &bytes_sent);
+                           offset, TARGET_PAGE_SIZE, &bytes_xmit);
+    if (bytes_xmit) {
+        *bytes_transferred += bytes_xmit;
+        pages = 1;
+    }
 
     XBZRLE_cache_lock();
 
     current_addr = block->offset + offset;
     if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
         if (ret != RAM_SAVE_CONTROL_DELAYED) {
-            if (bytes_sent > 0) {
+            if (bytes_xmit > 0) {
                 acct_info.norm_pages++;
-            } else if (bytes_sent == 0) {
+            } else if (bytes_xmit == 0) {
                 acct_info.dup_pages++;
             }
         }
     } else if (is_zero_range(p, TARGET_PAGE_SIZE)) {
         acct_info.dup_pages++;
-        bytes_sent = save_block_hdr(f, block, offset, cont,
-                                    RAM_SAVE_FLAG_COMPRESS);
+        *bytes_transferred += save_page_header(f, block,
+                                               offset | RAM_SAVE_FLAG_COMPRESS);
         qemu_put_byte(f, 0);
-        bytes_sent++;
+        *bytes_transferred += 1;
+        pages = 1;
         /* Must let xbzrle know, otherwise a previous (now 0'd) cached
          * page would be stale
          */
         xbzrle_cache_zero_page(current_addr);
     } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
-        bytes_sent = save_xbzrle_page(f, &p, current_addr, block,
-                                      offset, cont, last_stage);
+        pages = save_xbzrle_page(f, &p, current_addr, block,
+                                 offset, last_stage, bytes_transferred);
         if (!last_stage) {
             /* Can't send this cached data async, since the cache page
              * might get updated before it gets to the wire
@@ -633,37 +675,44 @@ static int ram_save_page(QEMUFile *f, RAMBlock* block, ram_addr_t offset,
     }
 
     /* XBZRLE overflow or normal page */
-    if (bytes_sent == -1) {
-        bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_PAGE);
+    if (pages == -1) {
+        *bytes_transferred += save_page_header(f, block,
+                                               offset | RAM_SAVE_FLAG_PAGE);
         if (send_async) {
             qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
         } else {
             qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
         }
-        bytes_sent += TARGET_PAGE_SIZE;
+        *bytes_transferred += TARGET_PAGE_SIZE;
+        pages = 1;
         acct_info.norm_pages++;
     }
 
     XBZRLE_cache_unlock();
 
-    return bytes_sent;
+    return pages;
 }
 
-/*
- * ram_find_and_save_block: Finds a page to send and sends it to f
+/**
+ * ram_find_and_save_block: Finds a dirty page and sends it to f
  *
  * Called within an RCU critical section.
  *
- * Returns:  The number of bytes written.
+ * Returns:  The number of pages written
  *           0 means no dirty pages
+ *
+ * @f: QEMUFile where to send the data
+ * @last_stage: if we are at the completion stage
+ * @bytes_transferred: increase it with the number of transferred bytes
  */
 
-static int ram_find_and_save_block(QEMUFile *f, bool last_stage)
+static int ram_find_and_save_block(QEMUFile *f, bool last_stage,
+                                   uint64_t *bytes_transferred)
 {
     RAMBlock *block = last_seen_block;
     ram_addr_t offset = last_offset;
     bool complete_round = false;
-    int bytes_sent = 0;
+    int pages = 0;
     MemoryRegion *mr;
 
     if (!block)
@@ -685,11 +734,11 @@ static int ram_find_and_save_block(QEMUFile *f, bool last_stage)
                 ram_bulk_stage = false;
             }
         } else {
-            bytes_sent = ram_save_page(f, block, offset, last_stage);
+            pages = ram_save_page(f, block, offset, last_stage,
+                                  bytes_transferred);
 
             /* if page is unmodified, continue to the next */
-            if (bytes_sent > 0) {
-                last_sent_block = block;
+            if (pages > 0) {
                 break;
             }
         }
@@ -697,7 +746,8 @@ static int ram_find_and_save_block(QEMUFile *f, bool last_stage)
 
     last_seen_block = block;
     last_offset = offset;
-    return bytes_sent;
+
+    return pages;
 }
 
 static uint64_t bytes_transferred;
@@ -881,7 +931,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
     int ret;
     int i;
     int64_t t0;
-    int total_sent = 0;
+    int pages_sent = 0;
 
     rcu_read_lock();
     if (ram_list.version != last_version) {
@@ -896,14 +946,14 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
     t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
     i = 0;
     while ((ret = qemu_file_rate_limit(f)) == 0) {
-        int bytes_sent;
+        int pages;
 
-        bytes_sent = ram_find_and_save_block(f, false);
-        /* no more blocks to sent */
-        if (bytes_sent == 0) {
+        pages = ram_find_and_save_block(f, false, &bytes_transferred);
+        /* no more pages to send */
+        if (pages == 0) {
             break;
         }
-        total_sent += bytes_sent;
+        pages_sent += pages;
         acct_info.iterations++;
         check_guest_throttling();
         /* we want to check in the 1st loop, just in case it was the 1st time
@@ -929,12 +979,6 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
      */
     ram_control_after_iterate(f, RAM_CONTROL_ROUND);
 
-    bytes_transferred += total_sent;
-
-    /*
-     * Do not count these 8 bytes into total_sent, so that we can
-     * return 0 if no page had been dirtied.
-     */
     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
     bytes_transferred += 8;
 
@@ -943,7 +987,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
         return ret;
     }
 
-    return total_sent;
+    return pages_sent;
 }
 
 /* Called with iothread lock */
@@ -959,14 +1003,13 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
 
     /* flush all remaining blocks regardless of rate limiting */
     while (true) {
-        int bytes_sent;
+        int pages;
 
-        bytes_sent = ram_find_and_save_block(f, true);
+        pages = ram_find_and_save_block(f, true, &bytes_transferred);
         /* no more blocks to send */
-        if (bytes_sent == 0) {
+        if (pages == 0) {
             break;
         }
-        bytes_transferred += bytes_sent;
     }
 
     ram_control_after_iterate(f, RAM_CONTROL_FINISH);
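
The save_page_header() hunk above pins down the per-page wire format: a
big-endian 64-bit word carrying the page offset with flag bits in its low
bits, followed by a one-byte length and the raw block idstr only when the
block changes (RAM_SAVE_FLAG_CONTINUE clear). A minimal reader-side sketch
of that layout, using the qemu-file primitives seen elsewhere in this diff
(load_page_header itself is a hypothetical helper, not part of this patch):

    static uint64_t load_page_header(QEMUFile *f, char *idstr)
    {
        uint64_t header = qemu_get_be64(f);    /* offset | flags */

        if (!(header & RAM_SAVE_FLAG_CONTINUE)) {
            /* block changed: one length byte, then the unterminated idstr */
            uint8_t len = qemu_get_byte(f);

            qemu_get_buffer(f, (uint8_t *)idstr, len);
            idstr[len] = '\0';
        }
        return header;
    }
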
diff --git a/hmp-commands.hx b/hmp-commands.hx
index d5022d843c..9c1e849859 100644
--- a/hmp-commands.hx
+++ b/hmp-commands.hx
@@ -922,6 +922,22 @@ Cancel the current VM migration.
 ETEXI
 
     {
+        .name       = "migrate_incoming",
+        .args_type  = "uri:s",
+        .params     = "uri",
+        .help       = "Continue an incoming migration from an -incoming defer",
+        .mhandler.cmd = hmp_migrate_incoming,
+    },
+
+STEXI
+@item migrate_incoming @var{uri}
+@findex migrate_incoming
+Continue an incoming migration using @var{uri} (which has the same syntax
+as the -incoming option).
+
+ETEXI
+
+    {
         .name       = "migrate_set_cache_size",
         .args_type  = "value:o",
         .params     = "value",
diff --git a/hmp.c b/hmp.c
index 71c28bc816..f6cde86b93 100644
--- a/hmp.c
+++ b/hmp.c
@@ -1116,6 +1116,20 @@ void hmp_migrate_cancel(Monitor *mon, const QDict *qdict)
     qmp_migrate_cancel(NULL);
 }
 
+void hmp_migrate_incoming(Monitor *mon, const QDict *qdict)
+{
+    Error *err = NULL;
+    const char *uri = qdict_get_str(qdict, "uri");
+
+    qmp_migrate_incoming(uri, &err);
+
+    if (err) {
+        monitor_printf(mon, "%s\n", error_get_pretty(err));
+        error_free(err);
+        return;
+    }
+}
+
 void hmp_migrate_set_downtime(Monitor *mon, const QDict *qdict)
 {
     double value = qdict_get_double(qdict, "value");
diff --git a/hmp.h b/hmp.h
index 81177b247d..371f8d45cd 100644
--- a/hmp.h
+++ b/hmp.h
@@ -60,6 +60,7 @@ void hmp_snapshot_delete_blkdev_internal(Monitor *mon, const QDict *qdict);
 void hmp_drive_mirror(Monitor *mon, const QDict *qdict);
 void hmp_drive_backup(Monitor *mon, const QDict *qdict);
 void hmp_migrate_cancel(Monitor *mon, const QDict *qdict);
+void hmp_migrate_incoming(Monitor *mon, const QDict *qdict);
 void hmp_migrate_set_downtime(Monitor *mon, const QDict *qdict);
 void hmp_migrate_set_speed(Monitor *mon, const QDict *qdict);
 void hmp_migrate_set_capability(Monitor *mon, const QDict *qdict);
diff --git a/hw/core/machine.c b/hw/core/machine.c
index e3a3e2ab73..cb1185ada4 100644
--- a/hw/core/machine.c
+++ b/hw/core/machine.c
@@ -254,6 +254,20 @@ static void machine_set_iommu(Object *obj, bool value, Error **errp)
     ms->iommu = value;
 }
 
+static void machine_set_suppress_vmdesc(Object *obj, bool value, Error **errp)
+{
+    MachineState *ms = MACHINE(obj);
+
+    ms->suppress_vmdesc = value;
+}
+
+static bool machine_get_suppress_vmdesc(Object *obj, Error **errp)
+{
+    MachineState *ms = MACHINE(obj);
+
+    return ms->suppress_vmdesc;
+}
+
 static int error_on_sysbus_device(SysBusDevice *sbdev, void *opaque)
 {
     error_report("Option '-device %s' cannot be handled by this machine",
@@ -377,6 +391,12 @@ static void machine_initfn(Object *obj)
     object_property_set_description(obj, "iommu",
                                     "Set on/off to enable/disable Intel IOMMU (VT-d)",
                                     NULL);
+    object_property_add_bool(obj, "suppress-vmdesc",
+                             machine_get_suppress_vmdesc,
+                             machine_set_suppress_vmdesc, NULL);
+    object_property_set_description(obj, "suppress-vmdesc",
+                                    "Set on to disable self-describing migration",
+                                    NULL);
 
     /* Register notifier when init is done for sysbus sanity checks */
     ms->sysbus_notifier.notify = machine_init_notify;
diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
index 8eab4ba94d..36c69d71ef 100644
--- a/hw/i386/pc_piix.c
+++ b/hw/i386/pc_piix.c
@@ -335,6 +335,7 @@ static void pc_compat_2_2(MachineState *machine)
                                 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_RTM, 0);
     x86_cpu_compat_set_features("Broadwell", FEAT_7_0_EBX,
                                 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_RTM, 0);
+    machine->suppress_vmdesc = true;
 }
 
 static void pc_compat_2_1(MachineState *machine)
diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c
index c0f21fe725..bc40537d55 100644
--- a/hw/i386/pc_q35.c
+++ b/hw/i386/pc_q35.c
@@ -314,6 +314,7 @@ static void pc_compat_2_2(MachineState *machine)
                                 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_RTM, 0);
     x86_cpu_compat_set_features("Broadwell", FEAT_7_0_EBX,
                                 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_RTM, 0);
+    machine->suppress_vmdesc = true;
 }
 
 static void pc_compat_2_1(MachineState *machine)
diff --git a/include/hw/boards.h b/include/hw/boards.h
index f44d6f542a..1feea2b176 100644
--- a/include/hw/boards.h
+++ b/include/hw/boards.h
@@ -143,6 +143,7 @@ struct MachineState {
     bool usb;
     char *firmware;
     bool iommu;
+    bool suppress_vmdesc;
 
     ram_addr_t ram_size;
     ram_addr_t maxram_size;
diff --git a/include/migration/migration.h b/include/migration/migration.h
index 703b7d7c4b..5e16af60fd 100644
--- a/include/migration/migration.h
+++ b/include/migration/migration.h
@@ -169,6 +169,6 @@ void ram_control_load_hook(QEMUFile *f, uint64_t flags);
 
 size_t ram_control_save_page(QEMUFile *f, ram_addr_t block_offset,
                              ram_addr_t offset, size_t size,
-                             int *bytes_sent);
+                             uint64_t *bytes_sent);
 
 #endif
diff --git a/include/migration/qemu-file.h b/include/migration/qemu-file.h
index a923cec2a6..94a8c978a4 100644
--- a/include/migration/qemu-file.h
+++ b/include/migration/qemu-file.h
@@ -82,7 +82,7 @@ typedef size_t (QEMURamSaveFunc)(QEMUFile *f, void *opaque,
                                ram_addr_t block_offset,
                                ram_addr_t offset,
                                size_t size,
-                               int *bytes_sent);
+                               uint64_t *bytes_sent);
 
 /*
  * Stop any read or write (depending on flags) on the underlying
diff --git a/migration/migration.c b/migration/migration.c
index b3adbc653a..2c805f11f5 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -49,6 +49,8 @@ enum {
 static NotifierList migration_state_notifiers =
     NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);
 
+static bool deferred_incoming;
+
 /* When we add fault tolerance, we could have several
    migrations at once.  For now we don't need to add
    dynamic creation of migration */
@@ -65,25 +67,40 @@ MigrationState *migrate_get_current(void)
     return &current_migration;
 }
 
+/*
+ * Called on -incoming with a defer: uri.
+ * The migration can be started later after any parameters have been
+ * changed.
+ */
+static void deferred_incoming_migration(Error **errp)
+{
+    if (deferred_incoming) {
+        error_setg(errp, "Incoming migration already deferred");
+    }
+    deferred_incoming = true;
+}
+
 void qemu_start_incoming_migration(const char *uri, Error **errp)
 {
     const char *p;
 
-    if (strstart(uri, "tcp:", &p))
+    if (!strcmp(uri, "defer")) {
+        deferred_incoming_migration(errp);
+    } else if (strstart(uri, "tcp:", &p)) {
         tcp_start_incoming_migration(p, errp);
 #ifdef CONFIG_RDMA
-    else if (strstart(uri, "rdma:", &p))
+    } else if (strstart(uri, "rdma:", &p)) {
         rdma_start_incoming_migration(p, errp);
 #endif
 #if !defined(WIN32)
-    else if (strstart(uri, "exec:", &p))
+    } else if (strstart(uri, "exec:", &p)) {
         exec_start_incoming_migration(p, errp);
-    else if (strstart(uri, "unix:", &p))
+    } else if (strstart(uri, "unix:", &p)) {
         unix_start_incoming_migration(p, errp);
-    else if (strstart(uri, "fd:", &p))
+    } else if (strstart(uri, "fd:", &p)) {
         fd_start_incoming_migration(p, errp);
 #endif
-    else {
+    } else {
         error_setg(errp, "unknown migration protocol: %s", uri);
     }
 }
@@ -415,6 +432,25 @@ void migrate_del_blocker(Error *reason)
     migration_blockers = g_slist_remove(migration_blockers, reason);
 }
 
+void qmp_migrate_incoming(const char *uri, Error **errp)
+{
+    Error *local_err = NULL;
+
+    if (!deferred_incoming) {
+        error_setg(errp, "'-incoming defer' is required for migrate_incoming");
+        return;
+    }
+
+    qemu_start_incoming_migration(uri, &local_err);
+
+    if (local_err) {
+        error_propagate(errp, local_err);
+        return;
+    }
+
+    deferred_incoming = false;
+}
+
 void qmp_migrate(const char *uri, bool has_blk, bool blk,
                  bool has_inc, bool inc, bool has_detach, bool detach,
                  Error **errp)
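
With the deferred mode added above, the destination no longer needs its
listen address at startup; it is supplied later through the new command.
A typical session looks like this (host name and port are only examples):

    # destination
    qemu-system-x86_64 -incoming defer ...
    (qemu) migrate_incoming tcp:0:4446

    # source
    (qemu) migrate tcp:dest-host:4446
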
diff --git a/migration/qemu-file.c b/migration/qemu-file.c
index e66e55712f..1a4f9868ed 100644
--- a/migration/qemu-file.c
+++ b/migration/qemu-file.c
@@ -161,7 +161,8 @@ void ram_control_load_hook(QEMUFile *f, uint64_t flags)
 }
 
 size_t ram_control_save_page(QEMUFile *f, ram_addr_t block_offset,
-                         ram_addr_t offset, size_t size, int *bytes_sent)
+                             ram_addr_t offset, size_t size,
+                             uint64_t *bytes_sent)
 {
     if (f->ops->save_page) {
         int ret = f->ops->save_page(f, f->opaque, block_offset,
diff --git a/migration/rdma.c b/migration/rdma.c
index 42d443cc64..d1c19ffdac 100644
--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -2654,7 +2654,7 @@ static int qemu_rdma_close(void *opaque)
  */
 static size_t qemu_rdma_save_page(QEMUFile *f, void *opaque,
                                   ram_addr_t block_offset, ram_addr_t offset,
-                                  size_t size, int *bytes_sent)
+                                  size_t size, uint64_t *bytes_sent)
 {
     QEMUFileRDMA *rfile = opaque;
     RDMAContext *rdma = rfile->rdma;
diff --git a/qapi-schema.json b/qapi-schema.json
index 8141f71fb3..2b3e275983 100644
--- a/qapi-schema.json
+++ b/qapi-schema.json
@@ -1738,6 +1738,21 @@
 { 'command': 'migrate',
   'data': {'uri': 'str', '*blk': 'bool', '*inc': 'bool', '*detach': 'bool' } }
 
+##
+# @migrate-incoming
+#
+# Start an incoming migration. QEMU must have been started with
+# '-incoming defer'.
+#
+# @uri: The Uniform Resource Identifier identifying the source or
+#       address to listen on
+#
+# Returns: nothing on success
+#
+# Since: 2.3
+##
+{ 'command': 'migrate-incoming', 'data': {'uri': 'str' } }
+
 # @xen-save-devices-state:
 #
 # Save the state of all devices to file. The RAM and the block devices
diff --git a/qemu-options.hx b/qemu-options.hx
index ad07ddecd0..c513352ea8 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -39,7 +39,8 @@ DEF("machine", HAS_ARG, QEMU_OPTION_machine, \
     "                mem-merge=on|off controls memory merge support (default: on)\n"
     "                iommu=on|off controls emulated Intel IOMMU (VT-d) support (default=off)\n"
     "                aes-key-wrap=on|off controls support for AES key wrapping (default=on)\n"
-    "                dea-key-wrap=on|off controls support for DEA key wrapping (default=on)\n",
+    "                dea-key-wrap=on|off controls support for DEA key wrapping (default=on)\n"
+    "                suppress-vmdesc=on|off disables self-describing migration (default=off)\n",
     QEMU_ARCH_ALL)
 STEXI
 @item -machine [type=]@var{name}[,prop=@var{value}[,...]]
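
The new property is set like any other -machine option, e.g. (a
hypothetical invocation; any machine type carrying the property works):

    qemu-system-x86_64 -machine pc,suppress-vmdesc=on ...
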
diff --git a/qmp-commands.hx b/qmp-commands.hx
index c12334a370..06639248f2 100644
--- a/qmp-commands.hx
+++ b/qmp-commands.hx
@@ -661,7 +661,36 @@ Example:
 <- { "return": {} }
 
 EQMP
-{
+
+    {
+        .name       = "migrate-incoming",
+        .args_type  = "uri:s",
+        .mhandler.cmd_new = qmp_marshal_input_migrate_incoming,
+    },
+
+SQMP
+migrate-incoming
+----------------
+
+Continue an incoming migration
+
+Arguments:
+
+- "uri": Source/listening URI (json-string)
+
+Example:
+
+-> { "execute": "migrate-incoming", "arguments": { "uri": "tcp::4446" } }
+<- { "return": {} }
+
+Notes:
+
+(1) QEMU must be started with -incoming defer to allow migrate-incoming to
+    be used
+(2) The uri format is the same as for -incoming
+
+EQMP
+    {
         .name       = "migrate-set-cache-size",
         .args_type  = "value:o",
         .mhandler.cmd_new = qmp_marshal_input_migrate_set_cache_size,
diff --git a/savevm.c b/savevm.c
index ce2b6a25a6..e7d97eea93 100644
--- a/savevm.c
+++ b/savevm.c
@@ -710,6 +710,12 @@ int qemu_savevm_state_iterate(QEMUFile *f)
     return ret;
 }
 
+static bool should_send_vmdesc(void)
+{
+    MachineState *machine = MACHINE(qdev_get_machine());
+    return !machine->suppress_vmdesc;
+}
+
 void qemu_savevm_state_complete(QEMUFile *f)
 {
     QJSON *vmdesc;
@@ -782,9 +788,11 @@ void qemu_savevm_state_complete(QEMUFile *f)
     qjson_finish(vmdesc);
     vmdesc_len = strlen(qjson_get_str(vmdesc));
 
-    qemu_put_byte(f, QEMU_VM_VMDESCRIPTION);
-    qemu_put_be32(f, vmdesc_len);
-    qemu_put_buffer(f, (uint8_t *)qjson_get_str(vmdesc), vmdesc_len);
+    if (should_send_vmdesc()) {
+        qemu_put_byte(f, QEMU_VM_VMDESCRIPTION);
+        qemu_put_be32(f, vmdesc_len);
+        qemu_put_buffer(f, (uint8_t *)qjson_get_str(vmdesc), vmdesc_len);
+    }
     object_unref(OBJECT(vmdesc));
 
     qemu_fflush(f);
@@ -930,6 +938,7 @@ int qemu_loadvm_state(QEMUFile *f)
     uint8_t section_type;
     unsigned int v;
     int ret;
+    int file_error_after_eof = -1;
 
     if (qemu_savevm_state_blocked(&local_err)) {
         error_report("%s", error_get_pretty(local_err));
@@ -1035,6 +1044,24 @@ int qemu_loadvm_state(QEMUFile *f)
         }
     }
 
+    file_error_after_eof = qemu_file_get_error(f);
+
+    /*
+     * Try to read in the VMDESC section as well, so that dumping tools that
+     * intercept our migration stream have the chance to see it.
+     */
+    if (qemu_get_byte(f) == QEMU_VM_VMDESCRIPTION) {
+        uint32_t size = qemu_get_be32(f);
+        uint8_t *buf = g_malloc(0x1000);
+
+        while (size > 0) {
+            uint32_t read_chunk = MIN(size, 0x1000);
+            qemu_get_buffer(f, buf, read_chunk);
+            size -= read_chunk;
+        }
+        g_free(buf);
+    }
+
     cpu_synchronize_all_post_init();
 
     ret = 0;
@@ -1046,7 +1073,8 @@ out:
     }
 
     if (ret == 0) {
-        ret = qemu_file_get_error(f);
+        /* We may not have a VMDESC section, so ignore errors from it */
+        ret = file_error_after_eof;
     }
 
     return ret;
diff --git a/target-tricore/cpu.h b/target-tricore/cpu.h
index b473426ce0..90bf0069b5 100644
--- a/target-tricore/cpu.h
+++ b/target-tricore/cpu.h
@@ -238,6 +238,13 @@ struct CPUTriCoreState {
 #define MASK_LCX_LCXS 0x000f0000
 #define MASK_LCX_LCX0 0x0000ffff
 
+#define MASK_DBGSR_DE 0x1
+#define MASK_DBGSR_HALT 0x6
+#define MASK_DBGSR_SUSP 0x10
+#define MASK_DBGSR_PREVSUSP 0x20
+#define MASK_DBGSR_PEVT 0x40
+#define MASK_DBGSR_EVTSRC 0x1f00
+
 #define TRICORE_HFLAG_KUU     0x3
 #define TRICORE_HFLAG_UM0     0x00002 /* user mode-0 flag          */
 #define TRICORE_HFLAG_UM1     0x00001 /* user mode-1 flag          */
diff --git a/target-tricore/helper.h b/target-tricore/helper.h
index 4c823460e1..1a49b00ccb 100644
--- a/target-tricore/helper.h
+++ b/target-tricore/helper.h
@@ -24,9 +24,12 @@ DEF_HELPER_3(add_h_suov, i32, env, i32, i32)
 DEF_HELPER_4(addr_h_ssov, i32, env, i64, i32, i32)
 DEF_HELPER_4(addsur_h_ssov, i32, env, i64, i32, i32)
 DEF_HELPER_3(sub_ssov, i32, env, i32, i32)
+DEF_HELPER_3(sub64_ssov, i64, env, i64, i64)
 DEF_HELPER_3(sub_suov, i32, env, i32, i32)
 DEF_HELPER_3(sub_h_ssov, i32, env, i32, i32)
 DEF_HELPER_3(sub_h_suov, i32, env, i32, i32)
+DEF_HELPER_4(subr_h_ssov, i32, env, i64, i32, i32)
+DEF_HELPER_4(subadr_h_ssov, i32, env, i64, i32, i32)
 DEF_HELPER_3(mul_ssov, i32, env, i32, i32)
 DEF_HELPER_3(mul_suov, i32, env, i32, i32)
 DEF_HELPER_3(sha_ssov, i32, env, i32, i32)
@@ -41,6 +44,9 @@ DEF_HELPER_4(madd64_suov, i64, env, i32, i64, i32)
 DEF_HELPER_4(msub32_ssov, i32, env, i32, i32, i32)
 DEF_HELPER_4(msub32_suov, i32, env, i32, i32, i32)
 DEF_HELPER_4(msub64_ssov, i64, env, i32, i64, i32)
+DEF_HELPER_5(msub64_q_ssov, i64, env, i64, i32, i32, i32)
+DEF_HELPER_3(msub32_q_sub_ssov, i32, env, i64, i64)
+DEF_HELPER_5(msubr_q_ssov, i32, env, i32, i32, i32, i32)
 DEF_HELPER_4(msub64_suov, i64, env, i32, i64, i32)
 DEF_HELPER_3(absdif_h_ssov, i32, env, i32, i32)
 DEF_HELPER_2(abs_ssov, i32, env, i32)
@@ -57,6 +63,9 @@ DEF_HELPER_3(add_b, i32, env, i32, i32)
 DEF_HELPER_3(add_h, i32, env, i32, i32)
 DEF_HELPER_3(sub_b, i32, env, i32, i32)
 DEF_HELPER_3(sub_h, i32, env, i32, i32)
+DEF_HELPER_4(subr_h, i32, env, i64, i32, i32)
+DEF_HELPER_4(subadr_h, i32, env, i64, i32, i32)
+DEF_HELPER_5(msubr_q, i32, env, i32, i32, i32, i32)
 DEF_HELPER_FLAGS_2(eq_b, TCG_CALL_NO_RWG_SE, i32, i32, i32)
 DEF_HELPER_FLAGS_2(eq_h, TCG_CALL_NO_RWG_SE, i32, i32, i32)
 DEF_HELPER_FLAGS_2(eqany_b, TCG_CALL_NO_RWG_SE, i32, i32, i32)
@@ -113,10 +122,13 @@ DEF_HELPER_2(call, void, env, i32)
 DEF_HELPER_1(ret, void, env)
 DEF_HELPER_2(bisr, void, env, i32)
 DEF_HELPER_1(rfe, void, env)
+DEF_HELPER_1(rfm, void, env)
 DEF_HELPER_2(ldlcx, void, env, i32)
 DEF_HELPER_2(lducx, void, env, i32)
 DEF_HELPER_2(stlcx, void, env, i32)
 DEF_HELPER_2(stucx, void, env, i32)
+DEF_HELPER_1(svlcx, void, env)
+DEF_HELPER_1(rslcx, void, env)
 /* Address mode helper */
 DEF_HELPER_1(br_update, i32, i32)
 DEF_HELPER_2(circ_update, i32, i32, i32)
diff --git a/target-tricore/op_helper.c b/target-tricore/op_helper.c
index 40d32af5d3..97b0c8b8d3 100644
--- a/target-tricore/op_helper.c
+++ b/target-tricore/op_helper.c
@@ -340,6 +340,31 @@ target_ulong helper_sub_ssov(CPUTriCoreState *env, target_ulong r1,
     return ssov32(env, result);
 }
 
+uint64_t helper_sub64_ssov(CPUTriCoreState *env, uint64_t r1, uint64_t r2)
+{
+    uint64_t result;
+    int64_t ovf;
+
+    result = r1 - r2;
+    ovf = (result ^ r1) & (r1 ^ r2);
+    env->PSW_USB_AV = (result ^ result * 2u) >> 32;
+    env->PSW_USB_SAV |= env->PSW_USB_AV;
+    if (ovf < 0) {
+        env->PSW_USB_V = (1 << 31);
+        env->PSW_USB_SV = (1 << 31);
+        /* ext_ret > MAX_INT */
+        if ((int64_t)r1 >= 0) {
+            result = INT64_MAX;
+        /* ext_ret < MIN_INT */
+        } else {
+            result = INT64_MIN;
+        }
+    } else {
+        env->PSW_USB_V = 0;
+    }
+    return result;
+}
+
 target_ulong helper_sub_h_ssov(CPUTriCoreState *env, target_ulong r1,
                              target_ulong r2)
 {
@@ -350,6 +375,98 @@ target_ulong helper_sub_h_ssov(CPUTriCoreState *env, target_ulong r1,
     return ssov16(env, ret_hw0, ret_hw1);
 }
 
+uint32_t helper_subr_h_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l,
+                            uint32_t r2_h)
+{
+    int64_t mul_res0 = sextract64(r1, 0, 32);
+    int64_t mul_res1 = sextract64(r1, 32, 32);
+    int64_t r2_low = sextract64(r2_l, 0, 32);
+    int64_t r2_high = sextract64(r2_h, 0, 32);
+    int64_t result0, result1;
+    uint32_t ovf0, ovf1;
+    uint32_t avf0, avf1;
+
+    ovf0 = ovf1 = 0;
+
+    result0 = r2_low - mul_res0 + 0x8000;
+    result1 = r2_high - mul_res1 + 0x8000;
+
+    avf0 = result0 * 2u;
+    avf0 = result0 ^ avf0;
+    avf1 = result1 * 2u;
+    avf1 = result1 ^ avf1;
+
+    if (result0 > INT32_MAX) {
+        ovf0 = (1 << 31);
+        result0 = INT32_MAX;
+    } else if (result0 < INT32_MIN) {
+        ovf0 = (1 << 31);
+        result0 = INT32_MIN;
+    }
+
+    if (result1 > INT32_MAX) {
+        ovf1 = (1 << 31);
+        result1 = INT32_MAX;
+    } else if (result1 < INT32_MIN) {
+        ovf1 = (1 << 31);
+        result1 = INT32_MIN;
+    }
+
+    env->PSW_USB_V = ovf0 | ovf1;
+    env->PSW_USB_SV |= env->PSW_USB_V;
+
+    env->PSW_USB_AV = avf0 | avf1;
+    env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+    return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL);
+}
+
+uint32_t helper_subadr_h_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l,
+                              uint32_t r2_h)
+{
+    int64_t mul_res0 = sextract64(r1, 0, 32);
+    int64_t mul_res1 = sextract64(r1, 32, 32);
+    int64_t r2_low = sextract64(r2_l, 0, 32);
+    int64_t r2_high = sextract64(r2_h, 0, 32);
+    int64_t result0, result1;
+    uint32_t ovf0, ovf1;
+    uint32_t avf0, avf1;
+
+    ovf0 = ovf1 = 0;
+
+    result0 = r2_low + mul_res0 + 0x8000;
+    result1 = r2_high - mul_res1 + 0x8000;
+
+    avf0 = result0 * 2u;
+    avf0 = result0 ^ avf0;
+    avf1 = result1 * 2u;
+    avf1 = result1 ^ avf1;
+
+    if (result0 > INT32_MAX) {
+        ovf0 = (1 << 31);
+        result0 = INT32_MAX;
+    } else if (result0 < INT32_MIN) {
+        ovf0 = (1 << 31);
+        result0 = INT32_MIN;
+    }
+
+    if (result1 > INT32_MAX) {
+        ovf1 = (1 << 31);
+        result1 = INT32_MAX;
+    } else if (result1 < INT32_MIN) {
+        ovf1 = (1 << 31);
+        result1 = INT32_MIN;
+    }
+
+    env->PSW_USB_V = ovf0 | ovf1;
+    env->PSW_USB_SV |= env->PSW_USB_V;
+
+    env->PSW_USB_AV = avf0 | avf1;
+    env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+    return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL);
+}
+
 target_ulong helper_sub_suov(CPUTriCoreState *env, target_ulong r1,
                              target_ulong r2)
 {
@@ -767,6 +884,132 @@ uint64_t helper_msub64_suov(CPUTriCoreState *env, target_ulong r1,
     return ret;
 }
 
+uint32_t
+helper_msub32_q_sub_ssov(CPUTriCoreState *env, uint64_t r1, uint64_t r2)
+{
+    int64_t result;
+    int64_t t1 = (int64_t)r1;
+    int64_t t2 = (int64_t)r2;
+
+    result = t1 - t2;
+
+    env->PSW_USB_AV = (result ^ result * 2u);
+    env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+    /* we do the saturation by hand, since we produce an overflow on the host
+       if the mul before was ((0x80000000 * 0x80000000) << 1). If this is the
+       case, we flip the saturated value. */
+    if (r2 == 0x8000000000000000LL) {
+        if (result > 0x7fffffffLL) {
+            env->PSW_USB_V = (1 << 31);
+            env->PSW_USB_SV = (1 << 31);
+            result = INT32_MIN;
+        } else if (result < -0x80000000LL) {
+            env->PSW_USB_V = (1 << 31);
+            env->PSW_USB_SV = (1 << 31);
+            result = INT32_MAX;
+        } else {
+            env->PSW_USB_V = 0;
+        }
+    } else {
+        if (result > 0x7fffffffLL) {
+            env->PSW_USB_V = (1 << 31);
+            env->PSW_USB_SV = (1 << 31);
+            result = INT32_MAX;
+        } else if (result < -0x80000000LL) {
+            env->PSW_USB_V = (1 << 31);
+            env->PSW_USB_SV = (1 << 31);
+            result = INT32_MIN;
+        } else {
+            env->PSW_USB_V = 0;
+        }
+    }
+    return (uint32_t)result;
+}
+
+uint64_t helper_msub64_q_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2,
+                              uint32_t r3, uint32_t n)
+{
+    int64_t t1 = (int64_t)r1;
+    int64_t t2 = sextract64(r2, 0, 32);
+    int64_t t3 = sextract64(r3, 0, 32);
+    int64_t result, mul;
+    int64_t ovf;
+
+    mul = (t2 * t3) << n;
+    result = t1 - mul;
+
+    env->PSW_USB_AV = (result ^ result * 2u) >> 32;
+    env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+    ovf = (result ^ t1) & (t1 ^ mul);
+    /* we do the saturation by hand, since we produce an overflow on the host
+       if the mul before was ((0x80000000 * 0x80000000) << 1). If this is the
+       case, we flip the saturated value. */
+    if (mul == 0x8000000000000000LL) {
+        if (ovf >= 0) {
+            env->PSW_USB_V = (1 << 31);
+            env->PSW_USB_SV = (1 << 31);
+            /* ext_ret > MAX_INT */
+            if (mul >= 0) {
+                result = INT64_MAX;
+            /* ext_ret < MIN_INT */
+            } else {
+                result = INT64_MIN;
+            }
+        }
+    } else {
+        if (ovf < 0) {
+            env->PSW_USB_V = (1 << 31);
+            env->PSW_USB_SV = (1 << 31);
+            /* ext_ret > MAX_INT */
+            if (mul < 0) {
+                result = INT64_MAX;
+            /* ext_ret < MIN_INT */
+            } else {
+                result = INT64_MIN;
+            }
+        } else {
+            env->PSW_USB_V = 0;
+        }
+    }
+
+    return (uint64_t)result;
+}
+
+uint32_t helper_msubr_q_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2,
+                             uint32_t r3, uint32_t n)
+{
+    int64_t t1 = sextract64(r1, 0, 32);
+    int64_t t2 = sextract64(r2, 0, 32);
+    int64_t t3 = sextract64(r3, 0, 32);
+    int64_t mul, ret;
+
+    if ((t2 == -0x8000ll) && (t3 == -0x8000ll) && (n == 1)) {
+        mul = 0x7fffffff;
+    } else {
+        mul = (t2 * t3) << n;
+    }
+
+    ret = t1 - mul + 0x8000;
+
+    env->PSW_USB_AV = ret ^ ret * 2u;
+    env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+    if (ret > 0x7fffffffll) {
+        env->PSW_USB_V = (1 << 31);
+        env->PSW_USB_SV |= env->PSW_USB_V;
+        ret = INT32_MAX;
+    } else if (ret < -0x80000000ll) {
+        env->PSW_USB_V = (1 << 31);
+        env->PSW_USB_SV |= env->PSW_USB_V;
+        ret = INT32_MIN;
+    } else {
+        env->PSW_USB_V = 0;
+    }
+    return ret & 0xffff0000ll;
+}
+
 uint32_t helper_abs_b(CPUTriCoreState *env, target_ulong arg)
 {
     int32_t b, i;
@@ -1017,6 +1260,110 @@ uint32_t helper_add_h(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
     return ret;
 }
 
+uint32_t helper_subr_h(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l,
+                       uint32_t r2_h)
+{
+    int64_t mul_res0 = sextract64(r1, 0, 32);
+    int64_t mul_res1 = sextract64(r1, 32, 32);
+    int64_t r2_low = sextract64(r2_l, 0, 32);
+    int64_t r2_high = sextract64(r2_h, 0, 32);
+    int64_t result0, result1;
+    uint32_t ovf0, ovf1;
+    uint32_t avf0, avf1;
+
+    ovf0 = ovf1 = 0;
+
+    result0 = r2_low - mul_res0 + 0x8000;
+    result1 = r2_high - mul_res1 + 0x8000;
+
+    if ((result0 > INT32_MAX) || (result0 < INT32_MIN)) {
+        ovf0 = (1 << 31);
+    }
+
+    if ((result1 > INT32_MAX) || (result1 < INT32_MIN)) {
+        ovf1 = (1 << 31);
+    }
+
+    env->PSW_USB_V = ovf0 | ovf1;
+    env->PSW_USB_SV |= env->PSW_USB_V;
+
+    avf0 = result0 * 2u;
+    avf0 = result0 ^ avf0;
+    avf1 = result1 * 2u;
+    avf1 = result1 ^ avf1;
+
+    env->PSW_USB_AV = avf0 | avf1;
+    env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+    return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL);
+}
+
+uint32_t helper_subadr_h(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l,
+                         uint32_t r2_h)
+{
+    int64_t mul_res0 = sextract64(r1, 0, 32);
+    int64_t mul_res1 = sextract64(r1, 32, 32);
+    int64_t r2_low = sextract64(r2_l, 0, 32);
+    int64_t r2_high = sextract64(r2_h, 0, 32);
+    int64_t result0, result1;
+    uint32_t ovf0, ovf1;
+    uint32_t avf0, avf1;
+
+    ovf0 = ovf1 = 0;
+
+    result0 = r2_low + mul_res0 + 0x8000;
+    result1 = r2_high - mul_res1 + 0x8000;
+
+    if ((result0 > INT32_MAX) || (result0 < INT32_MIN)) {
+        ovf0 = (1 << 31);
+    }
+
+    if ((result1 > INT32_MAX) || (result1 < INT32_MIN)) {
+        ovf1 = (1 << 31);
+    }
+
+    env->PSW_USB_V = ovf0 | ovf1;
+    env->PSW_USB_SV |= env->PSW_USB_V;
+
+    avf0 = result0 * 2u;
+    avf0 = result0 ^ avf0;
+    avf1 = result1 * 2u;
+    avf1 = result1 ^ avf1;
+
+    env->PSW_USB_AV = avf0 | avf1;
+    env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+    return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL);
+}
+
+uint32_t helper_msubr_q(CPUTriCoreState *env, uint32_t r1, uint32_t r2,
+                        uint32_t r3, uint32_t n)
+{
+    int64_t t1 = sextract64(r1, 0, 32);
+    int64_t t2 = sextract64(r2, 0, 32);
+    int64_t t3 = sextract64(r3, 0, 32);
+    int64_t mul, ret;
+
+    if ((t2 == -0x8000ll) && (t3 == -0x8000ll) && (n == 1)) {
+        mul = 0x7fffffff;
+    } else {
+        mul = (t2 * t3) << n;
+    }
+
+    ret = t1 - mul + 0x8000;
+
+    if ((ret > 0x7fffffffll) || (ret < -0x80000000ll)) {
+        env->PSW_USB_V = (1 << 31);
+        env->PSW_USB_SV |= env->PSW_USB_V;
+    } else {
+        env->PSW_USB_V = 0;
+    }
+    env->PSW_USB_AV = ret ^ ret * 2u;
+    env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+    return ret & 0xffff0000ll;
+}
+
 uint32_t helper_sub_b(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
 {
     int32_t b, i;
@@ -2152,6 +2499,26 @@ void helper_rfe(CPUTriCoreState *env)
     psw_write(env, new_PSW);
 }
 
+void helper_rfm(CPUTriCoreState *env)
+{
+    env->PC = (env->gpr_a[11] & ~0x1);
+    /* ICR.IE = PCXI.PIE; */
+    env->ICR = (env->ICR & ~MASK_ICR_IE) |
+               ((env->PCXI & MASK_PCXI_PIE) >> 15);
+    /* ICR.CCPN = PCXI.PCPN; */
+    env->ICR = (env->ICR & ~MASK_ICR_CCPN) |
+               ((env->PCXI & MASK_PCXI_PCPN) >> 24);
+    /* {PCXI, PSW, A[10], A[11]} = M(DCX, 4 * word); */
+    env->PCXI = cpu_ldl_data(env, env->DCX);
+    psw_write(env, cpu_ldl_data(env, env->DCX+4));
+    env->gpr_a[10] = cpu_ldl_data(env, env->DCX+8);
+    env->gpr_a[11] = cpu_ldl_data(env, env->DCX+12);
+
+    if (tricore_feature(env, TRICORE_FEATURE_131)) {
+        env->DBGTCR = 0;
+    }
+}
+
 void helper_ldlcx(CPUTriCoreState *env, uint32_t ea)
 {
     uint32_t dummy;
@@ -2176,6 +2543,75 @@ void helper_stucx(CPUTriCoreState *env, uint32_t ea)
     save_context_upper(env, ea);
 }
 
+void helper_svlcx(CPUTriCoreState *env)
+{
+    target_ulong tmp_FCX;
+    target_ulong ea;
+    target_ulong new_FCX;
+
+    if (env->FCX == 0) {
+        /* FCU trap */
+    }
+    /* tmp_FCX = FCX; */
+    tmp_FCX = env->FCX;
+    /* EA = {FCX.FCXS, 6'b0, FCX.FCXO, 6'b0}; */
+    ea = ((env->FCX & MASK_FCX_FCXS) << 12) +
+         ((env->FCX & MASK_FCX_FCXO) << 6);
+    /* new_FCX = M(EA, word); */
+    new_FCX = cpu_ldl_data(env, ea);
+    /* M(EA, 16 * word) = {PCXI, PSW, A[10], A[11], D[8], D[9], D[10], D[11],
+                           A[12], A[13], A[14], A[15], D[12], D[13], D[14],
+                           D[15]}; */
+    save_context_lower(env, ea);
+
+    /* PCXI.PCPN = ICR.CCPN; */
+    env->PCXI = (env->PCXI & 0xffffff) +
+                ((env->ICR & MASK_ICR_CCPN) << 24);
+    /* PCXI.PIE = ICR.IE; */
+    env->PCXI = ((env->PCXI & ~MASK_PCXI_PIE) +
+                ((env->ICR & MASK_ICR_IE) << 15));
+    /* PCXI.UL = 0; */
+    env->PCXI &= ~MASK_PCXI_UL;
+
+    /* PCXI[19: 0] = FCX[19: 0]; */
+    env->PCXI = (env->PCXI & 0xfff00000) + (env->FCX & 0xfffff);
+    /* FCX[19: 0] = new_FCX[19: 0]; */
+    env->FCX = (env->FCX & 0xfff00000) + (new_FCX & 0xfffff);
+
+    /* if (tmp_FCX == LCX) trap(FCD);*/
+    if (tmp_FCX == env->LCX) {
+        /* FCD trap */
+    }
+}
+
+void helper_rslcx(CPUTriCoreState *env)
+{
+    target_ulong ea;
+    target_ulong new_PCXI;
+    /*   if (PCXI[19: 0] == 0) then trap(CSU); */
+    if ((env->PCXI & 0xfffff) == 0) {
+        /* CSU trap */
+    }
+    /* if (PCXI.UL == 1) then trap(CTYP); */
+    if ((env->PCXI & MASK_PCXI_UL) != 0) {
+        /* CTYP trap */
+    }
+    /* EA = {PCXI.PCXS, 6'b0, PCXI.PCXO, 6'b0}; */
+    ea = ((env->PCXI & MASK_PCXI_PCXS) << 12) +
+         ((env->PCXI & MASK_PCXI_PCXO) << 6);
+    /* {new_PCXI, A[11], A[10], A[11], D[8], D[9], D[10], D[11], A[12],
+        A[13], A[14], A[15], D[12], D[13], D[14], D[15]} = M(EA, 16 * word); */
+    restore_context_upper(env, ea, &new_PCXI, &env->gpr_a[11]);
+    /* M(EA, word) = FCX; */
+    cpu_stl_data(env, ea, env->FCX);
+    /* FCX[19: 0] = PCXI[19: 0]; */
+    env->FCX = (env->FCX & 0xfff00000) + (env->PCXI & 0x000fffff);
+    /* PCXI = new_PCXI; */
+    env->PCXI = new_PCXI;
+}
+
 void helper_psw_write(CPUTriCoreState *env, uint32_t arg)
 {
     psw_write(env, arg);
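
The saturating helpers above all lean on two sign-bit tricks: for a
subtraction res = a - b, (res ^ a) & (a ^ b) has its top bit set exactly
when signed overflow occurred, and res ^ (res * 2) has its top bit set
when the two most significant bits of res differ (the PSW advance flag
AV). A standalone demo of both, not part of the patch:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t a = INT64_MIN, b = 1;
        /* compute the wrapped difference without signed-overflow UB */
        int64_t res = (int64_t)((uint64_t)a - (uint64_t)b);
        int v = (((res ^ a) & (a ^ b)) < 0);                      /* V flag */
        int av = (int)(((uint64_t)res ^ ((uint64_t)res * 2u)) >> 63);

        printf("res=%" PRId64 " V=%d AV=%d\n", res, v, av);   /* V=1 AV=1 */
        return 0;
    }
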
diff --git a/target-tricore/translate.c b/target-tricore/translate.c
index 27777be9c1..15a24f7c98 100644
--- a/target-tricore/translate.c
+++ b/target-tricore/translate.c
@@ -1575,6 +1575,37 @@ static inline void gen_sub_d(TCGv ret, TCGv r1, TCGv r2)
     tcg_temp_free(result);
 }
 
+static inline void
+gen_sub64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
+{
+    TCGv temp = tcg_temp_new();
+    TCGv_i64 t0 = tcg_temp_new_i64();
+    TCGv_i64 t1 = tcg_temp_new_i64();
+    TCGv_i64 result = tcg_temp_new_i64();
+
+    tcg_gen_sub_i64(result, r1, r2);
+    /* calc v bit */
+    tcg_gen_xor_i64(t1, result, r1);
+    tcg_gen_xor_i64(t0, r1, r2);
+    tcg_gen_and_i64(t1, t1, t0);
+    tcg_gen_trunc_shr_i64_i32(cpu_PSW_V, t1, 32);
+    /* calc SV bit */
+    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+    /* calc AV/SAV bits */
+    tcg_gen_trunc_shr_i64_i32(temp, result, 32);
+    tcg_gen_add_tl(cpu_PSW_AV, temp, temp);
+    tcg_gen_xor_tl(cpu_PSW_AV, temp, cpu_PSW_AV);
+    /* calc SAV */
+    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+    /* write back result */
+    tcg_gen_mov_i64(ret, result);
+
+    tcg_temp_free(temp);
+    tcg_temp_free_i64(result);
+    tcg_temp_free_i64(t0);
+    tcg_temp_free_i64(t1);
+}
+
 static inline void gen_sub_CC(TCGv ret, TCGv r1, TCGv r2)
 {
     TCGv result = tcg_temp_new();
@@ -1648,6 +1679,698 @@ static inline void gen_cond_sub(TCGCond cond, TCGv r1, TCGv r2, TCGv r3,
     tcg_temp_free(mask);
 }
 
+static inline void
+gen_msub_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+           TCGv r3, uint32_t n, uint32_t mode)
+{
+    TCGv temp = tcg_const_i32(n);
+    TCGv temp2 = tcg_temp_new();
+    TCGv_i64 temp64 = tcg_temp_new_i64();
+    switch (mode) {
+    case MODE_LL:
+        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+        break;
+    case MODE_LU:
+        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+        break;
+    case MODE_UL:
+        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+        break;
+    case MODE_UU:
+        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+        break;
+    }
+    tcg_gen_extr_i64_i32(temp, temp2, temp64);
+    gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2,
+                   tcg_gen_sub_tl, tcg_gen_sub_tl);
+    tcg_temp_free(temp);
+    tcg_temp_free(temp2);
+    tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_msubs_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+            TCGv r3, uint32_t n, uint32_t mode)
+{
+    TCGv temp = tcg_const_i32(n);
+    TCGv temp2 = tcg_temp_new();
+    TCGv temp3 = tcg_temp_new();
+    TCGv_i64 temp64 = tcg_temp_new_i64();
+
+    switch (mode) {
+    case MODE_LL:
+        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+        break;
+    case MODE_LU:
+        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+        break;
+    case MODE_UL:
+        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+        break;
+    case MODE_UU:
+        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+        break;
+    }
+    tcg_gen_extr_i64_i32(temp, temp2, temp64);
+    gen_subs(ret_low, r1_low, temp);
+    tcg_gen_mov_tl(temp, cpu_PSW_V);
+    tcg_gen_mov_tl(temp3, cpu_PSW_AV);
+    gen_subs(ret_high, r1_high, temp2);
+    /* combine v bits */
+    tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
+    /* combine av bits */
+    tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);
+
+    tcg_temp_free(temp);
+    tcg_temp_free(temp2);
+    tcg_temp_free(temp3);
+    tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_msubm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+            TCGv r3, uint32_t n, uint32_t mode)
+{
+    TCGv temp = tcg_const_i32(n);
+    TCGv_i64 temp64 = tcg_temp_new_i64();
+    TCGv_i64 temp64_2 = tcg_temp_new_i64();
+    TCGv_i64 temp64_3 = tcg_temp_new_i64();
+    switch (mode) {
+    case MODE_LL:
+        GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp);
+        break;
+    case MODE_LU:
+        GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp);
+        break;
+    case MODE_UL:
+        GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp);
+        break;
+    case MODE_UU:
+        GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp);
+        break;
+    }
+    tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
+    gen_sub64_d(temp64_3, temp64_2, temp64);
+    /* write back result */
+    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_3);
+
+    tcg_temp_free(temp);
+    tcg_temp_free_i64(temp64);
+    tcg_temp_free_i64(temp64_2);
+    tcg_temp_free_i64(temp64_3);
+}
+
+static inline void
+gen_msubms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+             TCGv r3, uint32_t n, uint32_t mode)
+{
+    TCGv temp = tcg_const_i32(n);
+    TCGv_i64 temp64 = tcg_temp_new_i64();
+    TCGv_i64 temp64_2 = tcg_temp_new_i64();
+    switch (mode) {
+    case MODE_LL:
+        GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp);
+        break;
+    case MODE_LU:
+        GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp);
+        break;
+    case MODE_UL:
+        GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp);
+        break;
+    case MODE_UU:
+        GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp);
+        break;
+    }
+    tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
+    gen_helper_sub64_ssov(temp64, cpu_env, temp64_2, temp64);
+    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
+
+    tcg_temp_free(temp);
+    tcg_temp_free_i64(temp64);
+    tcg_temp_free_i64(temp64_2);
+}
+
+static inline void
+gen_msubr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n,
+              uint32_t mode)
+{
+    TCGv temp = tcg_const_i32(n);
+    TCGv_i64 temp64 = tcg_temp_new_i64();
+    switch (mode) {
+    case MODE_LL:
+        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+        break;
+    case MODE_LU:
+        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+        break;
+    case MODE_UL:
+        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+        break;
+    case MODE_UU:
+        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+        break;
+    }
+    gen_helper_subr_h(ret, cpu_env, temp64, r1_low, r1_high);
+
+    tcg_temp_free(temp);
+    tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_msubr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
+{
+    TCGv temp = tcg_temp_new();
+    TCGv temp2 = tcg_temp_new();
+
+    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
+    tcg_gen_shli_tl(temp, r1, 16);
+    gen_msubr64_h(ret, temp, temp2, r2, r3, n, mode);
+
+    tcg_temp_free(temp);
+    tcg_temp_free(temp2);
+}
+
+static inline void
+gen_msubr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3,
+               uint32_t n, uint32_t mode)
+{
+    TCGv temp = tcg_const_i32(n);
+    TCGv_i64 temp64 = tcg_temp_new_i64();
+    switch (mode) {
+    case MODE_LL:
+        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+        break;
+    case MODE_LU:
+        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+        break;
+    case MODE_UL:
+        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+        break;
+    case MODE_UU:
+        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+        break;
+    }
+    gen_helper_subr_h_ssov(ret, cpu_env, temp64, r1_low, r1_high);
+
+    tcg_temp_free(temp);
+    tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_msubr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
+{
+    TCGv temp = tcg_temp_new();
+    TCGv temp2 = tcg_temp_new();
+
+    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
+    tcg_gen_shli_tl(temp, r1, 16);
+    gen_msubr64s_h(ret, temp, temp2, r2, r3, n, mode);
+
+    tcg_temp_free(temp);
+    tcg_temp_free(temp2);
+}
+
+static inline void
+gen_msubr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
+{
+    TCGv temp = tcg_const_i32(n);
+    gen_helper_msubr_q(ret, cpu_env, r1, r2, r3, temp);
+    tcg_temp_free(temp);
+}
+
+static inline void
+gen_msubrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
+{
+    TCGv temp = tcg_const_i32(n);
+    gen_helper_msubr_q_ssov(ret, cpu_env, r1, r2, r3, temp);
+    tcg_temp_free(temp);
+}
+
+static inline void
+gen_msub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
+             uint32_t up_shift, CPUTriCoreState *env)
+{
+    TCGv temp = tcg_temp_new();
+    TCGv temp2 = tcg_temp_new();
+    TCGv temp3 = tcg_temp_new();
+    TCGv_i64 t1 = tcg_temp_new_i64();
+    TCGv_i64 t2 = tcg_temp_new_i64();
+    TCGv_i64 t3 = tcg_temp_new_i64();
+    TCGv_i64 t4 = tcg_temp_new_i64();
+
+    tcg_gen_ext_i32_i64(t2, arg2);
+    tcg_gen_ext_i32_i64(t3, arg3);
+
+    tcg_gen_mul_i64(t2, t2, t3);
+
+    tcg_gen_ext_i32_i64(t1, arg1);
+    /* if we shift part of the fraction out, we need to round up */
+    tcg_gen_andi_i64(t4, t2, (1ll << (up_shift - n)) - 1);
+    tcg_gen_setcondi_i64(TCG_COND_NE, t4, t4, 0);
+    tcg_gen_sari_i64(t2, t2, up_shift - n);
+    tcg_gen_add_i64(t2, t2, t4);
+
+    tcg_gen_sub_i64(t3, t1, t2);
+    tcg_gen_trunc_i64_i32(temp3, t3);
+    /* calc v bit */
+    tcg_gen_setcondi_i64(TCG_COND_GT, t1, t3, 0x7fffffffLL);
+    tcg_gen_setcondi_i64(TCG_COND_LT, t2, t3, -0x80000000LL);
+    tcg_gen_or_i64(t1, t1, t2);
+    tcg_gen_trunc_i64_i32(cpu_PSW_V, t1);
+    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+    /* We produce an overflow on the host if the mul before was
+       ((0x80000000 * 0x80000000) << 1). If this is the
+       case, we negate the ovf. */
+    if (n == 1) {
+        tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000);
+        tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3);
+        tcg_gen_and_tl(temp, temp, temp2);
+        tcg_gen_shli_tl(temp, temp, 31);
+        /* negate v bit, if special condition */
+        tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp);
+    }
+    /* Calc SV bit */
+    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+    /* Calc AV/SAV bits */
+    tcg_gen_add_tl(cpu_PSW_AV, temp3, temp3);
+    tcg_gen_xor_tl(cpu_PSW_AV, temp3, cpu_PSW_AV);
+    /* calc SAV */
+    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+    /* write back result */
+    tcg_gen_mov_tl(ret, temp3);
+
+    tcg_temp_free(temp);
+    tcg_temp_free(temp2);
+    tcg_temp_free(temp3);
+    tcg_temp_free_i64(t1);
+    tcg_temp_free_i64(t2);
+    tcg_temp_free_i64(t3);
+    tcg_temp_free_i64(t4);
+}
+
+static inline void
+gen_m16sub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
+{
+    TCGv temp = tcg_temp_new();
+    TCGv temp2 = tcg_temp_new();
+    if (n == 0) {
+        tcg_gen_mul_tl(temp, arg2, arg3);
+    } else { /* n is expected to be 1 */
+        tcg_gen_mul_tl(temp, arg2, arg3);
+        tcg_gen_shli_tl(temp, temp, 1);
+        /* catch special case r1 = r2 = 0x8000 */
+        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
+        tcg_gen_sub_tl(temp, temp, temp2);
+    }
+    gen_sub_d(ret, arg1, temp);
+
+    tcg_temp_free(temp);
+    tcg_temp_free(temp2);
+}
+
+static inline void
+gen_m16subs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
+{
+    TCGv temp = tcg_temp_new();
+    TCGv temp2 = tcg_temp_new();
+    if (n == 0) {
+        tcg_gen_mul_tl(temp, arg2, arg3);
+    } else { /* n is expected to be 1 */
+        tcg_gen_mul_tl(temp, arg2, arg3);
+        tcg_gen_shli_tl(temp, temp, 1);
+        /* catch special case r1 = r2 = 0x8000 */
+        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
+        tcg_gen_sub_tl(temp, temp, temp2);
+    }
+    gen_subs(ret, arg1, temp);
+
+    tcg_temp_free(temp);
+    tcg_temp_free(temp2);
+}
+
+static inline void
+gen_m16sub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
+               TCGv arg3, uint32_t n)
+{
+    TCGv temp = tcg_temp_new();
+    TCGv temp2 = tcg_temp_new();
+    TCGv_i64 t1 = tcg_temp_new_i64();
+    TCGv_i64 t2 = tcg_temp_new_i64();
+    TCGv_i64 t3 = tcg_temp_new_i64();
+
+    if (n == 0) {
+        tcg_gen_mul_tl(temp, arg2, arg3);
+    } else { /* n is expected to be 1 */
+        tcg_gen_mul_tl(temp, arg2, arg3);
+        tcg_gen_shli_tl(temp, temp, 1);
+        /* catch special case r1 = r2 = 0x8000 */
+        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
+        tcg_gen_sub_tl(temp, temp, temp2);
+    }
+    tcg_gen_ext_i32_i64(t2, temp);
+    tcg_gen_shli_i64(t2, t2, 16);
+    tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
+    gen_sub64_d(t3, t1, t2);
+    /* write back result */
+    tcg_gen_extr_i64_i32(rl, rh, t3);
+
+    tcg_temp_free_i64(t1);
+    tcg_temp_free_i64(t2);
+    tcg_temp_free_i64(t3);
+    tcg_temp_free(temp);
+    tcg_temp_free(temp2);
+}
+
+static inline void
+gen_m16subs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
+               TCGv arg3, uint32_t n)
+{
+    TCGv temp = tcg_temp_new();
+    TCGv temp2 = tcg_temp_new();
+    TCGv_i64 t1 = tcg_temp_new_i64();
+    TCGv_i64 t2 = tcg_temp_new_i64();
+
+    if (n == 0) {
+        tcg_gen_mul_tl(temp, arg2, arg3);
+    } else { /* n is expected to be 1 */
+        tcg_gen_mul_tl(temp, arg2, arg3);
+        tcg_gen_shli_tl(temp, temp, 1);
+        /* catch special case r1 = r2 = 0x8000 */
+        tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
+        tcg_gen_sub_tl(temp, temp, temp2);
+    }
+    tcg_gen_ext_i32_i64(t2, temp);
+    tcg_gen_shli_i64(t2, t2, 16);
+    tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
+
+    gen_helper_sub64_ssov(t1, cpu_env, t1, t2);
+    tcg_gen_extr_i64_i32(rl, rh, t1);
+
+    tcg_temp_free(temp);
+    tcg_temp_free(temp2);
+    tcg_temp_free_i64(t1);
+    tcg_temp_free_i64(t2);
+}
+
+static inline void
+gen_msub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
+             TCGv arg3, uint32_t n, CPUTriCoreState *env)
+{
+    TCGv_i64 t1 = tcg_temp_new_i64();
+    TCGv_i64 t2 = tcg_temp_new_i64();
+    TCGv_i64 t3 = tcg_temp_new_i64();
+    TCGv_i64 t4 = tcg_temp_new_i64();
+    TCGv temp, temp2;
+
+    tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
+    tcg_gen_ext_i32_i64(t2, arg2);
+    tcg_gen_ext_i32_i64(t3, arg3);
+
+    tcg_gen_mul_i64(t2, t2, t3);
+    if (n != 0) {
+        tcg_gen_shli_i64(t2, t2, 1);
+    }
+    tcg_gen_sub_i64(t4, t1, t2);
+    /* calc v bit */
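+    /* V is set iff the operands have different signs and the result's
+       sign differs from the minuend's: bit 63 of (t4 ^ t1) & (t1 ^ t2)
+       lands in bit 31 of cpu_PSW_V */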
+    tcg_gen_xor_i64(t3, t4, t1);
+    tcg_gen_xor_i64(t2, t1, t2);
+    tcg_gen_and_i64(t3, t3, t2);
+    tcg_gen_trunc_shr_i64_i32(cpu_PSW_V, t3, 32);
+    /* We produce an overflow on the host if the preceding mul was
+       (0x80000000 * 0x80000000) << 1. If this is the
+       case, we invert the V bit. */
+    if (n == 1) {
+        temp = tcg_temp_new();
+        temp2 = tcg_temp_new();
+        tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000);
+        tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3);
+        tcg_gen_and_tl(temp, temp, temp2);
+        tcg_gen_shli_tl(temp, temp, 31);
+        /* invert the V bit if the special case occurred */
+        tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp);
+
+        tcg_temp_free(temp);
+        tcg_temp_free(temp2);
+    }
+    /* write back result */
+    tcg_gen_extr_i64_i32(rl, rh, t4);
+    /* Calc SV bit */
+    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+    /* Calc AV/SAV bits */
+    tcg_gen_add_tl(cpu_PSW_AV, rh, rh);
+    tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV);
+    /* calc SAV */
+    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+
+    tcg_temp_free_i64(t1);
+    tcg_temp_free_i64(t2);
+    tcg_temp_free_i64(t3);
+    tcg_temp_free_i64(t4);
+}
+
+static inline void
+gen_msubs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
+              uint32_t up_shift)
+{
+    TCGv_i64 t1 = tcg_temp_new_i64();
+    TCGv_i64 t2 = tcg_temp_new_i64();
+    TCGv_i64 t3 = tcg_temp_new_i64();
+    TCGv_i64 t4 = tcg_temp_new_i64();
+
+    tcg_gen_ext_i32_i64(t1, arg1);
+    tcg_gen_ext_i32_i64(t2, arg2);
+    tcg_gen_ext_i32_i64(t3, arg3);
+
+    tcg_gen_mul_i64(t2, t2, t3);
+    /* if we shift part of the fraction out, we need to round up */
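+    /* t4 becomes 1 iff any of the (up_shift - n) discarded fraction
+       bits are set; adding it to the arithmetically shifted product
+       rounds towards +infinity instead of towards -infinity */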
+    tcg_gen_andi_i64(t4, t2, (1ll << (up_shift - n)) - 1);
+    tcg_gen_setcondi_i64(TCG_COND_NE, t4, t4, 0);
+    tcg_gen_sari_i64(t3, t2, up_shift - n);
+    tcg_gen_add_i64(t3, t3, t4);
+
+    gen_helper_msub32_q_sub_ssov(ret, cpu_env, t1, t3);
+
+    tcg_temp_free_i64(t1);
+    tcg_temp_free_i64(t2);
+    tcg_temp_free_i64(t3);
+    tcg_temp_free_i64(t4);
+}
+
+static inline void
+gen_msubs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
+             TCGv arg3, uint32_t n)
+{
+    TCGv_i64 r1 = tcg_temp_new_i64();
+    TCGv temp = tcg_const_i32(n);
+
+    tcg_gen_concat_i32_i64(r1, arg1_low, arg1_high);
+    gen_helper_msub64_q_ssov(r1, cpu_env, r1, arg2, arg3, temp);
+    tcg_gen_extr_i64_i32(rl, rh, r1);
+
+    tcg_temp_free_i64(r1);
+    tcg_temp_free(temp);
+}
+
+static inline void
+gen_msubad_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+             TCGv r3, uint32_t n, uint32_t mode)
+{
+    TCGv temp = tcg_const_i32(n);
+    TCGv temp2 = tcg_temp_new();
+    TCGv_i64 temp64 = tcg_temp_new_i64();
+    switch (mode) {
+    case MODE_LL:
+        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+        break;
+    case MODE_LU:
+        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+        break;
+    case MODE_UL:
+        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+        break;
+    case MODE_UU:
+        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+        break;
+    }
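+    /* unpack the two halfword products; gen_addsub64_h then adds one
+       to and subtracts the other from the halves of the accumulator */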
+    tcg_gen_extr_i64_i32(temp, temp2, temp64);
+    gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2,
+                   tcg_gen_add_tl, tcg_gen_sub_tl);
+    tcg_temp_free(temp);
+    tcg_temp_free(temp2);
+    tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_msubadm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+              TCGv r3, uint32_t n, uint32_t mode)
+{
+    TCGv temp = tcg_const_i32(n);
+    TCGv_i64 temp64 = tcg_temp_new_i64();
+    TCGv_i64 temp64_2 = tcg_temp_new_i64();
+    TCGv_i64 temp64_3 = tcg_temp_new_i64();
+    switch (mode) {
+    case MODE_LL:
+        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+        break;
+    case MODE_LU:
+        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+        break;
+    case MODE_UL:
+        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+        break;
+    case MODE_UU:
+        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+        break;
+    }
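+    /* temp64 = (p_hi - p_lo) << 16; subtracting it from the 64-bit
+       accumulator subtracts the high product and adds back the low
+       one, each scaled by 2^16 */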
+    tcg_gen_concat_i32_i64(temp64_3, r1_low, r1_high);
+    tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */
+    tcg_gen_ext32s_i64(temp64, temp64); /* low */
+    tcg_gen_sub_i64(temp64, temp64_2, temp64);
+    tcg_gen_shli_i64(temp64, temp64, 16);
+
+    gen_sub64_d(temp64_2, temp64_3, temp64);
+    /* write back result */
+    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_2);
+
+    tcg_temp_free(temp);
+    tcg_temp_free_i64(temp64);
+    tcg_temp_free_i64(temp64_2);
+    tcg_temp_free_i64(temp64_3);
+}
+
+static inline void
+gen_msubadr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
+{
+    TCGv temp = tcg_const_i32(n);
+    TCGv temp2 = tcg_temp_new();
+    TCGv_i64 temp64 = tcg_temp_new_i64();
+    switch (mode) {
+    case MODE_LL:
+        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+        break;
+    case MODE_LU:
+        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+        break;
+    case MODE_UL:
+        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+        break;
+    case MODE_UU:
+        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+        break;
+    }
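+    /* place each halfword of r1 in the upper 16 bits of its own
+       register (low halfword in temp, high halfword in temp2) for the
+       rounding helper */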
+    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
+    tcg_gen_shli_tl(temp, r1, 16);
+    gen_helper_subadr_h(ret, cpu_env, temp64, temp, temp2);
+
+    tcg_temp_free(temp);
+    tcg_temp_free(temp2);
+    tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_msubads_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+              TCGv r3, uint32_t n, uint32_t mode)
+{
+    TCGv temp = tcg_const_i32(n);
+    TCGv temp2 = tcg_temp_new();
+    TCGv temp3 = tcg_temp_new();
+    TCGv_i64 temp64 = tcg_temp_new_i64();
+
+    switch (mode) {
+    case MODE_LL:
+        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+        break;
+    case MODE_LU:
+        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+        break;
+    case MODE_UL:
+        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+        break;
+    case MODE_UU:
+        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+        break;
+    }
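+    /* gen_adds and gen_subs each overwrite PSW_V/PSW_AV, so save the
+       flags from the add and OR them back in after the sub */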
+    tcg_gen_extr_i64_i32(temp, temp2, temp64);
+    gen_adds(ret_low, r1_low, temp);
+    tcg_gen_mov_tl(temp, cpu_PSW_V);
+    tcg_gen_mov_tl(temp3, cpu_PSW_AV);
+    gen_subs(ret_high, r1_high, temp2);
+    /* combine v bits */
+    tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
+    /* combine av bits */
+    tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);
+
+    tcg_temp_free(temp);
+    tcg_temp_free(temp2);
+    tcg_temp_free(temp3);
+    tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_msubadms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+               TCGv r3, uint32_t n, uint32_t mode)
+{
+    TCGv temp = tcg_const_i32(n);
+    TCGv_i64 temp64 = tcg_temp_new_i64();
+    TCGv_i64 temp64_2 = tcg_temp_new_i64();
+
+    switch (mode) {
+    case MODE_LL:
+        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+        break;
+    case MODE_LU:
+        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+        break;
+    case MODE_UL:
+        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+        break;
+    case MODE_UU:
+        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+        break;
+    }
+    tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */
+    tcg_gen_ext32s_i64(temp64, temp64); /* low */
+    tcg_gen_sub_i64(temp64, temp64_2, temp64);
+    tcg_gen_shli_i64(temp64, temp64, 16);
+    tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
+
+    gen_helper_sub64_ssov(temp64, cpu_env, temp64_2, temp64);
+    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
+
+    tcg_temp_free(temp);
+    tcg_temp_free_i64(temp64);
+    tcg_temp_free_i64(temp64_2);
+}
+
+static inline void
+gen_msubadr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
+{
+    TCGv temp = tcg_const_i32(n);
+    TCGv temp2 = tcg_temp_new();
+    TCGv_i64 temp64 = tcg_temp_new_i64();
+    switch (mode) {
+    case MODE_LL:
+        GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+        break;
+    case MODE_LU:
+        GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+        break;
+    case MODE_UL:
+        GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+        break;
+    case MODE_UU:
+        GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+        break;
+    }
+    tcg_gen_andi_tl(temp2, r1, 0xffff0000);
+    tcg_gen_shli_tl(temp, r1, 16);
+    gen_helper_subadr_h_ssov(ret, cpu_env, temp64, temp, temp2);
+
+    tcg_temp_free(temp);
+    tcg_temp_free(temp2);
+    tcg_temp_free_i64(temp64);
+}
+
 static inline void gen_abs(TCGv ret, TCGv r1)
 {
     TCGv temp = tcg_temp_new();
@@ -2603,6 +3326,7 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1,
         tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], 0xfffffffe);
         tcg_gen_exit_tb(0);
         break;
+    case OPC2_32_SYS_RET:
     case OPC2_16_SR_RET:
         gen_helper_ret(cpu_env);
         tcg_gen_exit_tb(0);
@@ -6468,6 +7192,575 @@ static void decode_rrr1_maddsu_h(CPUTriCoreState *env, DisasContext *ctx)
     }
 }
 
+static void decode_rrr1_msub(CPUTriCoreState *env, DisasContext *ctx)
+{
+    uint32_t op2;
+    uint32_t r1, r2, r3, r4, n;
+
+    op2 = MASK_OP_RRR1_OP2(ctx->opcode);
+    r1 = MASK_OP_RRR1_S1(ctx->opcode);
+    r2 = MASK_OP_RRR1_S2(ctx->opcode);
+    r3 = MASK_OP_RRR1_S3(ctx->opcode);
+    r4 = MASK_OP_RRR1_D(ctx->opcode);
+    n = MASK_OP_RRR1_N(ctx->opcode);
+
+    switch (op2) {
+    case OPC2_32_RRR1_MSUB_H_LL:
+        gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                   cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+        break;
+    case OPC2_32_RRR1_MSUB_H_LU:
+        gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                   cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+        break;
+    case OPC2_32_RRR1_MSUB_H_UL:
+        gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                   cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+        break;
+    case OPC2_32_RRR1_MSUB_H_UU:
+        gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                   cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+        break;
+    case OPC2_32_RRR1_MSUBS_H_LL:
+        gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                    cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+        break;
+    case OPC2_32_RRR1_MSUBS_H_LU:
+        gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                    cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+        break;
+    case OPC2_32_RRR1_MSUBS_H_UL:
+        gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                    cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+        break;
+    case OPC2_32_RRR1_MSUBS_H_UU:
+        gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                    cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+        break;
+    case OPC2_32_RRR1_MSUBM_H_LL:
+        gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                    cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+        break;
+    case OPC2_32_RRR1_MSUBM_H_LU:
+        gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                    cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+        break;
+    case OPC2_32_RRR1_MSUBM_H_UL:
+        gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                    cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+        break;
+    case OPC2_32_RRR1_MSUBM_H_UU:
+        gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                    cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+        break;
+    case OPC2_32_RRR1_MSUBMS_H_LL:
+        gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+        break;
+    case OPC2_32_RRR1_MSUBMS_H_LU:
+        gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+        break;
+    case OPC2_32_RRR1_MSUBMS_H_UL:
+        gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+        break;
+    case OPC2_32_RRR1_MSUBMS_H_UU:
+        gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+        break;
+    case OPC2_32_RRR1_MSUBR_H_LL:
+        gen_msubr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+                      cpu_gpr_d[r2], n, MODE_LL);
+        break;
+    case OPC2_32_RRR1_MSUBR_H_LU:
+        gen_msubr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+                      cpu_gpr_d[r2], n, MODE_LU);
+        break;
+    case OPC2_32_RRR1_MSUBR_H_UL:
+        gen_msubr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+                      cpu_gpr_d[r2], n, MODE_UL);
+        break;
+    case OPC2_32_RRR1_MSUBR_H_UU:
+        gen_msubr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+                      cpu_gpr_d[r2], n, MODE_UU);
+        break;
+    case OPC2_32_RRR1_MSUBRS_H_LL:
+        gen_msubr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+                       cpu_gpr_d[r2], n, MODE_LL);
+        break;
+    case OPC2_32_RRR1_MSUBRS_H_LU:
+        gen_msubr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+                       cpu_gpr_d[r2], n, MODE_LU);
+        break;
+    case OPC2_32_RRR1_MSUBRS_H_UL:
+        gen_msubr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+                       cpu_gpr_d[r2], n, MODE_UL);
+        break;
+    case OPC2_32_RRR1_MSUBRS_H_UU:
+        gen_msubr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+                       cpu_gpr_d[r2], n, MODE_UU);
+        break;
+    }
+}
+
+static void decode_rrr1_msubq_h(CPUTriCoreState *env, DisasContext *ctx)
+{
+    uint32_t op2;
+    uint32_t r1, r2, r3, r4, n;
+    TCGv temp, temp2;
+
+    op2 = MASK_OP_RRR1_OP2(ctx->opcode);
+    r1 = MASK_OP_RRR1_S1(ctx->opcode);
+    r2 = MASK_OP_RRR1_S2(ctx->opcode);
+    r3 = MASK_OP_RRR1_S3(ctx->opcode);
+    r4 = MASK_OP_RRR1_D(ctx->opcode);
+    n = MASK_OP_RRR1_N(ctx->opcode);
+
+    temp = tcg_const_i32(n);
+    temp2 = tcg_temp_new();
+
+    switch (op2) {
+    case OPC2_32_RRR1_MSUB_Q_32:
+        gen_msub32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+                     cpu_gpr_d[r2], n, 32, env);
+        break;
+    case OPC2_32_RRR1_MSUB_Q_64:
+        gen_msub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+                     n, env);
+        break;
+    case OPC2_32_RRR1_MSUB_Q_32_L:
+        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
+        gen_msub32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+                     temp, n, 16, env);
+        break;
+    case OPC2_32_RRR1_MSUB_Q_64_L:
+        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
+        gen_msub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
+                     n, env);
+        break;
+    case OPC2_32_RRR1_MSUB_Q_32_U:
+        tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
+        gen_msub32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+                     temp, n, 16, env);
+        break;
+    case OPC2_32_RRR1_MSUB_Q_64_U:
+        tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
+        gen_msub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
+                     n, env);
+        break;
+    case OPC2_32_RRR1_MSUB_Q_32_LL:
+        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
+        tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+        gen_m16sub32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+        break;
+    case OPC2_32_RRR1_MSUB_Q_64_LL:
+        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
+        tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+        gen_m16sub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                       cpu_gpr_d[r3+1], temp, temp2, n);
+        break;
+    case OPC2_32_RRR1_MSUB_Q_32_UU:
+        tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
+        tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+        gen_m16sub32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+        break;
+    case OPC2_32_RRR1_MSUB_Q_64_UU:
+        tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
+        tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+        gen_m16sub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                       cpu_gpr_d[r3+1], temp, temp2, n);
+        break;
+    case OPC2_32_RRR1_MSUBS_Q_32:
+        gen_msubs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+                      cpu_gpr_d[r2], n, 32);
+        break;
+    case OPC2_32_RRR1_MSUBS_Q_64:
+        gen_msubs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                      cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+                      n);
+        break;
+    case OPC2_32_RRR1_MSUBS_Q_32_L:
+        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
+        gen_msubs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+                      temp, n, 16);
+        break;
+    case OPC2_32_RRR1_MSUBS_Q_64_L:
+        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
+        gen_msubs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                      cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
+                      n);
+        break;
+    case OPC2_32_RRR1_MSUBS_Q_32_U:
+        tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
+        gen_msubs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+                      temp, n, 16);
+        break;
+    case OPC2_32_RRR1_MSUBS_Q_64_U:
+        tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
+        gen_msubs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                      cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
+                      n);
+        break;
+    case OPC2_32_RRR1_MSUBS_Q_32_LL:
+        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
+        tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+        gen_m16subs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+        break;
+    case OPC2_32_RRR1_MSUBS_Q_64_LL:
+        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
+        tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+        gen_m16subs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                        cpu_gpr_d[r3+1], temp, temp2, n);
+        break;
+    case OPC2_32_RRR1_MSUBS_Q_32_UU:
+        tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
+        tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+        gen_m16subs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+        break;
+    case OPC2_32_RRR1_MSUBS_Q_64_UU:
+        tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
+        tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+        gen_m16subs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                        cpu_gpr_d[r3+1], temp, temp2, n);
+        break;
+    case OPC2_32_RRR1_MSUBR_H_64_UL:
+        gen_msubr64_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3+1],
+                      cpu_gpr_d[r1], cpu_gpr_d[r2], n, 2);
+        break;
+    case OPC2_32_RRR1_MSUBRS_H_64_UL:
+        gen_msubr64s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3+1],
+                       cpu_gpr_d[r1], cpu_gpr_d[r2], n, 2);
+        break;
+    case OPC2_32_RRR1_MSUBR_Q_32_LL:
+        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
+        tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+        gen_msubr_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+        break;
+    case OPC2_32_RRR1_MSUBR_Q_32_UU:
+        tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
+        tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+        gen_msubr_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+        break;
+    case OPC2_32_RRR1_MSUBRS_Q_32_LL:
+        tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
+        tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+        gen_msubrs_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+        break;
+    case OPC2_32_RRR1_MSUBRS_Q_32_UU:
+        tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
+        tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+        gen_msubrs_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+        break;
+    }
+    tcg_temp_free(temp);
+    tcg_temp_free(temp2);
+}
+
+static void decode_rrr1_msubad_h(CPUTriCoreState *env, DisasContext *ctx)
+{
+    uint32_t op2;
+    uint32_t r1, r2, r3, r4, n;
+
+    op2 = MASK_OP_RRR1_OP2(ctx->opcode);
+    r1 = MASK_OP_RRR1_S1(ctx->opcode);
+    r2 = MASK_OP_RRR1_S2(ctx->opcode);
+    r3 = MASK_OP_RRR1_S3(ctx->opcode);
+    r4 = MASK_OP_RRR1_D(ctx->opcode);
+    n = MASK_OP_RRR1_N(ctx->opcode);
+
+    switch (op2) {
+    case OPC2_32_RRR1_MSUBAD_H_32_LL:
+        gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+        break;
+    case OPC2_32_RRR1_MSUBAD_H_32_LU:
+        gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+        break;
+    case OPC2_32_RRR1_MSUBAD_H_32_UL:
+        gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+        break;
+    case OPC2_32_RRR1_MSUBAD_H_32_UU:
+        gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                     cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+        break;
+    case OPC2_32_RRR1_MSUBADS_H_32_LL:
+        gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                      cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+                      n, MODE_LL);
+        break;
+    case OPC2_32_RRR1_MSUBADS_H_32_LU:
+        gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                      cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+                      n, MODE_LU);
+        break;
+    case OPC2_32_RRR1_MSUBADS_H_32_UL:
+        gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                      cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+                      n, MODE_UL);
+        break;
+    case OPC2_32_RRR1_MSUBADS_H_32_UU:
+        gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                      cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+                      n, MODE_UU);
+        break;
+    case OPC2_32_RRR1_MSUBADM_H_64_LL:
+        gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                      cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+                      n, MODE_LL);
+        break;
+    case OPC2_32_RRR1_MSUBADM_H_64_LU:
+        gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                      cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+                      n, MODE_LU);
+        break;
+    case OPC2_32_RRR1_MSUBADM_H_64_UL:
+        gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                      cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+                      n, MODE_UL);
+        break;
+    case OPC2_32_RRR1_MSUBADM_H_64_UU:
+        gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                      cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+                      n, MODE_UU);
+        break;
+    case OPC2_32_RRR1_MSUBADMS_H_64_LL:
+        gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                       cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+                       n, MODE_LL);
+        break;
+    case OPC2_32_RRR1_MSUBADMS_H_64_LU:
+        gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                       cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+                       n, MODE_LU);
+        break;
+    case OPC2_32_RRR1_MSUBADMS_H_64_UL:
+        gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                       cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+                       n, MODE_UL);
+        break;
+    case OPC2_32_RRR1_MSUBADMS_H_64_UU:
+        gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+                       cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+                       n, MODE_UU);
+        break;
+    case OPC2_32_RRR1_MSUBADR_H_16_LL:
+        gen_msubadr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+                        cpu_gpr_d[r2], n, MODE_LL);
+        break;
+    case OPC2_32_RRR1_MSUBADR_H_16_LU:
+        gen_msubadr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+                        cpu_gpr_d[r2], n, MODE_LU);
+        break;
+    case OPC2_32_RRR1_MSUBADR_H_16_UL:
+        gen_msubadr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+                        cpu_gpr_d[r2], n, MODE_UL);
+        break;
+    case OPC2_32_RRR1_MSUBADR_H_16_UU:
+        gen_msubadr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+                        cpu_gpr_d[r2], n, MODE_UU);
+        break;
+    case OPC2_32_RRR1_MSUBADRS_H_16_LL:
+        gen_msubadr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+                         cpu_gpr_d[r2], n, MODE_LL);
+        break;
+    case OPC2_32_RRR1_MSUBADRS_H_16_LU:
+        gen_msubadr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+                         cpu_gpr_d[r2], n, MODE_LU);
+        break;
+    case OPC2_32_RRR1_MSUBADRS_H_16_UL:
+        gen_msubadr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+                         cpu_gpr_d[r2], n, MODE_UL);
+        break;
+    case OPC2_32_RRR1_MSUBADRS_H_16_UU:
+        gen_msubadr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+                         cpu_gpr_d[r2], n, MODE_UU);
+        break;
+    }
+}
+
+/* RRRR format */
+static void decode_rrrr_extract_insert(CPUTriCoreState *env, DisasContext *ctx)
+{
+    uint32_t op2;
+    int r1, r2, r3, r4;
+    TCGv tmp_width, tmp_pos;
+
+    r1 = MASK_OP_RRRR_S1(ctx->opcode);
+    r2 = MASK_OP_RRRR_S2(ctx->opcode);
+    r3 = MASK_OP_RRRR_S3(ctx->opcode);
+    r4 = MASK_OP_RRRR_D(ctx->opcode);
+    op2 = MASK_OP_RRRR_OP2(ctx->opcode);
+
+    tmp_pos = tcg_temp_new();
+    tmp_width = tcg_temp_new();
+
+    switch (op2) {
+    case OPC2_32_RRRR_DEXTR:
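+        /* DEXTR returns the upper 32 bits of {r1, r2} << pos, i.e.
+           (r1 << pos) | (r2 >> (32 - pos)); with r1 == r2 this is a
+           rotate left by pos */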
+        tcg_gen_andi_tl(tmp_pos, cpu_gpr_d[r3], 0x1f);
+        if (r1 == r2) {
+            tcg_gen_rotl_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], tmp_pos);
+        } else {
+            tcg_gen_shl_tl(tmp_width, cpu_gpr_d[r1], tmp_pos);
+            tcg_gen_subfi_tl(tmp_pos, 32, tmp_pos);
+            tcg_gen_shr_tl(tmp_pos, cpu_gpr_d[r2], tmp_pos);
+            tcg_gen_or_tl(cpu_gpr_d[r4], tmp_width, tmp_pos);
+        }
+        break;
+    case OPC2_32_RRRR_EXTR:
+    case OPC2_32_RRRR_EXTR_U:
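+        /* move the field up so its MSB lands in bit 31, then shift
+           back down arithmetically (EXTR) or logically (EXTR.U) to
+           sign- or zero-extend it */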
+        tcg_gen_andi_tl(tmp_width, cpu_gpr_d[r3+1], 0x1f);
+        tcg_gen_andi_tl(tmp_pos, cpu_gpr_d[r3], 0x1f);
+        tcg_gen_add_tl(tmp_pos, tmp_pos, tmp_width);
+        tcg_gen_subfi_tl(tmp_pos, 32, tmp_pos);
+        tcg_gen_shl_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], tmp_pos);
+        tcg_gen_subfi_tl(tmp_width, 32, tmp_width);
+        if (op2 == OPC2_32_RRRR_EXTR) {
+            tcg_gen_sar_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], tmp_width);
+        } else {
+            tcg_gen_shr_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], tmp_width);
+        }
+        break;
+    case OPC2_32_RRRR_INSERT:
+        tcg_gen_andi_tl(tmp_width, cpu_gpr_d[r3+1], 0x1f);
+        tcg_gen_andi_tl(tmp_pos, cpu_gpr_d[r3], 0x1f);
+        gen_insert(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r2], tmp_width,
+                   tmp_pos);
+        break;
+    }
+    tcg_temp_free(tmp_pos);
+    tcg_temp_free(tmp_width);
+}
+
+/* RRRW format */
+static void decode_rrrw_extract_insert(CPUTriCoreState *env, DisasContext *ctx)
+{
+    uint32_t op2;
+    int r1, r2, r3, r4;
+    int32_t width;
+
+    TCGv temp, temp2;
+
+    op2 = MASK_OP_RRRW_OP2(ctx->opcode);
+    r1  = MASK_OP_RRRW_S1(ctx->opcode);
+    r2  = MASK_OP_RRRW_S2(ctx->opcode);
+    r3  = MASK_OP_RRRW_S3(ctx->opcode);
+    r4  = MASK_OP_RRRW_D(ctx->opcode);
+    width = MASK_OP_RRRW_WIDTH(ctx->opcode);
+
+    temp = tcg_temp_new();
+
+    switch (op2) {
+    case OPC2_32_RRRW_EXTR:
+        tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f);
+        tcg_gen_addi_tl(temp, temp, width);
+        tcg_gen_subfi_tl(temp, 32, temp);
+        tcg_gen_shl_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], temp);
+        tcg_gen_sari_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], 32 - width);
+        break;
+    case OPC2_32_RRRW_EXTR_U:
+        if (width == 0) {
+            tcg_gen_movi_tl(cpu_gpr_d[r4], 0);
+        } else {
+            tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f);
+            tcg_gen_shr_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], temp);
+            tcg_gen_andi_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], ~0u >> (32-width));
+        }
+        break;
+    case OPC2_32_RRRW_IMASK:
+        temp2 = tcg_temp_new();
+
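+        /* IMASK produces a register pair: d[r4] = r2 << pos and
+           d[r4+1] = a mask of width one bits at bit position pos */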
+        tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f);
+        tcg_gen_movi_tl(temp2, (1 << width) - 1);
+        tcg_gen_shl_tl(temp2, temp2, temp);
+        tcg_gen_shl_tl(cpu_gpr_d[r4], cpu_gpr_d[r2], temp);
+        tcg_gen_mov_tl(cpu_gpr_d[r4+1], temp2);
+
+        tcg_temp_free(temp2);
+        break;
+    case OPC2_32_RRRW_INSERT:
+        temp2 = tcg_temp_new();
+
+        tcg_gen_movi_tl(temp, width);
+        tcg_gen_andi_tl(temp2, cpu_gpr_d[r3], 0x1f);
+        gen_insert(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r2], temp, temp2);
+
+        tcg_temp_free(temp2);
+        break;
+    }
+    tcg_temp_free(temp);
+}
+
+/* SYS format */
+static void decode_sys_interrupts(CPUTriCoreState *env, DisasContext *ctx)
+{
+    uint32_t op2;
+    TCGLabel *l1;
+    TCGv tmp;
+
+    op2 = MASK_OP_SYS_OP2(ctx->opcode);
+
+    switch (op2) {
+    case OPC2_32_SYS_DEBUG:
+        /* raise EXCP_DEBUG */
+        break;
+    case OPC2_32_SYS_DISABLE:
+        tcg_gen_andi_tl(cpu_ICR, cpu_ICR, ~MASK_ICR_IE);
+        break;
+    case OPC2_32_SYS_DSYNC:
+        break;
+    case OPC2_32_SYS_ENABLE:
+        tcg_gen_ori_tl(cpu_ICR, cpu_ICR, MASK_ICR_IE);
+        break;
+    case OPC2_32_SYS_ISYNC:
+        break;
+    case OPC2_32_SYS_NOP:
+        break;
+    case OPC2_32_SYS_RET:
+        gen_compute_branch(ctx, op2, 0, 0, 0, 0);
+        break;
+    case OPC2_32_SYS_RFE:
+        gen_helper_rfe(cpu_env);
+        tcg_gen_exit_tb(0);
+        ctx->bstate = BS_BRANCH;
+        break;
+    case OPC2_32_SYS_RFM:
+        if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM) {
+            tmp = tcg_temp_new();
+            l1 = gen_new_label();
+
+            tcg_gen_ld32u_tl(tmp, cpu_env, offsetof(CPUTriCoreState, DBGSR));
+            tcg_gen_andi_tl(tmp, tmp, MASK_DBGSR_DE);
+            tcg_gen_brcondi_tl(TCG_COND_NE, tmp, 1, l1);
+            gen_helper_rfm(cpu_env);
+            gen_set_label(l1);
+            tcg_gen_exit_tb(0);
+            ctx->bstate = BS_BRANCH;
+            tcg_temp_free(tmp);
+        } else {
+            /* generate privilege trap */
+        }
+        break;
+    case OPC2_32_SYS_RSLCX:
+        gen_helper_rslcx(cpu_env);
+        break;
+    case OPC2_32_SYS_SVLCX:
+        gen_helper_svlcx(cpu_env);
+        break;
+    case OPC2_32_SYS_TRAPSV:
+        /* TODO: raise sticky overflow trap */
+        break;
+    case OPC2_32_SYS_TRAPV:
+        /* TODO: raise overflow trap */
+        break;
+    }
+}
+
 static void decode_32Bit_opc(CPUTriCoreState *env, DisasContext *ctx)
 {
     int op1;
@@ -6774,6 +8067,32 @@ static void decode_32Bit_opc(CPUTriCoreState *env, DisasContext *ctx)
     case OPCM_32_RRR1_MADDSU_H:
         decode_rrr1_maddsu_h(env, ctx);
         break;
+    case OPCM_32_RRR1_MSUB_H:
+        decode_rrr1_msub(env, ctx);
+        break;
+    case OPCM_32_RRR1_MSUB_Q:
+        decode_rrr1_msubq_h(env, ctx);
+        break;
+    case OPCM_32_RRR1_MSUBAD_H:
+        decode_rrr1_msubad_h(env, ctx);
+        break;
+/* RRRR format */
+    case OPCM_32_RRRR_EXTRACT_INSERT:
+        decode_rrrr_extract_insert(env, ctx);
+        break;
+/* RRRW format */
+    case OPCM_32_RRRW_EXTRACT_INSERT:
+        decode_rrrw_extract_insert(env, ctx);
+        break;
+/* SYS format */
+    case OPCM_32_SYS_INTERRUPTS:
+        decode_sys_interrupts(env, ctx);
+        break;
+    case OPC1_32_SYS_RSTV:
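+        /* RSTV resets the overflow bits: V, SV, AV and SAV are all
+           cleared */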
+        tcg_gen_movi_tl(cpu_PSW_V, 0);
+        tcg_gen_mov_tl(cpu_PSW_SV, cpu_PSW_V);
+        tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
+        tcg_gen_mov_tl(cpu_PSW_SAV, cpu_PSW_V);
+        break;
     }
 }
 
diff --git a/target-tricore/tricore-opcodes.h b/target-tricore/tricore-opcodes.h
index 41c9ef60ad..d3a9bc158b 100644
--- a/target-tricore/tricore-opcodes.h
+++ b/target-tricore/tricore-opcodes.h
@@ -523,7 +523,7 @@ enum {
     OPCM_32_RRR1_MADDSU_H                            = 0xc3,
     OPCM_32_RRR1_MSUB_H                              = 0xa3,
     OPCM_32_RRR1_MSUB_Q                              = 0x63,
-    OPCM_32_RRR1_MSUBADS_H                           = 0xe3,
+    OPCM_32_RRR1_MSUBAD_H                            = 0xe3,
 /* RRR2 Format */
     OPCM_32_RRR2_MADD                                = 0x03,
     OPCM_32_RRR2_MSUB                                = 0x23,
@@ -1281,30 +1281,30 @@ enum {
 };
 /* OPCM_32_RRR1_MSUB_H                              */
 enum {
-    OPC2_32_RRR1_MSUB_H_32_LL                    = 0x1a,
-    OPC2_32_RRR1_MSUB_H_32_LU                    = 0x19,
-    OPC2_32_RRR1_MSUB_H_32_UL                    = 0x18,
-    OPC2_32_RRR1_MSUB_H_32_UU                    = 0x1b,
-    OPC2_32_RRR1_MSUBS_H_32_LL                   = 0x3a,
-    OPC2_32_RRR1_MSUBS_H_32_LU                   = 0x39,
-    OPC2_32_RRR1_MSUBS_H_32_UL                   = 0x38,
-    OPC2_32_RRR1_MSUBS_H_32_UU                   = 0x3b,
-    OPC2_32_RRR1_MSUBM_H_64_LL                   = 0x1e,
-    OPC2_32_RRR1_MSUBM_H_64_LU                   = 0x1d,
-    OPC2_32_RRR1_MSUBM_H_64_UL                   = 0x1c,
-    OPC2_32_RRR1_MSUBM_H_64_UU                   = 0x1f,
-    OPC2_32_RRR1_MSUBMS_H_64_LL                  = 0x3e,
-    OPC2_32_RRR1_MSUBMS_H_64_LU                  = 0x3d,
-    OPC2_32_RRR1_MSUBMS_H_64_UL                  = 0x3c,
-    OPC2_32_RRR1_MSUBMS_H_64_UU                  = 0x3f,
-    OPC2_32_RRR1_MSUBR_H_16_LL                   = 0x0e,
-    OPC2_32_RRR1_MSUBR_H_16_LU                   = 0x0d,
-    OPC2_32_RRR1_MSUBR_H_16_UL                   = 0x0c,
-    OPC2_32_RRR1_MSUBR_H_16_UU                   = 0x0f,
-    OPC2_32_RRR1_MSUBRS_H_16_LL                  = 0x2e,
-    OPC2_32_RRR1_MSUBRS_H_16_LU                  = 0x2d,
-    OPC2_32_RRR1_MSUBRS_H_16_UL                  = 0x2c,
-    OPC2_32_RRR1_MSUBRS_H_16_UU                  = 0x2f,
+    OPC2_32_RRR1_MSUB_H_LL                       = 0x1a,
+    OPC2_32_RRR1_MSUB_H_LU                       = 0x19,
+    OPC2_32_RRR1_MSUB_H_UL                       = 0x18,
+    OPC2_32_RRR1_MSUB_H_UU                       = 0x1b,
+    OPC2_32_RRR1_MSUBS_H_LL                      = 0x3a,
+    OPC2_32_RRR1_MSUBS_H_LU                      = 0x39,
+    OPC2_32_RRR1_MSUBS_H_UL                      = 0x38,
+    OPC2_32_RRR1_MSUBS_H_UU                      = 0x3b,
+    OPC2_32_RRR1_MSUBM_H_LL                      = 0x1e,
+    OPC2_32_RRR1_MSUBM_H_LU                      = 0x1d,
+    OPC2_32_RRR1_MSUBM_H_UL                      = 0x1c,
+    OPC2_32_RRR1_MSUBM_H_UU                      = 0x1f,
+    OPC2_32_RRR1_MSUBMS_H_LL                     = 0x3e,
+    OPC2_32_RRR1_MSUBMS_H_LU                     = 0x3d,
+    OPC2_32_RRR1_MSUBMS_H_UL                     = 0x3c,
+    OPC2_32_RRR1_MSUBMS_H_UU                     = 0x3f,
+    OPC2_32_RRR1_MSUBR_H_LL                      = 0x0e,
+    OPC2_32_RRR1_MSUBR_H_LU                      = 0x0d,
+    OPC2_32_RRR1_MSUBR_H_UL                      = 0x0c,
+    OPC2_32_RRR1_MSUBR_H_UU                      = 0x0f,
+    OPC2_32_RRR1_MSUBRS_H_LL                     = 0x2e,
+    OPC2_32_RRR1_MSUBRS_H_LU                     = 0x2d,
+    OPC2_32_RRR1_MSUBRS_H_UL                     = 0x2c,
+    OPC2_32_RRR1_MSUBRS_H_UU                     = 0x2f,
 };
 /* OPCM_32_RRR1_MSUB_Q                              */
 enum {
@@ -1328,8 +1328,8 @@ enum {
     OPC2_32_RRR1_MSUBS_Q_64_LL                   = 0x3d,
     OPC2_32_RRR1_MSUBS_Q_32_UU                   = 0x24,
     OPC2_32_RRR1_MSUBS_Q_64_UU                   = 0x3c,
-    OPC2_32_RRR1_MSUBR_H_32_UL                   = 0x1e,
-    OPC2_32_RRR1_MSUBRS_H_32_UL                  = 0x3e,
+    OPC2_32_RRR1_MSUBR_H_64_UL                   = 0x1e,
+    OPC2_32_RRR1_MSUBRS_H_64_UL                  = 0x3e,
     OPC2_32_RRR1_MSUBR_Q_32_LL                   = 0x07,
     OPC2_32_RRR1_MSUBR_Q_32_UU                   = 0x06,
     OPC2_32_RRR1_MSUBRS_Q_32_LL                  = 0x27,
@@ -1352,7 +1352,7 @@ enum {
     OPC2_32_RRR1_MSUBADMS_H_64_LL                = 0x3e,
     OPC2_32_RRR1_MSUBADMS_H_64_LU                = 0x3d,
     OPC2_32_RRR1_MSUBADMS_H_64_UL                = 0x3c,
-    OPC2_32_RRR1_MSUBADMS_H_16_UU                = 0x3f,
+    OPC2_32_RRR1_MSUBADMS_H_64_UU                = 0x3f,
     OPC2_32_RRR1_MSUBADR_H_16_LL                 = 0x0e,
     OPC2_32_RRR1_MSUBADR_H_16_LU                 = 0x0d,
     OPC2_32_RRR1_MSUBADR_H_16_UL                 = 0x0c,
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 067917c396..37c11103a6 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -980,8 +980,11 @@ static void tcg_constant_folding(TCGContext *s)
             if (temps_are_copies(args[1], args[2])) {
                 if (temps_are_copies(args[0], args[1])) {
                     tcg_op_remove(s, op);
-                } else {
+                } else if (temps[args[1]].state != TCG_TEMP_CONST) {
                     tcg_opt_gen_mov(s, op, args, opc, args[0], args[1]);
+                } else {
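+                    /* args[1] holds a known constant, so emit a movi
+                       of its value instead of a register copy */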
+                    tcg_opt_gen_movi(s, op, args, opc,
+                                     args[0], temps[args[1]].val);
                 }
                 continue;
             }