-rw-r--r--  disas/arm-a64.cc  2
-rw-r--r--  disas/libvixl/Makefile.objs  9
-rw-r--r--  disas/libvixl/README  3
-rw-r--r--  disas/libvixl/a64/assembler-a64.h  2353
-rw-r--r--  disas/libvixl/a64/disasm-a64.cc  1954
-rw-r--r--  disas/libvixl/a64/instructions-a64.cc  314
-rw-r--r--  disas/libvixl/a64/instructions-a64.h  384
-rw-r--r--  disas/libvixl/vixl/a64/assembler-a64.h  4624
-rw-r--r--  disas/libvixl/vixl/a64/constants-a64.h (renamed from disas/libvixl/a64/constants-a64.h)  967
-rw-r--r--  disas/libvixl/vixl/a64/cpu-a64.h (renamed from disas/libvixl/a64/cpu-a64.h)  6
-rw-r--r--  disas/libvixl/vixl/a64/decoder-a64.cc (renamed from disas/libvixl/a64/decoder-a64.cc)  210
-rw-r--r--  disas/libvixl/vixl/a64/decoder-a64.h (renamed from disas/libvixl/a64/decoder-a64.h)  58
-rw-r--r--  disas/libvixl/vixl/a64/disasm-a64.cc  3487
-rw-r--r--  disas/libvixl/vixl/a64/disasm-a64.h (renamed from disas/libvixl/a64/disasm-a64.h)  17
-rw-r--r--  disas/libvixl/vixl/a64/instructions-a64.cc  622
-rw-r--r--  disas/libvixl/vixl/a64/instructions-a64.h  757
-rw-r--r--  disas/libvixl/vixl/code-buffer.h (renamed from disas/libvixl/code-buffer.h)  2
-rw-r--r--  disas/libvixl/vixl/compiler-intrinsics.cc (renamed from disas/libvixl/utils.cc)  137
-rw-r--r--  disas/libvixl/vixl/compiler-intrinsics.h  155
-rw-r--r--  disas/libvixl/vixl/globals.h (renamed from disas/libvixl/globals.h)  82
-rw-r--r--  disas/libvixl/vixl/invalset.h  775
-rw-r--r--  disas/libvixl/vixl/platform.h (renamed from disas/libvixl/platform.h)  2
-rw-r--r--  disas/libvixl/vixl/utils.cc  142
-rw-r--r--  disas/libvixl/vixl/utils.h (renamed from disas/libvixl/utils.h)  115
24 files changed, 11989 insertions, 5188 deletions
diff --git a/disas/arm-a64.cc b/disas/arm-a64.cc
index b57256b267..d4d46d5ff3 100644
--- a/disas/arm-a64.cc
+++ b/disas/arm-a64.cc
@@ -17,7 +17,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include "a64/disasm-a64.h"
+#include "vixl/a64/disasm-a64.h"
 
 extern "C" {
 #include "disas/bfd.h"
diff --git a/disas/libvixl/Makefile.objs b/disas/libvixl/Makefile.objs
index 17e6565d10..e373cf02d3 100644
--- a/disas/libvixl/Makefile.objs
+++ b/disas/libvixl/Makefile.objs
@@ -1,7 +1,8 @@
-libvixl_OBJS = utils.o \
-               a64/instructions-a64.o \
-               a64/decoder-a64.o \
-               a64/disasm-a64.o
+libvixl_OBJS = vixl/utils.o \
+               vixl/compiler-intrinsics.o \
+               vixl/a64/instructions-a64.o \
+               vixl/a64/decoder-a64.o \
+               vixl/a64/disasm-a64.o
 
 $(addprefix $(obj)/,$(libvixl_OBJS)): QEMU_CFLAGS := -I$(SRC_PATH)/disas/libvixl $(QEMU_CFLAGS)
 
diff --git a/disas/libvixl/README b/disas/libvixl/README
index 58db41c67c..932a41adf7 100644
--- a/disas/libvixl/README
+++ b/disas/libvixl/README
@@ -2,11 +2,10 @@
 The code in this directory is a subset of libvixl:
  https://github.com/armvixl/vixl
 (specifically, it is the set of files needed for disassembly only,
-taken from libvixl 1.7).
+taken from libvixl 1.12).
 Bugfixes should preferably be sent upstream initially.
 
 The disassembler does not currently support the entire A64 instruction
 set. Notably:
- * No Advanced SIMD support.
  * Limited support for system instructions.
  * A few miscellaneous integer and floating point instructions are missing.
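The dropped bullet is the user-visible gain from this update: libvixl 1.12 can
decode Advanced SIMD instructions, which 1.7 only reported through its
unimplemented/unallocated fallback. A quick illustration, reusing the
hypothetical disas_one() sketch above (the encoding is a worked example of
mine, not something taken from this patch):

    // 0x4ea28420 should encode "add v0.4s, v1.4s, v2.4s" (Advanced SIMD
    // three-same ADD); with 1.12 it disassembles to the proper mnemonic and
    // vector arrangement instead of an "unimplemented" placeholder.
    uint32_t insn = 0x4ea28420;
    disas_one(&insn);
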
diff --git a/disas/libvixl/a64/assembler-a64.h b/disas/libvixl/a64/assembler-a64.h
deleted file mode 100644
index 35aaf20f72..0000000000
--- a/disas/libvixl/a64/assembler-a64.h
+++ /dev/null
@@ -1,2353 +0,0 @@
-// Copyright 2013, ARM Limited
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-//   * Redistributions of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-//   * Redistributions in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other materials provided with the distribution.
-//   * Neither the name of ARM Limited nor the names of its contributors may be
-//     used to endorse or promote products derived from this software without
-//     specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
-// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef VIXL_A64_ASSEMBLER_A64_H_
-#define VIXL_A64_ASSEMBLER_A64_H_
-
-#include <list>
-#include <stack>
-
-#include "globals.h"
-#include "utils.h"
-#include "code-buffer.h"
-#include "a64/instructions-a64.h"
-
-namespace vixl {
-
-typedef uint64_t RegList;
-static const int kRegListSizeInBits = sizeof(RegList) * 8;
-
-
-// Registers.
-
-// Some CPURegister methods can return Register and FPRegister types, so we
-// need to declare them in advance.
-class Register;
-class FPRegister;
-
-
-class CPURegister {
- public:
-  enum RegisterType {
-    // The kInvalid value is used to detect uninitialized static instances,
-    // which are always zero-initialized before any constructors are called.
-    kInvalid = 0,
-    kRegister,
-    kFPRegister,
-    kNoRegister
-  };
-
-  CPURegister() : code_(0), size_(0), type_(kNoRegister) {
-    VIXL_ASSERT(!IsValid());
-    VIXL_ASSERT(IsNone());
-  }
-
-  CPURegister(unsigned code, unsigned size, RegisterType type)
-      : code_(code), size_(size), type_(type) {
-    VIXL_ASSERT(IsValidOrNone());
-  }
-
-  unsigned code() const {
-    VIXL_ASSERT(IsValid());
-    return code_;
-  }
-
-  RegisterType type() const {
-    VIXL_ASSERT(IsValidOrNone());
-    return type_;
-  }
-
-  RegList Bit() const {
-    VIXL_ASSERT(code_ < (sizeof(RegList) * 8));
-    return IsValid() ? (static_cast<RegList>(1) << code_) : 0;
-  }
-
-  unsigned size() const {
-    VIXL_ASSERT(IsValid());
-    return size_;
-  }
-
-  int SizeInBytes() const {
-    VIXL_ASSERT(IsValid());
-    VIXL_ASSERT(size() % 8 == 0);
-    return size_ / 8;
-  }
-
-  int SizeInBits() const {
-    VIXL_ASSERT(IsValid());
-    return size_;
-  }
-
-  bool Is32Bits() const {
-    VIXL_ASSERT(IsValid());
-    return size_ == 32;
-  }
-
-  bool Is64Bits() const {
-    VIXL_ASSERT(IsValid());
-    return size_ == 64;
-  }
-
-  bool IsValid() const {
-    if (IsValidRegister() || IsValidFPRegister()) {
-      VIXL_ASSERT(!IsNone());
-      return true;
-    } else {
-      VIXL_ASSERT(IsNone());
-      return false;
-    }
-  }
-
-  bool IsValidRegister() const {
-    return IsRegister() &&
-           ((size_ == kWRegSize) || (size_ == kXRegSize)) &&
-           ((code_ < kNumberOfRegisters) || (code_ == kSPRegInternalCode));
-  }
-
-  bool IsValidFPRegister() const {
-    return IsFPRegister() &&
-           ((size_ == kSRegSize) || (size_ == kDRegSize)) &&
-           (code_ < kNumberOfFPRegisters);
-  }
-
-  bool IsNone() const {
-    // kNoRegister types should always have size 0 and code 0.
-    VIXL_ASSERT((type_ != kNoRegister) || (code_ == 0));
-    VIXL_ASSERT((type_ != kNoRegister) || (size_ == 0));
-
-    return type_ == kNoRegister;
-  }
-
-  bool Aliases(const CPURegister& other) const {
-    VIXL_ASSERT(IsValidOrNone() && other.IsValidOrNone());
-    return (code_ == other.code_) && (type_ == other.type_);
-  }
-
-  bool Is(const CPURegister& other) const {
-    VIXL_ASSERT(IsValidOrNone() && other.IsValidOrNone());
-    return Aliases(other) && (size_ == other.size_);
-  }
-
-  bool IsZero() const {
-    VIXL_ASSERT(IsValid());
-    return IsRegister() && (code_ == kZeroRegCode);
-  }
-
-  bool IsSP() const {
-    VIXL_ASSERT(IsValid());
-    return IsRegister() && (code_ == kSPRegInternalCode);
-  }
-
-  bool IsRegister() const {
-    return type_ == kRegister;
-  }
-
-  bool IsFPRegister() const {
-    return type_ == kFPRegister;
-  }
-
-  bool IsW() const { return IsValidRegister() && Is32Bits(); }
-  bool IsX() const { return IsValidRegister() && Is64Bits(); }
-  bool IsS() const { return IsValidFPRegister() && Is32Bits(); }
-  bool IsD() const { return IsValidFPRegister() && Is64Bits(); }
-
-  const Register& W() const;
-  const Register& X() const;
-  const FPRegister& S() const;
-  const FPRegister& D() const;
-
-  bool IsSameSizeAndType(const CPURegister& other) const {
-    return (size_ == other.size_) && (type_ == other.type_);
-  }
-
- protected:
-  unsigned code_;
-  unsigned size_;
-  RegisterType type_;
-
- private:
-  bool IsValidOrNone() const {
-    return IsValid() || IsNone();
-  }
-};
-
-
-class Register : public CPURegister {
- public:
-  Register() : CPURegister() {}
-  explicit Register(const CPURegister& other)
-      : CPURegister(other.code(), other.size(), other.type()) {
-    VIXL_ASSERT(IsValidRegister());
-  }
-  Register(unsigned code, unsigned size)
-      : CPURegister(code, size, kRegister) {}
-
-  bool IsValid() const {
-    VIXL_ASSERT(IsRegister() || IsNone());
-    return IsValidRegister();
-  }
-
-  static const Register& WRegFromCode(unsigned code);
-  static const Register& XRegFromCode(unsigned code);
-
- private:
-  static const Register wregisters[];
-  static const Register xregisters[];
-};
-
-
-class FPRegister : public CPURegister {
- public:
-  FPRegister() : CPURegister() {}
-  explicit FPRegister(const CPURegister& other)
-      : CPURegister(other.code(), other.size(), other.type()) {
-    VIXL_ASSERT(IsValidFPRegister());
-  }
-  FPRegister(unsigned code, unsigned size)
-      : CPURegister(code, size, kFPRegister) {}
-
-  bool IsValid() const {
-    VIXL_ASSERT(IsFPRegister() || IsNone());
-    return IsValidFPRegister();
-  }
-
-  static const FPRegister& SRegFromCode(unsigned code);
-  static const FPRegister& DRegFromCode(unsigned code);
-
- private:
-  static const FPRegister sregisters[];
-  static const FPRegister dregisters[];
-};
-
-
-// No*Reg is used to indicate an unused argument, or an error case. Note that
-// these all compare equal (using the Is() method). The Register and FPRegister
-// variants are provided for convenience.
-const Register NoReg;
-const FPRegister NoFPReg;
-const CPURegister NoCPUReg;
-
-
-#define DEFINE_REGISTERS(N)  \
-const Register w##N(N, kWRegSize);  \
-const Register x##N(N, kXRegSize);
-REGISTER_CODE_LIST(DEFINE_REGISTERS)
-#undef DEFINE_REGISTERS
-const Register wsp(kSPRegInternalCode, kWRegSize);
-const Register sp(kSPRegInternalCode, kXRegSize);
-
-
-#define DEFINE_FPREGISTERS(N)  \
-const FPRegister s##N(N, kSRegSize);  \
-const FPRegister d##N(N, kDRegSize);
-REGISTER_CODE_LIST(DEFINE_FPREGISTERS)
-#undef DEFINE_FPREGISTERS
-
-
-// Register aliases.
-const Register ip0 = x16;
-const Register ip1 = x17;
-const Register lr = x30;
-const Register xzr = x31;
-const Register wzr = w31;
-
-
-// AreAliased returns true if any of the named registers overlap. Arguments
-// set to NoReg are ignored. The system stack pointer may be specified.
-bool AreAliased(const CPURegister& reg1,
-                const CPURegister& reg2,
-                const CPURegister& reg3 = NoReg,
-                const CPURegister& reg4 = NoReg,
-                const CPURegister& reg5 = NoReg,
-                const CPURegister& reg6 = NoReg,
-                const CPURegister& reg7 = NoReg,
-                const CPURegister& reg8 = NoReg);
-
-
-// AreSameSizeAndType returns true if all of the specified registers have the
-// same size, and are of the same type. The system stack pointer may be
-// specified. Arguments set to NoReg are ignored, as are any subsequent
-// arguments. At least one argument (reg1) must be valid (not NoCPUReg).
-bool AreSameSizeAndType(const CPURegister& reg1,
-                        const CPURegister& reg2,
-                        const CPURegister& reg3 = NoCPUReg,
-                        const CPURegister& reg4 = NoCPUReg,
-                        const CPURegister& reg5 = NoCPUReg,
-                        const CPURegister& reg6 = NoCPUReg,
-                        const CPURegister& reg7 = NoCPUReg,
-                        const CPURegister& reg8 = NoCPUReg);
-
-
-// Lists of registers.
-class CPURegList {
- public:
-  explicit CPURegList(CPURegister reg1,
-                      CPURegister reg2 = NoCPUReg,
-                      CPURegister reg3 = NoCPUReg,
-                      CPURegister reg4 = NoCPUReg)
-      : list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()),
-        size_(reg1.size()), type_(reg1.type()) {
-    VIXL_ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
-    VIXL_ASSERT(IsValid());
-  }
-
-  CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
-      : list_(list), size_(size), type_(type) {
-    VIXL_ASSERT(IsValid());
-  }
-
-  CPURegList(CPURegister::RegisterType type, unsigned size,
-             unsigned first_reg, unsigned last_reg)
-      : size_(size), type_(type) {
-    VIXL_ASSERT(((type == CPURegister::kRegister) &&
-                 (last_reg < kNumberOfRegisters)) ||
-                ((type == CPURegister::kFPRegister) &&
-                 (last_reg < kNumberOfFPRegisters)));
-    VIXL_ASSERT(last_reg >= first_reg);
-    list_ = (UINT64_C(1) << (last_reg + 1)) - 1;
-    list_ &= ~((UINT64_C(1) << first_reg) - 1);
-    VIXL_ASSERT(IsValid());
-  }
-
-  CPURegister::RegisterType type() const {
-    VIXL_ASSERT(IsValid());
-    return type_;
-  }
-
-  // Combine another CPURegList into this one. Registers that already exist in
-  // this list are left unchanged. The type and size of the registers in the
-  // 'other' list must match those in this list.
-  void Combine(const CPURegList& other) {
-    VIXL_ASSERT(IsValid());
-    VIXL_ASSERT(other.type() == type_);
-    VIXL_ASSERT(other.RegisterSizeInBits() == size_);
-    list_ |= other.list();
-  }
-
-  // Remove every register in the other CPURegList from this one. Registers that
-  // do not exist in this list are ignored. The type and size of the registers
-  // in the 'other' list must match those in this list.
-  void Remove(const CPURegList& other) {
-    VIXL_ASSERT(IsValid());
-    VIXL_ASSERT(other.type() == type_);
-    VIXL_ASSERT(other.RegisterSizeInBits() == size_);
-    list_ &= ~other.list();
-  }
-
-  // Variants of Combine and Remove which take a single register.
-  void Combine(const CPURegister& other) {
-    VIXL_ASSERT(other.type() == type_);
-    VIXL_ASSERT(other.size() == size_);
-    Combine(other.code());
-  }
-
-  void Remove(const CPURegister& other) {
-    VIXL_ASSERT(other.type() == type_);
-    VIXL_ASSERT(other.size() == size_);
-    Remove(other.code());
-  }
-
-  // Variants of Combine and Remove which take a single register by its code;
-  // the type and size of the register is inferred from this list.
-  void Combine(int code) {
-    VIXL_ASSERT(IsValid());
-    VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
-    list_ |= (UINT64_C(1) << code);
-  }
-
-  void Remove(int code) {
-    VIXL_ASSERT(IsValid());
-    VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
-    list_ &= ~(UINT64_C(1) << code);
-  }
-
-  static CPURegList Union(const CPURegList& list_1, const CPURegList& list_2) {
-    VIXL_ASSERT(list_1.type_ == list_2.type_);
-    VIXL_ASSERT(list_1.size_ == list_2.size_);
-    return CPURegList(list_1.type_, list_1.size_, list_1.list_ | list_2.list_);
-  }
-  static CPURegList Union(const CPURegList& list_1,
-                          const CPURegList& list_2,
-                          const CPURegList& list_3);
-  static CPURegList Union(const CPURegList& list_1,
-                          const CPURegList& list_2,
-                          const CPURegList& list_3,
-                          const CPURegList& list_4);
-
-  static CPURegList Intersection(const CPURegList& list_1,
-                                 const CPURegList& list_2) {
-    VIXL_ASSERT(list_1.type_ == list_2.type_);
-    VIXL_ASSERT(list_1.size_ == list_2.size_);
-    return CPURegList(list_1.type_, list_1.size_, list_1.list_ & list_2.list_);
-  }
-  static CPURegList Intersection(const CPURegList& list_1,
-                                 const CPURegList& list_2,
-                                 const CPURegList& list_3);
-  static CPURegList Intersection(const CPURegList& list_1,
-                                 const CPURegList& list_2,
-                                 const CPURegList& list_3,
-                                 const CPURegList& list_4);
-
-  RegList list() const {
-    VIXL_ASSERT(IsValid());
-    return list_;
-  }
-
-  void set_list(RegList new_list) {
-    VIXL_ASSERT(IsValid());
-    list_ = new_list;
-  }
-
-  // Remove all callee-saved registers from the list. This can be useful when
-  // preparing registers for an AAPCS64 function call, for example.
-  void RemoveCalleeSaved();
-
-  CPURegister PopLowestIndex();
-  CPURegister PopHighestIndex();
-
-  // AAPCS64 callee-saved registers.
-  static CPURegList GetCalleeSaved(unsigned size = kXRegSize);
-  static CPURegList GetCalleeSavedFP(unsigned size = kDRegSize);
-
-  // AAPCS64 caller-saved registers. Note that this includes lr.
-  static CPURegList GetCallerSaved(unsigned size = kXRegSize);
-  static CPURegList GetCallerSavedFP(unsigned size = kDRegSize);
-
-  bool IsEmpty() const {
-    VIXL_ASSERT(IsValid());
-    return list_ == 0;
-  }
-
-  bool IncludesAliasOf(const CPURegister& other) const {
-    VIXL_ASSERT(IsValid());
-    return (type_ == other.type()) && ((other.Bit() & list_) != 0);
-  }
-
-  bool IncludesAliasOf(int code) const {
-    VIXL_ASSERT(IsValid());
-    return ((code & list_) != 0);
-  }
-
-  int Count() const {
-    VIXL_ASSERT(IsValid());
-    return CountSetBits(list_, kRegListSizeInBits);
-  }
-
-  unsigned RegisterSizeInBits() const {
-    VIXL_ASSERT(IsValid());
-    return size_;
-  }
-
-  unsigned RegisterSizeInBytes() const {
-    int size_in_bits = RegisterSizeInBits();
-    VIXL_ASSERT((size_in_bits % 8) == 0);
-    return size_in_bits / 8;
-  }
-
-  unsigned TotalSizeInBytes() const {
-    VIXL_ASSERT(IsValid());
-    return RegisterSizeInBytes() * Count();
-  }
-
- private:
-  RegList list_;
-  unsigned size_;
-  CPURegister::RegisterType type_;
-
-  bool IsValid() const;
-};
-
-
-// AAPCS64 callee-saved registers.
-extern const CPURegList kCalleeSaved;
-extern const CPURegList kCalleeSavedFP;
-
-
-// AAPCS64 caller-saved registers. Note that this includes lr.
-extern const CPURegList kCallerSaved;
-extern const CPURegList kCallerSavedFP;
-
-
-// Operand.
-class Operand {
- public:
-  // #<immediate>
-  // where <immediate> is int64_t.
-  // This is allowed to be an implicit constructor because Operand is
-  // a wrapper class that doesn't normally perform any type conversion.
-  Operand(int64_t immediate);           // NOLINT(runtime/explicit)
-
-  // rm, {<shift> #<shift_amount>}
-  // where <shift> is one of {LSL, LSR, ASR, ROR}.
-  //       <shift_amount> is uint6_t.
-  // This is allowed to be an implicit constructor because Operand is
-  // a wrapper class that doesn't normally perform any type conversion.
-  Operand(Register reg,
-          Shift shift = LSL,
-          unsigned shift_amount = 0);   // NOLINT(runtime/explicit)
-
-  // rm, {<extend> {#<shift_amount>}}
-  // where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
-  //       <shift_amount> is uint2_t.
-  explicit Operand(Register reg, Extend extend, unsigned shift_amount = 0);
-
-  bool IsImmediate() const;
-  bool IsShiftedRegister() const;
-  bool IsExtendedRegister() const;
-  bool IsZero() const;
-
-  // This returns an LSL shift (<= 4) operand as an equivalent extend operand,
-  // which helps in the encoding of instructions that use the stack pointer.
-  Operand ToExtendedRegister() const;
-
-  int64_t immediate() const {
-    VIXL_ASSERT(IsImmediate());
-    return immediate_;
-  }
-
-  Register reg() const {
-    VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister());
-    return reg_;
-  }
-
-  Shift shift() const {
-    VIXL_ASSERT(IsShiftedRegister());
-    return shift_;
-  }
-
-  Extend extend() const {
-    VIXL_ASSERT(IsExtendedRegister());
-    return extend_;
-  }
-
-  unsigned shift_amount() const {
-    VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister());
-    return shift_amount_;
-  }
-
- private:
-  int64_t immediate_;
-  Register reg_;
-  Shift shift_;
-  Extend extend_;
-  unsigned shift_amount_;
-};
-
-
-// MemOperand represents the addressing mode of a load or store instruction.
-class MemOperand {
- public:
-  explicit MemOperand(Register base,
-                      int64_t offset = 0,
-                      AddrMode addrmode = Offset);
-  explicit MemOperand(Register base,
-                      Register regoffset,
-                      Shift shift = LSL,
-                      unsigned shift_amount = 0);
-  explicit MemOperand(Register base,
-                      Register regoffset,
-                      Extend extend,
-                      unsigned shift_amount = 0);
-  explicit MemOperand(Register base,
-                      const Operand& offset,
-                      AddrMode addrmode = Offset);
-
-  const Register& base() const { return base_; }
-  const Register& regoffset() const { return regoffset_; }
-  int64_t offset() const { return offset_; }
-  AddrMode addrmode() const { return addrmode_; }
-  Shift shift() const { return shift_; }
-  Extend extend() const { return extend_; }
-  unsigned shift_amount() const { return shift_amount_; }
-  bool IsImmediateOffset() const;
-  bool IsRegisterOffset() const;
-  bool IsPreIndex() const;
-  bool IsPostIndex() const;
-
- private:
-  Register base_;
-  Register regoffset_;
-  int64_t offset_;
-  AddrMode addrmode_;
-  Shift shift_;
-  Extend extend_;
-  unsigned shift_amount_;
-};
-
-
-class Label {
- public:
-  Label() : location_(kLocationUnbound) {}
-  ~Label() {
-    // If the label has been linked to, it needs to be bound to a target.
-    VIXL_ASSERT(!IsLinked() || IsBound());
-  }
-
-  bool IsBound() const { return location_ >= 0; }
-  bool IsLinked() const { return !links_.empty(); }
-
-  ptrdiff_t location() const { return location_; }
-
- private:
-  // The list of linked instructions is stored in a stack-like structure. We
-  // don't use std::stack directly because it's slow for the common case where
-  // only one or two instructions refer to a label, and labels themselves are
-  // short-lived. This class behaves like std::stack, but the first few links
-  // are preallocated (configured by kPreallocatedLinks).
-  //
-  // If more than N links are required, this falls back to std::stack.
-  class LinksStack {
-   public:
-    LinksStack() : size_(0), links_extended_(NULL) {}
-    ~LinksStack() {
-      delete links_extended_;
-    }
-
-    size_t size() const {
-      return size_;
-    }
-
-    bool empty() const {
-      return size_ == 0;
-    }
-
-    void push(ptrdiff_t value) {
-      if (size_ < kPreallocatedLinks) {
-        links_[size_] = value;
-      } else {
-        if (links_extended_ == NULL) {
-          links_extended_ = new std::stack<ptrdiff_t>();
-        }
-        VIXL_ASSERT(size_ == (links_extended_->size() + kPreallocatedLinks));
-        links_extended_->push(value);
-      }
-      size_++;
-    }
-
-    ptrdiff_t top() const {
-      return (size_ <= kPreallocatedLinks) ? links_[size_ - 1]
-                                           : links_extended_->top();
-    }
-
-    void pop() {
-      size_--;
-      if (size_ >= kPreallocatedLinks) {
-        links_extended_->pop();
-        VIXL_ASSERT(size_ == (links_extended_->size() + kPreallocatedLinks));
-      }
-    }
-
-   private:
-    static const size_t kPreallocatedLinks = 4;
-
-    size_t size_;
-    ptrdiff_t links_[kPreallocatedLinks];
-    std::stack<ptrdiff_t> * links_extended_;
-  };
-
-  void Bind(ptrdiff_t location) {
-    // Labels can only be bound once.
-    VIXL_ASSERT(!IsBound());
-    location_ = location;
-  }
-
-  void AddLink(ptrdiff_t instruction) {
-    // If a label is bound, the assembler already has the information it needs
-    // to write the instruction, so there is no need to add it to links_.
-    VIXL_ASSERT(!IsBound());
-    links_.push(instruction);
-  }
-
-  ptrdiff_t GetAndRemoveNextLink() {
-    VIXL_ASSERT(IsLinked());
-    ptrdiff_t link = links_.top();
-    links_.pop();
-    return link;
-  }
-
-  // The offsets of the instructions that have linked to this label.
-  LinksStack links_;
-  // The label location.
-  ptrdiff_t location_;
-
-  static const ptrdiff_t kLocationUnbound = -1;
-
-  // It is not safe to copy labels, so disable the copy constructor by declaring
-  // it private (without an implementation).
-  Label(const Label&);
-
-  // The Assembler class is responsible for binding and linking labels, since
-  // the stored offsets need to be consistent with the Assembler's buffer.
-  friend class Assembler;
-};
-
-
-// A literal is a 32-bit or 64-bit piece of data stored in the instruction
-// stream and loaded through a pc relative load. The same literal can be
-// referred to by multiple instructions but a literal can only reside at one
-// place in memory. A literal can be used by a load before or after being
-// placed in memory.
-//
-// Internally an offset of 0 is associated with a literal which has been
-// neither used nor placed. Then two possibilities arise:
-//  1) the literal is placed, and the offset (stored as offset + 1) is used to
-//     resolve any subsequent load using the literal.
-//  2) the literal is not placed, and the offset is the offset of the last load
-//     using the literal (stored as -offset - 1). If multiple loads refer to
-//     this literal then the last load holds the offset of the preceding load
-//     and all loads form a chain. Once the literal is placed all the loads in
-//     the chain are resolved and future loads fall back to possibility 1.
-class RawLiteral {
- public:
-  RawLiteral() : size_(0), offset_(0), raw_value_(0) {}
-
-  size_t size() {
-    VIXL_STATIC_ASSERT(kDRegSizeInBytes == kXRegSizeInBytes);
-    VIXL_STATIC_ASSERT(kSRegSizeInBytes == kWRegSizeInBytes);
-    VIXL_ASSERT((size_ == kXRegSizeInBytes) || (size_ == kWRegSizeInBytes));
-    return size_;
-  }
-  uint64_t raw_value64() {
-    VIXL_ASSERT(size_ == kXRegSizeInBytes);
-    return raw_value_;
-  }
-  uint32_t raw_value32() {
-    VIXL_ASSERT(size_ == kWRegSizeInBytes);
-    VIXL_ASSERT(is_uint32(raw_value_) || is_int32(raw_value_));
-    return static_cast<uint32_t>(raw_value_);
-  }
-  bool IsUsed() { return offset_ < 0; }
-  bool IsPlaced() { return offset_ > 0; }
-
- protected:
-  ptrdiff_t offset() {
-    VIXL_ASSERT(IsPlaced());
-    return offset_ - 1;
-  }
-  void set_offset(ptrdiff_t offset) {
-    VIXL_ASSERT(offset >= 0);
-    VIXL_ASSERT(IsWordAligned(offset));
-    VIXL_ASSERT(!IsPlaced());
-    offset_ = offset + 1;
-  }
-  ptrdiff_t last_use() {
-    VIXL_ASSERT(IsUsed());
-    return -offset_ - 1;
-  }
-  void set_last_use(ptrdiff_t offset) {
-    VIXL_ASSERT(offset >= 0);
-    VIXL_ASSERT(IsWordAligned(offset));
-    VIXL_ASSERT(!IsPlaced());
-    offset_ = -offset - 1;
-  }
-
-  size_t size_;
-  ptrdiff_t offset_;
-  uint64_t raw_value_;
-
-  friend class Assembler;
-};
-
-
-template <typename T>
-class Literal : public RawLiteral {
- public:
-  explicit Literal(T value) {
-    size_ = sizeof(value);
-    memcpy(&raw_value_, &value, sizeof(value));
-  }
-};
-
-
-// Control whether or not position-independent code should be emitted.
-enum PositionIndependentCodeOption {
-  // All code generated will be position-independent; all branches and
-  // references to labels generated with the Label class will use PC-relative
-  // addressing.
-  PositionIndependentCode,
-
-  // Allow VIXL to generate code that refers to absolute addresses. With this
-  // option, it will not be possible to copy the code buffer and run it from a
-  // different address; code must be generated in its final location.
-  PositionDependentCode,
-
-  // Allow VIXL to assume that the bottom 12 bits of the address will be
-  // constant, but that the top 48 bits may change. This allows `adrp` to
-  // function in systems which copy code between pages, but otherwise maintain
-  // 4KB page alignment.
-  PageOffsetDependentCode
-};
-
-
-// Control how scaled- and unscaled-offset loads and stores are generated.
-enum LoadStoreScalingOption {
-  // Prefer scaled-immediate-offset instructions, but emit unscaled-offset,
-  // register-offset, pre-index or post-index instructions if necessary.
-  PreferScaledOffset,
-
-  // Prefer unscaled-immediate-offset instructions, but emit scaled-offset,
-  // register-offset, pre-index or post-index instructions if necessary.
-  PreferUnscaledOffset,
-
-  // Require scaled-immediate-offset instructions.
-  RequireScaledOffset,
-
-  // Require unscaled-immediate-offset instructions.
-  RequireUnscaledOffset
-};
-
-
-// Assembler.
-class Assembler {
- public:
-  Assembler(size_t capacity,
-            PositionIndependentCodeOption pic = PositionIndependentCode);
-  Assembler(byte* buffer, size_t capacity,
-            PositionIndependentCodeOption pic = PositionIndependentCode);
-
-  // The destructor asserts that one of the following is true:
-  //  * The Assembler object has not been used.
-  //  * Nothing has been emitted since the last Reset() call.
-  //  * Nothing has been emitted since the last FinalizeCode() call.
-  ~Assembler();
-
-  // System functions.
-
-  // Start generating code from the beginning of the buffer, discarding any code
-  // and data that has already been emitted into the buffer.
-  void Reset();
-
-  // Finalize a code buffer of generated instructions. This function must be
-  // called before executing or copying code from the buffer.
-  void FinalizeCode();
-
-  // Label.
-  // Bind a label to the current PC.
-  void bind(Label* label);
-
-  // Bind a label to a specified offset from the start of the buffer.
-  void BindToOffset(Label* label, ptrdiff_t offset);
-
-  // Place a literal at the current PC.
-  void place(RawLiteral* literal);
-
-  ptrdiff_t CursorOffset() const {
-    return buffer_->CursorOffset();
-  }
-
-  ptrdiff_t BufferEndOffset() const {
-    return static_cast<ptrdiff_t>(buffer_->capacity());
-  }
-
-  // Return the address of an offset in the buffer.
-  template <typename T>
-  T GetOffsetAddress(ptrdiff_t offset) {
-    VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
-    return buffer_->GetOffsetAddress<T>(offset);
-  }
-
-  // Return the address of a bound label.
-  template <typename T>
-  T GetLabelAddress(const Label * label) {
-    VIXL_ASSERT(label->IsBound());
-    VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
-    return GetOffsetAddress<T>(label->location());
-  }
-
-  // Return the address of the cursor.
-  template <typename T>
-  T GetCursorAddress() {
-    VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
-    return GetOffsetAddress<T>(CursorOffset());
-  }
-
-  // Return the address of the start of the buffer.
-  template <typename T>
-  T GetStartAddress() {
-    VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
-    return GetOffsetAddress<T>(0);
-  }
-
-  // Instruction set functions.
-
-  // Branch / Jump instructions.
-  // Branch to register.
-  void br(const Register& xn);
-
-  // Branch with link to register.
-  void blr(const Register& xn);
-
-  // Branch to register with return hint.
-  void ret(const Register& xn = lr);
-
-  // Unconditional branch to label.
-  void b(Label* label);
-
-  // Conditional branch to label.
-  void b(Label* label, Condition cond);
-
-  // Unconditional branch to PC offset.
-  void b(int imm26);
-
-  // Conditional branch to PC offset.
-  void b(int imm19, Condition cond);
-
-  // Branch with link to label.
-  void bl(Label* label);
-
-  // Branch with link to PC offset.
-  void bl(int imm26);
-
-  // Compare and branch to label if zero.
-  void cbz(const Register& rt, Label* label);
-
-  // Compare and branch to PC offset if zero.
-  void cbz(const Register& rt, int imm19);
-
-  // Compare and branch to label if not zero.
-  void cbnz(const Register& rt, Label* label);
-
-  // Compare and branch to PC offset if not zero.
-  void cbnz(const Register& rt, int imm19);
-
-  // Test bit and branch to label if zero.
-  void tbz(const Register& rt, unsigned bit_pos, Label* label);
-
-  // Test bit and branch to PC offset if zero.
-  void tbz(const Register& rt, unsigned bit_pos, int imm14);
-
-  // Test bit and branch to label if not zero.
-  void tbnz(const Register& rt, unsigned bit_pos, Label* label);
-
-  // Test bit and branch to PC offset if not zero.
-  void tbnz(const Register& rt, unsigned bit_pos, int imm14);
-
-  // Address calculation instructions.
-  // Calculate a PC-relative address. Unlike for branches the offset in adr is
-  // unscaled (i.e. the result can be unaligned).
-
-  // Calculate the address of a label.
-  void adr(const Register& rd, Label* label);
-
-  // Calculate the address of a PC offset.
-  void adr(const Register& rd, int imm21);
-
-  // Calculate the page address of a label.
-  void adrp(const Register& rd, Label* label);
-
-  // Calculate the page address of a PC offset.
-  void adrp(const Register& rd, int imm21);
-
-  // Data Processing instructions.
-  // Add.
-  void add(const Register& rd,
-           const Register& rn,
-           const Operand& operand);
-
-  // Add and update status flags.
-  void adds(const Register& rd,
-            const Register& rn,
-            const Operand& operand);
-
-  // Compare negative.
-  void cmn(const Register& rn, const Operand& operand);
-
-  // Subtract.
-  void sub(const Register& rd,
-           const Register& rn,
-           const Operand& operand);
-
-  // Subtract and update status flags.
-  void subs(const Register& rd,
-            const Register& rn,
-            const Operand& operand);
-
-  // Compare.
-  void cmp(const Register& rn, const Operand& operand);
-
-  // Negate.
-  void neg(const Register& rd,
-           const Operand& operand);
-
-  // Negate and update status flags.
-  void negs(const Register& rd,
-            const Operand& operand);
-
-  // Add with carry bit.
-  void adc(const Register& rd,
-           const Register& rn,
-           const Operand& operand);
-
-  // Add with carry bit and update status flags.
-  void adcs(const Register& rd,
-            const Register& rn,
-            const Operand& operand);
-
-  // Subtract with carry bit.
-  void sbc(const Register& rd,
-           const Register& rn,
-           const Operand& operand);
-
-  // Subtract with carry bit and update status flags.
-  void sbcs(const Register& rd,
-            const Register& rn,
-            const Operand& operand);
-
-  // Negate with carry bit.
-  void ngc(const Register& rd,
-           const Operand& operand);
-
-  // Negate with carry bit and update status flags.
-  void ngcs(const Register& rd,
-            const Operand& operand);
-
-  // Logical instructions.
-  // Bitwise and (A & B).
-  void and_(const Register& rd,
-            const Register& rn,
-            const Operand& operand);
-
-  // Bitwise and (A & B) and update status flags.
-  void ands(const Register& rd,
-            const Register& rn,
-            const Operand& operand);
-
-  // Bit test and set flags.
-  void tst(const Register& rn, const Operand& operand);
-
-  // Bit clear (A & ~B).
-  void bic(const Register& rd,
-           const Register& rn,
-           const Operand& operand);
-
-  // Bit clear (A & ~B) and update status flags.
-  void bics(const Register& rd,
-            const Register& rn,
-            const Operand& operand);
-
-  // Bitwise or (A | B).
-  void orr(const Register& rd, const Register& rn, const Operand& operand);
-
-  // Bitwise nor (A | ~B).
-  void orn(const Register& rd, const Register& rn, const Operand& operand);
-
-  // Bitwise eor/xor (A ^ B).
-  void eor(const Register& rd, const Register& rn, const Operand& operand);
-
-  // Bitwise enor/xnor (A ^ ~B).
-  void eon(const Register& rd, const Register& rn, const Operand& operand);
-
-  // Logical shift left by variable.
-  void lslv(const Register& rd, const Register& rn, const Register& rm);
-
-  // Logical shift right by variable.
-  void lsrv(const Register& rd, const Register& rn, const Register& rm);
-
-  // Arithmetic shift right by variable.
-  void asrv(const Register& rd, const Register& rn, const Register& rm);
-
-  // Rotate right by variable.
-  void rorv(const Register& rd, const Register& rn, const Register& rm);
-
-  // Bitfield instructions.
-  // Bitfield move.
-  void bfm(const Register& rd,
-           const Register& rn,
-           unsigned immr,
-           unsigned imms);
-
-  // Signed bitfield move.
-  void sbfm(const Register& rd,
-            const Register& rn,
-            unsigned immr,
-            unsigned imms);
-
-  // Unsigned bitfield move.
-  void ubfm(const Register& rd,
-            const Register& rn,
-            unsigned immr,
-            unsigned imms);
-
-  // Bfm aliases.
-  // Bitfield insert.
-  void bfi(const Register& rd,
-           const Register& rn,
-           unsigned lsb,
-           unsigned width) {
-    VIXL_ASSERT(width >= 1);
-    VIXL_ASSERT(lsb + width <= rn.size());
-    bfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
-  }
-
-  // Bitfield extract and insert low.
-  void bfxil(const Register& rd,
-             const Register& rn,
-             unsigned lsb,
-             unsigned width) {
-    VIXL_ASSERT(width >= 1);
-    VIXL_ASSERT(lsb + width <= rn.size());
-    bfm(rd, rn, lsb, lsb + width - 1);
-  }
-
-  // Sbfm aliases.
-  // Arithmetic shift right.
-  void asr(const Register& rd, const Register& rn, unsigned shift) {
-    VIXL_ASSERT(shift < rd.size());
-    sbfm(rd, rn, shift, rd.size() - 1);
-  }
-
-  // Signed bitfield insert with zero at right.
-  void sbfiz(const Register& rd,
-             const Register& rn,
-             unsigned lsb,
-             unsigned width) {
-    VIXL_ASSERT(width >= 1);
-    VIXL_ASSERT(lsb + width <= rn.size());
-    sbfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
-  }
-
-  // Signed bitfield extract.
-  void sbfx(const Register& rd,
-            const Register& rn,
-            unsigned lsb,
-            unsigned width) {
-    VIXL_ASSERT(width >= 1);
-    VIXL_ASSERT(lsb + width <= rn.size());
-    sbfm(rd, rn, lsb, lsb + width - 1);
-  }
-
-  // Signed extend byte.
-  void sxtb(const Register& rd, const Register& rn) {
-    sbfm(rd, rn, 0, 7);
-  }
-
-  // Signed extend halfword.
-  void sxth(const Register& rd, const Register& rn) {
-    sbfm(rd, rn, 0, 15);
-  }
-
-  // Signed extend word.
-  void sxtw(const Register& rd, const Register& rn) {
-    sbfm(rd, rn, 0, 31);
-  }
-
-  // Ubfm aliases.
-  // Logical shift left.
-  void lsl(const Register& rd, const Register& rn, unsigned shift) {
-    unsigned reg_size = rd.size();
-    VIXL_ASSERT(shift < reg_size);
-    ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
-  }
-
-  // Logical shift right.
-  void lsr(const Register& rd, const Register& rn, unsigned shift) {
-    VIXL_ASSERT(shift < rd.size());
-    ubfm(rd, rn, shift, rd.size() - 1);
-  }
-
-  // Unsigned bitfield insert with zero at right.
-  void ubfiz(const Register& rd,
-             const Register& rn,
-             unsigned lsb,
-             unsigned width) {
-    VIXL_ASSERT(width >= 1);
-    VIXL_ASSERT(lsb + width <= rn.size());
-    ubfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
-  }
-
-  // Unsigned bitfield extract.
-  void ubfx(const Register& rd,
-            const Register& rn,
-            unsigned lsb,
-            unsigned width) {
-    VIXL_ASSERT(width >= 1);
-    VIXL_ASSERT(lsb + width <= rn.size());
-    ubfm(rd, rn, lsb, lsb + width - 1);
-  }
-
-  // Unsigned extend byte.
-  void uxtb(const Register& rd, const Register& rn) {
-    ubfm(rd, rn, 0, 7);
-  }
-
-  // Unsigned extend halfword.
-  void uxth(const Register& rd, const Register& rn) {
-    ubfm(rd, rn, 0, 15);
-  }
-
-  // Unsigned extend word.
-  void uxtw(const Register& rd, const Register& rn) {
-    ubfm(rd, rn, 0, 31);
-  }
-
-  // Extract.
-  void extr(const Register& rd,
-            const Register& rn,
-            const Register& rm,
-            unsigned lsb);
-
-  // Conditional select: rd = cond ? rn : rm.
-  void csel(const Register& rd,
-            const Register& rn,
-            const Register& rm,
-            Condition cond);
-
-  // Conditional select increment: rd = cond ? rn : rm + 1.
-  void csinc(const Register& rd,
-             const Register& rn,
-             const Register& rm,
-             Condition cond);
-
-  // Conditional select inversion: rd = cond ? rn : ~rm.
-  void csinv(const Register& rd,
-             const Register& rn,
-             const Register& rm,
-             Condition cond);
-
-  // Conditional select negation: rd = cond ? rn : -rm.
-  void csneg(const Register& rd,
-             const Register& rn,
-             const Register& rm,
-             Condition cond);
-
-  // Conditional set: rd = cond ? 1 : 0.
-  void cset(const Register& rd, Condition cond);
-
-  // Conditional set mask: rd = cond ? -1 : 0.
-  void csetm(const Register& rd, Condition cond);
-
-  // Conditional increment: rd = cond ? rn + 1 : rn.
-  void cinc(const Register& rd, const Register& rn, Condition cond);
-
-  // Conditional invert: rd = cond ? ~rn : rn.
-  void cinv(const Register& rd, const Register& rn, Condition cond);
-
-  // Conditional negate: rd = cond ? -rn : rn.
-  void cneg(const Register& rd, const Register& rn, Condition cond);
-
-  // Rotate right.
-  void ror(const Register& rd, const Register& rs, unsigned shift) {
-    extr(rd, rs, rs, shift);
-  }
-
-  // Conditional comparison.
-  // Conditional compare negative.
-  void ccmn(const Register& rn,
-            const Operand& operand,
-            StatusFlags nzcv,
-            Condition cond);
-
-  // Conditional compare.
-  void ccmp(const Register& rn,
-            const Operand& operand,
-            StatusFlags nzcv,
-            Condition cond);
-
-  // Multiply.
-  void mul(const Register& rd, const Register& rn, const Register& rm);
-
-  // Negated multiply.
-  void mneg(const Register& rd, const Register& rn, const Register& rm);
-
-  // Signed long multiply: 32 x 32 -> 64-bit.
-  void smull(const Register& rd, const Register& rn, const Register& rm);
-
-  // Signed multiply high: 64 x 64 -> 64-bit <127:64>.
-  void smulh(const Register& xd, const Register& xn, const Register& xm);
-
-  // Multiply and accumulate.
-  void madd(const Register& rd,
-            const Register& rn,
-            const Register& rm,
-            const Register& ra);
-
-  // Multiply and subtract.
-  void msub(const Register& rd,
-            const Register& rn,
-            const Register& rm,
-            const Register& ra);
-
-  // Signed long multiply and accumulate: 32 x 32 + 64 -> 64-bit.
-  void smaddl(const Register& rd,
-              const Register& rn,
-              const Register& rm,
-              const Register& ra);
-
-  // Unsigned long multiply and accumulate: 32 x 32 + 64 -> 64-bit.
-  void umaddl(const Register& rd,
-              const Register& rn,
-              const Register& rm,
-              const Register& ra);
-
-  // Signed long multiply and subtract: 64 - (32 x 32) -> 64-bit.
-  void smsubl(const Register& rd,
-              const Register& rn,
-              const Register& rm,
-              const Register& ra);
-
-  // Unsigned long multiply and subtract: 64 - (32 x 32) -> 64-bit.
-  void umsubl(const Register& rd,
-              const Register& rn,
-              const Register& rm,
-              const Register& ra);
-
-  // Signed integer divide.
-  void sdiv(const Register& rd, const Register& rn, const Register& rm);
-
-  // Unsigned integer divide.
-  void udiv(const Register& rd, const Register& rn, const Register& rm);
-
-  // Bit reverse.
-  void rbit(const Register& rd, const Register& rn);
-
-  // Reverse bytes in 16-bit half words.
-  void rev16(const Register& rd, const Register& rn);
-
-  // Reverse bytes in 32-bit words.
-  void rev32(const Register& rd, const Register& rn);
-
-  // Reverse bytes.
-  void rev(const Register& rd, const Register& rn);
-
-  // Count leading zeroes.
-  void clz(const Register& rd, const Register& rn);
-
-  // Count leading sign bits.
-  void cls(const Register& rd, const Register& rn);
-
-  // Memory instructions.
-  // Load integer or FP register.
-  void ldr(const CPURegister& rt, const MemOperand& src,
-           LoadStoreScalingOption option = PreferScaledOffset);
-
-  // Store integer or FP register.
-  void str(const CPURegister& rt, const MemOperand& dst,
-           LoadStoreScalingOption option = PreferScaledOffset);
-
-  // Load word with sign extension.
-  void ldrsw(const Register& rt, const MemOperand& src,
-             LoadStoreScalingOption option = PreferScaledOffset);
-
-  // Load byte.
-  void ldrb(const Register& rt, const MemOperand& src,
-            LoadStoreScalingOption option = PreferScaledOffset);
-
-  // Store byte.
-  void strb(const Register& rt, const MemOperand& dst,
-            LoadStoreScalingOption option = PreferScaledOffset);
-
-  // Load byte with sign extension.
-  void ldrsb(const Register& rt, const MemOperand& src,
-             LoadStoreScalingOption option = PreferScaledOffset);
-
-  // Load half-word.
-  void ldrh(const Register& rt, const MemOperand& src,
-            LoadStoreScalingOption option = PreferScaledOffset);
-
-  // Store half-word.
-  void strh(const Register& rt, const MemOperand& dst,
-            LoadStoreScalingOption option = PreferScaledOffset);
-
-  // Load half-word with sign extension.
-  void ldrsh(const Register& rt, const MemOperand& src,
-             LoadStoreScalingOption option = PreferScaledOffset);
-
-  // Load integer or FP register (with unscaled offset).
-  void ldur(const CPURegister& rt, const MemOperand& src,
-            LoadStoreScalingOption option = PreferUnscaledOffset);
-
-  // Store integer or FP register (with unscaled offset).
-  void stur(const CPURegister& rt, const MemOperand& src,
-            LoadStoreScalingOption option = PreferUnscaledOffset);
-
-  // Load word with sign extension.
-  void ldursw(const Register& rt, const MemOperand& src,
-              LoadStoreScalingOption option = PreferUnscaledOffset);
-
-  // Load byte (with unscaled offset).
-  void ldurb(const Register& rt, const MemOperand& src,
-             LoadStoreScalingOption option = PreferUnscaledOffset);
-
-  // Store byte (with unscaled offset).
-  void sturb(const Register& rt, const MemOperand& dst,
-             LoadStoreScalingOption option = PreferUnscaledOffset);
-
-  // Load byte with sign extension (and unscaled offset).
-  void ldursb(const Register& rt, const MemOperand& src,
-              LoadStoreScalingOption option = PreferUnscaledOffset);
-
-  // Load half-word (with unscaled offset).
-  void ldurh(const Register& rt, const MemOperand& src,
-             LoadStoreScalingOption option = PreferUnscaledOffset);
-
-  // Store half-word (with unscaled offset).
-  void sturh(const Register& rt, const MemOperand& dst,
-             LoadStoreScalingOption option = PreferUnscaledOffset);
-
-  // Load half-word with sign extension (and unscaled offset).
-  void ldursh(const Register& rt, const MemOperand& src,
-              LoadStoreScalingOption option = PreferUnscaledOffset);
-
-  // Load integer or FP register pair.
-  void ldp(const CPURegister& rt, const CPURegister& rt2,
-           const MemOperand& src);
-
-  // Store integer or FP register pair.
-  void stp(const CPURegister& rt, const CPURegister& rt2,
-           const MemOperand& dst);
-
-  // Load word pair with sign extension.
-  void ldpsw(const Register& rt, const Register& rt2, const MemOperand& src);
-
-  // Load integer or FP register pair, non-temporal.
-  void ldnp(const CPURegister& rt, const CPURegister& rt2,
-            const MemOperand& src);
-
-  // Store integer or FP register pair, non-temporal.
-  void stnp(const CPURegister& rt, const CPURegister& rt2,
-            const MemOperand& dst);
-
-  // Load integer or FP register from literal pool.
-  void ldr(const CPURegister& rt, RawLiteral* literal);
-
-  // Load word with sign extension from literal pool.
-  void ldrsw(const Register& rt, RawLiteral* literal);
-
-  // Load integer or FP register from pc + imm19 << 2.
-  void ldr(const CPURegister& rt, int imm19);
-
-  // Load word with sign extension from pc + imm19 << 2.
-  void ldrsw(const Register& rt, int imm19);
-
-  // Store exclusive byte.
-  void stxrb(const Register& rs, const Register& rt, const MemOperand& dst);
-
-  // Store exclusive half-word.
-  void stxrh(const Register& rs, const Register& rt, const MemOperand& dst);
-
-  // Store exclusive register.
-  void stxr(const Register& rs, const Register& rt, const MemOperand& dst);
-
-  // Load exclusive byte.
-  void ldxrb(const Register& rt, const MemOperand& src);
-
-  // Load exclusive half-word.
-  void ldxrh(const Register& rt, const MemOperand& src);
-
-  // Load exclusive register.
-  void ldxr(const Register& rt, const MemOperand& src);
-
-  // Store exclusive register pair.
-  void stxp(const Register& rs,
-            const Register& rt,
-            const Register& rt2,
-            const MemOperand& dst);
-
-  // Load exclusive register pair.
-  void ldxp(const Register& rt, const Register& rt2, const MemOperand& src);
-
-  // Store-release exclusive byte.
-  void stlxrb(const Register& rs, const Register& rt, const MemOperand& dst);
-
-  // Store-release exclusive half-word.
-  void stlxrh(const Register& rs, const Register& rt, const MemOperand& dst);
-
-  // Store-release exclusive register.
-  void stlxr(const Register& rs, const Register& rt, const MemOperand& dst);
-
-  // Load-acquire exclusive byte.
-  void ldaxrb(const Register& rt, const MemOperand& src);
-
-  // Load-acquire exclusive half-word.
-  void ldaxrh(const Register& rt, const MemOperand& src);
-
-  // Load-acquire exclusive register.
-  void ldaxr(const Register& rt, const MemOperand& src);
-
-  // Store-release exclusive register pair.
-  void stlxp(const Register& rs,
-             const Register& rt,
-             const Register& rt2,
-             const MemOperand& dst);
-
-  // Load-acquire exclusive register pair.
-  void ldaxp(const Register& rt, const Register& rt2, const MemOperand& src);
-
-  // Store-release byte.
-  void stlrb(const Register& rt, const MemOperand& dst);
-
-  // Store-release half-word.
-  void stlrh(const Register& rt, const MemOperand& dst);
-
-  // Store-release register.
-  void stlr(const Register& rt, const MemOperand& dst);
-
-  // Load-acquire byte.
-  void ldarb(const Register& rt, const MemOperand& src);
-
-  // Load-acquire half-word.
-  void ldarh(const Register& rt, const MemOperand& src);
-
-  // Load-acquire register.
-  void ldar(const Register& rt, const MemOperand& src);
-
-  // Prefetch memory.
-  void prfm(PrefetchOperation op, const MemOperand& addr,
-            LoadStoreScalingOption option = PreferScaledOffset);
-
-  // Prefetch memory (with unscaled offset).
-  void prfum(PrefetchOperation op, const MemOperand& addr,
-             LoadStoreScalingOption option = PreferUnscaledOffset);
-
-  // Prefetch memory in the literal pool.
-  void prfm(PrefetchOperation op, RawLiteral* literal);
-
-  // Prefetch from pc + imm19 << 2.
-  void prfm(PrefetchOperation op, int imm19);
-
-  // Move instructions. The default shift of -1 indicates that the move
-  // instruction will calculate an appropriate 16-bit immediate and left shift
-  // that is equal to the 64-bit immediate argument. If an explicit left shift
-  // is specified (0, 16, 32 or 48), the immediate must be a 16-bit value.
-  //
-  // For movk, an explicit shift can be used to indicate which half word should
-  // be overwritten, eg. movk(x0, 0, 0) will overwrite the least-significant
-  // half word with zero, whereas movk(x0, 0, 48) will overwrite the
-  // most-significant.
-
-  // Move immediate and keep.
-  void movk(const Register& rd, uint64_t imm, int shift = -1) {
-    MoveWide(rd, imm, shift, MOVK);
-  }
-
-  // Move inverted immediate.
-  void movn(const Register& rd, uint64_t imm, int shift = -1) {
-    MoveWide(rd, imm, shift, MOVN);
-  }
-
-  // Move immediate.
-  void movz(const Register& rd, uint64_t imm, int shift = -1) {
-    MoveWide(rd, imm, shift, MOVZ);
-  }
-
-  // Misc instructions.
-  // Monitor debug-mode breakpoint.
-  void brk(int code);
-
-  // Halting debug-mode breakpoint.
-  void hlt(int code);
-
-  // Move register to register.
-  void mov(const Register& rd, const Register& rn);
-
-  // Move inverted operand to register.
-  void mvn(const Register& rd, const Operand& operand);
-
-  // System instructions.
-  // Move to register from system register.
-  void mrs(const Register& rt, SystemRegister sysreg);
-
-  // Move from register to system register.
-  void msr(SystemRegister sysreg, const Register& rt);
-
-  // System hint.
-  void hint(SystemHint code);
-
-  // Clear exclusive monitor.
-  void clrex(int imm4 = 0xf);
-
-  // Data memory barrier.
-  void dmb(BarrierDomain domain, BarrierType type);
-
-  // Data synchronization barrier.
-  void dsb(BarrierDomain domain, BarrierType type);
-
-  // Instruction synchronization barrier.
-  void isb();
-
-  // Alias for system instructions.
-  // No-op.
-  void nop() {
-    hint(NOP);
-  }
-
-  // FP instructions.
-  // Move double precision immediate to FP register.
-  void fmov(const FPRegister& fd, double imm);
-
-  // Move single precision immediate to FP register.
-  void fmov(const FPRegister& fd, float imm);
-
-  // Move FP register to register.
-  void fmov(const Register& rd, const FPRegister& fn);
-
-  // Move register to FP register.
-  void fmov(const FPRegister& fd, const Register& rn);
-
-  // Move FP register to FP register.
-  void fmov(const FPRegister& fd, const FPRegister& fn);
-
-  // FP add.
-  void fadd(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
-
-  // FP subtract.
-  void fsub(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
-
-  // FP multiply.
-  void fmul(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
-
-  // FP fused multiply and add.
-  void fmadd(const FPRegister& fd,
-             const FPRegister& fn,
-             const FPRegister& fm,
-             const FPRegister& fa);
-
-  // FP fused multiply and subtract.
-  void fmsub(const FPRegister& fd,
-             const FPRegister& fn,
-             const FPRegister& fm,
-             const FPRegister& fa);
-
-  // FP fused multiply, add and negate.
-  void fnmadd(const FPRegister& fd,
-              const FPRegister& fn,
-              const FPRegister& fm,
-              const FPRegister& fa);
-
-  // FP fused multiply, subtract and negate.
-  void fnmsub(const FPRegister& fd,
-              const FPRegister& fn,
-              const FPRegister& fm,
-              const FPRegister& fa);
-
-  // FP divide.
-  void fdiv(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
-
-  // FP maximum.
-  void fmax(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
-
-  // FP minimum.
-  void fmin(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
-
-  // FP maximum number.
-  void fmaxnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
-
-  // FP minimum number.
-  void fminnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
-
-  // FP absolute.
-  void fabs(const FPRegister& fd, const FPRegister& fn);
-
-  // FP negate.
-  void fneg(const FPRegister& fd, const FPRegister& fn);
-
-  // FP square root.
-  void fsqrt(const FPRegister& fd, const FPRegister& fn);
-
-  // FP round to integer (nearest with ties to away).
-  void frinta(const FPRegister& fd, const FPRegister& fn);
-
-  // FP round to integer (implicit rounding).
-  void frinti(const FPRegister& fd, const FPRegister& fn);
-
-  // FP round to integer (toward minus infinity).
-  void frintm(const FPRegister& fd, const FPRegister& fn);
-
-  // FP round to integer (nearest with ties to even).
-  void frintn(const FPRegister& fd, const FPRegister& fn);
-
-  // FP round to integer (toward plus infinity).
-  void frintp(const FPRegister& fd, const FPRegister& fn);
-
-  // FP round to integer (exact, implicit rounding).
-  void frintx(const FPRegister& fd, const FPRegister& fn);
-
-  // FP round to integer (towards zero).
-  void frintz(const FPRegister& fd, const FPRegister& fn);
-
-  // FP compare registers.
-  void fcmp(const FPRegister& fn, const FPRegister& fm);
-
-  // FP compare immediate.
-  void fcmp(const FPRegister& fn, double value);
-
-  // FP conditional compare.
-  void fccmp(const FPRegister& fn,
-             const FPRegister& fm,
-             StatusFlags nzcv,
-             Condition cond);
-
-  // FP conditional select.
-  void fcsel(const FPRegister& fd,
-             const FPRegister& fn,
-             const FPRegister& fm,
-             Condition cond);
-
-  // Common FP Convert function.
-  void FPConvertToInt(const Register& rd,
-                      const FPRegister& fn,
-                      FPIntegerConvertOp op);
-
-  // FP convert between single and double precision.
-  void fcvt(const FPRegister& fd, const FPRegister& fn);
-
-  // Convert FP to signed integer (nearest with ties to away).
-  void fcvtas(const Register& rd, const FPRegister& fn);
-
-  // Convert FP to unsigned integer (nearest with ties to away).
-  void fcvtau(const Register& rd, const FPRegister& fn);
-
-  // Convert FP to signed integer (round towards -infinity).
-  void fcvtms(const Register& rd, const FPRegister& fn);
-
-  // Convert FP to unsigned integer (round towards -infinity).
-  void fcvtmu(const Register& rd, const FPRegister& fn);
-
-  // Convert FP to signed integer (nearest with ties to even).
-  void fcvtns(const Register& rd, const FPRegister& fn);
-
-  // Convert FP to unsigned integer (nearest with ties to even).
-  void fcvtnu(const Register& rd, const FPRegister& fn);
-
-  // Convert FP to signed integer (round towards zero).
-  void fcvtzs(const Register& rd, const FPRegister& fn);
-
-  // Convert FP to unsigned integer (round towards zero).
-  void fcvtzu(const Register& rd, const FPRegister& fn);
-
-  // Convert signed integer or fixed point to FP.
-  void scvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);
-
-  // Convert unsigned integer or fixed point to FP.
-  void ucvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);
-
-  // Emit generic instructions.
-  // Emit raw instructions into the instruction stream.
-  void dci(Instr raw_inst) { Emit(raw_inst); }
-
-  // Emit 32 bits of data into the instruction stream.
-  void dc32(uint32_t data) {
-    VIXL_ASSERT(buffer_monitor_ > 0);
-    buffer_->Emit32(data);
-  }
-
-  // Emit 64 bits of data into the instruction stream.
-  void dc64(uint64_t data) {
-    VIXL_ASSERT(buffer_monitor_ > 0);
-    buffer_->Emit64(data);
-  }
-
-  // Copy a string into the instruction stream, including the terminating NULL
-  // character. The instruction pointer is then aligned correctly for
-  // subsequent instructions.
-  void EmitString(const char * string) {
-    VIXL_ASSERT(string != NULL);
-    VIXL_ASSERT(buffer_monitor_ > 0);
-
-    buffer_->EmitString(string);
-    buffer_->Align();
-  }
-
-  // Code generation helpers.
-
-  // Register encoding.
-  static Instr Rd(CPURegister rd) {
-    VIXL_ASSERT(rd.code() != kSPRegInternalCode);
-    return rd.code() << Rd_offset;
-  }
-
-  static Instr Rn(CPURegister rn) {
-    VIXL_ASSERT(rn.code() != kSPRegInternalCode);
-    return rn.code() << Rn_offset;
-  }
-
-  static Instr Rm(CPURegister rm) {
-    VIXL_ASSERT(rm.code() != kSPRegInternalCode);
-    return rm.code() << Rm_offset;
-  }
-
-  static Instr Ra(CPURegister ra) {
-    VIXL_ASSERT(ra.code() != kSPRegInternalCode);
-    return ra.code() << Ra_offset;
-  }
-
-  static Instr Rt(CPURegister rt) {
-    VIXL_ASSERT(rt.code() != kSPRegInternalCode);
-    return rt.code() << Rt_offset;
-  }
-
-  static Instr Rt2(CPURegister rt2) {
-    VIXL_ASSERT(rt2.code() != kSPRegInternalCode);
-    return rt2.code() << Rt2_offset;
-  }
-
-  static Instr Rs(CPURegister rs) {
-    VIXL_ASSERT(rs.code() != kSPRegInternalCode);
-    return rs.code() << Rs_offset;
-  }
-
-  // These encoding functions allow the stack pointer to be encoded, and
-  // disallow the zero register.
-  static Instr RdSP(Register rd) {
-    VIXL_ASSERT(!rd.IsZero());
-    return (rd.code() & kRegCodeMask) << Rd_offset;
-  }
-
-  static Instr RnSP(Register rn) {
-    VIXL_ASSERT(!rn.IsZero());
-    return (rn.code() & kRegCodeMask) << Rn_offset;
-  }
-
-  // Flags encoding.
-  static Instr Flags(FlagsUpdate S) {
-    if (S == SetFlags) {
-      return 1 << FlagsUpdate_offset;
-    } else if (S == LeaveFlags) {
-      return 0 << FlagsUpdate_offset;
-    }
-    VIXL_UNREACHABLE();
-    return 0;
-  }
-
-  static Instr Cond(Condition cond) {
-    return cond << Condition_offset;
-  }
-
-  // PC-relative address encoding.
-  static Instr ImmPCRelAddress(int imm21) {
-    VIXL_ASSERT(is_int21(imm21));
-    Instr imm = static_cast<Instr>(truncate_to_int21(imm21));
-    Instr immhi = (imm >> ImmPCRelLo_width) << ImmPCRelHi_offset;
-    Instr immlo = imm << ImmPCRelLo_offset;
-    return (immhi & ImmPCRelHi_mask) | (immlo & ImmPCRelLo_mask);
-  }
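-  // For example (illustrative): ImmPCRelAddress(0x7) places immlo = 0b11 in
-  // instruction bits 30:29 and immhi = 0b1 starting at bit 5, as used by
-  // adr/adrp.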
-
-  // Branch encoding.
-  static Instr ImmUncondBranch(int imm26) {
-    VIXL_ASSERT(is_int26(imm26));
-    return truncate_to_int26(imm26) << ImmUncondBranch_offset;
-  }
-
-  static Instr ImmCondBranch(int imm19) {
-    VIXL_ASSERT(is_int19(imm19));
-    return truncate_to_int19(imm19) << ImmCondBranch_offset;
-  }
-
-  static Instr ImmCmpBranch(int imm19) {
-    VIXL_ASSERT(is_int19(imm19));
-    return truncate_to_int19(imm19) << ImmCmpBranch_offset;
-  }
-
-  static Instr ImmTestBranch(int imm14) {
-    VIXL_ASSERT(is_int14(imm14));
-    return truncate_to_int14(imm14) << ImmTestBranch_offset;
-  }
-
-  static Instr ImmTestBranchBit(unsigned bit_pos) {
-    VIXL_ASSERT(is_uint6(bit_pos));
-    // Subtract five from the shift offset, as we need bit 5 of bit_pos to
-    // land at ImmTestBranchBit5_offset.
-    unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5);
-    unsigned b40 = bit_pos << ImmTestBranchBit40_offset;
-    b5 &= ImmTestBranchBit5_mask;
-    b40 &= ImmTestBranchBit40_mask;
-    return b5 | b40;
-  }
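-  // For example (illustrative): ImmTestBranchBit(33) splits 0b100001 into
-  // b5 = 1 (instruction bit 31) and b40 = 0b00001 (instruction bits 23:19).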
-
-  // Data Processing encoding.
-  static Instr SF(Register rd) {
-      return rd.Is64Bits() ? SixtyFourBits : ThirtyTwoBits;
-  }
-
-  static Instr ImmAddSub(int64_t imm) {
-    VIXL_ASSERT(IsImmAddSub(imm));
-    if (is_uint12(imm)) {  // No shift required.
-      return imm << ImmAddSub_offset;
-    } else {
-      return ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
-    }
-  }
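-  // For example (illustrative): ImmAddSub(42) encodes the value directly,
-  // while ImmAddSub(0x1000) encodes imm12 = 1 with the LSL #12 shift bit set.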
-
-  static Instr ImmS(unsigned imms, unsigned reg_size) {
-    VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(imms)) ||
-           ((reg_size == kWRegSize) && is_uint5(imms)));
-    USE(reg_size);
-    return imms << ImmS_offset;
-  }
-
-  static Instr ImmR(unsigned immr, unsigned reg_size) {
-    VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
-           ((reg_size == kWRegSize) && is_uint5(immr)));
-    USE(reg_size);
-    VIXL_ASSERT(is_uint6(immr));
-    return immr << ImmR_offset;
-  }
-
-  static Instr ImmSetBits(unsigned imms, unsigned reg_size) {
-    VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
-    VIXL_ASSERT(is_uint6(imms));
-    VIXL_ASSERT((reg_size == kXRegSize) || is_uint6(imms + 3));
-    USE(reg_size);
-    return imms << ImmSetBits_offset;
-  }
-
-  static Instr ImmRotate(unsigned immr, unsigned reg_size) {
-    VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
-    VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
-           ((reg_size == kWRegSize) && is_uint5(immr)));
-    USE(reg_size);
-    return immr << ImmRotate_offset;
-  }
-
-  static Instr ImmLLiteral(int imm19) {
-    VIXL_ASSERT(is_int19(imm19));
-    return truncate_to_int19(imm19) << ImmLLiteral_offset;
-  }
-
-  static Instr BitN(unsigned bitn, unsigned reg_size) {
-    VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
-    VIXL_ASSERT((reg_size == kXRegSize) || (bitn == 0));
-    USE(reg_size);
-    return bitn << BitN_offset;
-  }
-
-  static Instr ShiftDP(Shift shift) {
-    VIXL_ASSERT(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
-    return shift << ShiftDP_offset;
-  }
-
-  static Instr ImmDPShift(unsigned amount) {
-    VIXL_ASSERT(is_uint6(amount));
-    return amount << ImmDPShift_offset;
-  }
-
-  static Instr ExtendMode(Extend extend) {
-    return extend << ExtendMode_offset;
-  }
-
-  static Instr ImmExtendShift(unsigned left_shift) {
-    VIXL_ASSERT(left_shift <= 4);
-    return left_shift << ImmExtendShift_offset;
-  }
-
-  static Instr ImmCondCmp(unsigned imm) {
-    VIXL_ASSERT(is_uint5(imm));
-    return imm << ImmCondCmp_offset;
-  }
-
-  static Instr Nzcv(StatusFlags nzcv) {
-    return ((nzcv >> Flags_offset) & 0xf) << Nzcv_offset;
-  }
-
-  // MemOperand offset encoding.
-  static Instr ImmLSUnsigned(int imm12) {
-    VIXL_ASSERT(is_uint12(imm12));
-    return imm12 << ImmLSUnsigned_offset;
-  }
-
-  static Instr ImmLS(int imm9) {
-    VIXL_ASSERT(is_int9(imm9));
-    return truncate_to_int9(imm9) << ImmLS_offset;
-  }
-
-  static Instr ImmLSPair(int imm7, LSDataSize size) {
-    VIXL_ASSERT(((imm7 >> size) << size) == imm7);
-    int scaled_imm7 = imm7 >> size;
-    VIXL_ASSERT(is_int7(scaled_imm7));
-    return truncate_to_int7(scaled_imm7) << ImmLSPair_offset;
-  }
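-  // For example (illustrative): for an X-register pair (size = 3, 8-byte
-  // units), ImmLSPair(16, size) encodes scaled_imm7 = 2.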
-
-  static Instr ImmShiftLS(unsigned shift_amount) {
-    VIXL_ASSERT(is_uint1(shift_amount));
-    return shift_amount << ImmShiftLS_offset;
-  }
-
-  static Instr ImmPrefetchOperation(int imm5) {
-    VIXL_ASSERT(is_uint5(imm5));
-    return imm5 << ImmPrefetchOperation_offset;
-  }
-
-  static Instr ImmException(int imm16) {
-    VIXL_ASSERT(is_uint16(imm16));
-    return imm16 << ImmException_offset;
-  }
-
-  static Instr ImmSystemRegister(int imm15) {
-    VIXL_ASSERT(is_uint15(imm15));
-    return imm15 << ImmSystemRegister_offset;
-  }
-
-  static Instr ImmHint(int imm7) {
-    VIXL_ASSERT(is_uint7(imm7));
-    return imm7 << ImmHint_offset;
-  }
-
-  static Instr CRm(int imm4) {
-    VIXL_ASSERT(is_uint4(imm4));
-    return imm4 << CRm_offset;
-  }
-
-  static Instr ImmBarrierDomain(int imm2) {
-    VIXL_ASSERT(is_uint2(imm2));
-    return imm2 << ImmBarrierDomain_offset;
-  }
-
-  static Instr ImmBarrierType(int imm2) {
-    VIXL_ASSERT(is_uint2(imm2));
-    return imm2 << ImmBarrierType_offset;
-  }
-
-  static LSDataSize CalcLSDataSize(LoadStoreOp op) {
-    VIXL_ASSERT((SizeLS_offset + SizeLS_width) == (kInstructionSize * 8));
-    return static_cast<LSDataSize>(op >> SizeLS_offset);
-  }
-
-  // Move immediates encoding.
-  static Instr ImmMoveWide(uint64_t imm) {
-    VIXL_ASSERT(is_uint16(imm));
-    return imm << ImmMoveWide_offset;
-  }
-
-  static Instr ShiftMoveWide(int64_t shift) {
-    VIXL_ASSERT(is_uint2(shift));
-    return shift << ShiftMoveWide_offset;
-  }
-
-  // FP Immediates.
-  static Instr ImmFP32(float imm);
-  static Instr ImmFP64(double imm);
-
-  // FP register type.
-  static Instr FPType(FPRegister fd) {
-    return fd.Is64Bits() ? FP64 : FP32;
-  }
-
-  static Instr FPScale(unsigned scale) {
-    VIXL_ASSERT(is_uint6(scale));
-    return scale << FPScale_offset;
-  }
-
-  // Immediate field checking helpers.
-  static bool IsImmAddSub(int64_t immediate);
-  static bool IsImmConditionalCompare(int64_t immediate);
-  static bool IsImmFP32(float imm);
-  static bool IsImmFP64(double imm);
-  static bool IsImmLogical(uint64_t value,
-                           unsigned width,
-                           unsigned* n = NULL,
-                           unsigned* imm_s = NULL,
-                           unsigned* imm_r = NULL);
-  static bool IsImmLSPair(int64_t offset, LSDataSize size);
-  static bool IsImmLSScaled(int64_t offset, LSDataSize size);
-  static bool IsImmLSUnscaled(int64_t offset);
-  static bool IsImmMovn(uint64_t imm, unsigned reg_size);
-  static bool IsImmMovz(uint64_t imm, unsigned reg_size);
-
-  // Size of the code generated from the label's location to the current
-  // position.
-  size_t SizeOfCodeGeneratedSince(Label* label) const {
-    VIXL_ASSERT(label->IsBound());
-    return buffer_->OffsetFrom(label->location());
-  }
-
-  size_t SizeOfCodeGenerated() const {
-    return buffer_->CursorOffset();
-  }
-
-  size_t BufferCapacity() const { return buffer_->capacity(); }
-
-  size_t RemainingBufferSpace() const { return buffer_->RemainingBytes(); }
-
-  void EnsureSpaceFor(size_t amount) {
-    if (buffer_->RemainingBytes() < amount) {
-      size_t capacity = buffer_->capacity();
-      size_t size = buffer_->CursorOffset();
-      do {
-        // TODO(all): refine.
-        capacity *= 2;
-      } while ((capacity - size) <  amount);
-      buffer_->Grow(capacity);
-    }
-  }
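-  // For example (illustrative): with a 4096-byte capacity and 4000 bytes
-  // used, EnsureSpaceFor(200) grows the buffer to 8192 bytes.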
-
-#ifdef VIXL_DEBUG
-  void AcquireBuffer() {
-    VIXL_ASSERT(buffer_monitor_ >= 0);
-    buffer_monitor_++;
-  }
-
-  void ReleaseBuffer() {
-    buffer_monitor_--;
-    VIXL_ASSERT(buffer_monitor_ >= 0);
-  }
-#endif
-
-  PositionIndependentCodeOption pic() const {
-    return pic_;
-  }
-
-  bool AllowPageOffsetDependentCode() const {
-    return (pic() == PageOffsetDependentCode) ||
-           (pic() == PositionDependentCode);
-  }
-
-  static const Register& AppropriateZeroRegFor(const CPURegister& reg) {
-    return reg.Is64Bits() ? xzr : wzr;
-  }
-
-
- protected:
-  void LoadStore(const CPURegister& rt,
-                 const MemOperand& addr,
-                 LoadStoreOp op,
-                 LoadStoreScalingOption option = PreferScaledOffset);
-
-  void LoadStorePair(const CPURegister& rt,
-                     const CPURegister& rt2,
-                     const MemOperand& addr,
-                     LoadStorePairOp op);
-
-  void Prefetch(PrefetchOperation op,
-                const MemOperand& addr,
-                LoadStoreScalingOption option = PreferScaledOffset);
-
-  // TODO(all): The third parameter should be passed by reference, but gcc
-  // 4.8.2 then reports a bogus uninitialised warning.
-  void Logical(const Register& rd,
-               const Register& rn,
-               const Operand operand,
-               LogicalOp op);
-  void LogicalImmediate(const Register& rd,
-                        const Register& rn,
-                        unsigned n,
-                        unsigned imm_s,
-                        unsigned imm_r,
-                        LogicalOp op);
-
-  void ConditionalCompare(const Register& rn,
-                          const Operand& operand,
-                          StatusFlags nzcv,
-                          Condition cond,
-                          ConditionalCompareOp op);
-
-  void AddSubWithCarry(const Register& rd,
-                       const Register& rn,
-                       const Operand& operand,
-                       FlagsUpdate S,
-                       AddSubWithCarryOp op);
-
-
-  // Functions for emulating operands not directly supported by the instruction
-  // set.
-  void EmitShift(const Register& rd,
-                 const Register& rn,
-                 Shift shift,
-                 unsigned amount);
-  void EmitExtendShift(const Register& rd,
-                       const Register& rn,
-                       Extend extend,
-                       unsigned left_shift);
-
-  void AddSub(const Register& rd,
-              const Register& rn,
-              const Operand& operand,
-              FlagsUpdate S,
-              AddSubOp op);
-
-  // Find an appropriate LoadStoreOp or LoadStorePairOp for the specified
-  // registers. Only simple loads are supported; sign- and zero-extension (such
-  // as in LDPSW_x or LDRB_w) are not supported.
-  static LoadStoreOp LoadOpFor(const CPURegister& rt);
-  static LoadStorePairOp LoadPairOpFor(const CPURegister& rt,
-                                       const CPURegister& rt2);
-  static LoadStoreOp StoreOpFor(const CPURegister& rt);
-  static LoadStorePairOp StorePairOpFor(const CPURegister& rt,
-                                        const CPURegister& rt2);
-  static LoadStorePairNonTemporalOp LoadPairNonTemporalOpFor(
-    const CPURegister& rt, const CPURegister& rt2);
-  static LoadStorePairNonTemporalOp StorePairNonTemporalOpFor(
-    const CPURegister& rt, const CPURegister& rt2);
-  static LoadLiteralOp LoadLiteralOpFor(const CPURegister& rt);
-
-
- private:
-  // Instruction helpers.
-  void MoveWide(const Register& rd,
-                uint64_t imm,
-                int shift,
-                MoveWideImmediateOp mov_op);
-  void DataProcShiftedRegister(const Register& rd,
-                               const Register& rn,
-                               const Operand& operand,
-                               FlagsUpdate S,
-                               Instr op);
-  void DataProcExtendedRegister(const Register& rd,
-                                const Register& rn,
-                                const Operand& operand,
-                                FlagsUpdate S,
-                                Instr op);
-  void LoadStorePairNonTemporal(const CPURegister& rt,
-                                const CPURegister& rt2,
-                                const MemOperand& addr,
-                                LoadStorePairNonTemporalOp op);
-  void LoadLiteral(const CPURegister& rt, uint64_t imm, LoadLiteralOp op);
-  void ConditionalSelect(const Register& rd,
-                         const Register& rn,
-                         const Register& rm,
-                         Condition cond,
-                         ConditionalSelectOp op);
-  void DataProcessing1Source(const Register& rd,
-                             const Register& rn,
-                             DataProcessing1SourceOp op);
-  void DataProcessing3Source(const Register& rd,
-                             const Register& rn,
-                             const Register& rm,
-                             const Register& ra,
-                             DataProcessing3SourceOp op);
-  void FPDataProcessing1Source(const FPRegister& fd,
-                               const FPRegister& fn,
-                               FPDataProcessing1SourceOp op);
-  void FPDataProcessing2Source(const FPRegister& fd,
-                               const FPRegister& fn,
-                               const FPRegister& fm,
-                               FPDataProcessing2SourceOp op);
-  void FPDataProcessing3Source(const FPRegister& fd,
-                               const FPRegister& fn,
-                               const FPRegister& fm,
-                               const FPRegister& fa,
-                               FPDataProcessing3SourceOp op);
-
-  // Encode the specified MemOperand for the specified access size and scaling
-  // preference.
-  Instr LoadStoreMemOperand(const MemOperand& addr,
-                            LSDataSize size,
-                            LoadStoreScalingOption option);
-
-  // Link the current (not-yet-emitted) instruction to the specified label, then
-  // return an offset to be encoded in the instruction. If the label is not yet
-  // bound, an offset of 0 is returned.
-  ptrdiff_t LinkAndGetByteOffsetTo(Label * label);
-  ptrdiff_t LinkAndGetInstructionOffsetTo(Label * label);
-  ptrdiff_t LinkAndGetPageOffsetTo(Label * label);
-
-  // A common implementation for the LinkAndGet<Type>OffsetTo helpers.
-  template <int element_shift>
-  ptrdiff_t LinkAndGetOffsetTo(Label* label);
-
-  // Literal load offsets are in words (32-bit).
-  ptrdiff_t LinkAndGetWordOffsetTo(RawLiteral* literal);
-
-  // Emit the instruction in buffer_.
-  void Emit(Instr instruction) {
-    VIXL_STATIC_ASSERT(sizeof(instruction) == kInstructionSize);
-    VIXL_ASSERT(buffer_monitor_ > 0);
-    buffer_->Emit32(instruction);
-  }
-
-  // Buffer where the code is emitted.
-  CodeBuffer* buffer_;
-  PositionIndependentCodeOption pic_;
-
-#ifdef VIXL_DEBUG
-  int64_t buffer_monitor_;
-#endif
-};
-
-
-// All Assembler emits MUST acquire/release the underlying code buffer. The
-// helper scope below will do so, and can optionally ensure the buffer is big
-// enough to receive the emit. The scope can be asked not to perform any
-// checks (kNoCheck) if, for example, it is known in advance that the buffer
-// size is adequate, or some other size-checking mechanism is in place.
-class CodeBufferCheckScope {
- public:
-  // Tell whether or not the scope needs to ensure the associated CodeBuffer
-  // has enough space for the requested size.
-  enum CheckPolicy {
-    kNoCheck,
-    kCheck
-  };
-
-  // Tell whether or not the scope should assert the amount of code emitted
-  // within the scope is consistent with the requested amount.
-  enum AssertPolicy {
-    kNoAssert,    // No assert required.
-    kExactSize,   // The code emitted must be exactly size bytes.
-    kMaximumSize  // The code emitted must be at most size bytes.
-  };
-
-  CodeBufferCheckScope(Assembler* assm,
-                       size_t size,
-                       CheckPolicy check_policy = kCheck,
-                       AssertPolicy assert_policy = kMaximumSize)
-      : assm_(assm) {
-    if (check_policy == kCheck) assm->EnsureSpaceFor(size);
-#ifdef VIXL_DEBUG
-    assm->bind(&start_);
-    size_ = size;
-    assert_policy_ = assert_policy;
-    assm->AcquireBuffer();
-#else
-    USE(assert_policy);
-#endif
-  }
-
-  // This is a shortcut for CodeBufferCheckScope(assm, 0, kNoCheck, kNoAssert).
-  explicit CodeBufferCheckScope(Assembler* assm) : assm_(assm) {
-#ifdef VIXL_DEBUG
-    size_ = 0;
-    assert_policy_ = kNoAssert;
-    assm->AcquireBuffer();
-#endif
-  }
-
-  ~CodeBufferCheckScope() {
-#ifdef VIXL_DEBUG
-    assm_->ReleaseBuffer();
-    switch (assert_policy_) {
-      case kNoAssert: break;
-      case kExactSize:
-        VIXL_ASSERT(assm_->SizeOfCodeGeneratedSince(&start_) == size_);
-        break;
-      case kMaximumSize:
-        VIXL_ASSERT(assm_->SizeOfCodeGeneratedSince(&start_) <= size_);
-        break;
-      default:
-        VIXL_UNREACHABLE();
-    }
-#endif
-  }
-
- protected:
-  Assembler* assm_;
-#ifdef VIXL_DEBUG
-  Label start_;
-  size_t size_;
-  AssertPolicy assert_policy_;
-#endif
-};
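-
-// Illustrative use of CodeBufferCheckScope (a sketch, assuming an Assembler*
-// `assm` with an attached buffer):
-//
-//   {
-//     CodeBufferCheckScope scope(assm, kInstructionSize,
-//                                CodeBufferCheckScope::kCheck,
-//                                CodeBufferCheckScope::kExactSize);
-//     assm->nop();  // Exactly one instruction, as the scope asserts.
-//   }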
-
-}  // namespace vixl
-
-#endif  // VIXL_A64_ASSEMBLER_A64_H_
diff --git a/disas/libvixl/a64/disasm-a64.cc b/disas/libvixl/a64/disasm-a64.cc
deleted file mode 100644
index f7bc2468bb..0000000000
--- a/disas/libvixl/a64/disasm-a64.cc
+++ /dev/null
@@ -1,1954 +0,0 @@
-// Copyright 2013, ARM Limited
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-//   * Redistributions of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-//   * Redistributions in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other materials provided with the distribution.
-//   * Neither the name of ARM Limited nor the names of its contributors may be
-//     used to endorse or promote products derived from this software without
-//     specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
-// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <cstdlib>
-#include "a64/disasm-a64.h"
-
-namespace vixl {
-
-Disassembler::Disassembler() {
-  buffer_size_ = 256;
-  buffer_ = reinterpret_cast<char*>(malloc(buffer_size_));
-  buffer_pos_ = 0;
-  own_buffer_ = true;
-  code_address_offset_ = 0;
-}
-
-
-Disassembler::Disassembler(char* text_buffer, int buffer_size) {
-  buffer_size_ = buffer_size;
-  buffer_ = text_buffer;
-  buffer_pos_ = 0;
-  own_buffer_ = false;
-  code_address_offset_ = 0;
-}
-
-
-Disassembler::~Disassembler() {
-  if (own_buffer_) {
-    free(buffer_);
-  }
-}
-
-
-char* Disassembler::GetOutput() {
-  return buffer_;
-}
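-
-// Illustrative use of the Disassembler (a sketch, assuming `instr` points at
-// a valid A64 instruction):
-//
-//   Disassembler disasm;
-//   Decoder decoder;
-//   decoder.AppendVisitor(&disasm);
-//   decoder.Decode(instr);
-//   printf("%s\n", disasm.GetOutput());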
-
-
-void Disassembler::VisitAddSubImmediate(const Instruction* instr) {
-  bool rd_is_zr = RdIsZROrSP(instr);
-  bool stack_op = (rd_is_zr || RnIsZROrSP(instr)) &&
-                  (instr->ImmAddSub() == 0);
-  const char *mnemonic = "";
-  const char *form = "'Rds, 'Rns, 'IAddSub";
-  const char *form_cmp = "'Rns, 'IAddSub";
-  const char *form_mov = "'Rds, 'Rns";
-
-  switch (instr->Mask(AddSubImmediateMask)) {
-    case ADD_w_imm:
-    case ADD_x_imm: {
-      mnemonic = "add";
-      if (stack_op) {
-        mnemonic = "mov";
-        form = form_mov;
-      }
-      break;
-    }
-    case ADDS_w_imm:
-    case ADDS_x_imm: {
-      mnemonic = "adds";
-      if (rd_is_zr) {
-        mnemonic = "cmn";
-        form = form_cmp;
-      }
-      break;
-    }
-    case SUB_w_imm:
-    case SUB_x_imm: mnemonic = "sub"; break;
-    case SUBS_w_imm:
-    case SUBS_x_imm: {
-      mnemonic = "subs";
-      if (rd_is_zr) {
-        mnemonic = "cmp";
-        form = form_cmp;
-      }
-      break;
-    }
-    default: VIXL_UNREACHABLE();
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitAddSubShifted(const Instruction* instr) {
-  bool rd_is_zr = RdIsZROrSP(instr);
-  bool rn_is_zr = RnIsZROrSP(instr);
-  const char *mnemonic = "";
-  const char *form = "'Rd, 'Rn, 'Rm'HDP";
-  const char *form_cmp = "'Rn, 'Rm'HDP";
-  const char *form_neg = "'Rd, 'Rm'HDP";
-
-  switch (instr->Mask(AddSubShiftedMask)) {
-    case ADD_w_shift:
-    case ADD_x_shift: mnemonic = "add"; break;
-    case ADDS_w_shift:
-    case ADDS_x_shift: {
-      mnemonic = "adds";
-      if (rd_is_zr) {
-        mnemonic = "cmn";
-        form = form_cmp;
-      }
-      break;
-    }
-    case SUB_w_shift:
-    case SUB_x_shift: {
-      mnemonic = "sub";
-      if (rn_is_zr) {
-        mnemonic = "neg";
-        form = form_neg;
-      }
-      break;
-    }
-    case SUBS_w_shift:
-    case SUBS_x_shift: {
-      mnemonic = "subs";
-      if (rd_is_zr) {
-        mnemonic = "cmp";
-        form = form_cmp;
-      } else if (rn_is_zr) {
-        mnemonic = "negs";
-        form = form_neg;
-      }
-      break;
-    }
-    default: VIXL_UNREACHABLE();
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitAddSubExtended(const Instruction* instr) {
-  bool rd_is_zr = RdIsZROrSP(instr);
-  const char *mnemonic = "";
-  Extend mode = static_cast<Extend>(instr->ExtendMode());
-  const char *form = ((mode == UXTX) || (mode == SXTX)) ?
-                     "'Rds, 'Rns, 'Xm'Ext" : "'Rds, 'Rns, 'Wm'Ext";
-  const char *form_cmp = ((mode == UXTX) || (mode == SXTX)) ?
-                         "'Rns, 'Xm'Ext" : "'Rns, 'Wm'Ext";
-
-  switch (instr->Mask(AddSubExtendedMask)) {
-    case ADD_w_ext:
-    case ADD_x_ext: mnemonic = "add"; break;
-    case ADDS_w_ext:
-    case ADDS_x_ext: {
-      mnemonic = "adds";
-      if (rd_is_zr) {
-        mnemonic = "cmn";
-        form = form_cmp;
-      }
-      break;
-    }
-    case SUB_w_ext:
-    case SUB_x_ext: mnemonic = "sub"; break;
-    case SUBS_w_ext:
-    case SUBS_x_ext: {
-      mnemonic = "subs";
-      if (rd_is_zr) {
-        mnemonic = "cmp";
-        form = form_cmp;
-      }
-      break;
-    }
-    default: VIXL_UNREACHABLE();
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitAddSubWithCarry(const Instruction* instr) {
-  bool rn_is_zr = RnIsZROrSP(instr);
-  const char *mnemonic = "";
-  const char *form = "'Rd, 'Rn, 'Rm";
-  const char *form_neg = "'Rd, 'Rm";
-
-  switch (instr->Mask(AddSubWithCarryMask)) {
-    case ADC_w:
-    case ADC_x: mnemonic = "adc"; break;
-    case ADCS_w:
-    case ADCS_x: mnemonic = "adcs"; break;
-    case SBC_w:
-    case SBC_x: {
-      mnemonic = "sbc";
-      if (rn_is_zr) {
-        mnemonic = "ngc";
-        form = form_neg;
-      }
-      break;
-    }
-    case SBCS_w:
-    case SBCS_x: {
-      mnemonic = "sbcs";
-      if (rn_is_zr) {
-        mnemonic = "ngcs";
-        form = form_neg;
-      }
-      break;
-    }
-    default: VIXL_UNREACHABLE();
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitLogicalImmediate(const Instruction* instr) {
-  bool rd_is_zr = RdIsZROrSP(instr);
-  bool rn_is_zr = RnIsZROrSP(instr);
-  const char *mnemonic = "";
-  const char *form = "'Rds, 'Rn, 'ITri";
-
-  if (instr->ImmLogical() == 0) {
-    // The immediate encoded in the instruction is not in the expected format.
-    Format(instr, "unallocated", "(LogicalImmediate)");
-    return;
-  }
-
-  switch (instr->Mask(LogicalImmediateMask)) {
-    case AND_w_imm:
-    case AND_x_imm: mnemonic = "and"; break;
-    case ORR_w_imm:
-    case ORR_x_imm: {
-      mnemonic = "orr";
-      unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSize
-                                                        : kWRegSize;
-      if (rn_is_zr && !IsMovzMovnImm(reg_size, instr->ImmLogical())) {
-        mnemonic = "mov";
-        form = "'Rds, 'ITri";
-      }
-      break;
-    }
-    case EOR_w_imm:
-    case EOR_x_imm: mnemonic = "eor"; break;
-    case ANDS_w_imm:
-    case ANDS_x_imm: {
-      mnemonic = "ands";
-      if (rd_is_zr) {
-        mnemonic = "tst";
-        form = "'Rn, 'ITri";
-      }
-      break;
-    }
-    default: VIXL_UNREACHABLE();
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-bool Disassembler::IsMovzMovnImm(unsigned reg_size, uint64_t value) {
-  VIXL_ASSERT((reg_size == kXRegSize) ||
-              ((reg_size == kWRegSize) && (value <= 0xffffffff)));
-
-  // Test for movz: 16 bits set at positions 0, 16, 32 or 48.
-  if (((value & UINT64_C(0xffffffffffff0000)) == 0) ||
-      ((value & UINT64_C(0xffffffff0000ffff)) == 0) ||
-      ((value & UINT64_C(0xffff0000ffffffff)) == 0) ||
-      ((value & UINT64_C(0x0000ffffffffffff)) == 0)) {
-    return true;
-  }
-
-  // Test for movn: NOT(16 bits set at positions 0, 16, 32 or 48).
-  if ((reg_size == kXRegSize) &&
-      (((~value & UINT64_C(0xffffffffffff0000)) == 0) ||
-       ((~value & UINT64_C(0xffffffff0000ffff)) == 0) ||
-       ((~value & UINT64_C(0xffff0000ffffffff)) == 0) ||
-       ((~value & UINT64_C(0x0000ffffffffffff)) == 0))) {
-    return true;
-  }
-  if ((reg_size == kWRegSize) &&
-      (((value & 0xffff0000) == 0xffff0000) ||
-       ((value & 0x0000ffff) == 0x0000ffff))) {
-    return true;
-  }
-  return false;
-}
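-
-// For example (illustrative): IsMovzMovnImm() returns true for 0x00120000
-// (a single movz halfword at position 16) and, for kXRegSize, for
-// 0xffffffffffff1234 (its inverse 0x000000000000edcb fits in the low 16
-// bits, so a single movn suffices).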
-
-
-void Disassembler::VisitLogicalShifted(const Instruction* instr) {
-  bool rd_is_zr = RdIsZROrSP(instr);
-  bool rn_is_zr = RnIsZROrSP(instr);
-  const char *mnemonic = "";
-  const char *form = "'Rd, 'Rn, 'Rm'HLo";
-
-  switch (instr->Mask(LogicalShiftedMask)) {
-    case AND_w:
-    case AND_x: mnemonic = "and"; break;
-    case BIC_w:
-    case BIC_x: mnemonic = "bic"; break;
-    case EOR_w:
-    case EOR_x: mnemonic = "eor"; break;
-    case EON_w:
-    case EON_x: mnemonic = "eon"; break;
-    case BICS_w:
-    case BICS_x: mnemonic = "bics"; break;
-    case ANDS_w:
-    case ANDS_x: {
-      mnemonic = "ands";
-      if (rd_is_zr) {
-        mnemonic = "tst";
-        form = "'Rn, 'Rm'HLo";
-      }
-      break;
-    }
-    case ORR_w:
-    case ORR_x: {
-      mnemonic = "orr";
-      if (rn_is_zr && (instr->ImmDPShift() == 0) && (instr->ShiftDP() == LSL)) {
-        mnemonic = "mov";
-        form = "'Rd, 'Rm";
-      }
-      break;
-    }
-    case ORN_w:
-    case ORN_x: {
-      mnemonic = "orn";
-      if (rn_is_zr) {
-        mnemonic = "mvn";
-        form = "'Rd, 'Rm'HLo";
-      }
-      break;
-    }
-    default: VIXL_UNREACHABLE();
-  }
-
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitConditionalCompareRegister(const Instruction* instr) {
-  const char *mnemonic = "";
-  const char *form = "'Rn, 'Rm, 'INzcv, 'Cond";
-
-  switch (instr->Mask(ConditionalCompareRegisterMask)) {
-    case CCMN_w:
-    case CCMN_x: mnemonic = "ccmn"; break;
-    case CCMP_w:
-    case CCMP_x: mnemonic = "ccmp"; break;
-    default: VIXL_UNREACHABLE();
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitConditionalCompareImmediate(const Instruction* instr) {
-  const char *mnemonic = "";
-  const char *form = "'Rn, 'IP, 'INzcv, 'Cond";
-
-  switch (instr->Mask(ConditionalCompareImmediateMask)) {
-    case CCMN_w_imm:
-    case CCMN_x_imm: mnemonic = "ccmn"; break;
-    case CCMP_w_imm:
-    case CCMP_x_imm: mnemonic = "ccmp"; break;
-    default: VIXL_UNREACHABLE();
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitConditionalSelect(const Instruction* instr) {
-  bool rnm_is_zr = (RnIsZROrSP(instr) && RmIsZROrSP(instr));
-  bool rn_is_rm = (instr->Rn() == instr->Rm());
-  const char *mnemonic = "";
-  const char *form = "'Rd, 'Rn, 'Rm, 'Cond";
-  const char *form_test = "'Rd, 'CInv";
-  const char *form_update = "'Rd, 'Rn, 'CInv";
-
-  Condition cond = static_cast<Condition>(instr->Condition());
-  bool invertible_cond = (cond != al) && (cond != nv);
-
-  switch (instr->Mask(ConditionalSelectMask)) {
-    case CSEL_w:
-    case CSEL_x: mnemonic = "csel"; break;
-    case CSINC_w:
-    case CSINC_x: {
-      mnemonic = "csinc";
-      if (rnm_is_zr && invertible_cond) {
-        mnemonic = "cset";
-        form = form_test;
-      } else if (rn_is_rm && invertible_cond) {
-        mnemonic = "cinc";
-        form = form_update;
-      }
-      break;
-    }
-    case CSINV_w:
-    case CSINV_x: {
-      mnemonic = "csinv";
-      if (rnm_is_zr && invertible_cond) {
-        mnemonic = "csetm";
-        form = form_test;
-      } else if (rn_is_rm && invertible_cond) {
-        mnemonic = "cinv";
-        form = form_update;
-      }
-      break;
-    }
-    case CSNEG_w:
-    case CSNEG_x: {
-      mnemonic = "csneg";
-      if (rn_is_rm && invertible_cond) {
-        mnemonic = "cneg";
-        form = form_update;
-      }
-      break;
-    }
-    default: VIXL_UNREACHABLE();
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitBitfield(const Instruction* instr) {
-  unsigned s = instr->ImmS();
-  unsigned r = instr->ImmR();
-  unsigned rd_size_minus_1 =
-    ((instr->SixtyFourBits() == 1) ? kXRegSize : kWRegSize) - 1;
-  const char *mnemonic = "";
-  const char *form = "";
-  const char *form_shift_right = "'Rd, 'Rn, 'IBr";
-  const char *form_extend = "'Rd, 'Wn";
-  const char *form_bfiz = "'Rd, 'Rn, 'IBZ-r, 'IBs+1";
-  const char *form_bfx = "'Rd, 'Rn, 'IBr, 'IBs-r+1";
-  const char *form_lsl = "'Rd, 'Rn, 'IBZ-r";
-
-  switch (instr->Mask(BitfieldMask)) {
-    case SBFM_w:
-    case SBFM_x: {
-      mnemonic = "sbfx";
-      form = form_bfx;
-      if (r == 0) {
-        form = form_extend;
-        if (s == 7) {
-          mnemonic = "sxtb";
-        } else if (s == 15) {
-          mnemonic = "sxth";
-        } else if ((s == 31) && (instr->SixtyFourBits() == 1)) {
-          mnemonic = "sxtw";
-        } else {
-          form = form_bfx;
-        }
-      } else if (s == rd_size_minus_1) {
-        mnemonic = "asr";
-        form = form_shift_right;
-      } else if (s < r) {
-        mnemonic = "sbfiz";
-        form = form_bfiz;
-      }
-      break;
-    }
-    case UBFM_w:
-    case UBFM_x: {
-      mnemonic = "ubfx";
-      form = form_bfx;
-      if (r == 0) {
-        form = form_extend;
-        if (s == 7) {
-          mnemonic = "uxtb";
-        } else if (s == 15) {
-          mnemonic = "uxth";
-        } else {
-          form = form_bfx;
-        }
-      }
-      if (s == rd_size_minus_1) {
-        mnemonic = "lsr";
-        form = form_shift_right;
-      } else if (r == s + 1) {
-        mnemonic = "lsl";
-        form = form_lsl;
-      } else if (s < r) {
-        mnemonic = "ubfiz";
-        form = form_bfiz;
-      }
-      break;
-    }
-    case BFM_w:
-    case BFM_x: {
-      mnemonic = "bfxil";
-      form = form_bfx;
-      if (s < r) {
-        mnemonic = "bfi";
-        form = form_bfiz;
-      }
-    }
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitExtract(const Instruction* instr) {
-  const char *mnemonic = "";
-  const char *form = "'Rd, 'Rn, 'Rm, 'IExtract";
-
-  switch (instr->Mask(ExtractMask)) {
-    case EXTR_w:
-    case EXTR_x: {
-      if (instr->Rn() == instr->Rm()) {
-        mnemonic = "ror";
-        form = "'Rd, 'Rn, 'IExtract";
-      } else {
-        mnemonic = "extr";
-      }
-      break;
-    }
-    default: VIXL_UNREACHABLE();
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitPCRelAddressing(const Instruction* instr) {
-  switch (instr->Mask(PCRelAddressingMask)) {
-    case ADR: Format(instr, "adr", "'Xd, 'AddrPCRelByte"); break;
-    case ADRP: Format(instr, "adrp", "'Xd, 'AddrPCRelPage"); break;
-    default: Format(instr, "unimplemented", "(PCRelAddressing)");
-  }
-}
-
-
-void Disassembler::VisitConditionalBranch(const Instruction* instr) {
-  switch (instr->Mask(ConditionalBranchMask)) {
-    case B_cond: Format(instr, "b.'CBrn", "'BImmCond"); break;
-    default: VIXL_UNREACHABLE();
-  }
-}
-
-
-void Disassembler::VisitUnconditionalBranchToRegister(
-    const Instruction* instr) {
-  const char *mnemonic = "unimplemented";
-  const char *form = "'Xn";
-
-  switch (instr->Mask(UnconditionalBranchToRegisterMask)) {
-    case BR: mnemonic = "br"; break;
-    case BLR: mnemonic = "blr"; break;
-    case RET: {
-      mnemonic = "ret";
-      if (instr->Rn() == kLinkRegCode) {
-        form = NULL;
-      }
-      break;
-    }
-    default: form = "(UnconditionalBranchToRegister)";
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitUnconditionalBranch(const Instruction* instr) {
-  const char *mnemonic = "";
-  const char *form = "'BImmUncn";
-
-  switch (instr->Mask(UnconditionalBranchMask)) {
-    case B: mnemonic = "b"; break;
-    case BL: mnemonic = "bl"; break;
-    default: VIXL_UNREACHABLE();
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitDataProcessing1Source(const Instruction* instr) {
-  const char *mnemonic = "";
-  const char *form = "'Rd, 'Rn";
-
-  switch (instr->Mask(DataProcessing1SourceMask)) {
-    #define FORMAT(A, B)  \
-    case A##_w:           \
-    case A##_x: mnemonic = B; break;
-    FORMAT(RBIT, "rbit");
-    FORMAT(REV16, "rev16");
-    FORMAT(REV, "rev");
-    FORMAT(CLZ, "clz");
-    FORMAT(CLS, "cls");
-    #undef FORMAT
-    case REV32_x: mnemonic = "rev32"; break;
-    default: VIXL_UNREACHABLE();
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitDataProcessing2Source(const Instruction* instr) {
-  const char *mnemonic = "unimplemented";
-  const char *form = "'Rd, 'Rn, 'Rm";
-
-  switch (instr->Mask(DataProcessing2SourceMask)) {
-    #define FORMAT(A, B)  \
-    case A##_w:           \
-    case A##_x: mnemonic = B; break;
-    FORMAT(UDIV, "udiv");
-    FORMAT(SDIV, "sdiv");
-    FORMAT(LSLV, "lsl");
-    FORMAT(LSRV, "lsr");
-    FORMAT(ASRV, "asr");
-    FORMAT(RORV, "ror");
-    #undef FORMAT
-    default: form = "(DataProcessing2Source)";
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitDataProcessing3Source(const Instruction* instr) {
-  bool ra_is_zr = RaIsZROrSP(instr);
-  const char *mnemonic = "";
-  const char *form = "'Xd, 'Wn, 'Wm, 'Xa";
-  const char *form_rrr = "'Rd, 'Rn, 'Rm";
-  const char *form_rrrr = "'Rd, 'Rn, 'Rm, 'Ra";
-  const char *form_xww = "'Xd, 'Wn, 'Wm";
-  const char *form_xxx = "'Xd, 'Xn, 'Xm";
-
-  switch (instr->Mask(DataProcessing3SourceMask)) {
-    case MADD_w:
-    case MADD_x: {
-      mnemonic = "madd";
-      form = form_rrrr;
-      if (ra_is_zr) {
-        mnemonic = "mul";
-        form = form_rrr;
-      }
-      break;
-    }
-    case MSUB_w:
-    case MSUB_x: {
-      mnemonic = "msub";
-      form = form_rrrr;
-      if (ra_is_zr) {
-        mnemonic = "mneg";
-        form = form_rrr;
-      }
-      break;
-    }
-    case SMADDL_x: {
-      mnemonic = "smaddl";
-      if (ra_is_zr) {
-        mnemonic = "smull";
-        form = form_xww;
-      }
-      break;
-    }
-    case SMSUBL_x: {
-      mnemonic = "smsubl";
-      if (ra_is_zr) {
-        mnemonic = "smnegl";
-        form = form_xww;
-      }
-      break;
-    }
-    case UMADDL_x: {
-      mnemonic = "umaddl";
-      if (ra_is_zr) {
-        mnemonic = "umull";
-        form = form_xww;
-      }
-      break;
-    }
-    case UMSUBL_x: {
-      mnemonic = "umsubl";
-      if (ra_is_zr) {
-        mnemonic = "umnegl";
-        form = form_xww;
-      }
-      break;
-    }
-    case SMULH_x: {
-      mnemonic = "smulh";
-      form = form_xxx;
-      break;
-    }
-    case UMULH_x: {
-      mnemonic = "umulh";
-      form = form_xxx;
-      break;
-    }
-    default: VIXL_UNREACHABLE();
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitCompareBranch(const Instruction* instr) {
-  const char *mnemonic = "";
-  const char *form = "'Rt, 'BImmCmpa";
-
-  switch (instr->Mask(CompareBranchMask)) {
-    case CBZ_w:
-    case CBZ_x: mnemonic = "cbz"; break;
-    case CBNZ_w:
-    case CBNZ_x: mnemonic = "cbnz"; break;
-    default: VIXL_UNREACHABLE();
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitTestBranch(const Instruction* instr) {
-  const char *mnemonic = "";
-  // If the top bit of the immediate is clear, the tested register is
-  // disassembled as Wt, otherwise Xt. As the top bit of the immediate is
-  // encoded in bit 31 of the instruction, we can reuse the Rt form, which
-  // uses bit 31 (normally "sf") to choose the register size.
-  const char *form = "'Rt, 'IS, 'BImmTest";
-
-  switch (instr->Mask(TestBranchMask)) {
-    case TBZ: mnemonic = "tbz"; break;
-    case TBNZ: mnemonic = "tbnz"; break;
-    default: VIXL_UNREACHABLE();
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitMoveWideImmediate(const Instruction* instr) {
-  const char *mnemonic = "";
-  const char *form = "'Rd, 'IMoveImm";
-
-  // Print the shift separately for movk, to make it clear which halfword will
-  // be overwritten. Movn and movz print the computed immediate, which includes
-  // the shift calculation.
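-  // For example (illustrative), "movz x0, #0x1234, lsl #16" is printed as
-  // "mov x0, #0x12340000", whereas a movk keeps the "lsl #16" notation.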
-  switch (instr->Mask(MoveWideImmediateMask)) {
-    case MOVN_w:
-    case MOVN_x:
-      if ((instr->ImmMoveWide()) || (instr->ShiftMoveWide() == 0)) {
-        if ((instr->SixtyFourBits() == 0) && (instr->ImmMoveWide() == 0xffff)) {
-          mnemonic = "movn";
-        } else {
-          mnemonic = "mov";
-          form = "'Rd, 'IMoveNeg";
-        }
-      } else {
-        mnemonic = "movn";
-      }
-      break;
-    case MOVZ_w:
-    case MOVZ_x:
-      if ((instr->ImmMoveWide()) || (instr->ShiftMoveWide() == 0))
-        mnemonic = "mov";
-      else
-        mnemonic = "movz";
-      break;
-    case MOVK_w:
-    case MOVK_x: mnemonic = "movk"; form = "'Rd, 'IMoveLSL"; break;
-    default: VIXL_UNREACHABLE();
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-#define LOAD_STORE_LIST(V)    \
-  V(STRB_w, "strb", "'Wt")    \
-  V(STRH_w, "strh", "'Wt")    \
-  V(STR_w, "str", "'Wt")      \
-  V(STR_x, "str", "'Xt")      \
-  V(LDRB_w, "ldrb", "'Wt")    \
-  V(LDRH_w, "ldrh", "'Wt")    \
-  V(LDR_w, "ldr", "'Wt")      \
-  V(LDR_x, "ldr", "'Xt")      \
-  V(LDRSB_x, "ldrsb", "'Xt")  \
-  V(LDRSH_x, "ldrsh", "'Xt")  \
-  V(LDRSW_x, "ldrsw", "'Xt")  \
-  V(LDRSB_w, "ldrsb", "'Wt")  \
-  V(LDRSH_w, "ldrsh", "'Wt")  \
-  V(STR_s, "str", "'St")      \
-  V(STR_d, "str", "'Dt")      \
-  V(LDR_s, "ldr", "'St")      \
-  V(LDR_d, "ldr", "'Dt")
-
-void Disassembler::VisitLoadStorePreIndex(const Instruction* instr) {
-  const char *mnemonic = "unimplemented";
-  const char *form = "(LoadStorePreIndex)";
-
-  switch (instr->Mask(LoadStorePreIndexMask)) {
-    #define LS_PREINDEX(A, B, C) \
-    case A##_pre: mnemonic = B; form = C ", ['Xns'ILS]!"; break;
-    LOAD_STORE_LIST(LS_PREINDEX)
-    #undef LS_PREINDEX
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitLoadStorePostIndex(const Instruction* instr) {
-  const char *mnemonic = "unimplemented";
-  const char *form = "(LoadStorePostIndex)";
-
-  switch (instr->Mask(LoadStorePostIndexMask)) {
-    #define LS_POSTINDEX(A, B, C) \
-    case A##_post: mnemonic = B; form = C ", ['Xns]'ILS"; break;
-    LOAD_STORE_LIST(LS_POSTINDEX)
-    #undef LS_POSTINDEX
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitLoadStoreUnsignedOffset(const Instruction* instr) {
-  const char *mnemonic = "unimplemented";
-  const char *form = "(LoadStoreUnsignedOffset)";
-
-  switch (instr->Mask(LoadStoreUnsignedOffsetMask)) {
-    #define LS_UNSIGNEDOFFSET(A, B, C) \
-    case A##_unsigned: mnemonic = B; form = C ", ['Xns'ILU]"; break;
-    LOAD_STORE_LIST(LS_UNSIGNEDOFFSET)
-    #undef LS_UNSIGNEDOFFSET
-    case PRFM_unsigned: mnemonic = "prfm"; form = "'PrefOp, ['Xns'ILU]";
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitLoadStoreRegisterOffset(const Instruction* instr) {
-  const char *mnemonic = "unimplemented";
-  const char *form = "(LoadStoreRegisterOffset)";
-
-  switch (instr->Mask(LoadStoreRegisterOffsetMask)) {
-    #define LS_REGISTEROFFSET(A, B, C) \
-    case A##_reg: mnemonic = B; form = C ", ['Xns, 'Offsetreg]"; break;
-    LOAD_STORE_LIST(LS_REGISTEROFFSET)
-    #undef LS_REGISTEROFFSET
-    case PRFM_reg: mnemonic = "prfm"; form = "'PrefOp, ['Xns, 'Offsetreg]";
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitLoadStoreUnscaledOffset(const Instruction* instr) {
-  const char *mnemonic = "unimplemented";
-  const char *form = "'Wt, ['Xns'ILS]";
-  const char *form_x = "'Xt, ['Xns'ILS]";
-  const char *form_s = "'St, ['Xns'ILS]";
-  const char *form_d = "'Dt, ['Xns'ILS]";
-  const char *form_prefetch = "'PrefOp, ['Xns'ILS]";
-
-  switch (instr->Mask(LoadStoreUnscaledOffsetMask)) {
-    case STURB_w:  mnemonic = "sturb"; break;
-    case STURH_w:  mnemonic = "sturh"; break;
-    case STUR_w:   mnemonic = "stur"; break;
-    case STUR_x:   mnemonic = "stur"; form = form_x; break;
-    case STUR_s:   mnemonic = "stur"; form = form_s; break;
-    case STUR_d:   mnemonic = "stur"; form = form_d; break;
-    case LDURB_w:  mnemonic = "ldurb"; break;
-    case LDURH_w:  mnemonic = "ldurh"; break;
-    case LDUR_w:   mnemonic = "ldur"; break;
-    case LDUR_x:   mnemonic = "ldur"; form = form_x; break;
-    case LDUR_s:   mnemonic = "ldur"; form = form_s; break;
-    case LDUR_d:   mnemonic = "ldur"; form = form_d; break;
-    case LDURSB_x: form = form_x;  // Fall through.
-    case LDURSB_w: mnemonic = "ldursb"; break;
-    case LDURSH_x: form = form_x;  // Fall through.
-    case LDURSH_w: mnemonic = "ldursh"; break;
-    case LDURSW_x: mnemonic = "ldursw"; form = form_x; break;
-    case PRFUM:    mnemonic = "prfum"; form = form_prefetch; break;
-    default: form = "(LoadStoreUnscaledOffset)";
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitLoadLiteral(const Instruction* instr) {
-  const char *mnemonic = "ldr";
-  const char *form = "(LoadLiteral)";
-
-  switch (instr->Mask(LoadLiteralMask)) {
-    case LDR_w_lit: form = "'Wt, 'ILLiteral 'LValue"; break;
-    case LDR_x_lit: form = "'Xt, 'ILLiteral 'LValue"; break;
-    case LDR_s_lit: form = "'St, 'ILLiteral 'LValue"; break;
-    case LDR_d_lit: form = "'Dt, 'ILLiteral 'LValue"; break;
-    case LDRSW_x_lit: {
-      mnemonic = "ldrsw";
-      form = "'Xt, 'ILLiteral 'LValue";
-      break;
-    }
-    case PRFM_lit: {
-      mnemonic = "prfm";
-      form = "'PrefOp, 'ILLiteral 'LValue";
-      break;
-    }
-    default: mnemonic = "unimplemented";
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-#define LOAD_STORE_PAIR_LIST(V)         \
-  V(STP_w, "stp", "'Wt, 'Wt2", "4")     \
-  V(LDP_w, "ldp", "'Wt, 'Wt2", "4")     \
-  V(LDPSW_x, "ldpsw", "'Xt, 'Xt2", "4") \
-  V(STP_x, "stp", "'Xt, 'Xt2", "8")     \
-  V(LDP_x, "ldp", "'Xt, 'Xt2", "8")     \
-  V(STP_s, "stp", "'St, 'St2", "4")     \
-  V(LDP_s, "ldp", "'St, 'St2", "4")     \
-  V(STP_d, "stp", "'Dt, 'Dt2", "8")     \
-  V(LDP_d, "ldp", "'Dt, 'Dt2", "8")
-
-void Disassembler::VisitLoadStorePairPostIndex(const Instruction* instr) {
-  const char *mnemonic = "unimplemented";
-  const char *form = "(LoadStorePairPostIndex)";
-
-  switch (instr->Mask(LoadStorePairPostIndexMask)) {
-    #define LSP_POSTINDEX(A, B, C, D) \
-    case A##_post: mnemonic = B; form = C ", ['Xns]'ILP" D; break;
-    LOAD_STORE_PAIR_LIST(LSP_POSTINDEX)
-    #undef LSP_POSTINDEX
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitLoadStorePairPreIndex(const Instruction* instr) {
-  const char *mnemonic = "unimplemented";
-  const char *form = "(LoadStorePairPreIndex)";
-
-  switch (instr->Mask(LoadStorePairPreIndexMask)) {
-    #define LSP_PREINDEX(A, B, C, D) \
-    case A##_pre: mnemonic = B; form = C ", ['Xns'ILP" D "]!"; break;
-    LOAD_STORE_PAIR_LIST(LSP_PREINDEX)
-    #undef LSP_PREINDEX
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitLoadStorePairOffset(const Instruction* instr) {
-  const char *mnemonic = "unimplemented";
-  const char *form = "(LoadStorePairOffset)";
-
-  switch (instr->Mask(LoadStorePairOffsetMask)) {
-    #define LSP_OFFSET(A, B, C, D) \
-    case A##_off: mnemonic = B; form = C ", ['Xns'ILP" D "]"; break;
-    LOAD_STORE_PAIR_LIST(LSP_OFFSET)
-    #undef LSP_OFFSET
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitLoadStorePairNonTemporal(const Instruction* instr) {
-  const char *mnemonic = "unimplemented";
-  const char *form;
-
-  switch (instr->Mask(LoadStorePairNonTemporalMask)) {
-    case STNP_w: mnemonic = "stnp"; form = "'Wt, 'Wt2, ['Xns'ILP4]"; break;
-    case LDNP_w: mnemonic = "ldnp"; form = "'Wt, 'Wt2, ['Xns'ILP4]"; break;
-    case STNP_x: mnemonic = "stnp"; form = "'Xt, 'Xt2, ['Xns'ILP8]"; break;
-    case LDNP_x: mnemonic = "ldnp"; form = "'Xt, 'Xt2, ['Xns'ILP8]"; break;
-    case STNP_s: mnemonic = "stnp"; form = "'St, 'St2, ['Xns'ILP4]"; break;
-    case LDNP_s: mnemonic = "ldnp"; form = "'St, 'St2, ['Xns'ILP4]"; break;
-    case STNP_d: mnemonic = "stnp"; form = "'Dt, 'Dt2, ['Xns'ILP8]"; break;
-    case LDNP_d: mnemonic = "ldnp"; form = "'Dt, 'Dt2, ['Xns'ILP8]"; break;
-    default: form = "(LoadStorePairNonTemporal)";
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitLoadStoreExclusive(const Instruction* instr) {
-  const char *mnemonic = "unimplemented";
-  const char *form;
-
-  switch (instr->Mask(LoadStoreExclusiveMask)) {
-    case STXRB_w: mnemonic = "stxrb"; form = "'Ws, 'Wt, ['Xns]"; break;
-    case STXRH_w: mnemonic = "stxrh"; form = "'Ws, 'Wt, ['Xns]"; break;
-    case STXR_w: mnemonic = "stxr"; form = "'Ws, 'Wt, ['Xns]"; break;
-    case STXR_x: mnemonic = "stxr"; form = "'Ws, 'Xt, ['Xns]"; break;
-    case LDXRB_w: mnemonic = "ldxrb"; form = "'Wt, ['Xns]"; break;
-    case LDXRH_w: mnemonic = "ldxrh"; form = "'Wt, ['Xns]"; break;
-    case LDXR_w: mnemonic = "ldxr"; form = "'Wt, ['Xns]"; break;
-    case LDXR_x: mnemonic = "ldxr"; form = "'Xt, ['Xns]"; break;
-    case STXP_w: mnemonic = "stxp"; form = "'Ws, 'Wt, 'Wt2, ['Xns]"; break;
-    case STXP_x: mnemonic = "stxp"; form = "'Ws, 'Xt, 'Xt2, ['Xns]"; break;
-    case LDXP_w: mnemonic = "ldxp"; form = "'Wt, 'Wt2, ['Xns]"; break;
-    case LDXP_x: mnemonic = "ldxp"; form = "'Xt, 'Xt2, ['Xns]"; break;
-    case STLXRB_w: mnemonic = "stlxrb"; form = "'Ws, 'Wt, ['Xns]"; break;
-    case STLXRH_w: mnemonic = "stlxrh"; form = "'Ws, 'Wt, ['Xns]"; break;
-    case STLXR_w: mnemonic = "stlxr"; form = "'Ws, 'Wt, ['Xns]"; break;
-    case STLXR_x: mnemonic = "stlxr"; form = "'Ws, 'Xt, ['Xns]"; break;
-    case LDAXRB_w: mnemonic = "ldaxrb"; form = "'Wt, ['Xns]"; break;
-    case LDAXRH_w: mnemonic = "ldaxrh"; form = "'Wt, ['Xns]"; break;
-    case LDAXR_w: mnemonic = "ldaxr"; form = "'Wt, ['Xns]"; break;
-    case LDAXR_x: mnemonic = "ldaxr"; form = "'Xt, ['Xns]"; break;
-    case STLXP_w: mnemonic = "stlxp"; form = "'Ws, 'Wt, 'Wt2, ['Xns]"; break;
-    case STLXP_x: mnemonic = "stlxp"; form = "'Ws, 'Xt, 'Xt2, ['Xns]"; break;
-    case LDAXP_w: mnemonic = "ldaxp"; form = "'Wt, 'Wt2, ['Xns]"; break;
-    case LDAXP_x: mnemonic = "ldaxp"; form = "'Xt, 'Xt2, ['Xns]"; break;
-    case STLRB_w: mnemonic = "stlrb"; form = "'Wt, ['Xns]"; break;
-    case STLRH_w: mnemonic = "stlrh"; form = "'Wt, ['Xns]"; break;
-    case STLR_w: mnemonic = "stlr"; form = "'Wt, ['Xns]"; break;
-    case STLR_x: mnemonic = "stlr"; form = "'Xt, ['Xns]"; break;
-    case LDARB_w: mnemonic = "ldarb"; form = "'Wt, ['Xns]"; break;
-    case LDARH_w: mnemonic = "ldarh"; form = "'Wt, ['Xns]"; break;
-    case LDAR_w: mnemonic = "ldar"; form = "'Wt, ['Xns]"; break;
-    case LDAR_x: mnemonic = "ldar"; form = "'Xt, ['Xns]"; break;
-    default: form = "(LoadStoreExclusive)";
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitFPCompare(const Instruction* instr) {
-  const char *mnemonic = "unimplemented";
-  const char *form = "'Fn, 'Fm";
-  const char *form_zero = "'Fn, #0.0";
-
-  switch (instr->Mask(FPCompareMask)) {
-    case FCMP_s_zero:
-    case FCMP_d_zero: form = form_zero;  // Fall through.
-    case FCMP_s:
-    case FCMP_d: mnemonic = "fcmp"; break;
-    default: form = "(FPCompare)";
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitFPConditionalCompare(const Instruction* instr) {
-  const char *mnemonic = "unmplemented";
-  const char *form = "'Fn, 'Fm, 'INzcv, 'Cond";
-
-  switch (instr->Mask(FPConditionalCompareMask)) {
-    case FCCMP_s:
-    case FCCMP_d: mnemonic = "fccmp"; break;
-    case FCCMPE_s:
-    case FCCMPE_d: mnemonic = "fccmpe"; break;
-    default: form = "(FPConditionalCompare)";
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitFPConditionalSelect(const Instruction* instr) {
-  const char *mnemonic = "";
-  const char *form = "'Fd, 'Fn, 'Fm, 'Cond";
-
-  switch (instr->Mask(FPConditionalSelectMask)) {
-    case FCSEL_s:
-    case FCSEL_d: mnemonic = "fcsel"; break;
-    default: VIXL_UNREACHABLE();
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitFPDataProcessing1Source(const Instruction* instr) {
-  const char *mnemonic = "unimplemented";
-  const char *form = "'Fd, 'Fn";
-
-  switch (instr->Mask(FPDataProcessing1SourceMask)) {
-    #define FORMAT(A, B)  \
-    case A##_s:           \
-    case A##_d: mnemonic = B; break;
-    FORMAT(FMOV, "fmov");
-    FORMAT(FABS, "fabs");
-    FORMAT(FNEG, "fneg");
-    FORMAT(FSQRT, "fsqrt");
-    FORMAT(FRINTN, "frintn");
-    FORMAT(FRINTP, "frintp");
-    FORMAT(FRINTM, "frintm");
-    FORMAT(FRINTZ, "frintz");
-    FORMAT(FRINTA, "frinta");
-    FORMAT(FRINTX, "frintx");
-    FORMAT(FRINTI, "frinti");
-    #undef FORMAT
-    case FCVT_ds: mnemonic = "fcvt"; form = "'Dd, 'Sn"; break;
-    case FCVT_sd: mnemonic = "fcvt"; form = "'Sd, 'Dn"; break;
-    default: form = "(FPDataProcessing1Source)";
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitFPDataProcessing2Source(const Instruction* instr) {
-  const char *mnemonic = "";
-  const char *form = "'Fd, 'Fn, 'Fm";
-
-  switch (instr->Mask(FPDataProcessing2SourceMask)) {
-    #define FORMAT(A, B)  \
-    case A##_s:           \
-    case A##_d: mnemonic = B; break;
-    FORMAT(FMUL, "fmul");
-    FORMAT(FDIV, "fdiv");
-    FORMAT(FADD, "fadd");
-    FORMAT(FSUB, "fsub");
-    FORMAT(FMAX, "fmax");
-    FORMAT(FMIN, "fmin");
-    FORMAT(FMAXNM, "fmaxnm");
-    FORMAT(FMINNM, "fminnm");
-    FORMAT(FNMUL, "fnmul");
-    #undef FORMAT
-    default: VIXL_UNREACHABLE();
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitFPDataProcessing3Source(const Instruction* instr) {
-  const char *mnemonic = "";
-  const char *form = "'Fd, 'Fn, 'Fm, 'Fa";
-
-  switch (instr->Mask(FPDataProcessing3SourceMask)) {
-    #define FORMAT(A, B)  \
-    case A##_s:           \
-    case A##_d: mnemonic = B; break;
-    FORMAT(FMADD, "fmadd");
-    FORMAT(FMSUB, "fmsub");
-    FORMAT(FNMADD, "fnmadd");
-    FORMAT(FNMSUB, "fnmsub");
-    #undef FORMAT
-    default: VIXL_UNREACHABLE();
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitFPImmediate(const Instruction* instr) {
-  const char *mnemonic = "";
-  const char *form = "(FPImmediate)";
-
-  switch (instr->Mask(FPImmediateMask)) {
-    case FMOV_s_imm: mnemonic = "fmov"; form = "'Sd, 'IFPSingle"; break;
-    case FMOV_d_imm: mnemonic = "fmov"; form = "'Dd, 'IFPDouble"; break;
-    default: VIXL_UNREACHABLE();
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitFPIntegerConvert(const Instruction* instr) {
-  const char *mnemonic = "unimplemented";
-  const char *form = "(FPIntegerConvert)";
-  const char *form_rf = "'Rd, 'Fn";
-  const char *form_fr = "'Fd, 'Rn";
-
-  switch (instr->Mask(FPIntegerConvertMask)) {
-    case FMOV_ws:
-    case FMOV_xd: mnemonic = "fmov"; form = form_rf; break;
-    case FMOV_sw:
-    case FMOV_dx: mnemonic = "fmov"; form = form_fr; break;
-    case FCVTAS_ws:
-    case FCVTAS_xs:
-    case FCVTAS_wd:
-    case FCVTAS_xd: mnemonic = "fcvtas"; form = form_rf; break;
-    case FCVTAU_ws:
-    case FCVTAU_xs:
-    case FCVTAU_wd:
-    case FCVTAU_xd: mnemonic = "fcvtau"; form = form_rf; break;
-    case FCVTMS_ws:
-    case FCVTMS_xs:
-    case FCVTMS_wd:
-    case FCVTMS_xd: mnemonic = "fcvtms"; form = form_rf; break;
-    case FCVTMU_ws:
-    case FCVTMU_xs:
-    case FCVTMU_wd:
-    case FCVTMU_xd: mnemonic = "fcvtmu"; form = form_rf; break;
-    case FCVTNS_ws:
-    case FCVTNS_xs:
-    case FCVTNS_wd:
-    case FCVTNS_xd: mnemonic = "fcvtns"; form = form_rf; break;
-    case FCVTNU_ws:
-    case FCVTNU_xs:
-    case FCVTNU_wd:
-    case FCVTNU_xd: mnemonic = "fcvtnu"; form = form_rf; break;
-    case FCVTZU_xd:
-    case FCVTZU_ws:
-    case FCVTZU_wd:
-    case FCVTZU_xs: mnemonic = "fcvtzu"; form = form_rf; break;
-    case FCVTZS_xd:
-    case FCVTZS_wd:
-    case FCVTZS_xs:
-    case FCVTZS_ws: mnemonic = "fcvtzs"; form = form_rf; break;
-    case SCVTF_sw:
-    case SCVTF_sx:
-    case SCVTF_dw:
-    case SCVTF_dx: mnemonic = "scvtf"; form = form_fr; break;
-    case UCVTF_sw:
-    case UCVTF_sx:
-    case UCVTF_dw:
-    case UCVTF_dx: mnemonic = "ucvtf"; form = form_fr; break;
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitFPFixedPointConvert(const Instruction* instr) {
-  const char *mnemonic = "";
-  const char *form = "'Rd, 'Fn, 'IFPFBits";
-  const char *form_fr = "'Fd, 'Rn, 'IFPFBits";
-
-  switch (instr->Mask(FPFixedPointConvertMask)) {
-    case FCVTZS_ws_fixed:
-    case FCVTZS_xs_fixed:
-    case FCVTZS_wd_fixed:
-    case FCVTZS_xd_fixed: mnemonic = "fcvtzs"; break;
-    case FCVTZU_ws_fixed:
-    case FCVTZU_xs_fixed:
-    case FCVTZU_wd_fixed:
-    case FCVTZU_xd_fixed: mnemonic = "fcvtzu"; break;
-    case SCVTF_sw_fixed:
-    case SCVTF_sx_fixed:
-    case SCVTF_dw_fixed:
-    case SCVTF_dx_fixed: mnemonic = "scvtf"; form = form_fr; break;
-    case UCVTF_sw_fixed:
-    case UCVTF_sx_fixed:
-    case UCVTF_dw_fixed:
-    case UCVTF_dx_fixed: mnemonic = "ucvtf"; form = form_fr; break;
-    default: VIXL_UNREACHABLE();
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitSystem(const Instruction* instr) {
-  // Some system instructions hijack their Op and Cp fields to represent a
-  // range of immediates instead of indicating a different instruction. This
-  // makes the decoding tricky.
-  const char *mnemonic = "unimplemented";
-  const char *form = "(System)";
-
-  if (instr->Mask(SystemExclusiveMonitorFMask) == SystemExclusiveMonitorFixed) {
-    switch (instr->Mask(SystemExclusiveMonitorMask)) {
-      case CLREX: {
-        mnemonic = "clrex";
-        form = (instr->CRm() == 0xf) ? NULL : "'IX";
-        break;
-      }
-    }
-  } else if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) {
-    switch (instr->Mask(SystemSysRegMask)) {
-      case MRS: {
-        mnemonic = "mrs";
-        switch (instr->ImmSystemRegister()) {
-          case NZCV: form = "'Xt, nzcv"; break;
-          case FPCR: form = "'Xt, fpcr"; break;
-          default: form = "'Xt, (unknown)"; break;
-        }
-        break;
-      }
-      case MSR: {
-        mnemonic = "msr";
-        switch (instr->ImmSystemRegister()) {
-          case NZCV: form = "nzcv, 'Xt"; break;
-          case FPCR: form = "fpcr, 'Xt"; break;
-          default: form = "(unknown), 'Xt"; break;
-        }
-        break;
-      }
-    }
-  } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
-    switch (instr->ImmHint()) {
-      case NOP: {
-        mnemonic = "nop";
-        form = NULL;
-        break;
-      }
-    }
-  } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
-    switch (instr->Mask(MemBarrierMask)) {
-      case DMB: {
-        mnemonic = "dmb";
-        form = "'M";
-        break;
-      }
-      case DSB: {
-        mnemonic = "dsb";
-        form = "'M";
-        break;
-      }
-      case ISB: {
-        mnemonic = "isb";
-        form = NULL;
-        break;
-      }
-    }
-  }
-
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitException(const Instruction* instr) {
-  const char *mnemonic = "unimplemented";
-  const char *form = "'IDebug";
-
-  switch (instr->Mask(ExceptionMask)) {
-    case HLT: mnemonic = "hlt"; break;
-    case BRK: mnemonic = "brk"; break;
-    case SVC: mnemonic = "svc"; break;
-    case HVC: mnemonic = "hvc"; break;
-    case SMC: mnemonic = "smc"; break;
-    case DCPS1: mnemonic = "dcps1"; form = "{'IDebug}"; break;
-    case DCPS2: mnemonic = "dcps2"; form = "{'IDebug}"; break;
-    case DCPS3: mnemonic = "dcps3"; form = "{'IDebug}"; break;
-    default: form = "(Exception)";
-  }
-  Format(instr, mnemonic, form);
-}
-
-
-void Disassembler::VisitUnimplemented(const Instruction* instr) {
-  Format(instr, "unimplemented", "(Unimplemented)");
-}
-
-
-void Disassembler::VisitUnallocated(const Instruction* instr) {
-  Format(instr, "unallocated", "(Unallocated)");
-}
-
-
-void Disassembler::ProcessOutput(const Instruction* /*instr*/) {
-  // The base disasm does nothing more than disassemble into a buffer.
-}
-
-
-void Disassembler::AppendRegisterNameToOutput(const Instruction* instr,
-                                              const CPURegister& reg) {
-  USE(instr);
-  VIXL_ASSERT(reg.IsValid());
-  char reg_char;
-
-  if (reg.IsRegister()) {
-    reg_char = reg.Is64Bits() ? 'x' : 'w';
-  } else {
-    VIXL_ASSERT(reg.IsFPRegister());
-    reg_char = reg.Is64Bits() ? 'd' : 's';
-  }
-
-  if (reg.IsFPRegister() || !(reg.Aliases(sp) || reg.Aliases(xzr))) {
-    // A normal register: w0 - w30, x0 - x30, s0 - s31, d0 - d31.
-    AppendToOutput("%c%d", reg_char, reg.code());
-  } else if (reg.Aliases(sp)) {
-    // Disassemble w31/x31 as stack pointer wsp/sp.
-    AppendToOutput("%s", reg.Is64Bits() ? "sp" : "wsp");
-  } else {
-    // Disassemble w31/x31 as zero register wzr/xzr.
-    AppendToOutput("%czr", reg_char);
-  }
-}
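-
-// Illustrative examples of the rules above: a kRegister with code 0 prints
-// as "w0" or "x0" by size, an FP register as "s0" or "d0", and integer
-// register code 31 prints as "wsp"/"sp" or "wzr"/"xzr" depending on which
-// register it represents in the instruction being disassembled.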
-
-
-void Disassembler::AppendPCRelativeOffsetToOutput(const Instruction* instr,
-                                                  int64_t offset) {
-  USE(instr);
-  char sign = (offset < 0) ? '-' : '+';
-  AppendToOutput("#%c0x%" PRIx64, sign, std::abs(offset));
-}
-
-
-void Disassembler::AppendAddressToOutput(const Instruction* instr,
-                                         const void* addr) {
-  USE(instr);
-  AppendToOutput("(addr 0x%" PRIxPTR ")", reinterpret_cast<uintptr_t>(addr));
-}
-
-
-void Disassembler::AppendCodeAddressToOutput(const Instruction* instr,
-                                             const void* addr) {
-  AppendAddressToOutput(instr, addr);
-}
-
-
-void Disassembler::AppendDataAddressToOutput(const Instruction* instr,
-                                             const void* addr) {
-  AppendAddressToOutput(instr, addr);
-}
-
-
-void Disassembler::AppendCodeRelativeAddressToOutput(const Instruction* instr,
-                                                     const void* addr) {
-  USE(instr);
-  int64_t rel_addr = CodeRelativeAddress(addr);
-  if (rel_addr >= 0) {
-    AppendToOutput("(addr 0x%" PRIx64 ")", rel_addr);
-  } else {
-    AppendToOutput("(addr -0x%" PRIx64 ")", -rel_addr);
-  }
-}
-
-
-void Disassembler::AppendCodeRelativeCodeAddressToOutput(
-    const Instruction* instr, const void* addr) {
-  AppendCodeRelativeAddressToOutput(instr, addr);
-}
-
-
-void Disassembler::AppendCodeRelativeDataAddressToOutput(
-    const Instruction* instr, const void* addr) {
-  AppendCodeRelativeAddressToOutput(instr, addr);
-}
-
-
-void Disassembler::MapCodeAddress(int64_t base_address,
-                                  const Instruction* instr_address) {
-  set_code_address_offset(
-      base_address - reinterpret_cast<intptr_t>(instr_address));
-}
-
-
-int64_t Disassembler::CodeRelativeAddress(const void* addr) {
-  return reinterpret_cast<intptr_t>(addr) + code_address_offset();
-}
-
-
-void Disassembler::Format(const Instruction* instr, const char* mnemonic,
-                          const char* format) {
-  VIXL_ASSERT(mnemonic != NULL);
-  ResetOutput();
-  Substitute(instr, mnemonic);
-  if (format != NULL) {
-    buffer_[buffer_pos_++] = ' ';
-    Substitute(instr, format);
-  }
-  buffer_[buffer_pos_] = 0;
-  ProcessOutput(instr);
-}
-
-
-void Disassembler::Substitute(const Instruction* instr, const char* string) {
-  char chr = *string++;
-  while (chr != '\0') {
-    if (chr == '\'') {
-      string += SubstituteField(instr, string);
-    } else {
-      buffer_[buffer_pos_++] = chr;
-    }
-    chr = *string++;
-  }
-}
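-
-// Worked example (illustrative): for a single-precision fcmp, Format() first
-// substitutes the mnemonic and then the form "'Fn, 'Fm"; each ' escape is
-// handed to SubstituteField(), which appends the decoded field, producing
-// e.g. "fcmp s1, s2".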
-
-
-int Disassembler::SubstituteField(const Instruction* instr,
-                                  const char* format) {
-  switch (format[0]) {
-    case 'R':  // Register. X or W, selected by sf bit.
-    case 'F':  // FP Register. S or D, selected by type field.
-    case 'W':
-    case 'X':
-    case 'S':
-    case 'D': return SubstituteRegisterField(instr, format);
-    case 'I': return SubstituteImmediateField(instr, format);
-    case 'L': return SubstituteLiteralField(instr, format);
-    case 'H': return SubstituteShiftField(instr, format);
-    case 'P': return SubstitutePrefetchField(instr, format);
-    case 'C': return SubstituteConditionField(instr, format);
-    case 'E': return SubstituteExtendField(instr, format);
-    case 'A': return SubstitutePCRelAddressField(instr, format);
-    case 'B': return SubstituteBranchTargetField(instr, format);
-    case 'O': return SubstituteLSRegOffsetField(instr, format);
-    case 'M': return SubstituteBarrierField(instr, format);
-    default: {
-      VIXL_UNREACHABLE();
-      return 1;
-    }
-  }
-}
-
-
-int Disassembler::SubstituteRegisterField(const Instruction* instr,
-                                          const char* format) {
-  unsigned reg_num = 0;
-  unsigned field_len = 2;
-  switch (format[1]) {
-    case 'd': reg_num = instr->Rd(); break;
-    case 'n': reg_num = instr->Rn(); break;
-    case 'm': reg_num = instr->Rm(); break;
-    case 'a': reg_num = instr->Ra(); break;
-    case 's': reg_num = instr->Rs(); break;
-    case 't': {
-      if (format[2] == '2') {
-        reg_num = instr->Rt2();
-        field_len = 3;
-      } else {
-        reg_num = instr->Rt();
-      }
-      break;
-    }
-    default: VIXL_UNREACHABLE();
-  }
-
-  // Increase field length for registers tagged as stack.
-  if (format[2] == 's') {
-    field_len = 3;
-  }
-
-  CPURegister::RegisterType reg_type;
-  unsigned reg_size;
-
-  if (format[0] == 'R') {
-    // Register type is R: use sf bit to choose X and W.
-    reg_type = CPURegister::kRegister;
-    reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
-  } else if (format[0] == 'F') {
-    // Floating-point register: use type field to choose S or D.
-    reg_type = CPURegister::kFPRegister;
-    reg_size = ((instr->FPType() & 1) == 0) ? kSRegSize : kDRegSize;
-  } else {
-    // The register type is specified.
-    switch (format[0]) {
-      case 'W':
-        reg_type = CPURegister::kRegister; reg_size = kWRegSize; break;
-      case 'X':
-        reg_type = CPURegister::kRegister; reg_size = kXRegSize; break;
-      case 'S':
-        reg_type = CPURegister::kFPRegister; reg_size = kSRegSize; break;
-      case 'D':
-        reg_type = CPURegister::kFPRegister; reg_size = kDRegSize; break;
-      default:
-        VIXL_UNREACHABLE();
-        reg_type = CPURegister::kRegister;
-        reg_size = kXRegSize;
-    }
-  }
-
-  if ((reg_type == CPURegister::kRegister) &&
-      (reg_num == kZeroRegCode) && (format[2] == 's')) {
-    reg_num = kSPRegInternalCode;
-  }
-
-  AppendRegisterNameToOutput(instr, CPURegister(reg_num, reg_size, reg_type));
-
-  return field_len;
-}
-
-
-int Disassembler::SubstituteImmediateField(const Instruction* instr,
-                                           const char* format) {
-  VIXL_ASSERT(format[0] == 'I');
-
-  switch (format[1]) {
-    case 'M': {  // IMoveImm, IMoveNeg or IMoveLSL.
-      if (format[5] == 'L') {
-        AppendToOutput("#0x%" PRIx64, instr->ImmMoveWide());
-        if (instr->ShiftMoveWide() > 0) {
-          AppendToOutput(", lsl #%" PRId64, 16 * instr->ShiftMoveWide());
-        }
-      } else {
-        VIXL_ASSERT((format[5] == 'I') || (format[5] == 'N'));
-        uint64_t imm = instr->ImmMoveWide() << (16 * instr->ShiftMoveWide());
-        if (format[5] == 'N')
-          imm = ~imm;
-        if (!instr->SixtyFourBits())
-          imm &= UINT64_C(0xffffffff);
-        AppendToOutput("#0x%" PRIx64, imm);
-      }
-      return 8;
-    }
-    case 'L': {
-      switch (format[2]) {
-        case 'L': {  // ILLiteral - Immediate Load Literal.
-          AppendToOutput("pc%+" PRId64,
-                         instr->ImmLLiteral() << kLiteralEntrySizeLog2);
-          return 9;
-        }
-        case 'S': {  // ILS - Immediate Load/Store.
-          if (instr->ImmLS() != 0) {
-            AppendToOutput(", #%" PRId64, instr->ImmLS());
-          }
-          return 3;
-        }
-        case 'P': {  // ILPx - Immediate Load/Store Pair, x = access size.
-          if (instr->ImmLSPair() != 0) {
-            // format[3] is the scale value. Convert to a number.
-            int scale = format[3] - 0x30;
-            AppendToOutput(", #%" PRId64, instr->ImmLSPair() * scale);
-          }
-          return 4;
-        }
-        case 'U': {  // ILU - Immediate Load/Store Unsigned.
-          if (instr->ImmLSUnsigned() != 0) {
-            AppendToOutput(", #%" PRIu64,
-                           instr->ImmLSUnsigned() << instr->SizeLS());
-          }
-          return 3;
-        }
-      }
-    }
-    case 'C': {  // ICondB - Immediate Conditional Branch.
-      int64_t offset = instr->ImmCondBranch() << 2;
-      AppendPCRelativeOffsetToOutput(instr, offset);
-      return 6;
-    }
-    case 'A': {  // IAddSub.
-      VIXL_ASSERT(instr->ShiftAddSub() <= 1);
-      int64_t imm = instr->ImmAddSub() << (12 * instr->ShiftAddSub());
-      AppendToOutput("#0x%" PRIx64 " (%" PRId64 ")", imm, imm);
-      return 7;
-    }
-    case 'F': {  // IFPSingle, IFPDouble or IFPFBits.
-      if (format[3] == 'F') {  // IFPFBits.
-        AppendToOutput("#%" PRId64, 64 - instr->FPScale());
-        return 8;
-      } else {
-        AppendToOutput("#0x%" PRIx64 " (%.4f)", instr->ImmFP(),
-                       format[3] == 'S' ? instr->ImmFP32() : instr->ImmFP64());
-        return 9;
-      }
-    }
-    case 'T': {  // ITri - Immediate Triangular Encoded.
-      AppendToOutput("#0x%" PRIx64, instr->ImmLogical());
-      return 4;
-    }
-    case 'N': {  // INzcv.
-      int nzcv = (instr->Nzcv() << Flags_offset);
-      AppendToOutput("#%c%c%c%c", ((nzcv & NFlag) == 0) ? 'n' : 'N',
-                                  ((nzcv & ZFlag) == 0) ? 'z' : 'Z',
-                                  ((nzcv & CFlag) == 0) ? 'c' : 'C',
-                                  ((nzcv & VFlag) == 0) ? 'v' : 'V');
-      return 5;
-    }
-    case 'P': {  // IP - Conditional compare.
-      AppendToOutput("#%" PRId64, instr->ImmCondCmp());
-      return 2;
-    }
-    case 'B': {  // Bitfields.
-      return SubstituteBitfieldImmediateField(instr, format);
-    }
-    case 'E': {  // IExtract.
-      AppendToOutput("#%" PRId64, instr->ImmS());
-      return 8;
-    }
-    case 'S': {  // IS - Test and branch bit.
-      AppendToOutput("#%" PRId64, (instr->ImmTestBranchBit5() << 5) |
-                                  instr->ImmTestBranchBit40());
-      return 2;
-    }
-    case 'D': {  // IDebug - HLT and BRK instructions.
-      AppendToOutput("#0x%" PRIx64, instr->ImmException());
-      return 6;
-    }
-    case 'X': {  // IX - CLREX instruction.
-      AppendToOutput("#0x%" PRIx64, instr->CRm());
-      return 2;
-    }
-    default: {
-      VIXL_UNIMPLEMENTED();
-      return 0;
-    }
-  }
-}
-
-
-int Disassembler::SubstituteBitfieldImmediateField(const Instruction* instr,
-                                                   const char* format) {
-  VIXL_ASSERT((format[0] == 'I') && (format[1] == 'B'));
-  unsigned r = instr->ImmR();
-  unsigned s = instr->ImmS();
-
-  switch (format[2]) {
-    case 'r': {  // IBr.
-      AppendToOutput("#%d", r);
-      return 3;
-    }
-    case 's': {  // IBs+1 or IBs-r+1.
-      if (format[3] == '+') {
-        AppendToOutput("#%d", s + 1);
-        return 5;
-      } else {
-        VIXL_ASSERT(format[3] == '-');
-        AppendToOutput("#%d", s - r + 1);
-        return 7;
-      }
-    }
-    case 'Z': {  // IBZ-r.
-      VIXL_ASSERT((format[3] == '-') && (format[4] == 'r'));
-      unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSize : kWRegSize;
-      AppendToOutput("#%d", reg_size - r);
-      return 5;
-    }
-    default: {
-      VIXL_UNREACHABLE();
-      return 0;
-    }
-  }
-}
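-
-// Worked example (illustrative, assuming the bitfield visitor formats ubfx
-// as "'Rd, 'Rn, 'IBr, 'IBs-r+1"): immr=8 and imms=11 print as "#8, #4",
-// i.e. extract 4 bits starting at lsb 8.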
-
-
-int Disassembler::SubstituteLiteralField(const Instruction* instr,
-                                         const char* format) {
-  VIXL_ASSERT(strncmp(format, "LValue", 6) == 0);
-  USE(format);
-
-  const void * address = instr->LiteralAddress<const void *>();
-  switch (instr->Mask(LoadLiteralMask)) {
-    case LDR_w_lit:
-    case LDR_x_lit:
-    case LDRSW_x_lit:
-    case LDR_s_lit:
-    case LDR_d_lit:
-      AppendCodeRelativeDataAddressToOutput(instr, address);
-      break;
-    case PRFM_lit: {
-      // Use the prefetch hint to decide how to print the address.
-      switch (instr->PrefetchHint()) {
-        case 0x0:     // PLD: prefetch for load.
-        case 0x2:     // PST: prepare for store.
-          AppendCodeRelativeDataAddressToOutput(instr, address);
-          break;
-        case 0x1:     // PLI: preload instructions.
-          AppendCodeRelativeCodeAddressToOutput(instr, address);
-          break;
-        case 0x3:     // Unallocated hint.
-          AppendCodeRelativeAddressToOutput(instr, address);
-          break;
-      }
-      break;
-    }
-    default:
-      VIXL_UNREACHABLE();
-  }
-
-  return 6;
-}
-
-
-int Disassembler::SubstituteShiftField(const Instruction* instr,
-                                       const char* format) {
-  VIXL_ASSERT(format[0] == 'H');
-  VIXL_ASSERT(instr->ShiftDP() <= 0x3);
-
-  switch (format[1]) {
-    case 'D': {  // HDP.
-      VIXL_ASSERT(instr->ShiftDP() != ROR);
-    }  // Fall through.
-    case 'L': {  // HLo.
-      if (instr->ImmDPShift() != 0) {
-        const char* shift_type[] = {"lsl", "lsr", "asr", "ror"};
-        AppendToOutput(", %s #%" PRId64, shift_type[instr->ShiftDP()],
-                       instr->ImmDPShift());
-      }
-      return 3;
-    }
-    default:
-      VIXL_UNIMPLEMENTED();
-      return 0;
-  }
-}
-
-
-int Disassembler::SubstituteConditionField(const Instruction* instr,
-                                           const char* format) {
-  VIXL_ASSERT(format[0] == 'C');
-  const char* condition_code[] = { "eq", "ne", "hs", "lo",
-                                   "mi", "pl", "vs", "vc",
-                                   "hi", "ls", "ge", "lt",
-                                   "gt", "le", "al", "nv" };
-  int cond;
-  switch (format[1]) {
-    case 'B': cond = instr->ConditionBranch(); break;
-    case 'I': {
-      cond = InvertCondition(static_cast<Condition>(instr->Condition()));
-      break;
-    }
-    default: cond = instr->Condition();
-  }
-  AppendToOutput("%s", condition_code[cond]);
-  return 4;
-}
-
-
-int Disassembler::SubstitutePCRelAddressField(const Instruction* instr,
-                                              const char* format) {
-  VIXL_ASSERT((strcmp(format, "AddrPCRelByte") == 0) ||   // Used by `adr`.
-              (strcmp(format, "AddrPCRelPage") == 0));    // Used by `adrp`.
-
-  int64_t offset = instr->ImmPCRel();
-
-  // Compute the target address based on the effective address (after applying
-  // code_address_offset). This is required for correct behaviour of adrp.
-  const Instruction* base = instr + code_address_offset();
-  if (format[9] == 'P') {
-    offset *= kPageSize;
-    base = AlignDown(base, kPageSize);
-  }
-  // Strip code_address_offset before printing, so we can use the
-  // semantically-correct AppendCodeRelativeAddressToOutput.
-  const void* target =
-      reinterpret_cast<const void*>(base + offset - code_address_offset());
-
-  AppendPCRelativeOffsetToOutput(instr, offset);
-  AppendToOutput(" ");
-  AppendCodeRelativeAddressToOutput(instr, target);
-  return 13;
-}
-
-
-int Disassembler::SubstituteBranchTargetField(const Instruction* instr,
-                                              const char* format) {
-  VIXL_ASSERT(strncmp(format, "BImm", 4) == 0);
-
-  int64_t offset = 0;
-  switch (format[5]) {
-    // BImmUncn - unconditional branch immediate.
-    case 'n': offset = instr->ImmUncondBranch(); break;
-    // BImmCond - conditional branch immediate.
-    case 'o': offset = instr->ImmCondBranch(); break;
-    // BImmCmpa - compare and branch immediate.
-    case 'm': offset = instr->ImmCmpBranch(); break;
-    // BImmTest - test and branch immediate.
-    case 'e': offset = instr->ImmTestBranch(); break;
-    default: VIXL_UNIMPLEMENTED();
-  }
-  offset <<= kInstructionSizeLog2;
-  const void* target_address = reinterpret_cast<const void*>(instr + offset);
-  VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
-
-  AppendPCRelativeOffsetToOutput(instr, offset);
-  AppendToOutput(" ");
-  AppendCodeRelativeCodeAddressToOutput(instr, target_address);
-
-  return 8;
-}
-
-
-int Disassembler::SubstituteExtendField(const Instruction* instr,
-                                        const char* format) {
-  VIXL_ASSERT(strncmp(format, "Ext", 3) == 0);
-  VIXL_ASSERT(instr->ExtendMode() <= 7);
-  USE(format);
-
-  const char* extend_mode[] = { "uxtb", "uxth", "uxtw", "uxtx",
-                                "sxtb", "sxth", "sxtw", "sxtx" };
-
-  // If rd or rn is SP, uxtw on 32-bit registers and uxtx on 64-bit
-  // registers become lsl.
-  if (((instr->Rd() == kZeroRegCode) || (instr->Rn() == kZeroRegCode)) &&
-      (((instr->ExtendMode() == UXTW) && (instr->SixtyFourBits() == 0)) ||
-       (instr->ExtendMode() == UXTX))) {
-    if (instr->ImmExtendShift() > 0) {
-      AppendToOutput(", lsl #%" PRId64, instr->ImmExtendShift());
-    }
-  } else {
-    AppendToOutput(", %s", extend_mode[instr->ExtendMode()]);
-    if (instr->ImmExtendShift() > 0) {
-      AppendToOutput(" #%" PRId64, instr->ImmExtendShift());
-    }
-  }
-  return 3;
-}
-
-
-int Disassembler::SubstituteLSRegOffsetField(const Instruction* instr,
-                                             const char* format) {
-  VIXL_ASSERT(strncmp(format, "Offsetreg", 9) == 0);
-  const char* extend_mode[] = { "undefined", "undefined", "uxtw", "lsl",
-                                "undefined", "undefined", "sxtw", "sxtx" };
-  USE(format);
-
-  unsigned shift = instr->ImmShiftLS();
-  Extend ext = static_cast<Extend>(instr->ExtendMode());
-  char reg_type = ((ext == UXTW) || (ext == SXTW)) ? 'w' : 'x';
-
-  unsigned rm = instr->Rm();
-  if (rm == kZeroRegCode) {
-    AppendToOutput("%czr", reg_type);
-  } else {
-    AppendToOutput("%c%d", reg_type, rm);
-  }
-
-  // Extend mode UXTX is an alias for shift mode LSL here.
-  if (!((ext == UXTX) && (shift == 0))) {
-    AppendToOutput(", %s", extend_mode[ext]);
-    if (shift != 0) {
-      AppendToOutput(" #%" PRId64, instr->SizeLS());
-    }
-  }
-  return 9;
-}
-
-
-int Disassembler::SubstitutePrefetchField(const Instruction* instr,
-                                          const char* format) {
-  VIXL_ASSERT(format[0] == 'P');
-  USE(format);
-
-  static const char* hints[] = {"ld", "li", "st"};
-  static const char* stream_options[] = {"keep", "strm"};
-
-  unsigned hint = instr->PrefetchHint();
-  unsigned target = instr->PrefetchTarget() + 1;
-  unsigned stream = instr->PrefetchStream();
-
-  if ((hint >= (sizeof(hints) / sizeof(hints[0]))) || (target > 3)) {
-    // Unallocated prefetch operations.
-    int prefetch_mode = instr->ImmPrefetchOperation();
-    AppendToOutput("#0b%c%c%c%c%c",
-                   (prefetch_mode & (1 << 4)) ? '1' : '0',
-                   (prefetch_mode & (1 << 3)) ? '1' : '0',
-                   (prefetch_mode & (1 << 2)) ? '1' : '0',
-                   (prefetch_mode & (1 << 1)) ? '1' : '0',
-                   (prefetch_mode & (1 << 0)) ? '1' : '0');
-  } else {
-    VIXL_ASSERT(stream < (sizeof(stream_options) / sizeof(stream_options[0])));
-    AppendToOutput("p%sl%d%s", hints[hint], target, stream_options[stream]);
-  }
-  return 6;
-}
-
-int Disassembler::SubstituteBarrierField(const Instruction* instr,
-                                         const char* format) {
-  VIXL_ASSERT(format[0] == 'M');
-  USE(format);
-
-  static const char* options[4][4] = {
-    { "sy (0b0000)", "oshld", "oshst", "osh" },
-    { "sy (0b0100)", "nshld", "nshst", "nsh" },
-    { "sy (0b1000)", "ishld", "ishst", "ish" },
-    { "sy (0b1100)", "ld", "st", "sy" }
-  };
-  int domain = instr->ImmBarrierDomain();
-  int type = instr->ImmBarrierType();
-
-  AppendToOutput("%s", options[domain][type]);
-  return 1;
-}
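-
-// Example (illustrative): "dmb ish" encodes barrier domain 0b10 (inner
-// shareable) and type 0b11, selecting options[2][3] == "ish" above.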
-
-void Disassembler::ResetOutput() {
-  buffer_pos_ = 0;
-  buffer_[buffer_pos_] = 0;
-}
-
-
-void Disassembler::AppendToOutput(const char* format, ...) {
-  va_list args;
-  va_start(args, format);
-  buffer_pos_ += vsnprintf(&buffer_[buffer_pos_], buffer_size_, format, args);
-  va_end(args);
-}
-
-
-void PrintDisassembler::ProcessOutput(const Instruction* instr) {
-  fprintf(stream_, "0x%016" PRIx64 "  %08" PRIx32 "\t\t%s\n",
-          reinterpret_cast<uint64_t>(instr),
-          instr->InstructionBits(),
-          GetOutput());
-}
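-
-
-// Typical use of these classes (a sketch, assuming the Decoder interface
-// from decoder-a64.h):
-//
-//   vixl::Decoder decoder;
-//   vixl::PrintDisassembler disasm(stdout);
-//   decoder.AppendVisitor(&disasm);
-//   decoder.Decode(instr);  // One line per instruction, as formatted above.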
-}  // namespace vixl
diff --git a/disas/libvixl/a64/instructions-a64.cc b/disas/libvixl/a64/instructions-a64.cc
deleted file mode 100644
index b091886838..0000000000
--- a/disas/libvixl/a64/instructions-a64.cc
+++ /dev/null
@@ -1,314 +0,0 @@
-// Copyright 2013, ARM Limited
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-//   * Redistributions of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-//   * Redistributions in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other materials provided with the distribution.
-//   * Neither the name of ARM Limited nor the names of its contributors may be
-//     used to endorse or promote products derived from this software without
-//     specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
-// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "a64/instructions-a64.h"
-#include "a64/assembler-a64.h"
-
-namespace vixl {
-
-
-// Floating-point infinity values.
-const float kFP32PositiveInfinity = rawbits_to_float(0x7f800000);
-const float kFP32NegativeInfinity = rawbits_to_float(0xff800000);
-const double kFP64PositiveInfinity =
-    rawbits_to_double(UINT64_C(0x7ff0000000000000));
-const double kFP64NegativeInfinity =
-    rawbits_to_double(UINT64_C(0xfff0000000000000));
-
-
-// The default NaN values (for FPCR.DN=1).
-const double kFP64DefaultNaN = rawbits_to_double(UINT64_C(0x7ff8000000000000));
-const float kFP32DefaultNaN = rawbits_to_float(0x7fc00000);
-
-
-static uint64_t RotateRight(uint64_t value,
-                            unsigned int rotate,
-                            unsigned int width) {
-  VIXL_ASSERT(width <= 64);
-  rotate &= 63;
-  return ((value & ((UINT64_C(1) << rotate) - 1)) <<
-          (width - rotate)) | (value >> rotate);
-}
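-
-// Example (illustrative): RotateRight(0x3, 1, 4) moves the low bit to bit 3,
-// giving 0x9 (0b1001).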
-
-
-static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
-                                    uint64_t value,
-                                    unsigned width) {
-  VIXL_ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
-              (width == 32));
-  VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
-  uint64_t result = value & ((UINT64_C(1) << width) - 1);
-  for (unsigned i = width; i < reg_size; i *= 2) {
-    result |= (result << i);
-  }
-  return result;
-}
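-
-// Example (illustrative): RepeatBitsAcrossReg(32, 0x0f, 8) tiles the 8-bit
-// pattern across the register, giving 0x0f0f0f0f.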
-
-
-bool Instruction::IsLoad() const {
-  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
-    return false;
-  }
-
-  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
-    return Mask(LoadStorePairLBit) != 0;
-  } else {
-    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
-    switch (op) {
-      case LDRB_w:
-      case LDRH_w:
-      case LDR_w:
-      case LDR_x:
-      case LDRSB_w:
-      case LDRSB_x:
-      case LDRSH_w:
-      case LDRSH_x:
-      case LDRSW_x:
-      case LDR_s:
-      case LDR_d: return true;
-      default: return false;
-    }
-  }
-}
-
-
-bool Instruction::IsStore() const {
-  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
-    return false;
-  }
-
-  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
-    return Mask(LoadStorePairLBit) == 0;
-  } else {
-    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
-    switch (op) {
-      case STRB_w:
-      case STRH_w:
-      case STR_w:
-      case STR_x:
-      case STR_s:
-      case STR_d: return true;
-      default: return false;
-    }
-  }
-}
-
-
-// Logical immediates can't encode zero, so a return value of zero is used to
-// indicate a failure case: specifically, where the constraints on imm_s are
-// not met.
-uint64_t Instruction::ImmLogical() const {
-  unsigned reg_size = SixtyFourBits() ? kXRegSize : kWRegSize;
-  int64_t n = BitN();
-  int64_t imm_s = ImmSetBits();
-  int64_t imm_r = ImmRotate();
-
-  // An integer is constructed from the n, imm_s and imm_r bits according to
-  // the following table:
-  //
-  //  N   imms    immr    size        S             R
-  //  1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
-  //  0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
-  //  0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
-  //  0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
-  //  0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
-  //  0  11110s  xxxxxr     2    UInt(s)       UInt(r)
-  // (s bits must not be all set)
-  //
-  // A pattern is constructed of size bits, where the least significant S+1
-  // bits are set. The pattern is rotated right by R, and repeated across a
-  // 32 or 64-bit value, depending on destination register width.
-  //
-
-  if (n == 1) {
-    if (imm_s == 0x3F) {
-      return 0;
-    }
-    uint64_t bits = (UINT64_C(1) << (imm_s + 1)) - 1;
-    return RotateRight(bits, imm_r, 64);
-  } else {
-    if ((imm_s >> 1) == 0x1F) {
-      return 0;
-    }
-    for (int width = 0x20; width >= 0x2; width >>= 1) {
-      if ((imm_s & width) == 0) {
-        int mask = width - 1;
-        if ((imm_s & mask) == mask) {
-          return 0;
-        }
-        uint64_t bits = (UINT64_C(1) << ((imm_s & mask) + 1)) - 1;
-        return RepeatBitsAcrossReg(reg_size,
-                                   RotateRight(bits, imm_r & mask, width),
-                                   width);
-      }
-    }
-  }
-  VIXL_UNREACHABLE();
-  return 0;
-}
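-
-// Worked example (illustrative): n=0, imm_s=0b110001, imm_r=0 selects an
-// 8-bit pattern with two set bits (0b11); repeated across a W register this
-// decodes to 0x03030303.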
-
-
-float Instruction::ImmFP32() const {
-  //  ImmFP: abcdefgh (8 bits)
-  // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
-  // where B is b ^ 1
-  uint32_t bits = ImmFP();
-  uint32_t bit7 = (bits >> 7) & 0x1;
-  uint32_t bit6 = (bits >> 6) & 0x1;
-  uint32_t bit5_to_0 = bits & 0x3f;
-  uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
-
-  return rawbits_to_float(result);
-}
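-
-// Example (illustrative): imm8 = 0x70 gives bit7=0, bit6=1, bits 5..0=0x30,
-// so result = (31 << 25) | (0x30 << 19) = 0x3f800000, i.e. 1.0f.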
-
-
-double Instruction::ImmFP64() const {
-  //  ImmFP: abcdefgh (8 bits)
-  // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
-  //         0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
-  // where B is b ^ 1
-  uint32_t bits = ImmFP();
-  uint64_t bit7 = (bits >> 7) & 0x1;
-  uint64_t bit6 = (bits >> 6) & 0x1;
-  uint64_t bit5_to_0 = bits & 0x3f;
-  uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);
-
-  return rawbits_to_double(result);
-}
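-
-// Example (illustrative): imm8 = 0x70 gives (255 << 54) | (0x30 << 48)
-// = 0x3ff0000000000000, i.e. 1.0.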
-
-
-LSDataSize CalcLSPairDataSize(LoadStorePairOp op) {
-  switch (op) {
-    case STP_x:
-    case LDP_x:
-    case STP_d:
-    case LDP_d: return LSDoubleWord;
-    default: return LSWord;
-  }
-}
-
-
-const Instruction* Instruction::ImmPCOffsetTarget() const {
-  const Instruction * base = this;
-  ptrdiff_t offset;
-  if (IsPCRelAddressing()) {
-    // ADR and ADRP.
-    offset = ImmPCRel();
-    if (Mask(PCRelAddressingMask) == ADRP) {
-      base = AlignDown(base, kPageSize);
-      offset *= kPageSize;
-    } else {
-      VIXL_ASSERT(Mask(PCRelAddressingMask) == ADR);
-    }
-  } else {
-    // All PC-relative branches.
-    VIXL_ASSERT(BranchType() != UnknownBranchType);
-    // Relative branch offsets are instruction-size-aligned.
-    offset = ImmBranch() << kInstructionSizeLog2;
-  }
-  return base + offset;
-}
-
-
-inline int Instruction::ImmBranch() const {
-  switch (BranchType()) {
-    case CondBranchType: return ImmCondBranch();
-    case UncondBranchType: return ImmUncondBranch();
-    case CompareBranchType: return ImmCmpBranch();
-    case TestBranchType: return ImmTestBranch();
-    default: VIXL_UNREACHABLE();
-  }
-  return 0;
-}
-
-
-void Instruction::SetImmPCOffsetTarget(const Instruction* target) {
-  if (IsPCRelAddressing()) {
-    SetPCRelImmTarget(target);
-  } else {
-    SetBranchImmTarget(target);
-  }
-}
-
-
-void Instruction::SetPCRelImmTarget(const Instruction* target) {
-  int32_t imm21;
-  if ((Mask(PCRelAddressingMask) == ADR)) {
-    imm21 = target - this;
-  } else {
-    VIXL_ASSERT(Mask(PCRelAddressingMask) == ADRP);
-    uintptr_t this_page = reinterpret_cast<uintptr_t>(this) / kPageSize;
-    uintptr_t target_page = reinterpret_cast<uintptr_t>(target) / kPageSize;
-    imm21 = target_page - this_page;
-  }
-  Instr imm = Assembler::ImmPCRelAddress(imm21);
-
-  SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
-}
-
-
-void Instruction::SetBranchImmTarget(const Instruction* target) {
-  VIXL_ASSERT(((target - this) & 3) == 0);
-  Instr branch_imm = 0;
-  uint32_t imm_mask = 0;
-  int offset = (target - this) >> kInstructionSizeLog2;
-  switch (BranchType()) {
-    case CondBranchType: {
-      branch_imm = Assembler::ImmCondBranch(offset);
-      imm_mask = ImmCondBranch_mask;
-      break;
-    }
-    case UncondBranchType: {
-      branch_imm = Assembler::ImmUncondBranch(offset);
-      imm_mask = ImmUncondBranch_mask;
-      break;
-    }
-    case CompareBranchType: {
-      branch_imm = Assembler::ImmCmpBranch(offset);
-      imm_mask = ImmCmpBranch_mask;
-      break;
-    }
-    case TestBranchType: {
-      branch_imm = Assembler::ImmTestBranch(offset);
-      imm_mask = ImmTestBranch_mask;
-      break;
-    }
-    default: VIXL_UNREACHABLE();
-  }
-  SetInstructionBits(Mask(~imm_mask) | branch_imm);
-}
-
-
-void Instruction::SetImmLLiteral(const Instruction* source) {
-  VIXL_ASSERT(IsWordAligned(source));
-  ptrdiff_t offset = (source - this) >> kLiteralEntrySizeLog2;
-  Instr imm = Assembler::ImmLLiteral(offset);
-  Instr mask = ImmLLiteral_mask;
-
-  SetInstructionBits(Mask(~mask) | imm);
-}
-}  // namespace vixl
-
diff --git a/disas/libvixl/a64/instructions-a64.h b/disas/libvixl/a64/instructions-a64.h
deleted file mode 100644
index f1d883ccc7..0000000000
--- a/disas/libvixl/a64/instructions-a64.h
+++ /dev/null
@@ -1,384 +0,0 @@
-// Copyright 2013, ARM Limited
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-//   * Redistributions of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-//   * Redistributions in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other materials provided with the distribution.
-//   * Neither the name of ARM Limited nor the names of its contributors may be
-//     used to endorse or promote products derived from this software without
-//     specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
-// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef VIXL_A64_INSTRUCTIONS_A64_H_
-#define VIXL_A64_INSTRUCTIONS_A64_H_
-
-#include "globals.h"
-#include "utils.h"
-#include "a64/constants-a64.h"
-
-namespace vixl {
-// ISA constants. --------------------------------------------------------------
-
-typedef uint32_t Instr;
-const unsigned kInstructionSize = 4;
-const unsigned kInstructionSizeLog2 = 2;
-const unsigned kLiteralEntrySize = 4;
-const unsigned kLiteralEntrySizeLog2 = 2;
-const unsigned kMaxLoadLiteralRange = 1 * MBytes;
-
-// This is the nominal page size (as used by the adrp instruction); the actual
-// size of the memory pages allocated by the kernel is likely to differ.
-const unsigned kPageSize = 4 * KBytes;
-const unsigned kPageSizeLog2 = 12;
-
-const unsigned kWRegSize = 32;
-const unsigned kWRegSizeLog2 = 5;
-const unsigned kWRegSizeInBytes = kWRegSize / 8;
-const unsigned kWRegSizeInBytesLog2 = kWRegSizeLog2 - 3;
-const unsigned kXRegSize = 64;
-const unsigned kXRegSizeLog2 = 6;
-const unsigned kXRegSizeInBytes = kXRegSize / 8;
-const unsigned kXRegSizeInBytesLog2 = kXRegSizeLog2 - 3;
-const unsigned kSRegSize = 32;
-const unsigned kSRegSizeLog2 = 5;
-const unsigned kSRegSizeInBytes = kSRegSize / 8;
-const unsigned kSRegSizeInBytesLog2 = kSRegSizeLog2 - 3;
-const unsigned kDRegSize = 64;
-const unsigned kDRegSizeLog2 = 6;
-const unsigned kDRegSizeInBytes = kDRegSize / 8;
-const unsigned kDRegSizeInBytesLog2 = kDRegSizeLog2 - 3;
-const uint64_t kWRegMask = UINT64_C(0xffffffff);
-const uint64_t kXRegMask = UINT64_C(0xffffffffffffffff);
-const uint64_t kSRegMask = UINT64_C(0xffffffff);
-const uint64_t kDRegMask = UINT64_C(0xffffffffffffffff);
-const uint64_t kSSignMask = UINT64_C(0x80000000);
-const uint64_t kDSignMask = UINT64_C(0x8000000000000000);
-const uint64_t kWSignMask = UINT64_C(0x80000000);
-const uint64_t kXSignMask = UINT64_C(0x8000000000000000);
-const uint64_t kByteMask = UINT64_C(0xff);
-const uint64_t kHalfWordMask = UINT64_C(0xffff);
-const uint64_t kWordMask = UINT64_C(0xffffffff);
-const uint64_t kXMaxUInt = UINT64_C(0xffffffffffffffff);
-const uint64_t kWMaxUInt = UINT64_C(0xffffffff);
-const int64_t kXMaxInt = INT64_C(0x7fffffffffffffff);
-const int64_t kXMinInt = INT64_C(0x8000000000000000);
-const int32_t kWMaxInt = INT32_C(0x7fffffff);
-const int32_t kWMinInt = INT32_C(0x80000000);
-const unsigned kLinkRegCode = 30;
-const unsigned kZeroRegCode = 31;
-const unsigned kSPRegInternalCode = 63;
-const unsigned kRegCodeMask = 0x1f;
-
-const unsigned kAddressTagOffset = 56;
-const unsigned kAddressTagWidth = 8;
-const uint64_t kAddressTagMask =
-    ((UINT64_C(1) << kAddressTagWidth) - 1) << kAddressTagOffset;
-VIXL_STATIC_ASSERT(kAddressTagMask == UINT64_C(0xff00000000000000));
-
-// AArch64 floating-point specifics. These match IEEE-754.
-const unsigned kDoubleMantissaBits = 52;
-const unsigned kDoubleExponentBits = 11;
-const unsigned kFloatMantissaBits = 23;
-const unsigned kFloatExponentBits = 8;
-
-// Floating-point infinity values.
-extern const float kFP32PositiveInfinity;
-extern const float kFP32NegativeInfinity;
-extern const double kFP64PositiveInfinity;
-extern const double kFP64NegativeInfinity;
-
-// The default NaN values (for FPCR.DN=1).
-extern const double kFP64DefaultNaN;
-extern const float kFP32DefaultNaN;
-
-
-enum LSDataSize {
-  LSByte        = 0,
-  LSHalfword    = 1,
-  LSWord        = 2,
-  LSDoubleWord  = 3
-};
-
-LSDataSize CalcLSPairDataSize(LoadStorePairOp op);
-
-enum ImmBranchType {
-  UnknownBranchType = 0,
-  CondBranchType    = 1,
-  UncondBranchType  = 2,
-  CompareBranchType = 3,
-  TestBranchType    = 4
-};
-
-enum AddrMode {
-  Offset,
-  PreIndex,
-  PostIndex
-};
-
-enum FPRounding {
-  // The first four values are encodable directly by FPCR<RMode>.
-  FPTieEven = 0x0,
-  FPPositiveInfinity = 0x1,
-  FPNegativeInfinity = 0x2,
-  FPZero = 0x3,
-
-  // The final rounding mode is only available when explicitly specified by the
-  // instruction (such as with fcvta). It cannot be set in FPCR.
-  FPTieAway
-};
-
-enum Reg31Mode {
-  Reg31IsStackPointer,
-  Reg31IsZeroRegister
-};
-
-// Instructions. ---------------------------------------------------------------
-
-class Instruction {
- public:
-  Instr InstructionBits() const {
-    return *(reinterpret_cast<const Instr*>(this));
-  }
-
-  void SetInstructionBits(Instr new_instr) {
-    *(reinterpret_cast<Instr*>(this)) = new_instr;
-  }
-
-  int Bit(int pos) const {
-    return (InstructionBits() >> pos) & 1;
-  }
-
-  uint32_t Bits(int msb, int lsb) const {
-    return unsigned_bitextract_32(msb, lsb, InstructionBits());
-  }
-
-  int32_t SignedBits(int msb, int lsb) const {
-    int32_t bits = *(reinterpret_cast<const int32_t*>(this));
-    return signed_bitextract_32(msb, lsb, bits);
-  }
-
-  Instr Mask(uint32_t mask) const {
-    return InstructionBits() & mask;
-  }
-
-  #define DEFINE_GETTER(Name, HighBit, LowBit, Func)             \
-  int64_t Name() const { return Func(HighBit, LowBit); }
-  INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
-  #undef DEFINE_GETTER
-
-  // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
-  // formed from ImmPCRelLo and ImmPCRelHi.
-  int ImmPCRel() const {
-    int const offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
-    int const width = ImmPCRelLo_width + ImmPCRelHi_width;
-    return signed_bitextract_32(width-1, 0, offset);
-  }
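-
-  // Example (illustrative): ImmPCRelLo holds the low 2 bits and ImmPCRelHi
-  // the high 19 bits of a signed 21-bit offset, so hi=1, lo=3 decodes to
-  // (1 << 2) | 3 = 7.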
-
-  uint64_t ImmLogical() const;
-  float ImmFP32() const;
-  double ImmFP64() const;
-
-  LSDataSize SizeLSPair() const {
-    return CalcLSPairDataSize(
-             static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
-  }
-
-  // Helpers.
-  bool IsCondBranchImm() const {
-    return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
-  }
-
-  bool IsUncondBranchImm() const {
-    return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
-  }
-
-  bool IsCompareBranch() const {
-    return Mask(CompareBranchFMask) == CompareBranchFixed;
-  }
-
-  bool IsTestBranch() const {
-    return Mask(TestBranchFMask) == TestBranchFixed;
-  }
-
-  bool IsPCRelAddressing() const {
-    return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
-  }
-
-  bool IsLogicalImmediate() const {
-    return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
-  }
-
-  bool IsAddSubImmediate() const {
-    return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
-  }
-
-  bool IsAddSubExtended() const {
-    return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
-  }
-
-  bool IsLoadOrStore() const {
-    return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
-  }
-
-  bool IsLoad() const;
-  bool IsStore() const;
-
-  bool IsLoadLiteral() const {
-    // This includes PRFM_lit.
-    return Mask(LoadLiteralFMask) == LoadLiteralFixed;
-  }
-
-  bool IsMovn() const {
-    return (Mask(MoveWideImmediateMask) == MOVN_x) ||
-           (Mask(MoveWideImmediateMask) == MOVN_w);
-  }
-
-  // Indicate whether Rd can be the stack pointer or the zero register. This
-  // does not check that the instruction actually has an Rd field.
-  Reg31Mode RdMode() const {
-    // The following instructions use sp or wsp as Rd:
-    //  Add/sub (immediate) when not setting the flags.
-    //  Add/sub (extended) when not setting the flags.
-    //  Logical (immediate) when not setting the flags.
-    // Otherwise, r31 is the zero register.
-    if (IsAddSubImmediate() || IsAddSubExtended()) {
-      if (Mask(AddSubSetFlagsBit)) {
-        return Reg31IsZeroRegister;
-      } else {
-        return Reg31IsStackPointer;
-      }
-    }
-    if (IsLogicalImmediate()) {
-      // Of the logical (immediate) instructions, only ANDS (and its aliases)
-      // can set the flags. The others can all write into sp.
-      // Note that some logical operations are not available to
-      // immediate-operand instructions, so we have to combine two masks here.
-      if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
-        return Reg31IsZeroRegister;
-      } else {
-        return Reg31IsStackPointer;
-      }
-    }
-    return Reg31IsZeroRegister;
-  }
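-
-  // Example (illustrative): "add sp, x0, #16" does not set flags, so
-  // RdMode() is Reg31IsStackPointer; "adds xzr, x0, #16" (alias of
-  // "cmn x0, #16") sets flags, so RdMode() is Reg31IsZeroRegister.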
-
-  // Indicate whether Rn can be the stack pointer or the zero register. This
-  // does not check that the instruction actually has an Rn field.
-  Reg31Mode RnMode() const {
-    // The following instructions use sp or wsp as Rn:
-    //  All loads and stores.
-    //  Add/sub (immediate).
-    //  Add/sub (extended).
-    // Otherwise, r31 is the zero register.
-    if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) {
-      return Reg31IsStackPointer;
-    }
-    return Reg31IsZeroRegister;
-  }
-
-  ImmBranchType BranchType() const {
-    if (IsCondBranchImm()) {
-      return CondBranchType;
-    } else if (IsUncondBranchImm()) {
-      return UncondBranchType;
-    } else if (IsCompareBranch()) {
-      return CompareBranchType;
-    } else if (IsTestBranch()) {
-      return TestBranchType;
-    } else {
-      return UnknownBranchType;
-    }
-  }
-
-  // Find the target of this instruction. 'this' may be a branch or a
-  // PC-relative addressing instruction.
-  const Instruction* ImmPCOffsetTarget() const;
-
-  // Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
-  // a PC-relative addressing instruction.
-  void SetImmPCOffsetTarget(const Instruction* target);
-  // Patch a literal load instruction to load from 'source'.
-  void SetImmLLiteral(const Instruction* source);
-
-  // Calculate the address of a literal referred to by a load-literal
-  // instruction, and return it as the specified type.
-  //
-  // The literal itself is safely mutable only if the backing buffer is safely
-  // mutable.
-  template <typename T>
-  T LiteralAddress() const {
-    uint64_t base_raw = reinterpret_cast<uintptr_t>(this);
-    ptrdiff_t offset = ImmLLiteral() << kLiteralEntrySizeLog2;
-    uint64_t address_raw = base_raw + offset;
-
-    // Cast the address using a C-style cast. A reinterpret_cast would be
-    // appropriate, but it can't cast one integral type to another.
-    T address = (T)(address_raw);
-
-    // Assert that the address can be represented by the specified type.
-    VIXL_ASSERT((uint64_t)(address) == address_raw);
-
-    return address;
-  }
-
-  uint32_t Literal32() const {
-    uint32_t literal;
-    memcpy(&literal, LiteralAddress<const void*>(), sizeof(literal));
-    return literal;
-  }
-
-  uint64_t Literal64() const {
-    uint64_t literal;
-    memcpy(&literal, LiteralAddress<const void*>(), sizeof(literal));
-    return literal;
-  }
-
-  float LiteralFP32() const {
-    return rawbits_to_float(Literal32());
-  }
-
-  double LiteralFP64() const {
-    return rawbits_to_double(Literal64());
-  }
-
-  const Instruction* NextInstruction() const {
-    return this + kInstructionSize;
-  }
-
-  const Instruction* InstructionAtOffset(int64_t offset) const {
-    VIXL_ASSERT(IsWordAligned(this + offset));
-    return this + offset;
-  }
-
-  template<typename T> static Instruction* Cast(T src) {
-    return reinterpret_cast<Instruction*>(src);
-  }
-
-  template<typename T> static const Instruction* CastConst(T src) {
-    return reinterpret_cast<const Instruction*>(src);
-  }
-
- private:
-  int ImmBranch() const;
-
-  void SetPCRelImmTarget(const Instruction* target);
-  void SetBranchImmTarget(const Instruction* target);
-};
-}  // namespace vixl
-
-#endif  // VIXL_A64_INSTRUCTIONS_A64_H_
diff --git a/disas/libvixl/vixl/a64/assembler-a64.h b/disas/libvixl/vixl/a64/assembler-a64.h
new file mode 100644
index 0000000000..fda5ccc6c7
--- /dev/null
+++ b/disas/libvixl/vixl/a64/assembler-a64.h
@@ -0,0 +1,4624 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+//   * Redistributions of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//   * Redistributions in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other materials provided with the distribution.
+//   * Neither the name of ARM Limited nor the names of its contributors may be
+//     used to endorse or promote products derived from this software without
+//     specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_A64_ASSEMBLER_A64_H_
+#define VIXL_A64_ASSEMBLER_A64_H_
+
+
+#include "vixl/globals.h"
+#include "vixl/invalset.h"
+#include "vixl/utils.h"
+#include "vixl/code-buffer.h"
+#include "vixl/a64/instructions-a64.h"
+
+namespace vixl {
+
+typedef uint64_t RegList;
+static const int kRegListSizeInBits = sizeof(RegList) * 8;
+
+
+// Registers.
+
+// Some CPURegister methods can return Register or VRegister types, so we need
+// to declare them in advance.
+class Register;
+class VRegister;
+
+class CPURegister {
+ public:
+  enum RegisterType {
+    // The kInvalid value is used to detect uninitialized static instances,
+    // which are always zero-initialized before any constructors are called.
+    kInvalid = 0,
+    kRegister,
+    kVRegister,
+    kFPRegister = kVRegister,
+    kNoRegister
+  };
+
+  CPURegister() : code_(0), size_(0), type_(kNoRegister) {
+    VIXL_ASSERT(!IsValid());
+    VIXL_ASSERT(IsNone());
+  }
+
+  CPURegister(unsigned code, unsigned size, RegisterType type)
+      : code_(code), size_(size), type_(type) {
+    VIXL_ASSERT(IsValidOrNone());
+  }
+
+  unsigned code() const {
+    VIXL_ASSERT(IsValid());
+    return code_;
+  }
+
+  RegisterType type() const {
+    VIXL_ASSERT(IsValidOrNone());
+    return type_;
+  }
+
+  RegList Bit() const {
+    VIXL_ASSERT(code_ < (sizeof(RegList) * 8));
+    return IsValid() ? (static_cast<RegList>(1) << code_) : 0;
+  }
+
+  unsigned size() const {
+    VIXL_ASSERT(IsValid());
+    return size_;
+  }
+
+  int SizeInBytes() const {
+    VIXL_ASSERT(IsValid());
+    VIXL_ASSERT(size() % 8 == 0);
+    return size_ / 8;
+  }
+
+  int SizeInBits() const {
+    VIXL_ASSERT(IsValid());
+    return size_;
+  }
+
+  bool Is8Bits() const {
+    VIXL_ASSERT(IsValid());
+    return size_ == 8;
+  }
+
+  bool Is16Bits() const {
+    VIXL_ASSERT(IsValid());
+    return size_ == 16;
+  }
+
+  bool Is32Bits() const {
+    VIXL_ASSERT(IsValid());
+    return size_ == 32;
+  }
+
+  bool Is64Bits() const {
+    VIXL_ASSERT(IsValid());
+    return size_ == 64;
+  }
+
+  bool Is128Bits() const {
+    VIXL_ASSERT(IsValid());
+    return size_ == 128;
+  }
+
+  bool IsValid() const {
+    if (IsValidRegister() || IsValidVRegister()) {
+      VIXL_ASSERT(!IsNone());
+      return true;
+    } else {
+      // This assert is hit when the register has not been properly initialized.
+      // One cause for this can be an initialisation order fiasco. See
+      // https://isocpp.org/wiki/faq/ctors#static-init-order for some details.
+      VIXL_ASSERT(IsNone());
+      return false;
+    }
+  }
+
+  bool IsValidRegister() const {
+    return IsRegister() &&
+           ((size_ == kWRegSize) || (size_ == kXRegSize)) &&
+           ((code_ < kNumberOfRegisters) || (code_ == kSPRegInternalCode));
+  }
+
+  bool IsValidVRegister() const {
+    return IsVRegister() &&
+           ((size_ == kBRegSize) || (size_ == kHRegSize) ||
+            (size_ == kSRegSize) || (size_ == kDRegSize) ||
+            (size_ == kQRegSize)) &&
+           (code_ < kNumberOfVRegisters);
+  }
+
+  bool IsValidFPRegister() const {
+    return IsFPRegister() && (code_ < kNumberOfVRegisters);
+  }
+
+  bool IsNone() const {
+    // kNoRegister types should always have size 0 and code 0.
+    VIXL_ASSERT((type_ != kNoRegister) || (code_ == 0));
+    VIXL_ASSERT((type_ != kNoRegister) || (size_ == 0));
+
+    return type_ == kNoRegister;
+  }
+
+  bool Aliases(const CPURegister& other) const {
+    VIXL_ASSERT(IsValidOrNone() && other.IsValidOrNone());
+    return (code_ == other.code_) && (type_ == other.type_);
+  }
+
+  bool Is(const CPURegister& other) const {
+    VIXL_ASSERT(IsValidOrNone() && other.IsValidOrNone());
+    return Aliases(other) && (size_ == other.size_);
+  }
+
+  bool IsZero() const {
+    VIXL_ASSERT(IsValid());
+    return IsRegister() && (code_ == kZeroRegCode);
+  }
+
+  bool IsSP() const {
+    VIXL_ASSERT(IsValid());
+    return IsRegister() && (code_ == kSPRegInternalCode);
+  }
+
+  bool IsRegister() const {
+    return type_ == kRegister;
+  }
+
+  bool IsVRegister() const {
+    return type_ == kVRegister;
+  }
+
+  bool IsFPRegister() const {
+    return IsS() || IsD();
+  }
+
+  bool IsW() const { return IsValidRegister() && Is32Bits(); }
+  bool IsX() const { return IsValidRegister() && Is64Bits(); }
+
+  // These assertions ensure that the size and type of the register are as
+  // described. They do not consider the number of lanes that make up a vector.
+  // So, for example, Is8B() implies IsD(), and Is1D() implies IsD(), but
+  // IsD() does not imply Is1D() or Is8B().
+  // Check the number of lanes, i.e. the format of the vector, using methods such
+  // as Is8B(), Is1D(), etc. in the VRegister class.
+  bool IsV() const { return IsVRegister(); }
+  bool IsB() const { return IsV() && Is8Bits(); }
+  bool IsH() const { return IsV() && Is16Bits(); }
+  bool IsS() const { return IsV() && Is32Bits(); }
+  bool IsD() const { return IsV() && Is64Bits(); }
+  bool IsQ() const { return IsV() && Is128Bits(); }
+
+  const Register& W() const;
+  const Register& X() const;
+  const VRegister& V() const;
+  const VRegister& B() const;
+  const VRegister& H() const;
+  const VRegister& S() const;
+  const VRegister& D() const;
+  const VRegister& Q() const;
+
+  bool IsSameSizeAndType(const CPURegister& other) const {
+    return (size_ == other.size_) && (type_ == other.type_);
+  }
+
+ protected:
+  unsigned code_;
+  unsigned size_;
+  RegisterType type_;
+
+ private:
+  bool IsValidOrNone() const {
+    return IsValid() || IsNone();
+  }
+};
+
+
+class Register : public CPURegister {
+ public:
+  Register() : CPURegister() {}
+  explicit Register(const CPURegister& other)
+      : CPURegister(other.code(), other.size(), other.type()) {
+    VIXL_ASSERT(IsValidRegister());
+  }
+  Register(unsigned code, unsigned size)
+      : CPURegister(code, size, kRegister) {}
+
+  bool IsValid() const {
+    VIXL_ASSERT(IsRegister() || IsNone());
+    return IsValidRegister();
+  }
+
+  static const Register& WRegFromCode(unsigned code);
+  static const Register& XRegFromCode(unsigned code);
+
+ private:
+  static const Register wregisters[];
+  static const Register xregisters[];
+};
+
+
+class VRegister : public CPURegister {
+ public:
+  VRegister() : CPURegister(), lanes_(1) {}
+  explicit VRegister(const CPURegister& other)
+      : CPURegister(other.code(), other.size(), other.type()), lanes_(1) {
+    VIXL_ASSERT(IsValidVRegister());
+    VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16));
+  }
+  VRegister(unsigned code, unsigned size, unsigned lanes = 1)
+      : CPURegister(code, size, kVRegister), lanes_(lanes) {
+    VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16));
+  }
+  VRegister(unsigned code, VectorFormat format)
+      : CPURegister(code, RegisterSizeInBitsFromFormat(format), kVRegister),
+        lanes_(IsVectorFormat(format) ? LaneCountFromFormat(format) : 1) {
+    VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16));
+  }
+
+  bool IsValid() const {
+    VIXL_ASSERT(IsVRegister() || IsNone());
+    return IsValidVRegister();
+  }
+
+  static const VRegister& BRegFromCode(unsigned code);
+  static const VRegister& HRegFromCode(unsigned code);
+  static const VRegister& SRegFromCode(unsigned code);
+  static const VRegister& DRegFromCode(unsigned code);
+  static const VRegister& QRegFromCode(unsigned code);
+  static const VRegister& VRegFromCode(unsigned code);
+
+  VRegister V8B() const { return VRegister(code_, kDRegSize, 8); }
+  VRegister V16B() const { return VRegister(code_, kQRegSize, 16); }
+  VRegister V4H() const { return VRegister(code_, kDRegSize, 4); }
+  VRegister V8H() const { return VRegister(code_, kQRegSize, 8); }
+  VRegister V2S() const { return VRegister(code_, kDRegSize, 2); }
+  VRegister V4S() const { return VRegister(code_, kQRegSize, 4); }
+  VRegister V2D() const { return VRegister(code_, kQRegSize, 2); }
+  VRegister V1D() const { return VRegister(code_, kDRegSize, 1); }
+
+  bool Is8B() const { return (Is64Bits() && (lanes_ == 8)); }
+  bool Is16B() const { return (Is128Bits() && (lanes_ == 16)); }
+  bool Is4H() const { return (Is64Bits() && (lanes_ == 4)); }
+  bool Is8H() const { return (Is128Bits() && (lanes_ == 8)); }
+  bool Is2S() const { return (Is64Bits() && (lanes_ == 2)); }
+  bool Is4S() const { return (Is128Bits() && (lanes_ == 4)); }
+  bool Is1D() const { return (Is64Bits() && (lanes_ == 1)); }
+  bool Is2D() const { return (Is128Bits() && (lanes_ == 2)); }
+
+  // For consistency, we assert the number of lanes of these scalar registers,
+  // even though there are no vectors of equivalent total size with which they
+  // could alias.
+  bool Is1B() const {
+    VIXL_ASSERT(!(Is8Bits() && IsVector()));
+    return Is8Bits();
+  }
+  bool Is1H() const {
+    VIXL_ASSERT(!(Is16Bits() && IsVector()));
+    return Is16Bits();
+  }
+  bool Is1S() const {
+    VIXL_ASSERT(!(Is32Bits() && IsVector()));
+    return Is32Bits();
+  }
+
+  bool IsLaneSizeB() const { return LaneSizeInBits() == kBRegSize; }
+  bool IsLaneSizeH() const { return LaneSizeInBits() == kHRegSize; }
+  bool IsLaneSizeS() const { return LaneSizeInBits() == kSRegSize; }
+  bool IsLaneSizeD() const { return LaneSizeInBits() == kDRegSize; }
+
+  int lanes() const {
+    return lanes_;
+  }
+
+  bool IsScalar() const {
+    return lanes_ == 1;
+  }
+
+  bool IsVector() const {
+    return lanes_ > 1;
+  }
+
+  bool IsSameFormat(const VRegister& other) const {
+    return (size_ == other.size_) && (lanes_ == other.lanes_);
+  }
+
+  unsigned LaneSizeInBytes() const {
+    return SizeInBytes() / lanes_;
+  }
+
+  unsigned LaneSizeInBits() const {
+    return LaneSizeInBytes() * 8;
+  }
+
+ private:
+  static const VRegister bregisters[];
+  static const VRegister hregisters[];
+  static const VRegister sregisters[];
+  static const VRegister dregisters[];
+  static const VRegister qregisters[];
+  static const VRegister vregisters[];
+  int lanes_;
+};
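+
+// A brief usage sketch (illustrative, using the definitions above):
+//   VRegister v(0, kQRegSize, 4);  // q0 viewed as four 32-bit lanes.
+//   v.Is4S();                      // true: 128 bits in 4 lanes.
+//   v.LaneSizeInBits();            // 32.
+//   v.IsSameFormat(VRegister(1, kQRegSize, 4));  // true: the code is ignored.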
+
+
+// Backward compatibility for FPRegisters.
+typedef VRegister FPRegister;
+
+// No*Reg is used to indicate an unused argument, or an error case. Note that
+// these all compare equal (using the Is() method). The Register and VRegister
+// variants are provided for convenience.
+const Register NoReg;
+const VRegister NoVReg;
+const FPRegister NoFPReg;  // For backward compatibility.
+const CPURegister NoCPUReg;
+
+
+#define DEFINE_REGISTERS(N)  \
+const Register w##N(N, kWRegSize);  \
+const Register x##N(N, kXRegSize);
+REGISTER_CODE_LIST(DEFINE_REGISTERS)
+#undef DEFINE_REGISTERS
+const Register wsp(kSPRegInternalCode, kWRegSize);
+const Register sp(kSPRegInternalCode, kXRegSize);
+
+
+#define DEFINE_VREGISTERS(N)  \
+const VRegister b##N(N, kBRegSize);  \
+const VRegister h##N(N, kHRegSize);  \
+const VRegister s##N(N, kSRegSize);  \
+const VRegister d##N(N, kDRegSize);  \
+const VRegister q##N(N, kQRegSize);  \
+const VRegister v##N(N, kQRegSize);
+REGISTER_CODE_LIST(DEFINE_VREGISTERS)
+#undef DEFINE_VREGISTERS
+
+
+// Register aliases.
+const Register ip0 = x16;
+const Register ip1 = x17;
+const Register lr = x30;
+const Register xzr = x31;
+const Register wzr = w31;
+
+
+// AreAliased returns true if any of the named registers overlap. Arguments
+// set to NoReg are ignored. The system stack pointer may be specified.
+bool AreAliased(const CPURegister& reg1,
+                const CPURegister& reg2,
+                const CPURegister& reg3 = NoReg,
+                const CPURegister& reg4 = NoReg,
+                const CPURegister& reg5 = NoReg,
+                const CPURegister& reg6 = NoReg,
+                const CPURegister& reg7 = NoReg,
+                const CPURegister& reg8 = NoReg);
+
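+// For example (illustrative): AreAliased(x0, w0) is true because both name
+// register code 0 with type kRegister, even though their sizes differ, while
+// AreAliased(x0, d0) is false because the types differ.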
+
+// AreSameSizeAndType returns true if all of the specified registers have the
+// same size, and are of the same type. The system stack pointer may be
+// specified. Arguments set to NoReg are ignored, as are any subsequent
+// arguments. At least one argument (reg1) must be valid (not NoCPUReg).
+bool AreSameSizeAndType(const CPURegister& reg1,
+                        const CPURegister& reg2,
+                        const CPURegister& reg3 = NoCPUReg,
+                        const CPURegister& reg4 = NoCPUReg,
+                        const CPURegister& reg5 = NoCPUReg,
+                        const CPURegister& reg6 = NoCPUReg,
+                        const CPURegister& reg7 = NoCPUReg,
+                        const CPURegister& reg8 = NoCPUReg);
+
+
+// AreSameFormat returns true if all of the specified VRegisters have the same
+// vector format. Arguments set to NoReg are ignored, as are any subsequent
+// arguments. At least one argument (reg1) must be valid (not NoVReg).
+bool AreSameFormat(const VRegister& reg1,
+                   const VRegister& reg2,
+                   const VRegister& reg3 = NoVReg,
+                   const VRegister& reg4 = NoVReg);
+
+
+// AreConsecutive returns true if all of the specified VRegisters are
+// consecutive in the register file. Arguments set to NoReg are ignored, as are
+// any subsequent arguments. At least one argument (reg1) must be valid
+// (not NoVReg).
+bool AreConsecutive(const VRegister& reg1,
+                    const VRegister& reg2,
+                    const VRegister& reg3 = NoVReg,
+                    const VRegister& reg4 = NoVReg);
+
+
+// Lists of registers.
+class CPURegList {
+ public:
+  explicit CPURegList(CPURegister reg1,
+                      CPURegister reg2 = NoCPUReg,
+                      CPURegister reg3 = NoCPUReg,
+                      CPURegister reg4 = NoCPUReg)
+      : list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()),
+        size_(reg1.size()), type_(reg1.type()) {
+    VIXL_ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
+    VIXL_ASSERT(IsValid());
+  }
+
+  CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
+      : list_(list), size_(size), type_(type) {
+    VIXL_ASSERT(IsValid());
+  }
+
+  CPURegList(CPURegister::RegisterType type, unsigned size,
+             unsigned first_reg, unsigned last_reg)
+      : size_(size), type_(type) {
+    VIXL_ASSERT(((type == CPURegister::kRegister) &&
+                 (last_reg < kNumberOfRegisters)) ||
+                ((type == CPURegister::kVRegister) &&
+                 (last_reg < kNumberOfVRegisters)));
+    VIXL_ASSERT(last_reg >= first_reg);
+    list_ = (UINT64_C(1) << (last_reg + 1)) - 1;
+    list_ &= ~((UINT64_C(1) << first_reg) - 1);
+    VIXL_ASSERT(IsValid());
+  }
+
+  CPURegister::RegisterType type() const {
+    VIXL_ASSERT(IsValid());
+    return type_;
+  }
+
+  // Combine another CPURegList into this one. Registers that already exist in
+  // this list are left unchanged. The type and size of the registers in the
+  // 'other' list must match those in this list.
+  void Combine(const CPURegList& other) {
+    VIXL_ASSERT(IsValid());
+    VIXL_ASSERT(other.type() == type_);
+    VIXL_ASSERT(other.RegisterSizeInBits() == size_);
+    list_ |= other.list();
+  }
+
+  // Remove every register in the other CPURegList from this one. Registers that
+  // do not exist in this list are ignored. The type and size of the registers
+  // in the 'other' list must match those in this list.
+  void Remove(const CPURegList& other) {
+    VIXL_ASSERT(IsValid());
+    VIXL_ASSERT(other.type() == type_);
+    VIXL_ASSERT(other.RegisterSizeInBits() == size_);
+    list_ &= ~other.list();
+  }
+
+  // Variants of Combine and Remove which take a single register.
+  void Combine(const CPURegister& other) {
+    VIXL_ASSERT(other.type() == type_);
+    VIXL_ASSERT(other.size() == size_);
+    Combine(other.code());
+  }
+
+  void Remove(const CPURegister& other) {
+    VIXL_ASSERT(other.type() == type_);
+    VIXL_ASSERT(other.size() == size_);
+    Remove(other.code());
+  }
+
+  // Variants of Combine and Remove which take a single register by its code;
+  // the type and size of the register is inferred from this list.
+  void Combine(int code) {
+    VIXL_ASSERT(IsValid());
+    VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
+    list_ |= (UINT64_C(1) << code);
+  }
+
+  void Remove(int code) {
+    VIXL_ASSERT(IsValid());
+    VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
+    list_ &= ~(UINT64_C(1) << code);
+  }
+
+  static CPURegList Union(const CPURegList& list_1, const CPURegList& list_2) {
+    VIXL_ASSERT(list_1.type_ == list_2.type_);
+    VIXL_ASSERT(list_1.size_ == list_2.size_);
+    return CPURegList(list_1.type_, list_1.size_, list_1.list_ | list_2.list_);
+  }
+  static CPURegList Union(const CPURegList& list_1,
+                          const CPURegList& list_2,
+                          const CPURegList& list_3);
+  static CPURegList Union(const CPURegList& list_1,
+                          const CPURegList& list_2,
+                          const CPURegList& list_3,
+                          const CPURegList& list_4);
+
+  static CPURegList Intersection(const CPURegList& list_1,
+                                 const CPURegList& list_2) {
+    VIXL_ASSERT(list_1.type_ == list_2.type_);
+    VIXL_ASSERT(list_1.size_ == list_2.size_);
+    return CPURegList(list_1.type_, list_1.size_, list_1.list_ & list_2.list_);
+  }
+  static CPURegList Intersection(const CPURegList& list_1,
+                                 const CPURegList& list_2,
+                                 const CPURegList& list_3);
+  static CPURegList Intersection(const CPURegList& list_1,
+                                 const CPURegList& list_2,
+                                 const CPURegList& list_3,
+                                 const CPURegList& list_4);
+
+  bool Overlaps(const CPURegList& other) const {
+    return (type_ == other.type_) && ((list_ & other.list_) != 0);
+  }
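+
+  // For example (illustrative): if list_1 covers {x0, x1} and list_2 covers
+  // {x1, x2}, Union yields {x0, x1, x2}, Intersection yields {x1}, and the
+  // two lists Overlap. All of these require matching types (and, for Union
+  // and Intersection, matching sizes).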
+
+  RegList list() const {
+    VIXL_ASSERT(IsValid());
+    return list_;
+  }
+
+  void set_list(RegList new_list) {
+    VIXL_ASSERT(IsValid());
+    list_ = new_list;
+  }
+
+  // Remove all callee-saved registers from the list. This can be useful when
+  // preparing registers for an AAPCS64 function call, for example.
+  void RemoveCalleeSaved();
+
+  CPURegister PopLowestIndex();
+  CPURegister PopHighestIndex();
+
+  // AAPCS64 callee-saved registers.
+  static CPURegList GetCalleeSaved(unsigned size = kXRegSize);
+  static CPURegList GetCalleeSavedV(unsigned size = kDRegSize);
+
+  // AAPCS64 caller-saved registers. Note that this includes lr.
+  // TODO(all): Determine how we handle d8-d15 being callee-saved, but the top
+  // 64-bits being caller-saved.
+  static CPURegList GetCallerSaved(unsigned size = kXRegSize);
+  static CPURegList GetCallerSavedV(unsigned size = kDRegSize);
+
+  bool IsEmpty() const {
+    VIXL_ASSERT(IsValid());
+    return list_ == 0;
+  }
+
+  bool IncludesAliasOf(const CPURegister& other) const {
+    VIXL_ASSERT(IsValid());
+    return (type_ == other.type()) && ((other.Bit() & list_) != 0);
+  }
+
+  bool IncludesAliasOf(int code) const {
+    VIXL_ASSERT(IsValid());
+    // The code must be converted to its bit in the list before masking.
+    return ((UINT64_C(1) << code) & list_) != 0;
+  }
+
+  int Count() const {
+    VIXL_ASSERT(IsValid());
+    return CountSetBits(list_);
+  }
+
+  unsigned RegisterSizeInBits() const {
+    VIXL_ASSERT(IsValid());
+    return size_;
+  }
+
+  unsigned RegisterSizeInBytes() const {
+    int size_in_bits = RegisterSizeInBits();
+    VIXL_ASSERT((size_in_bits % 8) == 0);
+    return size_in_bits / 8;
+  }
+
+  unsigned TotalSizeInBytes() const {
+    VIXL_ASSERT(IsValid());
+    return RegisterSizeInBytes() * Count();
+  }
+
+ private:
+  RegList list_;
+  unsigned size_;
+  CPURegister::RegisterType type_;
+
+  bool IsValid() const;
+};
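+
+// A short usage sketch (illustrative only, based on the class above):
+//   CPURegList tmp_list(CPURegister::kRegister, kXRegSize, 10, 12); // x10-x12.
+//   tmp_list.Remove(11);                             // Drop x11 by code.
+//   CPURegister lowest = tmp_list.PopLowestIndex();  // x10.
+//   tmp_list.Count();                                // 1: only x12 remains.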
+
+
+// AAPCS64 callee-saved registers.
+extern const CPURegList kCalleeSaved;
+extern const CPURegList kCalleeSavedV;
+
+
+// AAPCS64 caller-saved registers. Note that this includes lr.
+extern const CPURegList kCallerSaved;
+extern const CPURegList kCallerSavedV;
+
+
+// Operand.
+class Operand {
+ public:
+  // #<immediate>
+  // where <immediate> is int64_t.
+  // This is allowed to be an implicit constructor because Operand is
+  // a wrapper class that doesn't normally perform any type conversion.
+  Operand(int64_t immediate = 0);           // NOLINT(runtime/explicit)
+
+  // rm, {<shift> #<shift_amount>}
+  // where <shift> is one of {LSL, LSR, ASR, ROR}.
+  //       <shift_amount> is uint6_t.
+  // This is allowed to be an implicit constructor because Operand is
+  // a wrapper class that doesn't normally perform any type conversion.
+  Operand(Register reg,
+          Shift shift = LSL,
+          unsigned shift_amount = 0);   // NOLINT(runtime/explicit)
+
+  // rm, {<extend> {#<shift_amount>}}
+  // where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
+  //       <shift_amount> is uint2_t.
+  explicit Operand(Register reg, Extend extend, unsigned shift_amount = 0);
+
+  bool IsImmediate() const;
+  bool IsShiftedRegister() const;
+  bool IsExtendedRegister() const;
+  bool IsZero() const;
+
+  // This returns an LSL shift (<= 4) operand as an equivalent extend operand,
+  // which helps in the encoding of instructions that use the stack pointer.
+  Operand ToExtendedRegister() const;
+
+  int64_t immediate() const {
+    VIXL_ASSERT(IsImmediate());
+    return immediate_;
+  }
+
+  Register reg() const {
+    VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister());
+    return reg_;
+  }
+
+  Shift shift() const {
+    VIXL_ASSERT(IsShiftedRegister());
+    return shift_;
+  }
+
+  Extend extend() const {
+    VIXL_ASSERT(IsExtendedRegister());
+    return extend_;
+  }
+
+  unsigned shift_amount() const {
+    VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister());
+    return shift_amount_;
+  }
+
+ private:
+  int64_t immediate_;
+  Register reg_;
+  Shift shift_;
+  Extend extend_;
+  unsigned shift_amount_;
+};
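+
+// The three Operand forms, for illustration:
+//   Operand(42);           // #42, an immediate.
+//   Operand(x1, LSL, 4);   // x1, LSL #4, a shifted register.
+//   Operand(w2, UXTW, 2);  // w2, UXTW #2, an extended register.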
+
+
+// MemOperand represents the addressing mode of a load or store instruction.
+class MemOperand {
+ public:
+  explicit MemOperand(Register base,
+                      int64_t offset = 0,
+                      AddrMode addrmode = Offset);
+  MemOperand(Register base,
+             Register regoffset,
+             Shift shift = LSL,
+             unsigned shift_amount = 0);
+  MemOperand(Register base,
+             Register regoffset,
+             Extend extend,
+             unsigned shift_amount = 0);
+  MemOperand(Register base,
+             const Operand& offset,
+             AddrMode addrmode = Offset);
+
+  const Register& base() const { return base_; }
+  const Register& regoffset() const { return regoffset_; }
+  int64_t offset() const { return offset_; }
+  AddrMode addrmode() const { return addrmode_; }
+  Shift shift() const { return shift_; }
+  Extend extend() const { return extend_; }
+  unsigned shift_amount() const { return shift_amount_; }
+  bool IsImmediateOffset() const;
+  bool IsRegisterOffset() const;
+  bool IsPreIndex() const;
+  bool IsPostIndex() const;
+
+  void AddOffset(int64_t offset);
+
+ private:
+  Register base_;
+  Register regoffset_;
+  int64_t offset_;
+  AddrMode addrmode_;
+  Shift shift_;
+  Extend extend_;
+  unsigned shift_amount_;
+};
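+
+// Common addressing modes, for illustration:
+//   MemOperand(x0);                 // [x0], immediate offset 0.
+//   MemOperand(x0, 8);              // [x0, #8], immediate offset.
+//   MemOperand(x0, x1, LSL, 3);     // [x0, x1, LSL #3], register offset.
+//   MemOperand(x0, 16, PreIndex);   // [x0, #16]!, pre-indexed.
+//   MemOperand(x0, 16, PostIndex);  // [x0], #16, post-indexed.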
+
+
+class LabelTestHelper;  // Forward declaration.
+
+
+class Label {
+ public:
+  Label() : location_(kLocationUnbound) {}
+  ~Label() {
+    // If the label has been linked to, it needs to be bound to a target.
+    VIXL_ASSERT(!IsLinked() || IsBound());
+  }
+
+  bool IsBound() const { return location_ >= 0; }
+  bool IsLinked() const { return !links_.empty(); }
+
+  ptrdiff_t location() const { return location_; }
+
+  static const int kNPreallocatedLinks = 4;
+  static const ptrdiff_t kInvalidLinkKey = PTRDIFF_MAX;
+  static const size_t kReclaimFrom = 512;
+  static const size_t kReclaimFactor = 2;
+
+  typedef InvalSet<ptrdiff_t,
+                   kNPreallocatedLinks,
+                   ptrdiff_t,
+                   kInvalidLinkKey,
+                   kReclaimFrom,
+                   kReclaimFactor> LinksSetBase;
+  typedef InvalSetIterator<LinksSetBase> LabelLinksIteratorBase;
+
+ private:
+  class LinksSet : public LinksSetBase {
+   public:
+    LinksSet() : LinksSetBase() {}
+  };
+
+  // Allows iterating over the links of a label. The behaviour is undefined if
+  // the list of links is modified in any way while iterating.
+  class LabelLinksIterator : public LabelLinksIteratorBase {
+   public:
+    explicit LabelLinksIterator(Label* label)
+        : LabelLinksIteratorBase(&label->links_) {}
+  };
+
+  void Bind(ptrdiff_t location) {
+    // Labels can only be bound once.
+    VIXL_ASSERT(!IsBound());
+    location_ = location;
+  }
+
+  void AddLink(ptrdiff_t instruction) {
+    // If a label is bound, the assembler already has the information it needs
+    // to write the instruction, so there is no need to add it to links_.
+    VIXL_ASSERT(!IsBound());
+    links_.insert(instruction);
+  }
+
+  void DeleteLink(ptrdiff_t instruction) {
+    links_.erase(instruction);
+  }
+
+  void ClearAllLinks() {
+    links_.clear();
+  }
+
+  // TODO: The comment below considers average case complexity for our
+  // usual use-cases. The elements of interest are:
+  // - Branches to a label are emitted in order: branch instructions to a label
+  // are generated at an offset in the code generation buffer greater than any
+  // other branch to that same label already generated. As an example, this can
+  // be broken when an instruction is patched to become a branch. Note that the
+  // code will still work, but the complexity considerations below may locally
+  // not apply any more.
+  // - Veneers are generated in order: for multiple branches of the same type
+  // branching to the same unbound label going out of range, veneers are
+  // generated in growing order of the branch instruction offset from the start
+  // of the buffer.
+  //
+  // When creating a veneer for a branch going out of range, the link for this
+  // branch needs to be removed from this `links_`. Since all branches are
+  // tracked in one underlying InvalSet, the complexity for this deletion is the
+  // same as for finding the element, i.e. O(n), where n is the number of links
+  // in the set.
+  // This could be reduced to O(1) by using the same trick as used when tracking
+  // branch information for veneers: split the container to use one set per type
+  // of branch. With that setup, when a veneer is created and the link needs to
+  // be deleted, if the two points above hold, it must be the minimum element of
+  // the set for its type of branch, and that minimum element will be accessible
+  // in O(1).
+
+  // The offsets of the instructions that have linked to this label.
+  LinksSet links_;
+  // The label location.
+  ptrdiff_t location_;
+
+  static const ptrdiff_t kLocationUnbound = -1;
+
+  // It is not safe to copy labels, so disable the copy constructor and the
+  // assignment operator by declaring them private (without an implementation).
+  Label(const Label&);
+  void operator=(const Label&);
+
+  // The Assembler class is responsible for binding and linking labels, since
+  // the stored offsets need to be consistent with the Assembler's buffer.
+  friend class Assembler;
+  // The MacroAssembler and VeneerPool handle resolution of branches to distant
+  // targets.
+  friend class MacroAssembler;
+  friend class VeneerPool;
+};
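+
+// Typical use with an Assembler (illustrative; `masm` is an assumed instance):
+//   Label loop;
+//   masm.bind(&loop);      // Bind the label to the current PC.
+//   masm.sub(x0, x0, 1);   // Decrement the counter.
+//   masm.cbnz(x0, &loop);  // Branch back while x0 is non-zero.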
+
+
+// Required InvalSet template specialisations.
+#define INVAL_SET_TEMPLATE_PARAMETERS \
+    ptrdiff_t,                        \
+    Label::kNPreallocatedLinks,       \
+    ptrdiff_t,                        \
+    Label::kInvalidLinkKey,           \
+    Label::kReclaimFrom,              \
+    Label::kReclaimFactor
+template<>
+inline ptrdiff_t InvalSet<INVAL_SET_TEMPLATE_PARAMETERS>::Key(
+    const ptrdiff_t& element) {
+  return element;
+}
+template<>
+inline void InvalSet<INVAL_SET_TEMPLATE_PARAMETERS>::SetKey(
+              ptrdiff_t* element, ptrdiff_t key) {
+  *element = key;
+}
+#undef INVAL_SET_TEMPLATE_PARAMETERS
+
+
+class Assembler;
+class LiteralPool;
+
+// A literal is a 32-bit, 64-bit or 128-bit piece of data stored in the
+// instruction stream and loaded through a pc relative load. The same literal
+// can be referred to by multiple instructions but a literal can only reside at
+// one place in memory. A literal can be used by a load before or after being
+// placed in memory.
+//
+// Internally an offset of 0 is associated with a literal which has been
+// neither used nor placed. Then two possibilities arise:
+//  1) the literal is placed, and the offset (stored as offset + 1) is used to
+//     resolve any subsequent load using the literal.
+//  2) the literal is not placed and offset is the offset of the last load
+//     using the literal (stored as -offset - 1). If multiple loads refer to
+//     this literal then the last load holds the offset of the preceding load
+//     and all loads form a chain. Once the literal is placed all the loads in
+//     the chain are resolved and future loads fall back to possibility 1.
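+//
+// A worked example of this encoding (illustrative): a literal placed at
+// buffer offset 8 stores offset_ == 9, so offset() returns 8; a literal whose
+// last unresolved use is a load at offset 16 stores offset_ == -17, so
+// last_use() returns 16.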
+class RawLiteral {
+ public:
+  enum DeletionPolicy {
+    kDeletedOnPlacementByPool,
+    kDeletedOnPoolDestruction,
+    kManuallyDeleted
+  };
+
+  RawLiteral(size_t size,
+             LiteralPool* literal_pool,
+             DeletionPolicy deletion_policy = kManuallyDeleted);
+
+  // The literal pool only sees and deletes `RawLiteral*` pointers, but they are
+  // actually pointing to `Literal<T>` objects.
+  virtual ~RawLiteral() {}
+
+  size_t size() {
+    VIXL_STATIC_ASSERT(kDRegSizeInBytes == kXRegSizeInBytes);
+    VIXL_STATIC_ASSERT(kSRegSizeInBytes == kWRegSizeInBytes);
+    VIXL_ASSERT((size_ == kXRegSizeInBytes) ||
+                (size_ == kWRegSizeInBytes) ||
+                (size_ == kQRegSizeInBytes));
+    return size_;
+  }
+  uint64_t raw_value128_low64() {
+    VIXL_ASSERT(size_ == kQRegSizeInBytes);
+    return low64_;
+  }
+  uint64_t raw_value128_high64() {
+    VIXL_ASSERT(size_ == kQRegSizeInBytes);
+    return high64_;
+  }
+  uint64_t raw_value64() {
+    VIXL_ASSERT(size_ == kXRegSizeInBytes);
+    VIXL_ASSERT(high64_ == 0);
+    return low64_;
+  }
+  uint32_t raw_value32() {
+    VIXL_ASSERT(size_ == kWRegSizeInBytes);
+    VIXL_ASSERT(high64_ == 0);
+    VIXL_ASSERT(is_uint32(low64_) || is_int32(low64_));
+    return static_cast<uint32_t>(low64_);
+  }
+  bool IsUsed() { return offset_ < 0; }
+  bool IsPlaced() { return offset_ > 0; }
+
+  LiteralPool* GetLiteralPool() const {
+    return literal_pool_;
+  }
+
+  ptrdiff_t offset() {
+    VIXL_ASSERT(IsPlaced());
+    return offset_ - 1;
+  }
+
+ protected:
+  void set_offset(ptrdiff_t offset) {
+    VIXL_ASSERT(offset >= 0);
+    VIXL_ASSERT(IsWordAligned(offset));
+    VIXL_ASSERT(!IsPlaced());
+    offset_ = offset + 1;
+  }
+  ptrdiff_t last_use() {
+    VIXL_ASSERT(IsUsed());
+    return -offset_ - 1;
+  }
+  void set_last_use(ptrdiff_t offset) {
+    VIXL_ASSERT(offset >= 0);
+    VIXL_ASSERT(IsWordAligned(offset));
+    VIXL_ASSERT(!IsPlaced());
+    offset_ = -offset - 1;
+  }
+
+  size_t size_;
+  ptrdiff_t offset_;
+  uint64_t low64_;
+  uint64_t high64_;
+
+ private:
+  LiteralPool* literal_pool_;
+  DeletionPolicy deletion_policy_;
+
+  friend class Assembler;
+  friend class LiteralPool;
+};
+
+
+template <typename T>
+class Literal : public RawLiteral {
+ public:
+  explicit Literal(T value,
+                   LiteralPool* literal_pool = NULL,
+                   RawLiteral::DeletionPolicy ownership = kManuallyDeleted)
+      : RawLiteral(sizeof(value), literal_pool, ownership) {
+    VIXL_STATIC_ASSERT(sizeof(value) <= kXRegSizeInBytes);
+    UpdateValue(value);
+  }
+
+  Literal(T high64, T low64,
+          LiteralPool* literal_pool = NULL,
+          RawLiteral::DeletionPolicy ownership = kManuallyDeleted)
+      : RawLiteral(kQRegSizeInBytes, literal_pool, ownership) {
+    VIXL_STATIC_ASSERT(sizeof(low64) == (kQRegSizeInBytes / 2));
+    UpdateValue(high64, low64);
+  }
+
+  virtual ~Literal() {}
+
+  // Update the value of this literal, if necessary by rewriting the value in
+  // the pool.
+  // If the literal has already been placed in a literal pool, the address of
+  // the start of the code buffer must be provided, as the literal only knows
+  // its offset from there. This also allows patching the value after the code
+  // has been moved in memory.
+  void UpdateValue(T new_value, uint8_t* code_buffer = NULL) {
+    VIXL_ASSERT(sizeof(new_value) == size_);
+    memcpy(&low64_, &new_value, sizeof(new_value));
+    if (IsPlaced()) {
+      VIXL_ASSERT(code_buffer != NULL);
+      RewriteValueInCode(code_buffer);
+    }
+  }
+
+  void UpdateValue(T high64, T low64, uint8_t* code_buffer = NULL) {
+    VIXL_ASSERT(sizeof(low64) == size_ / 2);
+    memcpy(&low64_, &low64, sizeof(low64));
+    memcpy(&high64_, &high64, sizeof(high64));
+    if (IsPlaced()) {
+      VIXL_ASSERT(code_buffer != NULL);
+      RewriteValueInCode(code_buffer);
+    }
+  }
+
+  void UpdateValue(T new_value, const Assembler* assembler);
+  void UpdateValue(T high64, T low64, const Assembler* assembler);
+
+ private:
+  void RewriteValueInCode(uint8_t* code_buffer) {
+    VIXL_ASSERT(IsPlaced());
+    VIXL_STATIC_ASSERT(sizeof(T) <= kXRegSizeInBytes);
+    switch (size()) {
+      case kSRegSizeInBytes:
+        *reinterpret_cast<uint32_t*>(code_buffer + offset()) = raw_value32();
+        break;
+      case kDRegSizeInBytes:
+        *reinterpret_cast<uint64_t*>(code_buffer + offset()) = raw_value64();
+        break;
+      default:
+        VIXL_ASSERT(size() == kQRegSizeInBytes);
+        uint64_t* base_address =
+            reinterpret_cast<uint64_t*>(code_buffer + offset());
+        *base_address = raw_value128_low64();
+        *(base_address + 1) = raw_value128_high64();
+    }
+  }
+};
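+
+// A usage sketch (illustrative; `masm` is an assumed Assembler):
+//   Literal<uint64_t> forty_two(42);
+//   masm.ldr(x0, &forty_two);  // Use the literal before it is placed...
+//   masm.place(&forty_two);    // ...then place it in the instruction stream.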
+
+
+// Control whether or not position-independent code should be emitted.
+enum PositionIndependentCodeOption {
+  // All code generated will be position-independent; all branches and
+  // references to labels generated with the Label class will use PC-relative
+  // addressing.
+  PositionIndependentCode,
+
+  // Allow VIXL to generate code that refers to absolute addresses. With this
+  // option, it will not be possible to copy the code buffer and run it from a
+  // different address; code must be generated in its final location.
+  PositionDependentCode,
+
+  // Allow VIXL to assume that the bottom 12 bits of the address will be
+  // constant, but that the top 48 bits may change. This allows `adrp` to
+  // function in systems which copy code between pages, but otherwise maintain
+  // 4KB page alignment.
+  PageOffsetDependentCode
+};
+
+
+// Control how scaled- and unscaled-offset loads and stores are generated.
+enum LoadStoreScalingOption {
+  // Prefer scaled-immediate-offset instructions, but emit unscaled-offset,
+  // register-offset, pre-index or post-index instructions if necessary.
+  PreferScaledOffset,
+
+  // Prefer unscaled-immediate-offset instructions, but emit scaled-offset,
+  // register-offset, pre-index or post-index instructions if necessary.
+  PreferUnscaledOffset,
+
+  // Require scaled-immediate-offset instructions.
+  RequireScaledOffset,
+
+  // Require unscaled-immediate-offset instructions.
+  RequireUnscaledOffset
+};
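+
+// For example (illustrative): ldr(x0, MemOperand(x1, 8), PreferScaledOffset)
+// emits the scaled-immediate LDR encoding, while RequireUnscaledOffset for
+// the same operands forces the unscaled LDUR form.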
+
+
+// Assembler.
+class Assembler {
+ public:
+  Assembler(size_t capacity,
+            PositionIndependentCodeOption pic = PositionIndependentCode);
+  Assembler(byte* buffer, size_t capacity,
+            PositionIndependentCodeOption pic = PositionIndependentCode);
+
+  // The destructor asserts that one of the following is true:
+  //  * The Assembler object has not been used.
+  //  * Nothing has been emitted since the last Reset() call.
+  //  * Nothing has been emitted since the last FinalizeCode() call.
+  ~Assembler();
+
+  // System functions.
+
+  // Start generating code from the beginning of the buffer, discarding any code
+  // and data that has already been emitted into the buffer.
+  void Reset();
+
+  // Finalize a code buffer of generated instructions. This function must be
+  // called before executing or copying code from the buffer.
+  void FinalizeCode();
+
+  // Label.
+  // Bind a label to the current PC.
+  void bind(Label* label);
+
+  // Bind a label to a specified offset from the start of the buffer.
+  void BindToOffset(Label* label, ptrdiff_t offset);
+
+  // Place a literal at the current PC.
+  void place(RawLiteral* literal);
+
+  ptrdiff_t CursorOffset() const {
+    return buffer_->CursorOffset();
+  }
+
+  ptrdiff_t BufferEndOffset() const {
+    return static_cast<ptrdiff_t>(buffer_->capacity());
+  }
+
+  // Return the address of an offset in the buffer.
+  template <typename T>
+  T GetOffsetAddress(ptrdiff_t offset) const {
+    VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
+    return buffer_->GetOffsetAddress<T>(offset);
+  }
+
+  // Return the address of a bound label.
+  template <typename T>
+  T GetLabelAddress(const Label * label) const {
+    VIXL_ASSERT(label->IsBound());
+    VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
+    return GetOffsetAddress<T>(label->location());
+  }
+
+  // Return the address of the cursor.
+  template <typename T>
+  T GetCursorAddress() const {
+    VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
+    return GetOffsetAddress<T>(CursorOffset());
+  }
+
+  // Return the address of the start of the buffer.
+  template <typename T>
+  T GetStartAddress() const {
+    VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
+    return GetOffsetAddress<T>(0);
+  }
+
+  Instruction* InstructionAt(ptrdiff_t instruction_offset) {
+    return GetOffsetAddress<Instruction*>(instruction_offset);
+  }
+
+  ptrdiff_t InstructionOffset(Instruction* instruction) {
+    VIXL_STATIC_ASSERT(sizeof(*instruction) == 1);
+    ptrdiff_t offset = instruction - GetStartAddress<Instruction*>();
+    VIXL_ASSERT((0 <= offset) &&
+                (offset < static_cast<ptrdiff_t>(BufferCapacity())));
+    return offset;
+  }
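+
+  // For example (illustrative): after FinalizeCode(), the emitted code can be
+  // inspected through GetStartAddress<uint32_t*>(), and the address of a
+  // bound label obtained with GetLabelAddress<uintptr_t>(&label).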
+
+  // Instruction set functions.
+
+  // Branch / Jump instructions.
+  // Branch to register.
+  void br(const Register& xn);
+
+  // Branch with link to register.
+  void blr(const Register& xn);
+
+  // Branch to register with return hint.
+  void ret(const Register& xn = lr);
+
+  // Unconditional branch to label.
+  void b(Label* label);
+
+  // Conditional branch to label.
+  void b(Label* label, Condition cond);
+
+  // Unconditional branch to PC offset.
+  void b(int imm26);
+
+  // Conditional branch to PC offset.
+  void b(int imm19, Condition cond);
+
+  // Branch with link to label.
+  void bl(Label* label);
+
+  // Branch with link to PC offset.
+  void bl(int imm26);
+
+  // Compare and branch to label if zero.
+  void cbz(const Register& rt, Label* label);
+
+  // Compare and branch to PC offset if zero.
+  void cbz(const Register& rt, int imm19);
+
+  // Compare and branch to label if not zero.
+  void cbnz(const Register& rt, Label* label);
+
+  // Compare and branch to PC offset if not zero.
+  void cbnz(const Register& rt, int imm19);
+
+  // Table lookup from one register.
+  void tbl(const VRegister& vd,
+           const VRegister& vn,
+           const VRegister& vm);
+
+  // Table lookup from two registers.
+  void tbl(const VRegister& vd,
+           const VRegister& vn,
+           const VRegister& vn2,
+           const VRegister& vm);
+
+  // Table lookup from three registers.
+  void tbl(const VRegister& vd,
+           const VRegister& vn,
+           const VRegister& vn2,
+           const VRegister& vn3,
+           const VRegister& vm);
+
+  // Table lookup from four registers.
+  void tbl(const VRegister& vd,
+           const VRegister& vn,
+           const VRegister& vn2,
+           const VRegister& vn3,
+           const VRegister& vn4,
+           const VRegister& vm);
+
+  // Table lookup extension from one register.
+  void tbx(const VRegister& vd,
+           const VRegister& vn,
+           const VRegister& vm);
+
+  // Table lookup extension from two registers.
+  void tbx(const VRegister& vd,
+           const VRegister& vn,
+           const VRegister& vn2,
+           const VRegister& vm);
+
+  // Table lookup extension from three registers.
+  void tbx(const VRegister& vd,
+           const VRegister& vn,
+           const VRegister& vn2,
+           const VRegister& vn3,
+           const VRegister& vm);
+
+  // Table lookup extension from four registers.
+  void tbx(const VRegister& vd,
+           const VRegister& vn,
+           const VRegister& vn2,
+           const VRegister& vn3,
+           const VRegister& vn4,
+           const VRegister& vm);
+
+  // Test bit and branch to label if zero.
+  void tbz(const Register& rt, unsigned bit_pos, Label* label);
+
+  // Test bit and branch to PC offset if zero.
+  void tbz(const Register& rt, unsigned bit_pos, int imm14);
+
+  // Test bit and branch to label if not zero.
+  void tbnz(const Register& rt, unsigned bit_pos, Label* label);
+
+  // Test bit and branch to PC offset if not zero.
+  void tbnz(const Register& rt, unsigned bit_pos, int imm14);
+
+  // Address calculation instructions.
+  // Calculate a PC-relative address. Unlike for branches, the offset in adr
+  // is unscaled (i.e. the result can be unaligned).
+
+  // Calculate the address of a label.
+  void adr(const Register& rd, Label* label);
+
+  // Calculate the address of a PC offset.
+  void adr(const Register& rd, int imm21);
+
+  // Calculate the page address of a label.
+  void adrp(const Register& rd, Label* label);
+
+  // Calculate the page address of a PC offset.
+  void adrp(const Register& rd, int imm21);
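+
+  // For example (illustrative): a full PC-relative address is typically
+  // materialised with an adrp/add pair, where adrp yields the 4KB page of the
+  // target and the add supplies the low 12 bits (`page_offset` below is an
+  // assumed, precomputed value):
+  //   adrp(x0, &target);         // Page address of `target`.
+  //   add(x0, x0, page_offset);  // Plus the offset within the page.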
+
+  // Data Processing instructions.
+  // Add.
+  void add(const Register& rd,
+           const Register& rn,
+           const Operand& operand);
+
+  // Add and update status flags.
+  void adds(const Register& rd,
+            const Register& rn,
+            const Operand& operand);
+
+  // Compare negative.
+  void cmn(const Register& rn, const Operand& operand);
+
+  // Subtract.
+  void sub(const Register& rd,
+           const Register& rn,
+           const Operand& operand);
+
+  // Subtract and update status flags.
+  void subs(const Register& rd,
+            const Register& rn,
+            const Operand& operand);
+
+  // Compare.
+  void cmp(const Register& rn, const Operand& operand);
+
+  // Negate.
+  void neg(const Register& rd,
+           const Operand& operand);
+
+  // Negate and update status flags.
+  void negs(const Register& rd,
+            const Operand& operand);
+
+  // Add with carry bit.
+  void adc(const Register& rd,
+           const Register& rn,
+           const Operand& operand);
+
+  // Add with carry bit and update status flags.
+  void adcs(const Register& rd,
+            const Register& rn,
+            const Operand& operand);
+
+  // Subtract with carry bit.
+  void sbc(const Register& rd,
+           const Register& rn,
+           const Operand& operand);
+
+  // Subtract with carry bit and update status flags.
+  void sbcs(const Register& rd,
+            const Register& rn,
+            const Operand& operand);
+
+  // Negate with carry bit.
+  void ngc(const Register& rd,
+           const Operand& operand);
+
+  // Negate with carry bit and update status flags.
+  void ngcs(const Register& rd,
+            const Operand& operand);
+
+  // Logical instructions.
+  // Bitwise and (A & B).
+  void and_(const Register& rd,
+            const Register& rn,
+            const Operand& operand);
+
+  // Bitwise and (A & B) and update status flags.
+  void ands(const Register& rd,
+            const Register& rn,
+            const Operand& operand);
+
+  // Bit test and set flags.
+  void tst(const Register& rn, const Operand& operand);
+
+  // Bit clear (A & ~B).
+  void bic(const Register& rd,
+           const Register& rn,
+           const Operand& operand);
+
+  // Bit clear (A & ~B) and update status flags.
+  void bics(const Register& rd,
+            const Register& rn,
+            const Operand& operand);
+
+  // Bitwise or (A | B).
+  void orr(const Register& rd, const Register& rn, const Operand& operand);
+
+  // Bitwise or not (A | ~B).
+  void orn(const Register& rd, const Register& rn, const Operand& operand);
+
+  // Bitwise eor/xor (A ^ B).
+  void eor(const Register& rd, const Register& rn, const Operand& operand);
+
+  // Bitwise exclusive or not / xnor (A ^ ~B).
+  void eon(const Register& rd, const Register& rn, const Operand& operand);
+
+  // Logical shift left by variable.
+  void lslv(const Register& rd, const Register& rn, const Register& rm);
+
+  // Logical shift right by variable.
+  void lsrv(const Register& rd, const Register& rn, const Register& rm);
+
+  // Arithmetic shift right by variable.
+  void asrv(const Register& rd, const Register& rn, const Register& rm);
+
+  // Rotate right by variable.
+  void rorv(const Register& rd, const Register& rn, const Register& rm);
+
+  // Bitfield instructions.
+  // Bitfield move.
+  void bfm(const Register& rd,
+           const Register& rn,
+           unsigned immr,
+           unsigned imms);
+
+  // Signed bitfield move.
+  void sbfm(const Register& rd,
+            const Register& rn,
+            unsigned immr,
+            unsigned imms);
+
+  // Unsigned bitfield move.
+  void ubfm(const Register& rd,
+            const Register& rn,
+            unsigned immr,
+            unsigned imms);
+
+  // Bfm aliases.
+  // Bitfield insert.
+  void bfi(const Register& rd,
+           const Register& rn,
+           unsigned lsb,
+           unsigned width) {
+    VIXL_ASSERT(width >= 1);
+    VIXL_ASSERT(lsb + width <= rn.size());
+    bfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
+  }
+
+  // Bitfield extract and insert low.
+  void bfxil(const Register& rd,
+             const Register& rn,
+             unsigned lsb,
+             unsigned width) {
+    VIXL_ASSERT(width >= 1);
+    VIXL_ASSERT(lsb + width <= rn.size());
+    bfm(rd, rn, lsb, lsb + width - 1);
+  }
+
+  // Sbfm aliases.
+  // Arithmetic shift right.
+  void asr(const Register& rd, const Register& rn, unsigned shift) {
+    VIXL_ASSERT(shift < rd.size());
+    sbfm(rd, rn, shift, rd.size() - 1);
+  }
+
+  // Signed bitfield insert with zero at right.
+  void sbfiz(const Register& rd,
+             const Register& rn,
+             unsigned lsb,
+             unsigned width) {
+    VIXL_ASSERT(width >= 1);
+    VIXL_ASSERT(lsb + width <= rn.size());
+    sbfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
+  }
+
+  // Signed bitfield extract.
+  void sbfx(const Register& rd,
+            const Register& rn,
+            unsigned lsb,
+            unsigned width) {
+    VIXL_ASSERT(width >= 1);
+    VIXL_ASSERT(lsb + width <= rn.size());
+    sbfm(rd, rn, lsb, lsb + width - 1);
+  }
+
+  // Signed extend byte.
+  void sxtb(const Register& rd, const Register& rn) {
+    sbfm(rd, rn, 0, 7);
+  }
+
+  // Signed extend halfword.
+  void sxth(const Register& rd, const Register& rn) {
+    sbfm(rd, rn, 0, 15);
+  }
+
+  // Signed extend word.
+  void sxtw(const Register& rd, const Register& rn) {
+    sbfm(rd, rn, 0, 31);
+  }
+
+  // Ubfm aliases.
+  // Logical shift left.
+  void lsl(const Register& rd, const Register& rn, unsigned shift) {
+    unsigned reg_size = rd.size();
+    VIXL_ASSERT(shift < reg_size);
+    ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
+  }
+
+  // Logical shift right.
+  void lsr(const Register& rd, const Register& rn, unsigned shift) {
+    VIXL_ASSERT(shift < rd.size());
+    ubfm(rd, rn, shift, rd.size() - 1);
+  }
+
+  // Unsigned bitfield insert with zero at right.
+  void ubfiz(const Register& rd,
+             const Register& rn,
+             unsigned lsb,
+             unsigned width) {
+    VIXL_ASSERT(width >= 1);
+    VIXL_ASSERT(lsb + width <= rn.size());
+    ubfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
+  }
+
+  // Unsigned bitfield extract.
+  void ubfx(const Register& rd,
+            const Register& rn,
+            unsigned lsb,
+            unsigned width) {
+    VIXL_ASSERT(width >= 1);
+    VIXL_ASSERT(lsb + width <= rn.size());
+    ubfm(rd, rn, lsb, lsb + width - 1);
+  }
+
+  // Unsigned extend byte.
+  void uxtb(const Register& rd, const Register& rn) {
+    ubfm(rd, rn, 0, 7);
+  }
+
+  // Unsigned extend halfword.
+  void uxth(const Register& rd, const Register& rn) {
+    ubfm(rd, rn, 0, 15);
+  }
+
+  // Unsigned extend word.
+  void uxtw(const Register& rd, const Register& rn) {
+    ubfm(rd, rn, 0, 31);
+  }
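+
+  // For example (illustrative): ubfx(w0, w1, 8, 8) extracts bits 15:8 of w1
+  // into w0, and lsl(w0, w1, 4) expands to ubfm(w0, w1, 28, 27).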
+
+  // Extract.
+  void extr(const Register& rd,
+            const Register& rn,
+            const Register& rm,
+            unsigned lsb);
+
+  // Conditional select: rd = cond ? rn : rm.
+  void csel(const Register& rd,
+            const Register& rn,
+            const Register& rm,
+            Condition cond);
+
+  // Conditional select increment: rd = cond ? rn : rm + 1.
+  void csinc(const Register& rd,
+             const Register& rn,
+             const Register& rm,
+             Condition cond);
+
+  // Conditional select inversion: rd = cond ? rn : ~rm.
+  void csinv(const Register& rd,
+             const Register& rn,
+             const Register& rm,
+             Condition cond);
+
+  // Conditional select negation: rd = cond ? rn : -rm.
+  void csneg(const Register& rd,
+             const Register& rn,
+             const Register& rm,
+             Condition cond);
+
+  // Conditional set: rd = cond ? 1 : 0.
+  void cset(const Register& rd, Condition cond);
+
+  // Conditional set mask: rd = cond ? -1 : 0.
+  void csetm(const Register& rd, Condition cond);
+
+  // Conditional increment: rd = cond ? rn + 1 : rn.
+  void cinc(const Register& rd, const Register& rn, Condition cond);
+
+  // Conditional invert: rd = cond ? ~rn : rn.
+  void cinv(const Register& rd, const Register& rn, Condition cond);
+
+  // Conditional negate: rd = cond ? -rn : rn.
+  void cneg(const Register& rd, const Register& rn, Condition cond);
+
+  // Rotate right.
+  void ror(const Register& rd, const Register& rs, unsigned shift) {
+    extr(rd, rs, rs, shift);
+  }
+
+  // Conditional comparison.
+  // Conditional compare negative.
+  void ccmn(const Register& rn,
+            const Operand& operand,
+            StatusFlags nzcv,
+            Condition cond);
+
+  // Conditional compare.
+  void ccmp(const Register& rn,
+            const Operand& operand,
+            StatusFlags nzcv,
+            Condition cond);
+
+  // CRC-32 checksum from byte.
+  void crc32b(const Register& rd,
+              const Register& rn,
+              const Register& rm);
+
+  // CRC-32 checksum from half-word.
+  void crc32h(const Register& rd,
+              const Register& rn,
+              const Register& rm);
+
+  // CRC-32 checksum from word.
+  void crc32w(const Register& rd,
+              const Register& rn,
+              const Register& rm);
+
+  // CRC-32 checksum from double word.
+  void crc32x(const Register& rd,
+              const Register& rn,
+              const Register& rm);
+
+  // CRC-32C checksum from byte.
+  void crc32cb(const Register& rd,
+               const Register& rn,
+               const Register& rm);
+
+  // CRC-32C checksum from half-word.
+  void crc32ch(const Register& rd,
+               const Register& rn,
+               const Register& rm);
+
+  // CRC-32C checksum from word.
+  void crc32cw(const Register& rd,
+               const Register& rn,
+               const Register& rm);
+
+  // CRC-32C checksum from double word.
+  void crc32cx(const Register& rd,
+               const Register& rn,
+               const Register& rm);
+
+  // Multiply.
+  void mul(const Register& rd, const Register& rn, const Register& rm);
+
+  // Negated multiply.
+  void mneg(const Register& rd, const Register& rn, const Register& rm);
+
+  // Signed long multiply: 32 x 32 -> 64-bit.
+  void smull(const Register& rd, const Register& rn, const Register& rm);
+
+  // Signed multiply high: 64 x 64 -> 64-bit <127:64>.
+  void smulh(const Register& xd, const Register& xn, const Register& xm);
+
+  // Multiply and accumulate.
+  void madd(const Register& rd,
+            const Register& rn,
+            const Register& rm,
+            const Register& ra);
+
+  // Multiply and subtract.
+  void msub(const Register& rd,
+            const Register& rn,
+            const Register& rm,
+            const Register& ra);
+
+  // Signed long multiply and accumulate: 32 x 32 + 64 -> 64-bit.
+  void smaddl(const Register& rd,
+              const Register& rn,
+              const Register& rm,
+              const Register& ra);
+
+  // Unsigned long multiply and accumulate: 32 x 32 + 64 -> 64-bit.
+  void umaddl(const Register& rd,
+              const Register& rn,
+              const Register& rm,
+              const Register& ra);
+
+  // Unsigned long multiply: 32 x 32 -> 64-bit.
+  void umull(const Register& rd,
+             const Register& rn,
+             const Register& rm) {
+    umaddl(rd, rn, rm, xzr);
+  }
+
+  // Unsigned multiply high: 64 x 64 -> 64-bit <127:64>.
+  void umulh(const Register& xd,
+             const Register& xn,
+             const Register& xm);
+
+  // Signed long multiply and subtract: 64 - (32 x 32) -> 64-bit.
+  void smsubl(const Register& rd,
+              const Register& rn,
+              const Register& rm,
+              const Register& ra);
+
+  // Unsigned long multiply and subtract: 64 - (32 x 32) -> 64-bit.
+  void umsubl(const Register& rd,
+              const Register& rn,
+              const Register& rm,
+              const Register& ra);
+
+  // Signed integer divide.
+  void sdiv(const Register& rd, const Register& rn, const Register& rm);
+
+  // Unsigned integer divide.
+  void udiv(const Register& rd, const Register& rn, const Register& rm);
+
+  // Bit reverse.
+  void rbit(const Register& rd, const Register& rn);
+
+  // Reverse bytes in 16-bit half words.
+  void rev16(const Register& rd, const Register& rn);
+
+  // Reverse bytes in 32-bit words.
+  void rev32(const Register& rd, const Register& rn);
+
+  // Reverse bytes.
+  void rev(const Register& rd, const Register& rn);
+
+  // Count leading zeroes.
+  void clz(const Register& rd, const Register& rn);
+
+  // Count leading sign bits.
+  void cls(const Register& rd, const Register& rn);
+
+  // Memory instructions.
+  // Load integer or FP register.
+  void ldr(const CPURegister& rt, const MemOperand& src,
+           LoadStoreScalingOption option = PreferScaledOffset);
+
+  // Store integer or FP register.
+  void str(const CPURegister& rt, const MemOperand& dst,
+           LoadStoreScalingOption option = PreferScaledOffset);
+
+  // Load word with sign extension.
+  void ldrsw(const Register& rt, const MemOperand& src,
+             LoadStoreScalingOption option = PreferScaledOffset);
+
+  // Load byte.
+  void ldrb(const Register& rt, const MemOperand& src,
+            LoadStoreScalingOption option = PreferScaledOffset);
+
+  // Store byte.
+  void strb(const Register& rt, const MemOperand& dst,
+            LoadStoreScalingOption option = PreferScaledOffset);
+
+  // Load byte with sign extension.
+  void ldrsb(const Register& rt, const MemOperand& src,
+             LoadStoreScalingOption option = PreferScaledOffset);
+
+  // Load half-word.
+  void ldrh(const Register& rt, const MemOperand& src,
+            LoadStoreScalingOption option = PreferScaledOffset);
+
+  // Store half-word.
+  void strh(const Register& rt, const MemOperand& dst,
+            LoadStoreScalingOption option = PreferScaledOffset);
+
+  // Load half-word with sign extension.
+  void ldrsh(const Register& rt, const MemOperand& src,
+             LoadStoreScalingOption option = PreferScaledOffset);
+
+  // Load integer or FP register (with unscaled offset).
+  void ldur(const CPURegister& rt, const MemOperand& src,
+            LoadStoreScalingOption option = PreferUnscaledOffset);
+
+  // Store integer or FP register (with unscaled offset).
+  void stur(const CPURegister& rt, const MemOperand& dst,
+            LoadStoreScalingOption option = PreferUnscaledOffset);
+
+  // Load word with sign extension (and unscaled offset).
+  void ldursw(const Register& rt, const MemOperand& src,
+              LoadStoreScalingOption option = PreferUnscaledOffset);
+
+  // Load byte (with unscaled offset).
+  void ldurb(const Register& rt, const MemOperand& src,
+             LoadStoreScalingOption option = PreferUnscaledOffset);
+
+  // Store byte (with unscaled offset).
+  void sturb(const Register& rt, const MemOperand& dst,
+             LoadStoreScalingOption option = PreferUnscaledOffset);
+
+  // Load byte with sign extension (and unscaled offset).
+  void ldursb(const Register& rt, const MemOperand& src,
+              LoadStoreScalingOption option = PreferUnscaledOffset);
+
+  // Load half-word (with unscaled offset).
+  void ldurh(const Register& rt, const MemOperand& src,
+             LoadStoreScalingOption option = PreferUnscaledOffset);
+
+  // Store half-word (with unscaled offset).
+  void sturh(const Register& rt, const MemOperand& dst,
+             LoadStoreScalingOption option = PreferUnscaledOffset);
+
+  // Load half-word with sign extension (and unscaled offset).
+  void ldursh(const Register& rt, const MemOperand& src,
+              LoadStoreScalingOption option = PreferUnscaledOffset);
+
+  // Load integer or FP register pair.
+  void ldp(const CPURegister& rt, const CPURegister& rt2,
+           const MemOperand& src);
+
+  // Store integer or FP register pair.
+  void stp(const CPURegister& rt, const CPURegister& rt2,
+           const MemOperand& dst);
+
+  // Load word pair with sign extension.
+  void ldpsw(const Register& rt, const Register& rt2, const MemOperand& src);
+
+  // Load integer or FP register pair, non-temporal.
+  void ldnp(const CPURegister& rt, const CPURegister& rt2,
+            const MemOperand& src);
+
+  // Store integer or FP register pair, non-temporal.
+  void stnp(const CPURegister& rt, const CPURegister& rt2,
+            const MemOperand& dst);
+
+  // Load integer or FP register from literal pool.
+  void ldr(const CPURegister& rt, RawLiteral* literal);
+
+  // Load word with sign extension from literal pool.
+  void ldrsw(const Register& rt, RawLiteral* literal);
+
+  // Load integer or FP register from pc + imm19 << 2.
+  void ldr(const CPURegister& rt, int imm19);
+
+  // Load word with sign extension from pc + imm19 << 2.
+  void ldrsw(const Register& rt, int imm19);
+
+  // Store exclusive byte.
+  void stxrb(const Register& rs, const Register& rt, const MemOperand& dst);
+
+  // Store exclusive half-word.
+  void stxrh(const Register& rs, const Register& rt, const MemOperand& dst);
+
+  // Store exclusive register.
+  void stxr(const Register& rs, const Register& rt, const MemOperand& dst);
+
+  // Load exclusive byte.
+  void ldxrb(const Register& rt, const MemOperand& src);
+
+  // Load exclusive half-word.
+  void ldxrh(const Register& rt, const MemOperand& src);
+
+  // Load exclusive register.
+  void ldxr(const Register& rt, const MemOperand& src);
+
+  // Store exclusive register pair.
+  void stxp(const Register& rs,
+            const Register& rt,
+            const Register& rt2,
+            const MemOperand& dst);
+
+  // Load exclusive register pair.
+  void ldxp(const Register& rt, const Register& rt2, const MemOperand& src);
+
+  // Store-release exclusive byte.
+  void stlxrb(const Register& rs, const Register& rt, const MemOperand& dst);
+
+  // Store-release exclusive half-word.
+  void stlxrh(const Register& rs, const Register& rt, const MemOperand& dst);
+
+  // Store-release exclusive register.
+  void stlxr(const Register& rs, const Register& rt, const MemOperand& dst);
+
+  // Load-acquire exclusive byte.
+  void ldaxrb(const Register& rt, const MemOperand& src);
+
+  // Load-acquire exclusive half-word.
+  void ldaxrh(const Register& rt, const MemOperand& src);
+
+  // Load-acquire exclusive register.
+  void ldaxr(const Register& rt, const MemOperand& src);
+
+  // Store-release exclusive register pair.
+  void stlxp(const Register& rs,
+             const Register& rt,
+             const Register& rt2,
+             const MemOperand& dst);
+
+  // Load-acquire exclusive register pair.
+  void ldaxp(const Register& rt, const Register& rt2, const MemOperand& src);
+
+  // Store-release byte.
+  void stlrb(const Register& rt, const MemOperand& dst);
+
+  // Store-release half-word.
+  void stlrh(const Register& rt, const MemOperand& dst);
+
+  // Store-release register.
+  void stlr(const Register& rt, const MemOperand& dst);
+
+  // Load-acquire byte.
+  void ldarb(const Register& rt, const MemOperand& src);
+
+  // Load-acquire half-word.
+  void ldarh(const Register& rt, const MemOperand& src);
+
+  // Load-acquire register.
+  void ldar(const Register& rt, const MemOperand& src);
+
+  // Prefetch memory.
+  void prfm(PrefetchOperation op, const MemOperand& addr,
+            LoadStoreScalingOption option = PreferScaledOffset);
+
+  // Prefetch memory (with unscaled offset).
+  void prfum(PrefetchOperation op, const MemOperand& addr,
+             LoadStoreScalingOption option = PreferUnscaledOffset);
+
+  // Prefetch memory in the literal pool.
+  void prfm(PrefetchOperation op, RawLiteral* literal);
+
+  // Prefetch from pc + imm19 << 2.
+  void prfm(PrefetchOperation op, int imm19);
+
+  // Move instructions. The default shift of -1 indicates that the move
+  // instruction will calculate an appropriate 16-bit immediate and left shift
+  // such that the result equals the 64-bit immediate argument. If an explicit
+  // left shift is specified (0, 16, 32 or 48), the immediate must be a 16-bit
+  // value.
+  //
+  // For movk, an explicit shift can be used to indicate which half word should
+  // be overwritten, e.g. movk(x0, 0, 0) will overwrite the least-significant
+  // half word with zero, whereas movk(x0, 0, 48) will overwrite the
+  // most-significant.
+
+  // Move immediate and keep.
+  void movk(const Register& rd, uint64_t imm, int shift = -1) {
+    MoveWide(rd, imm, shift, MOVK);
+  }
+
+  // Move inverted immediate.
+  void movn(const Register& rd, uint64_t imm, int shift = -1) {
+    MoveWide(rd, imm, shift, MOVN);
+  }
+
+  // Move immediate.
+  void movz(const Register& rd, uint64_t imm, int shift = -1) {
+    MoveWide(rd, imm, shift, MOVZ);
+  }
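+
+  // Illustrative sketch (not part of the upstream header): building the
+  // 64-bit value 0x56781234 with explicit-shift moves, assuming an
+  // Assembler instance so the calls can be written unqualified:
+  //
+  //   movz(x0, 0x1234, 0);   // x0 = 0x0000000000001234
+  //   movk(x0, 0x5678, 16);  // x0 = 0x0000000056781234 (other bits kept)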
+
+  // Misc instructions.
+  // Monitor debug-mode breakpoint.
+  void brk(int code);
+
+  // Halting debug-mode breakpoint.
+  void hlt(int code);
+
+  // Generate exception targeting EL1.
+  void svc(int code);
+
+  // Move register to register.
+  void mov(const Register& rd, const Register& rn);
+
+  // Move inverted operand to register.
+  void mvn(const Register& rd, const Operand& operand);
+
+  // System instructions.
+  // Move to register from system register.
+  void mrs(const Register& rt, SystemRegister sysreg);
+
+  // Move from register to system register.
+  void msr(SystemRegister sysreg, const Register& rt);
+
+  // System instruction.
+  void sys(int op1, int crn, int crm, int op2, const Register& rt = xzr);
+
+  // System instruction with pre-encoded op (op1:crn:crm:op2).
+  void sys(int op, const Register& rt = xzr);
+
+  // System data cache operation.
+  void dc(DataCacheOp op, const Register& rt);
+
+  // System instruction cache operation.
+  void ic(InstructionCacheOp op, const Register& rt);
+
+  // System hint.
+  void hint(SystemHint code);
+
+  // Clear exclusive monitor.
+  void clrex(int imm4 = 0xf);
+
+  // Data memory barrier.
+  void dmb(BarrierDomain domain, BarrierType type);
+
+  // Data synchronization barrier.
+  void dsb(BarrierDomain domain, BarrierType type);
+
+  // Instruction synchronization barrier.
+  void isb();
+
+  // Alias for system instructions.
+  // No-op.
+  void nop() {
+    hint(NOP);
+  }
+
+  // FP and NEON instructions.
+  // Move double precision immediate to FP register.
+  void fmov(const VRegister& vd, double imm);
+
+  // Move single precision immediate to FP register.
+  void fmov(const VRegister& vd, float imm);
+
+  // Move FP register to register.
+  void fmov(const Register& rd, const VRegister& fn);
+
+  // Move register to FP register.
+  void fmov(const VRegister& vd, const Register& rn);
+
+  // Move FP register to FP register.
+  void fmov(const VRegister& vd, const VRegister& fn);
+
+  // Move 64-bit register to top half of 128-bit FP register.
+  void fmov(const VRegister& vd, int index, const Register& rn);
+
+  // Move top half of 128-bit FP register to 64-bit register.
+  void fmov(const Register& rd, const VRegister& vn, int index);
+
+  // FP add.
+  void fadd(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // FP subtract.
+  void fsub(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // FP multiply.
+  void fmul(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+  // FP fused multiply-add.
+  void fmadd(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm,
+             const VRegister& va);
+
+  // FP fused multiply-subtract.
+  void fmsub(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm,
+             const VRegister& va);
+
+  // FP fused multiply-add and negate.
+  void fnmadd(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm,
+              const VRegister& va);
+
+  // FP fused multiply-subtract and negate.
+  void fnmsub(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm,
+              const VRegister& va);
+
+  // FP multiply-negate scalar.
+  void fnmul(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // FP reciprocal exponent scalar.
+  void frecpx(const VRegister& vd,
+              const VRegister& vn);
+
+  // FP divide.
+  void fdiv(const VRegister& vd, const VRegister& fn, const VRegister& vm);
+
+  // FP maximum.
+  void fmax(const VRegister& vd, const VRegister& fn, const VRegister& vm);
+
+  // FP minimum.
+  void fmin(const VRegister& vd, const VRegister& fn, const VRegister& vm);
+
+  // FP maximum number.
+  void fmaxnm(const VRegister& vd, const VRegister& fn, const VRegister& vm);
+
+  // FP minimum number.
+  void fminnm(const VRegister& vd, const VRegister& fn, const VRegister& vm);
+
+  // FP absolute.
+  void fabs(const VRegister& vd, const VRegister& vn);
+
+  // FP negate.
+  void fneg(const VRegister& vd, const VRegister& vn);
+
+  // FP square root.
+  void fsqrt(const VRegister& vd, const VRegister& vn);
+
+  // FP round to integer, nearest with ties to away.
+  void frinta(const VRegister& vd, const VRegister& vn);
+
+  // FP round to integer, implicit rounding.
+  void frinti(const VRegister& vd, const VRegister& vn);
+
+  // FP round to integer, toward minus infinity.
+  void frintm(const VRegister& vd, const VRegister& vn);
+
+  // FP round to integer, nearest with ties to even.
+  void frintn(const VRegister& vd, const VRegister& vn);
+
+  // FP round to integer, toward plus infinity.
+  void frintp(const VRegister& vd, const VRegister& vn);
+
+  // FP round to integer, exact, implicit rounding.
+  void frintx(const VRegister& vd, const VRegister& vn);
+
+  // FP round to integer, towards zero.
+  void frintz(const VRegister& vd, const VRegister& vn);
+
+  void FPCompareMacro(const VRegister& vn,
+                      double value,
+                      FPTrapFlags trap);
+
+  void FPCompareMacro(const VRegister& vn,
+                      const VRegister& vm,
+                      FPTrapFlags trap);
+
+  // FP compare registers.
+  void fcmp(const VRegister& vn, const VRegister& vm);
+
+  // FP compare immediate.
+  void fcmp(const VRegister& vn, double value);
+
+  void FPCCompareMacro(const VRegister& vn,
+                       const VRegister& vm,
+                       StatusFlags nzcv,
+                       Condition cond,
+                       FPTrapFlags trap);
+
+  // FP conditional compare.
+  void fccmp(const VRegister& vn,
+             const VRegister& vm,
+             StatusFlags nzcv,
+             Condition cond);
+
+  // FP signaling compare registers.
+  void fcmpe(const VRegister& vn, const VRegister& vm);
+
+  // FP signaling compare immediate.
+  void fcmpe(const VRegister& vn, double value);
+
+  // FP conditional signaling compare.
+  void fccmpe(const VRegister& vn,
+              const VRegister& vm,
+              StatusFlags nzcv,
+              Condition cond);
+
+  // FP conditional select.
+  void fcsel(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm,
+             Condition cond);
+
+  // Common FP Convert functions.
+  void NEONFPConvertToInt(const Register& rd,
+                          const VRegister& vn,
+                          Instr op);
+  void NEONFPConvertToInt(const VRegister& vd,
+                          const VRegister& vn,
+                          Instr op);
+
+  // FP convert between precisions.
+  void fcvt(const VRegister& vd, const VRegister& vn);
+
+  // FP convert to higher precision.
+  void fcvtl(const VRegister& vd, const VRegister& vn);
+
+  // FP convert to higher precision (second part).
+  void fcvtl2(const VRegister& vd, const VRegister& vn);
+
+  // FP convert to lower precision.
+  void fcvtn(const VRegister& vd, const VRegister& vn);
+
+  // FP convert to lower precision (second part).
+  void fcvtn2(const VRegister& vd, const VRegister& vn);
+
+  // FP convert to lower precision, rounding to odd.
+  void fcvtxn(const VRegister& vd, const VRegister& vn);
+
+  // FP convert to lower precision, rounding to odd (second part).
+  void fcvtxn2(const VRegister& vd, const VRegister& vn);
+
+  // FP convert to signed integer, nearest with ties to away.
+  void fcvtas(const Register& rd, const VRegister& vn);
+
+  // FP convert to unsigned integer, nearest with ties to away.
+  void fcvtau(const Register& rd, const VRegister& vn);
+
+  // FP convert to signed integer, nearest with ties to away.
+  void fcvtas(const VRegister& vd, const VRegister& vn);
+
+  // FP convert to unsigned integer, nearest with ties to away.
+  void fcvtau(const VRegister& vd, const VRegister& vn);
+
+  // FP convert to signed integer, round towards -infinity.
+  void fcvtms(const Register& rd, const VRegister& vn);
+
+  // FP convert to unsigned integer, round towards -infinity.
+  void fcvtmu(const Register& rd, const VRegister& vn);
+
+  // FP convert to signed integer, round towards -infinity.
+  void fcvtms(const VRegister& vd, const VRegister& vn);
+
+  // FP convert to unsigned integer, round towards -infinity.
+  void fcvtmu(const VRegister& vd, const VRegister& vn);
+
+  // FP convert to signed integer, nearest with ties to even.
+  void fcvtns(const Register& rd, const VRegister& vn);
+
+  // FP convert to unsigned integer, nearest with ties to even.
+  void fcvtnu(const Register& rd, const VRegister& vn);
+
+  // FP convert to signed integer, nearest with ties to even.
+  void fcvtns(const VRegister& vd, const VRegister& vn);
+
+  // FP convert to unsigned integer, nearest with ties to even.
+  void fcvtnu(const VRegister& vd, const VRegister& vn);
+
+  // FP convert to signed integer or fixed-point, round towards zero.
+  void fcvtzs(const Register& rd, const VRegister& vn, int fbits = 0);
+
+  // FP convert to unsigned integer or fixed-point, round towards zero.
+  void fcvtzu(const Register& rd, const VRegister& vn, int fbits = 0);
+
+  // FP convert to signed integer or fixed-point, round towards zero.
+  void fcvtzs(const VRegister& vd, const VRegister& vn, int fbits = 0);
+
+  // FP convert to unsigned integer or fixed-point, round towards zero.
+  void fcvtzu(const VRegister& vd, const VRegister& vn, int fbits = 0);
+
+  // FP convert to signed integer, round towards +infinity.
+  void fcvtps(const Register& rd, const VRegister& vn);
+
+  // FP convert to unsigned integer, round towards +infinity.
+  void fcvtpu(const Register& rd, const VRegister& vn);
+
+  // FP convert to signed integer, round towards +infinity.
+  void fcvtps(const VRegister& vd, const VRegister& vn);
+
+  // FP convert to unsigned integer, round towards +infinity.
+  void fcvtpu(const VRegister& vd, const VRegister& vn);
+
+  // Convert signed integer or fixed point to FP.
+  void scvtf(const VRegister& fd, const Register& rn, int fbits = 0);
+
+  // Convert unsigned integer or fixed point to FP.
+  void ucvtf(const VRegister& fd, const Register& rn, int fbits = 0);
+
+  // Convert signed integer or fixed-point to FP.
+  void scvtf(const VRegister& fd, const VRegister& vn, int fbits = 0);
+
+  // Convert unsigned integer or fixed-point to FP.
+  void ucvtf(const VRegister& fd, const VRegister& vn, int fbits = 0);
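+
+  // Usage sketch (assumed semantics, matching the A64 fixed-point forms):
+  // fbits is the number of fractional bits, so with fbits = 8 the source is
+  // treated as a fixed-point value scaled by 256.
+  //
+  //   scvtf(d0, x1, 8);  // d0 = (double)(int64_t)x1 / 256.0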
+
+  // Unsigned absolute difference.
+  void uabd(const VRegister& vd,
+            const VRegister& vn,
+            const VRegister& vm);
+
+  // Signed absolute difference.
+  void sabd(const VRegister& vd,
+            const VRegister& vn,
+            const VRegister& vm);
+
+  // Unsigned absolute difference and accumulate.
+  void uaba(const VRegister& vd,
+            const VRegister& vn,
+            const VRegister& vm);
+
+  // Signed absolute difference and accumulate.
+  void saba(const VRegister& vd,
+            const VRegister& vn,
+            const VRegister& vm);
+
+  // Add.
+  void add(const VRegister& vd,
+           const VRegister& vn,
+           const VRegister& vm);
+
+  // Subtract.
+  void sub(const VRegister& vd,
+           const VRegister& vn,
+           const VRegister& vm);
+
+  // Unsigned halving add.
+  void uhadd(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Signed halving add.
+  void shadd(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Unsigned rounding halving add.
+  void urhadd(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm);
+
+  // Signed rounding halving add.
+  void srhadd(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm);
+
+  // Unsigned halving sub.
+  void uhsub(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Signed halving sub.
+  void shsub(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Unsigned saturating add.
+  void uqadd(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Signed saturating add.
+  void sqadd(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Unsigned saturating subtract.
+  void uqsub(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Signed saturating subtract.
+  void sqsub(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Add pairwise.
+  void addp(const VRegister& vd,
+            const VRegister& vn,
+            const VRegister& vm);
+
+  // Add pair of elements scalar.
+  void addp(const VRegister& vd,
+            const VRegister& vn);
+
+  // Multiply-add to accumulator.
+  void mla(const VRegister& vd,
+           const VRegister& vn,
+           const VRegister& vm);
+
+  // Multiply-subtract to accumulator.
+  void mls(const VRegister& vd,
+           const VRegister& vn,
+           const VRegister& vm);
+
+  // Multiply.
+  void mul(const VRegister& vd,
+           const VRegister& vn,
+           const VRegister& vm);
+
+  // Multiply by scalar element.
+  void mul(const VRegister& vd,
+           const VRegister& vn,
+           const VRegister& vm,
+           int vm_index);
+
+  // Multiply-add by scalar element.
+  void mla(const VRegister& vd,
+           const VRegister& vn,
+           const VRegister& vm,
+           int vm_index);
+
+  // Multiply-subtract by scalar element.
+  void mls(const VRegister& vd,
+           const VRegister& vn,
+           const VRegister& vm,
+           int vm_index);
+
+  // Signed long multiply-add by scalar element.
+  void smlal(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm,
+             int vm_index);
+
+  // Signed long multiply-add by scalar element (second part).
+  void smlal2(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm,
+              int vm_index);
+
+  // Unsigned long multiply-add by scalar element.
+  void umlal(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm,
+             int vm_index);
+
+  // Unsigned long multiply-add by scalar element (second part).
+  void umlal2(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm,
+              int vm_index);
+
+  // Signed long multiply-sub by scalar element.
+  void smlsl(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm,
+             int vm_index);
+
+  // Signed long multiply-sub by scalar element (second part).
+  void smlsl2(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm,
+              int vm_index);
+
+  // Unsigned long multiply-sub by scalar element.
+  void umlsl(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm,
+             int vm_index);
+
+  // Unsigned long multiply-sub by scalar element (second part).
+  void umlsl2(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm,
+              int vm_index);
+
+  // Signed long multiply by scalar element.
+  void smull(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm,
+             int vm_index);
+
+  // Signed long multiply by scalar element (second part).
+  void smull2(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm,
+              int vm_index);
+
+  // Unsigned long multiply by scalar element.
+  void umull(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm,
+             int vm_index);
+
+  // Unsigned long multiply by scalar element (second part).
+  void umull2(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm,
+              int vm_index);
+
+  // Signed saturating double long multiply by element.
+  void sqdmull(const VRegister& vd,
+               const VRegister& vn,
+               const VRegister& vm,
+               int vm_index);
+
+  // Signed saturating double long multiply by element (second part).
+  void sqdmull2(const VRegister& vd,
+                const VRegister& vn,
+                const VRegister& vm,
+                int vm_index);
+
+  // Signed saturating doubling long multiply-add by element.
+  void sqdmlal(const VRegister& vd,
+               const VRegister& vn,
+               const VRegister& vm,
+               int vm_index);
+
+  // Signed saturating doubling long multiply-add by element (second part).
+  void sqdmlal2(const VRegister& vd,
+                const VRegister& vn,
+                const VRegister& vm,
+                int vm_index);
+
+  // Signed saturating doubling long multiply-sub by element.
+  void sqdmlsl(const VRegister& vd,
+               const VRegister& vn,
+               const VRegister& vm,
+               int vm_index);
+
+  // Signed saturating doubling long multiply-sub by element (second part).
+  void sqdmlsl2(const VRegister& vd,
+                const VRegister& vn,
+                const VRegister& vm,
+                int vm_index);
+
+  // Compare equal.
+  void cmeq(const VRegister& vd,
+            const VRegister& vn,
+            const VRegister& vm);
+
+  // Compare signed greater than or equal.
+  void cmge(const VRegister& vd,
+            const VRegister& vn,
+            const VRegister& vm);
+
+  // Compare signed greater than.
+  void cmgt(const VRegister& vd,
+            const VRegister& vn,
+            const VRegister& vm);
+
+  // Compare unsigned higher.
+  void cmhi(const VRegister& vd,
+            const VRegister& vn,
+            const VRegister& vm);
+
+  // Compare unsigned higher or same.
+  void cmhs(const VRegister& vd,
+            const VRegister& vn,
+            const VRegister& vm);
+
+  // Compare bitwise test bits nonzero.
+  void cmtst(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Compare bitwise to zero.
+  void cmeq(const VRegister& vd,
+            const VRegister& vn,
+            int value);
+
+  // Compare signed greater than or equal to zero.
+  void cmge(const VRegister& vd,
+            const VRegister& vn,
+            int value);
+
+  // Compare signed greater than zero.
+  void cmgt(const VRegister& vd,
+            const VRegister& vn,
+            int value);
+
+  // Compare signed less than or equal to zero.
+  void cmle(const VRegister& vd,
+            const VRegister& vn,
+            int value);
+
+  // Compare signed less than zero.
+  void cmlt(const VRegister& vd,
+            const VRegister& vn,
+            int value);
+
+  // Signed shift left by register.
+  void sshl(const VRegister& vd,
+            const VRegister& vn,
+            const VRegister& vm);
+
+  // Unsigned shift left by register.
+  void ushl(const VRegister& vd,
+            const VRegister& vn,
+            const VRegister& vm);
+
+  // Signed saturating shift left by register.
+  void sqshl(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Unsigned saturating shift left by register.
+  void uqshl(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Signed rounding shift left by register.
+  void srshl(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Unsigned rounding shift left by register.
+  void urshl(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Signed saturating rounding shift left by register.
+  void sqrshl(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm);
+
+  // Unsigned saturating rounding shift left by register.
+  void uqrshl(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm);
+
+  // Bitwise and.
+  void and_(const VRegister& vd,
+            const VRegister& vn,
+            const VRegister& vm);
+
+  // Bitwise or.
+  void orr(const VRegister& vd,
+           const VRegister& vn,
+           const VRegister& vm);
+
+  // Bitwise or immediate.
+  void orr(const VRegister& vd,
+           const int imm8,
+           const int left_shift = 0);
+
+  // Move register to register.
+  void mov(const VRegister& vd,
+           const VRegister& vn);
+
+  // Bitwise orn.
+  void orn(const VRegister& vd,
+           const VRegister& vn,
+           const VRegister& vm);
+
+  // Bitwise eor.
+  void eor(const VRegister& vd,
+           const VRegister& vn,
+           const VRegister& vm);
+
+  // Bit clear immediate.
+  void bic(const VRegister& vd,
+           const int imm8,
+           const int left_shift = 0);
+
+  // Bit clear.
+  void bic(const VRegister& vd,
+           const VRegister& vn,
+           const VRegister& vm);
+
+  // Bitwise insert if false.
+  void bif(const VRegister& vd,
+           const VRegister& vn,
+           const VRegister& vm);
+
+  // Bitwise insert if true.
+  void bit(const VRegister& vd,
+           const VRegister& vn,
+           const VRegister& vm);
+
+  // Bitwise select.
+  void bsl(const VRegister& vd,
+           const VRegister& vn,
+           const VRegister& vm);
+
+  // Polynomial multiply.
+  void pmul(const VRegister& vd,
+            const VRegister& vn,
+            const VRegister& vm);
+
+  // Vector move immediate.
+  void movi(const VRegister& vd,
+            const uint64_t imm,
+            Shift shift = LSL,
+            const int shift_amount = 0);
+
+  // Bitwise not.
+  void mvn(const VRegister& vd,
+           const VRegister& vn);
+
+  // Vector move inverted immediate.
+  void mvni(const VRegister& vd,
+            const int imm8,
+            Shift shift = LSL,
+            const int shift_amount = 0);
+
+  // Signed saturating accumulate of unsigned value.
+  void suqadd(const VRegister& vd,
+              const VRegister& vn);
+
+  // Unsigned saturating accumulate of signed value.
+  void usqadd(const VRegister& vd,
+              const VRegister& vn);
+
+  // Absolute value.
+  void abs(const VRegister& vd,
+           const VRegister& vn);
+
+  // Signed saturating absolute value.
+  void sqabs(const VRegister& vd,
+             const VRegister& vn);
+
+  // Negate.
+  void neg(const VRegister& vd,
+           const VRegister& vn);
+
+  // Signed saturating negate.
+  void sqneg(const VRegister& vd,
+             const VRegister& vn);
+
+  // Bitwise not.
+  void not_(const VRegister& vd,
+            const VRegister& vn);
+
+  // Extract narrow.
+  void xtn(const VRegister& vd,
+           const VRegister& vn);
+
+  // Extract narrow (second part).
+  void xtn2(const VRegister& vd,
+            const VRegister& vn);
+
+  // Signed saturating extract narrow.
+  void sqxtn(const VRegister& vd,
+             const VRegister& vn);
+
+  // Signed saturating extract narrow (second part).
+  void sqxtn2(const VRegister& vd,
+              const VRegister& vn);
+
+  // Unsigned saturating extract narrow.
+  void uqxtn(const VRegister& vd,
+             const VRegister& vn);
+
+  // Unsigned saturating extract narrow (second part).
+  void uqxtn2(const VRegister& vd,
+              const VRegister& vn);
+
+  // Signed saturating extract unsigned narrow.
+  void sqxtun(const VRegister& vd,
+              const VRegister& vn);
+
+  // Signed saturating extract unsigned narrow (second part).
+  void sqxtun2(const VRegister& vd,
+               const VRegister& vn);
+
+  // Extract vector from pair of vectors.
+  void ext(const VRegister& vd,
+           const VRegister& vn,
+           const VRegister& vm,
+           int index);
+
+  // Duplicate vector element to vector or scalar.
+  void dup(const VRegister& vd,
+           const VRegister& vn,
+           int vn_index);
+
+  // Move vector element to scalar.
+  void mov(const VRegister& vd,
+           const VRegister& vn,
+           int vn_index);
+
+  // Duplicate general-purpose register to vector.
+  void dup(const VRegister& vd,
+           const Register& rn);
+
+  // Insert vector element from another vector element.
+  void ins(const VRegister& vd,
+           int vd_index,
+           const VRegister& vn,
+           int vn_index);
+
+  // Move vector element to another vector element.
+  void mov(const VRegister& vd,
+           int vd_index,
+           const VRegister& vn,
+           int vn_index);
+
+  // Insert vector element from general-purpose register.
+  void ins(const VRegister& vd,
+           int vd_index,
+           const Register& rn);
+
+  // Move general-purpose register to a vector element.
+  void mov(const VRegister& vd,
+           int vd_index,
+           const Register& rn);
+
+  // Unsigned move vector element to general-purpose register.
+  void umov(const Register& rd,
+            const VRegister& vn,
+            int vn_index);
+
+  // Move vector element to general-purpose register.
+  void mov(const Register& rd,
+           const VRegister& vn,
+           int vn_index);
+
+  // Signed move vector element to general-purpose register.
+  void smov(const Register& rd,
+            const VRegister& vn,
+            int vn_index);
+
+  // One-element structure load to one register.
+  void ld1(const VRegister& vt,
+           const MemOperand& src);
+
+  // One-element structure load to two registers.
+  void ld1(const VRegister& vt,
+           const VRegister& vt2,
+           const MemOperand& src);
+
+  // One-element structure load to three registers.
+  void ld1(const VRegister& vt,
+           const VRegister& vt2,
+           const VRegister& vt3,
+           const MemOperand& src);
+
+  // One-element structure load to four registers.
+  void ld1(const VRegister& vt,
+           const VRegister& vt2,
+           const VRegister& vt3,
+           const VRegister& vt4,
+           const MemOperand& src);
+
+  // One-element single structure load to one lane.
+  void ld1(const VRegister& vt,
+           int lane,
+           const MemOperand& src);
+
+  // One-element single structure load to all lanes.
+  void ld1r(const VRegister& vt,
+            const MemOperand& src);
+
+  // Two-element structure load.
+  void ld2(const VRegister& vt,
+           const VRegister& vt2,
+           const MemOperand& src);
+
+  // Two-element single structure load to one lane.
+  void ld2(const VRegister& vt,
+           const VRegister& vt2,
+           int lane,
+           const MemOperand& src);
+
+  // Two-element single structure load to all lanes.
+  void ld2r(const VRegister& vt,
+            const VRegister& vt2,
+            const MemOperand& src);
+
+  // Three-element structure load.
+  void ld3(const VRegister& vt,
+           const VRegister& vt2,
+           const VRegister& vt3,
+           const MemOperand& src);
+
+  // Three-element single structure load to one lane.
+  void ld3(const VRegister& vt,
+           const VRegister& vt2,
+           const VRegister& vt3,
+           int lane,
+           const MemOperand& src);
+
+  // Three-element single structure load to all lanes.
+  void ld3r(const VRegister& vt,
+            const VRegister& vt2,
+            const VRegister& vt3,
+            const MemOperand& src);
+
+  // Four-element structure load.
+  void ld4(const VRegister& vt,
+           const VRegister& vt2,
+           const VRegister& vt3,
+           const VRegister& vt4,
+           const MemOperand& src);
+
+  // Four-element single structure load to one lane.
+  void ld4(const VRegister& vt,
+           const VRegister& vt2,
+           const VRegister& vt3,
+           const VRegister& vt4,
+           int lane,
+           const MemOperand& src);
+
+  // Four-element single structure load to all lanes.
+  void ld4r(const VRegister& vt,
+            const VRegister& vt2,
+            const VRegister& vt3,
+            const VRegister& vt4,
+            const MemOperand& src);
+
+  // Count leading sign bits.
+  void cls(const VRegister& vd,
+           const VRegister& vn);
+
+  // Count leading zero bits (vector).
+  void clz(const VRegister& vd,
+           const VRegister& vn);
+
+  // Population count per byte.
+  void cnt(const VRegister& vd,
+           const VRegister& vn);
+
+  // Reverse bit order.
+  void rbit(const VRegister& vd,
+            const VRegister& vn);
+
+  // Reverse elements in 16-bit halfwords.
+  void rev16(const VRegister& vd,
+             const VRegister& vn);
+
+  // Reverse elements in 32-bit words.
+  void rev32(const VRegister& vd,
+             const VRegister& vn);
+
+  // Reverse elements in 64-bit doublewords.
+  void rev64(const VRegister& vd,
+             const VRegister& vn);
+
+  // Unsigned reciprocal square root estimate.
+  void ursqrte(const VRegister& vd,
+               const VRegister& vn);
+
+  // Unsigned reciprocal estimate.
+  void urecpe(const VRegister& vd,
+              const VRegister& vn);
+
+  // Signed pairwise long add.
+  void saddlp(const VRegister& vd,
+              const VRegister& vn);
+
+  // Unsigned pairwise long add.
+  void uaddlp(const VRegister& vd,
+              const VRegister& vn);
+
+  // Signed pairwise long add and accumulate.
+  void sadalp(const VRegister& vd,
+              const VRegister& vn);
+
+  // Unsigned pairwise long add and accumulate.
+  void uadalp(const VRegister& vd,
+              const VRegister& vn);
+
+  // Shift left by immediate.
+  void shl(const VRegister& vd,
+           const VRegister& vn,
+           int shift);
+
+  // Signed saturating shift left by immediate.
+  void sqshl(const VRegister& vd,
+             const VRegister& vn,
+             int shift);
+
+  // Signed saturating shift left unsigned by immediate.
+  void sqshlu(const VRegister& vd,
+              const VRegister& vn,
+              int shift);
+
+  // Unsigned saturating shift left by immediate.
+  void uqshl(const VRegister& vd,
+             const VRegister& vn,
+             int shift);
+
+  // Signed shift left long by immediate.
+  void sshll(const VRegister& vd,
+             const VRegister& vn,
+             int shift);
+
+  // Signed shift left long by immediate (second part).
+  void sshll2(const VRegister& vd,
+              const VRegister& vn,
+              int shift);
+
+  // Signed extend long.
+  void sxtl(const VRegister& vd,
+            const VRegister& vn);
+
+  // Signed extend long (second part).
+  void sxtl2(const VRegister& vd,
+             const VRegister& vn);
+
+  // Unsigned shift left long by immediate.
+  void ushll(const VRegister& vd,
+             const VRegister& vn,
+             int shift);
+
+  // Unsigned shift left long by immediate (second part).
+  void ushll2(const VRegister& vd,
+              const VRegister& vn,
+              int shift);
+
+  // Shift left long by element size.
+  void shll(const VRegister& vd,
+            const VRegister& vn,
+            int shift);
+
+  // Shift left long by element size (second part).
+  void shll2(const VRegister& vd,
+             const VRegister& vn,
+             int shift);
+
+  // Unsigned extend long.
+  void uxtl(const VRegister& vd,
+            const VRegister& vn);
+
+  // Unsigned extend long (second part).
+  void uxtl2(const VRegister& vd,
+             const VRegister& vn);
+
+  // Shift left by immediate and insert.
+  void sli(const VRegister& vd,
+           const VRegister& vn,
+           int shift);
+
+  // Shift right by immediate and insert.
+  void sri(const VRegister& vd,
+           const VRegister& vn,
+           int shift);
+
+  // Signed maximum.
+  void smax(const VRegister& vd,
+            const VRegister& vn,
+            const VRegister& vm);
+
+  // Signed pairwise maximum.
+  void smaxp(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Add across vector.
+  void addv(const VRegister& vd,
+            const VRegister& vn);
+
+  // Signed add long across vector.
+  void saddlv(const VRegister& vd,
+              const VRegister& vn);
+
+  // Unsigned add long across vector.
+  void uaddlv(const VRegister& vd,
+              const VRegister& vn);
+
+  // FP maximum number across vector.
+  void fmaxnmv(const VRegister& vd,
+               const VRegister& vn);
+
+  // FP maximum across vector.
+  void fmaxv(const VRegister& vd,
+             const VRegister& vn);
+
+  // FP minimum number across vector.
+  void fminnmv(const VRegister& vd,
+               const VRegister& vn);
+
+  // FP minimum across vector.
+  void fminv(const VRegister& vd,
+             const VRegister& vn);
+
+  // Signed maximum across vector.
+  void smaxv(const VRegister& vd,
+             const VRegister& vn);
+
+  // Signed minimum.
+  void smin(const VRegister& vd,
+            const VRegister& vn,
+            const VRegister& vm);
+
+  // Signed minimum pairwise.
+  void sminp(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Signed minimum across vector.
+  void sminv(const VRegister& vd,
+             const VRegister& vn);
+
+  // One-element structure store from one register.
+  void st1(const VRegister& vt,
+           const MemOperand& dst);
+
+  // One-element structure store from two registers.
+  void st1(const VRegister& vt,
+           const VRegister& vt2,
+           const MemOperand& dst);
+
+  // One-element structure store from three registers.
+  void st1(const VRegister& vt,
+           const VRegister& vt2,
+           const VRegister& vt3,
+           const MemOperand& dst);
+
+  // One-element structure store from four registers.
+  void st1(const VRegister& vt,
+           const VRegister& vt2,
+           const VRegister& vt3,
+           const VRegister& vt4,
+           const MemOperand& dst);
+
+  // One-element single structure store from one lane.
+  void st1(const VRegister& vt,
+           int lane,
+           const MemOperand& dst);
+
+  // Two-element structure store from two registers.
+  void st2(const VRegister& vt,
+           const VRegister& vt2,
+           const MemOperand& dst);
+
+  // Two-element single structure store from two lanes.
+  void st2(const VRegister& vt,
+           const VRegister& vt2,
+           int lane,
+           const MemOperand& dst);
+
+  // Three-element structure store from three registers.
+  void st3(const VRegister& vt,
+           const VRegister& vt2,
+           const VRegister& vt3,
+           const MemOperand& dst);
+
+  // Three-element single structure store from three lanes.
+  void st3(const VRegister& vt,
+           const VRegister& vt2,
+           const VRegister& vt3,
+           int lane,
+           const MemOperand& dst);
+
+  // Four-element structure store from four registers.
+  void st4(const VRegister& vt,
+           const VRegister& vt2,
+           const VRegister& vt3,
+           const VRegister& vt4,
+           const MemOperand& dst);
+
+  // Four-element single structure store from four lanes.
+  void st4(const VRegister& vt,
+           const VRegister& vt2,
+           const VRegister& vt3,
+           const VRegister& vt4,
+           int lane,
+           const MemOperand& dst);
+
+  // Unsigned add long.
+  void uaddl(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Unsigned add long (second part).
+  void uaddl2(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm);
+
+  // Unsigned add wide.
+  void uaddw(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Unsigned add wide (second part).
+  void uaddw2(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm);
+
+  // Signed add long.
+  void saddl(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Signed add long (second part).
+  void saddl2(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm);
+
+  // Signed add wide.
+  void saddw(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Signed add wide (second part).
+  void saddw2(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm);
+
+  // Unsigned subtract long.
+  void usubl(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Unsigned subtract long (second part).
+  void usubl2(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm);
+
+  // Unsigned subtract wide.
+  void usubw(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Unsigned subtract wide (second part).
+  void usubw2(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm);
+
+  // Signed subtract long.
+  void ssubl(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Signed subtract long (second part).
+  void ssubl2(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm);
+
+  // Signed subtract wide.
+  void ssubw(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Signed subtract wide (second part).
+  void ssubw2(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm);
+
+  // Unsigned maximum.
+  void umax(const VRegister& vd,
+            const VRegister& vn,
+            const VRegister& vm);
+
+  // Unsigned pairwise maximum.
+  void umaxp(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Unsigned maximum across vector.
+  void umaxv(const VRegister& vd,
+             const VRegister& vn);
+
+  // Unsigned minimum.
+  void umin(const VRegister& vd,
+            const VRegister& vn,
+            const VRegister& vm);
+
+  // Unsigned pairwise minimum.
+  void uminp(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Unsigned minimum across vector.
+  void uminv(const VRegister& vd,
+             const VRegister& vn);
+
+  // Transpose vectors (primary).
+  void trn1(const VRegister& vd,
+            const VRegister& vn,
+            const VRegister& vm);
+
+  // Transpose vectors (secondary).
+  void trn2(const VRegister& vd,
+            const VRegister& vn,
+            const VRegister& vm);
+
+  // Unzip vectors (primary).
+  void uzp1(const VRegister& vd,
+            const VRegister& vn,
+            const VRegister& vm);
+
+  // Unzip vectors (secondary).
+  void uzp2(const VRegister& vd,
+            const VRegister& vn,
+            const VRegister& vm);
+
+  // Zip vectors (primary).
+  void zip1(const VRegister& vd,
+            const VRegister& vn,
+            const VRegister& vm);
+
+  // Zip vectors (secondary).
+  void zip2(const VRegister& vd,
+            const VRegister& vn,
+            const VRegister& vm);
+
+  // Signed shift right by immediate.
+  void sshr(const VRegister& vd,
+            const VRegister& vn,
+            int shift);
+
+  // Unsigned shift right by immediate.
+  void ushr(const VRegister& vd,
+            const VRegister& vn,
+            int shift);
+
+  // Signed rounding shift right by immediate.
+  void srshr(const VRegister& vd,
+             const VRegister& vn,
+             int shift);
+
+  // Unsigned rounding shift right by immediate.
+  void urshr(const VRegister& vd,
+             const VRegister& vn,
+             int shift);
+
+  // Signed shift right by immediate and accumulate.
+  void ssra(const VRegister& vd,
+            const VRegister& vn,
+            int shift);
+
+  // Unsigned shift right by immediate and accumulate.
+  void usra(const VRegister& vd,
+            const VRegister& vn,
+            int shift);
+
+  // Signed rounding shift right by immediate and accumulate.
+  void srsra(const VRegister& vd,
+             const VRegister& vn,
+             int shift);
+
+  // Unsigned rounding shift right by immediate and accumulate.
+  void ursra(const VRegister& vd,
+             const VRegister& vn,
+             int shift);
+
+  // Shift right narrow by immediate.
+  void shrn(const VRegister& vd,
+            const VRegister& vn,
+            int shift);
+
+  // Shift right narrow by immediate (second part).
+  void shrn2(const VRegister& vd,
+             const VRegister& vn,
+             int shift);
+
+  // Rounding shift right narrow by immediate.
+  void rshrn(const VRegister& vd,
+             const VRegister& vn,
+             int shift);
+
+  // Rounding shift right narrow by immediate (second part).
+  void rshrn2(const VRegister& vd,
+              const VRegister& vn,
+              int shift);
+
+  // Unsigned saturating shift right narrow by immediate.
+  void uqshrn(const VRegister& vd,
+              const VRegister& vn,
+              int shift);
+
+  // Unsigned saturating shift right narrow by immediate (second part).
+  void uqshrn2(const VRegister& vd,
+               const VRegister& vn,
+               int shift);
+
+  // Unsigned saturating rounding shift right narrow by immediate.
+  void uqrshrn(const VRegister& vd,
+               const VRegister& vn,
+               int shift);
+
+  // Unsigned saturating rounding shift right narrow by immediate (second part).
+  void uqrshrn2(const VRegister& vd,
+                const VRegister& vn,
+                int shift);
+
+  // Signed saturating shift right narrow by immediate.
+  void sqshrn(const VRegister& vd,
+              const VRegister& vn,
+              int shift);
+
+  // Signed saturating shift right narrow by immediate (second part).
+  void sqshrn2(const VRegister& vd,
+               const VRegister& vn,
+               int shift);
+
+  // Signed saturating rounded shift right narrow by immediate.
+  void sqrshrn(const VRegister& vd,
+               const VRegister& vn,
+               int shift);
+
+  // Signed saturating rounded shift right narrow by immediate (second part).
+  void sqrshrn2(const VRegister& vd,
+                const VRegister& vn,
+                int shift);
+
+  // Signed saturating shift right unsigned narrow by immediate.
+  void sqshrun(const VRegister& vd,
+               const VRegister& vn,
+               int shift);
+
+  // Signed saturating shift right unsigned narrow by immediate (second part).
+  void sqshrun2(const VRegister& vd,
+                const VRegister& vn,
+                int shift);
+
+  // Signed saturating rounded shift right unsigned narrow by immediate.
+  void sqrshrun(const VRegister& vd,
+                const VRegister& vn,
+                int shift);
+
+  // Signed saturating rounded shift right unsigned narrow by immediate (second part).
+  void sqrshrun2(const VRegister& vd,
+                 const VRegister& vn,
+                 int shift);
+
+  // FP reciprocal step.
+  void frecps(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm);
+
+  // FP reciprocal estimate.
+  void frecpe(const VRegister& vd,
+              const VRegister& vn);
+
+  // FP reciprocal square root estimate.
+  void frsqrte(const VRegister& vd,
+               const VRegister& vn);
+
+  // FP reciprocal square root step.
+  void frsqrts(const VRegister& vd,
+               const VRegister& vn,
+               const VRegister& vm);
+
+  // Signed absolute difference and accumulate long.
+  void sabal(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Signed absolute difference and accumulate long (second part).
+  void sabal2(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm);
+
+  // Unsigned absolute difference and accumulate long.
+  void uabal(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Unsigned absolute difference and accumulate long (second part).
+  void uabal2(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm);
+
+  // Signed absolute difference long.
+  void sabdl(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Signed absolute difference long (second part).
+  void sabdl2(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm);
+
+  // Unsigned absolute difference long.
+  void uabdl(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Unsigned absolute difference long (second part).
+  void uabdl2(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm);
+
+  // Polynomial multiply long.
+  void pmull(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Polynomial multiply long (second part).
+  void pmull2(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm);
+
+  // Signed long multiply-add.
+  void smlal(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Signed long multiply-add (second part).
+  void smlal2(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm);
+
+  // Unsigned long multiply-add.
+  void umlal(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Unsigned long multiply-add (second part).
+  void umlal2(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm);
+
+  // Signed long multiply-sub.
+  void smlsl(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Signed long multiply-sub (second part).
+  void smlsl2(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm);
+
+  // Unsigned long multiply-sub.
+  void umlsl(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Unsigned long multiply-sub (second part).
+  void umlsl2(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm);
+
+  // Signed long multiply.
+  void smull(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Signed long multiply (second part).
+  void smull2(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm);
+
+  // Signed saturating doubling long multiply-add.
+  void sqdmlal(const VRegister& vd,
+               const VRegister& vn,
+               const VRegister& vm);
+
+  // Signed saturating doubling long multiply-add (second part).
+  void sqdmlal2(const VRegister& vd,
+                const VRegister& vn,
+                const VRegister& vm);
+
+  // Signed saturating doubling long multiply-subtract.
+  void sqdmlsl(const VRegister& vd,
+               const VRegister& vn,
+               const VRegister& vm);
+
+  // Signed saturating doubling long multiply-subtract (second part).
+  void sqdmlsl2(const VRegister& vd,
+                const VRegister& vn,
+                const VRegister& vm);
+
+  // Signed saturating doubling long multiply.
+  void sqdmull(const VRegister& vd,
+               const VRegister& vn,
+               const VRegister& vm);
+
+  // Signed saturating doubling long multiply (second part).
+  void sqdmull2(const VRegister& vd,
+                const VRegister& vn,
+                const VRegister& vm);
+
+  // Signed saturating doubling multiply returning high half.
+  void sqdmulh(const VRegister& vd,
+               const VRegister& vn,
+               const VRegister& vm);
+
+  // Signed saturating rounding doubling multiply returning high half.
+  void sqrdmulh(const VRegister& vd,
+                const VRegister& vn,
+                const VRegister& vm);
+
+  // Signed saturating doubling multiply element returning high half.
+  void sqdmulh(const VRegister& vd,
+               const VRegister& vn,
+               const VRegister& vm,
+               int vm_index);
+
+  // Signed saturating rounding doubling multiply element returning high half.
+  void sqrdmulh(const VRegister& vd,
+                const VRegister& vn,
+                const VRegister& vm,
+                int vm_index);
+
+  // Unsigned long multiply.
+  void umull(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Unsigned long multiply (second part).
+  void umull2(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm);
+
+  // Add narrow returning high half.
+  void addhn(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Add narrow returning high half (second part).
+  void addhn2(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm);
+
+  // Rounding add narrow returning high half.
+  void raddhn(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm);
+
+  // Rounding add narrow returning high half (second part).
+  void raddhn2(const VRegister& vd,
+               const VRegister& vn,
+               const VRegister& vm);
+
+  // Subtract narrow returning high half.
+  void subhn(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // Subtract narrow returning high half (second part).
+  void subhn2(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm);
+
+  // Rounding subtract narrow returning high half.
+  void rsubhn(const VRegister& vd,
+              const VRegister& vn,
+              const VRegister& vm);
+
+  // Rounding subtract narrow returning high half (second part).
+  void rsubhn2(const VRegister& vd,
+               const VRegister& vn,
+               const VRegister& vm);
+
+  // FP vector multiply accumulate.
+  void fmla(const VRegister& vd,
+            const VRegister& vn,
+            const VRegister& vm);
+
+  // FP vector multiply subtract.
+  void fmls(const VRegister& vd,
+            const VRegister& vn,
+            const VRegister& vm);
+
+  // FP vector multiply extended.
+  void fmulx(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // FP absolute greater than or equal.
+  void facge(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // FP absolute greater than.
+  void facgt(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // FP multiply by element.
+  void fmul(const VRegister& vd,
+            const VRegister& vn,
+            const VRegister& vm,
+            int vm_index);
+
+  // FP fused multiply-add to accumulator by element.
+  void fmla(const VRegister& vd,
+            const VRegister& vn,
+            const VRegister& vm,
+            int vm_index);
+
+  // FP fused multiply-sub from accumulator by element.
+  void fmls(const VRegister& vd,
+            const VRegister& vn,
+            const VRegister& vm,
+            int vm_index);
+
+  // FP multiply extended by element.
+  void fmulx(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm,
+             int vm_index);
+
+  // FP compare equal.
+  void fcmeq(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // FP greater than.
+  void fcmgt(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // FP greater than or equal.
+  void fcmge(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // FP compare equal to zero.
+  void fcmeq(const VRegister& vd,
+             const VRegister& vn,
+             double imm);
+
+  // FP greater than zero.
+  void fcmgt(const VRegister& vd,
+             const VRegister& vn,
+             double imm);
+
+  // FP greater than or equal to zero.
+  void fcmge(const VRegister& vd,
+             const VRegister& vn,
+             double imm);
+
+  // FP less than or equal to zero.
+  void fcmle(const VRegister& vd,
+             const VRegister& vn,
+             double imm);
+
+  // FP less than zero.
+  void fcmlt(const VRegister& vd,
+             const VRegister& vn,
+             double imm);
+
+  // FP absolute difference.
+  void fabd(const VRegister& vd,
+            const VRegister& vn,
+            const VRegister& vm);
+
+  // FP pairwise add vector.
+  void faddp(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // FP pairwise add scalar.
+  void faddp(const VRegister& vd,
+             const VRegister& vn);
+
+  // FP pairwise maximum vector.
+  void fmaxp(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // FP pairwise maximum scalar.
+  void fmaxp(const VRegister& vd,
+             const VRegister& vn);
+
+  // FP pairwise minimum vector.
+  void fminp(const VRegister& vd,
+             const VRegister& vn,
+             const VRegister& vm);
+
+  // FP pairwise minimum scalar.
+  void fminp(const VRegister& vd,
+             const VRegister& vn);
+
+  // FP pairwise maximum number vector.
+  void fmaxnmp(const VRegister& vd,
+               const VRegister& vn,
+               const VRegister& vm);
+
+  // FP pairwise maximum number scalar.
+  void fmaxnmp(const VRegister& vd,
+               const VRegister& vn);
+
+  // FP pairwise minimum number vector.
+  void fminnmp(const VRegister& vd,
+               const VRegister& vn,
+               const VRegister& vm);
+
+  // FP pairwise minimum number scalar.
+  void fminnmp(const VRegister& vd,
+               const VRegister& vn);
+
+  // Emit generic instructions.
+  // Emit raw instructions into the instruction stream.
+  void dci(Instr raw_inst) { Emit(raw_inst); }
+
+  // Emit 32 bits of data into the instruction stream.
+  void dc32(uint32_t data) {
+    VIXL_ASSERT(buffer_monitor_ > 0);
+    buffer_->Emit32(data);
+  }
+
+  // Emit 64 bits of data into the instruction stream.
+  void dc64(uint64_t data) {
+    VIXL_ASSERT(buffer_monitor_ > 0);
+    buffer_->Emit64(data);
+  }
+
+  // Copy a string into the instruction stream, including the terminating NULL
+  // character. The instruction pointer is then aligned correctly for
+  // subsequent instructions.
+  void EmitString(const char * string) {
+    VIXL_ASSERT(string != NULL);
+    VIXL_ASSERT(buffer_monitor_ > 0);
+
+    buffer_->EmitString(string);
+    buffer_->Align();
+  }
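+
+  // Usage sketch (illustrative only), emitting raw code and data; it assumes
+  // the caller already holds the buffer monitor that the assertions check:
+  //
+  //   dci(0xd503201f);     // Raw A64 NOP encoding.
+  //   dc32(0xdeadbeef);    // 32 bits of literal data.
+  //   EmitString("vixl");  // String data, then realign for instructions.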
+
+  // Code generation helpers.
+
+  // Register encoding.
+  static Instr Rd(CPURegister rd) {
+    VIXL_ASSERT(rd.code() != kSPRegInternalCode);
+    return rd.code() << Rd_offset;
+  }
+
+  static Instr Rn(CPURegister rn) {
+    VIXL_ASSERT(rn.code() != kSPRegInternalCode);
+    return rn.code() << Rn_offset;
+  }
+
+  static Instr Rm(CPURegister rm) {
+    VIXL_ASSERT(rm.code() != kSPRegInternalCode);
+    return rm.code() << Rm_offset;
+  }
+
+  static Instr RmNot31(CPURegister rm) {
+    VIXL_ASSERT(rm.code() != kSPRegInternalCode);
+    VIXL_ASSERT(!rm.IsZero());
+    return Rm(rm);
+  }
+
+  static Instr Ra(CPURegister ra) {
+    VIXL_ASSERT(ra.code() != kSPRegInternalCode);
+    return ra.code() << Ra_offset;
+  }
+
+  static Instr Rt(CPURegister rt) {
+    VIXL_ASSERT(rt.code() != kSPRegInternalCode);
+    return rt.code() << Rt_offset;
+  }
+
+  static Instr Rt2(CPURegister rt2) {
+    VIXL_ASSERT(rt2.code() != kSPRegInternalCode);
+    return rt2.code() << Rt2_offset;
+  }
+
+  static Instr Rs(CPURegister rs) {
+    VIXL_ASSERT(rs.code() != kSPRegInternalCode);
+    return rs.code() << Rs_offset;
+  }
+
+  // These encoding functions allow the stack pointer to be encoded, and
+  // disallow the zero register.
+  static Instr RdSP(Register rd) {
+    VIXL_ASSERT(!rd.IsZero());
+    return (rd.code() & kRegCodeMask) << Rd_offset;
+  }
+
+  static Instr RnSP(Register rn) {
+    VIXL_ASSERT(!rn.IsZero());
+    return (rn.code() & kRegCodeMask) << Rn_offset;
+  }
+
+  // Flags encoding.
+  static Instr Flags(FlagsUpdate S) {
+    if (S == SetFlags) {
+      return 1 << FlagsUpdate_offset;
+    } else if (S == LeaveFlags) {
+      return 0 << FlagsUpdate_offset;
+    }
+    VIXL_UNREACHABLE();
+    return 0;
+  }
+
+  static Instr Cond(Condition cond) {
+    return cond << Condition_offset;
+  }
+
+  // PC-relative address encoding.
+  static Instr ImmPCRelAddress(int imm21) {
+    VIXL_ASSERT(is_int21(imm21));
+    Instr imm = static_cast<Instr>(truncate_to_int21(imm21));
+    Instr immhi = (imm >> ImmPCRelLo_width) << ImmPCRelHi_offset;
+    Instr immlo = imm << ImmPCRelLo_offset;
+    return (immhi & ImmPCRelHi_mask) | (immlo & ImmPCRelLo_mask);
+  }
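+  // Worked example: for imm21 == 0x1005, the low two bits 0b01 become immlo
+  // (bits 30:29) and the remaining 19 bits, 0x401, become immhi (bits 23:5),
+  // matching the split immediate encoding used by ADR and ADRP.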
+
+  // Branch encoding.
+  static Instr ImmUncondBranch(int imm26) {
+    VIXL_ASSERT(is_int26(imm26));
+    return truncate_to_int26(imm26) << ImmUncondBranch_offset;
+  }
+
+  static Instr ImmCondBranch(int imm19) {
+    VIXL_ASSERT(is_int19(imm19));
+    return truncate_to_int19(imm19) << ImmCondBranch_offset;
+  }
+
+  static Instr ImmCmpBranch(int imm19) {
+    VIXL_ASSERT(is_int19(imm19));
+    return truncate_to_int19(imm19) << ImmCmpBranch_offset;
+  }
+
+  static Instr ImmTestBranch(int imm14) {
+    VIXL_ASSERT(is_int14(imm14));
+    return truncate_to_int14(imm14) << ImmTestBranch_offset;
+  }
+
+  static Instr ImmTestBranchBit(unsigned bit_pos) {
+    VIXL_ASSERT(is_uint6(bit_pos));
+    // Shift left by (ImmTestBranchBit5_offset - 5) so that bit 5 of bit_pos
+    // lands in the b5 field.
+    unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5);
+    unsigned b40 = bit_pos << ImmTestBranchBit40_offset;
+    b5 &= ImmTestBranchBit5_mask;
+    b40 &= ImmTestBranchBit40_mask;
+    return b5 | b40;
+  }
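+  // Worked example: bit_pos == 37 (0b100101) splits into b5 == 1, placed at
+  // bit 31 of the instruction, and b40 == 0b00101, placed at bits 23:19, as
+  // TBZ/TBNZ require.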
+
+  // Data Processing encoding.
+  static Instr SF(Register rd) {
+      return rd.Is64Bits() ? SixtyFourBits : ThirtyTwoBits;
+  }
+
+  static Instr ImmAddSub(int imm) {
+    VIXL_ASSERT(IsImmAddSub(imm));
+    if (is_uint12(imm)) {  // No shift required.
+      imm <<= ImmAddSub_offset;
+    } else {
+      imm = ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
+    }
+    return imm;
+  }
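+  // Worked example: 0x123 fits in twelve bits and is encoded unshifted, while
+  // 0x5000 does not; it is encoded as 0x5 with the ShiftAddSub bit set,
+  // meaning "immediate 0x5, LSL #12".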
+
+  static Instr ImmS(unsigned imms, unsigned reg_size) {
+    VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(imms)) ||
+           ((reg_size == kWRegSize) && is_uint5(imms)));
+    USE(reg_size);
+    return imms << ImmS_offset;
+  }
+
+  static Instr ImmR(unsigned immr, unsigned reg_size) {
+    VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
+           ((reg_size == kWRegSize) && is_uint5(immr)));
+    USE(reg_size);
+    VIXL_ASSERT(is_uint6(immr));
+    return immr << ImmR_offset;
+  }
+
+  static Instr ImmSetBits(unsigned imms, unsigned reg_size) {
+    VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
+    VIXL_ASSERT(is_uint6(imms));
+    VIXL_ASSERT((reg_size == kXRegSize) || is_uint6(imms + 3));
+    USE(reg_size);
+    return imms << ImmSetBits_offset;
+  }
+
+  static Instr ImmRotate(unsigned immr, unsigned reg_size) {
+    VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
+    VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
+           ((reg_size == kWRegSize) && is_uint5(immr)));
+    USE(reg_size);
+    return immr << ImmRotate_offset;
+  }
+
+  static Instr ImmLLiteral(int imm19) {
+    VIXL_ASSERT(is_int19(imm19));
+    return truncate_to_int19(imm19) << ImmLLiteral_offset;
+  }
+
+  static Instr BitN(unsigned bitn, unsigned reg_size) {
+    VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
+    VIXL_ASSERT((reg_size == kXRegSize) || (bitn == 0));
+    USE(reg_size);
+    return bitn << BitN_offset;
+  }
+
+  static Instr ShiftDP(Shift shift) {
+    VIXL_ASSERT(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
+    return shift << ShiftDP_offset;
+  }
+
+  static Instr ImmDPShift(unsigned amount) {
+    VIXL_ASSERT(is_uint6(amount));
+    return amount << ImmDPShift_offset;
+  }
+
+  static Instr ExtendMode(Extend extend) {
+    return extend << ExtendMode_offset;
+  }
+
+  static Instr ImmExtendShift(unsigned left_shift) {
+    VIXL_ASSERT(left_shift <= 4);
+    return left_shift << ImmExtendShift_offset;
+  }
+
+  static Instr ImmCondCmp(unsigned imm) {
+    VIXL_ASSERT(is_uint5(imm));
+    return imm << ImmCondCmp_offset;
+  }
+
+  static Instr Nzcv(StatusFlags nzcv) {
+    return ((nzcv >> Flags_offset) & 0xf) << Nzcv_offset;
+  }
+
+  // MemOperand offset encoding.
+  static Instr ImmLSUnsigned(int imm12) {
+    VIXL_ASSERT(is_uint12(imm12));
+    return imm12 << ImmLSUnsigned_offset;
+  }
+
+  static Instr ImmLS(int imm9) {
+    VIXL_ASSERT(is_int9(imm9));
+    return truncate_to_int9(imm9) << ImmLS_offset;
+  }
+
+  static Instr ImmLSPair(int imm7, unsigned access_size) {
+    VIXL_ASSERT(((imm7 >> access_size) << access_size) == imm7);
+    int scaled_imm7 = imm7 >> access_size;
+    VIXL_ASSERT(is_int7(scaled_imm7));
+    return truncate_to_int7(scaled_imm7) << ImmLSPair_offset;
+  }
+
+  static Instr ImmShiftLS(unsigned shift_amount) {
+    VIXL_ASSERT(is_uint1(shift_amount));
+    return shift_amount << ImmShiftLS_offset;
+  }
+
+  static Instr ImmPrefetchOperation(int imm5) {
+    VIXL_ASSERT(is_uint5(imm5));
+    return imm5 << ImmPrefetchOperation_offset;
+  }
+
+  static Instr ImmException(int imm16) {
+    VIXL_ASSERT(is_uint16(imm16));
+    return imm16 << ImmException_offset;
+  }
+
+  static Instr ImmSystemRegister(int imm15) {
+    VIXL_ASSERT(is_uint15(imm15));
+    return imm15 << ImmSystemRegister_offset;
+  }
+
+  static Instr ImmHint(int imm7) {
+    VIXL_ASSERT(is_uint7(imm7));
+    return imm7 << ImmHint_offset;
+  }
+
+  static Instr CRm(int imm4) {
+    VIXL_ASSERT(is_uint4(imm4));
+    return imm4 << CRm_offset;
+  }
+
+  static Instr CRn(int imm4) {
+    VIXL_ASSERT(is_uint4(imm4));
+    return imm4 << CRn_offset;
+  }
+
+  static Instr SysOp(int imm14) {
+    VIXL_ASSERT(is_uint14(imm14));
+    return imm14 << SysOp_offset;
+  }
+
+  static Instr ImmSysOp1(int imm3) {
+    VIXL_ASSERT(is_uint3(imm3));
+    return imm3 << SysOp1_offset;
+  }
+
+  static Instr ImmSysOp2(int imm3) {
+    VIXL_ASSERT(is_uint3(imm3));
+    return imm3 << SysOp2_offset;
+  }
+
+  static Instr ImmBarrierDomain(int imm2) {
+    VIXL_ASSERT(is_uint2(imm2));
+    return imm2 << ImmBarrierDomain_offset;
+  }
+
+  static Instr ImmBarrierType(int imm2) {
+    VIXL_ASSERT(is_uint2(imm2));
+    return imm2 << ImmBarrierType_offset;
+  }
+
+  // Move immediates encoding.
+  static Instr ImmMoveWide(uint64_t imm) {
+    VIXL_ASSERT(is_uint16(imm));
+    return static_cast<Instr>(imm << ImmMoveWide_offset);
+  }
+
+  static Instr ShiftMoveWide(int64_t shift) {
+    VIXL_ASSERT(is_uint2(shift));
+    return static_cast<Instr>(shift << ShiftMoveWide_offset);
+  }
+
+  // FP Immediates.
+  static Instr ImmFP32(float imm);
+  static Instr ImmFP64(double imm);
+
+  // FP register type.
+  static Instr FPType(FPRegister fd) {
+    return fd.Is64Bits() ? FP64 : FP32;
+  }
+
+  static Instr FPScale(unsigned scale) {
+    VIXL_ASSERT(is_uint6(scale));
+    return scale << FPScale_offset;
+  }
+
+  // Immediate field checking helpers.
+  static bool IsImmAddSub(int64_t immediate);
+  static bool IsImmConditionalCompare(int64_t immediate);
+  static bool IsImmFP32(float imm);
+  static bool IsImmFP64(double imm);
+  static bool IsImmLogical(uint64_t value,
+                           unsigned width,
+                           unsigned* n = NULL,
+                           unsigned* imm_s = NULL,
+                           unsigned* imm_r = NULL);
+  static bool IsImmLSPair(int64_t offset, unsigned access_size);
+  static bool IsImmLSScaled(int64_t offset, unsigned access_size);
+  static bool IsImmLSUnscaled(int64_t offset);
+  static bool IsImmMovn(uint64_t imm, unsigned reg_size);
+  static bool IsImmMovz(uint64_t imm, unsigned reg_size);
+
+  // Instruction bits for vector format in data processing operations.
+  static Instr VFormat(VRegister vd) {
+    if (vd.Is64Bits()) {
+      switch (vd.lanes()) {
+        case 2: return NEON_2S;
+        case 4: return NEON_4H;
+        case 8: return NEON_8B;
+        default: return 0xffffffff;
+      }
+    } else {
+      VIXL_ASSERT(vd.Is128Bits());
+      switch (vd.lanes()) {
+        case 2: return NEON_2D;
+        case 4: return NEON_4S;
+        case 8: return NEON_8H;
+        case 16: return NEON_16B;
+        default: return 0xffffffff;
+      }
+    }
+  }
+
+  // Instruction bits for vector format in floating point data processing
+  // operations.
+  static Instr FPFormat(VRegister vd) {
+    if (vd.lanes() == 1) {
+      // Floating point scalar formats.
+      VIXL_ASSERT(vd.Is32Bits() || vd.Is64Bits());
+      return vd.Is64Bits() ? FP64 : FP32;
+    }
+
+    // Two lane floating point vector formats.
+    if (vd.lanes() == 2) {
+      VIXL_ASSERT(vd.Is64Bits() || vd.Is128Bits());
+      return vd.Is128Bits() ? NEON_FP_2D : NEON_FP_2S;
+    }
+
+    // Four lane floating point vector format.
+    VIXL_ASSERT((vd.lanes() == 4) && vd.Is128Bits());
+    return NEON_FP_4S;
+  }
+
+  // Instruction bits for vector format in load and store operations.
+  static Instr LSVFormat(VRegister vd) {
+    if (vd.Is64Bits()) {
+      switch (vd.lanes()) {
+        case 1: return LS_NEON_1D;
+        case 2: return LS_NEON_2S;
+        case 4: return LS_NEON_4H;
+        case 8: return LS_NEON_8B;
+        default: return 0xffffffff;
+      }
+    } else {
+      VIXL_ASSERT(vd.Is128Bits());
+      switch (vd.lanes()) {
+        case 2: return LS_NEON_2D;
+        case 4: return LS_NEON_4S;
+        case 8: return LS_NEON_8H;
+        case 16: return LS_NEON_16B;
+        default: return 0xffffffff;
+      }
+    }
+  }
+
+  // Instruction bits for scalar format in data processing operations.
+  static Instr SFormat(VRegister vd) {
+    VIXL_ASSERT(vd.lanes() == 1);
+    switch (vd.SizeInBytes()) {
+      case 1: return NEON_B;
+      case 2: return NEON_H;
+      case 4: return NEON_S;
+      case 8: return NEON_D;
+      default: return 0xffffffff;
+    }
+  }
+
+  static Instr ImmNEONHLM(int index, int num_bits) {
+    int h, l, m;
+    if (num_bits == 3) {
+      VIXL_ASSERT(is_uint3(index));
+      h  = (index >> 2) & 1;
+      l  = (index >> 1) & 1;
+      m  = (index >> 0) & 1;
+    } else if (num_bits == 2) {
+      VIXL_ASSERT(is_uint2(index));
+      h  = (index >> 1) & 1;
+      l  = (index >> 0) & 1;
+      m  = 0;
+    } else {
+      VIXL_ASSERT(is_uint1(index) && (num_bits == 1));
+      h  = (index >> 0) & 1;
+      l  = 0;
+      m  = 0;
+    }
+    return (h << NEONH_offset) | (l << NEONL_offset) | (m << NEONM_offset);
+  }
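+  // Worked example: with num_bits == 3 and index == 5 (0b101), the lane index
+  // is scattered as H == 1, L == 0, M == 1 into the by-element index fields.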
+
+  static Instr ImmNEONExt(int imm4) {
+    VIXL_ASSERT(is_uint4(imm4));
+    return imm4 << ImmNEONExt_offset;
+  }
+
+  static Instr ImmNEON5(Instr format, int index) {
+    VIXL_ASSERT(is_uint4(index));
+    int s = LaneSizeInBytesLog2FromFormat(static_cast<VectorFormat>(format));
+    int imm5 = (index << (s + 1)) | (1 << s);
+    return imm5 << ImmNEON5_offset;
+  }
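+  // Worked example: for B-sized lanes (s == 0) and index == 3, imm5 is
+  // (3 << 1) | 1 == 0b00111; for H-sized lanes (s == 1) and index == 2 it is
+  // (2 << 2) | 2 == 0b01010. The lowest set bit encodes the lane size.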
+
+  static Instr ImmNEON4(Instr format, int index) {
+    VIXL_ASSERT(is_uint4(index));
+    int s = LaneSizeInBytesLog2FromFormat(static_cast<VectorFormat>(format));
+    int imm4 = index << s;
+    return imm4 << ImmNEON4_offset;
+  }
+
+  static Instr ImmNEONabcdefgh(int imm8) {
+    VIXL_ASSERT(is_uint8(imm8));
+    Instr instr;
+    instr  = ((imm8 >> 5) & 7) << ImmNEONabc_offset;
+    instr |= (imm8 & 0x1f) << ImmNEONdefgh_offset;
+    return instr;
+  }
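+  // Worked example: imm8 == 0xE3 (0b11100011) splits into abc == 0b111 and
+  // defgh == 0b00011, which land in the two modified-immediate fields.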
+
+  static Instr NEONCmode(int cmode) {
+    VIXL_ASSERT(is_uint4(cmode));
+    return cmode << NEONCmode_offset;
+  }
+
+  static Instr NEONModImmOp(int op) {
+    VIXL_ASSERT(is_uint1(op));
+    return op << NEONModImmOp_offset;
+  }
+
+  // Size of the code generated from the bound label to the current position.
+  size_t SizeOfCodeGeneratedSince(Label* label) const {
+    VIXL_ASSERT(label->IsBound());
+    return buffer_->OffsetFrom(label->location());
+  }
+
+  size_t SizeOfCodeGenerated() const {
+    return buffer_->CursorOffset();
+  }
+
+  size_t BufferCapacity() const { return buffer_->capacity(); }
+
+  size_t RemainingBufferSpace() const { return buffer_->RemainingBytes(); }
+
+  void EnsureSpaceFor(size_t amount) {
+    if (buffer_->RemainingBytes() < amount) {
+      size_t capacity = buffer_->capacity();
+      size_t size = buffer_->CursorOffset();
+      do {
+        // TODO(all): refine.
+        capacity *= 2;
+      } while ((capacity - size) <  amount);
+      buffer_->Grow(capacity);
+    }
+  }
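+  // For instance, with a 4096-byte buffer and the cursor at 3072 bytes, a
+  // request for 6144 bytes doubles the capacity twice, to 16384 bytes, before
+  // the remaining space is sufficient.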
+
+#ifdef VIXL_DEBUG
+  void AcquireBuffer() {
+    VIXL_ASSERT(buffer_monitor_ >= 0);
+    buffer_monitor_++;
+  }
+
+  void ReleaseBuffer() {
+    buffer_monitor_--;
+    VIXL_ASSERT(buffer_monitor_ >= 0);
+  }
+#endif
+
+  PositionIndependentCodeOption pic() const {
+    return pic_;
+  }
+
+  bool AllowPageOffsetDependentCode() const {
+    return (pic() == PageOffsetDependentCode) ||
+           (pic() == PositionDependentCode);
+  }
+
+  static const Register& AppropriateZeroRegFor(const CPURegister& reg) {
+    return reg.Is64Bits() ? xzr : wzr;
+  }
+
+
+ protected:
+  void LoadStore(const CPURegister& rt,
+                 const MemOperand& addr,
+                 LoadStoreOp op,
+                 LoadStoreScalingOption option = PreferScaledOffset);
+
+  void LoadStorePair(const CPURegister& rt,
+                     const CPURegister& rt2,
+                     const MemOperand& addr,
+                     LoadStorePairOp op);
+  void LoadStoreStruct(const VRegister& vt,
+                       const MemOperand& addr,
+                       NEONLoadStoreMultiStructOp op);
+  void LoadStoreStruct1(const VRegister& vt,
+                        int reg_count,
+                        const MemOperand& addr);
+  void LoadStoreStructSingle(const VRegister& vt,
+                             uint32_t lane,
+                             const MemOperand& addr,
+                             NEONLoadStoreSingleStructOp op);
+  void LoadStoreStructSingleAllLanes(const VRegister& vt,
+                                     const MemOperand& addr,
+                                     NEONLoadStoreSingleStructOp op);
+  void LoadStoreStructVerify(const VRegister& vt,
+                             const MemOperand& addr,
+                             Instr op);
+
+  void Prefetch(PrefetchOperation op,
+                const MemOperand& addr,
+                LoadStoreScalingOption option = PreferScaledOffset);
+
+  // TODO(all): The third parameter should be passed by reference, but gcc
+  // 4.8.2 reports a bogus uninitialised warning in that case.
+  void Logical(const Register& rd,
+               const Register& rn,
+               const Operand operand,
+               LogicalOp op);
+  void LogicalImmediate(const Register& rd,
+                        const Register& rn,
+                        unsigned n,
+                        unsigned imm_s,
+                        unsigned imm_r,
+                        LogicalOp op);
+
+  void ConditionalCompare(const Register& rn,
+                          const Operand& operand,
+                          StatusFlags nzcv,
+                          Condition cond,
+                          ConditionalCompareOp op);
+
+  void AddSubWithCarry(const Register& rd,
+                       const Register& rn,
+                       const Operand& operand,
+                       FlagsUpdate S,
+                       AddSubWithCarryOp op);
+
+
+  // Functions for emulating operands not directly supported by the instruction
+  // set.
+  void EmitShift(const Register& rd,
+                 const Register& rn,
+                 Shift shift,
+                 unsigned amount);
+  void EmitExtendShift(const Register& rd,
+                       const Register& rn,
+                       Extend extend,
+                       unsigned left_shift);
+
+  void AddSub(const Register& rd,
+              const Register& rn,
+              const Operand& operand,
+              FlagsUpdate S,
+              AddSubOp op);
+
+  void NEONTable(const VRegister& vd,
+                 const VRegister& vn,
+                 const VRegister& vm,
+                 NEONTableOp op);
+
+  // Find an appropriate LoadStoreOp or LoadStorePairOp for the specified
+  // registers. Only simple loads are supported; sign- and zero-extension (such
+  // as in LDPSW_x or LDRB_w) are not supported.
+  static LoadStoreOp LoadOpFor(const CPURegister& rt);
+  static LoadStorePairOp LoadPairOpFor(const CPURegister& rt,
+                                       const CPURegister& rt2);
+  static LoadStoreOp StoreOpFor(const CPURegister& rt);
+  static LoadStorePairOp StorePairOpFor(const CPURegister& rt,
+                                        const CPURegister& rt2);
+  static LoadStorePairNonTemporalOp LoadPairNonTemporalOpFor(
+    const CPURegister& rt, const CPURegister& rt2);
+  static LoadStorePairNonTemporalOp StorePairNonTemporalOpFor(
+    const CPURegister& rt, const CPURegister& rt2);
+  static LoadLiteralOp LoadLiteralOpFor(const CPURegister& rt);
+
+
+ private:
+  static uint32_t FP32ToImm8(float imm);
+  static uint32_t FP64ToImm8(double imm);
+
+  // Instruction helpers.
+  void MoveWide(const Register& rd,
+                uint64_t imm,
+                int shift,
+                MoveWideImmediateOp mov_op);
+  void DataProcShiftedRegister(const Register& rd,
+                               const Register& rn,
+                               const Operand& operand,
+                               FlagsUpdate S,
+                               Instr op);
+  void DataProcExtendedRegister(const Register& rd,
+                                const Register& rn,
+                                const Operand& operand,
+                                FlagsUpdate S,
+                                Instr op);
+  void LoadStorePairNonTemporal(const CPURegister& rt,
+                                const CPURegister& rt2,
+                                const MemOperand& addr,
+                                LoadStorePairNonTemporalOp op);
+  void LoadLiteral(const CPURegister& rt, uint64_t imm, LoadLiteralOp op);
+  void ConditionalSelect(const Register& rd,
+                         const Register& rn,
+                         const Register& rm,
+                         Condition cond,
+                         ConditionalSelectOp op);
+  void DataProcessing1Source(const Register& rd,
+                             const Register& rn,
+                             DataProcessing1SourceOp op);
+  void DataProcessing3Source(const Register& rd,
+                             const Register& rn,
+                             const Register& rm,
+                             const Register& ra,
+                             DataProcessing3SourceOp op);
+  void FPDataProcessing1Source(const VRegister& fd,
+                               const VRegister& fn,
+                               FPDataProcessing1SourceOp op);
+  void FPDataProcessing3Source(const VRegister& fd,
+                               const VRegister& fn,
+                               const VRegister& fm,
+                               const VRegister& fa,
+                               FPDataProcessing3SourceOp op);
+  void NEONAcrossLanesL(const VRegister& vd,
+                        const VRegister& vn,
+                        NEONAcrossLanesOp op);
+  void NEONAcrossLanes(const VRegister& vd,
+                       const VRegister& vn,
+                       NEONAcrossLanesOp op);
+  void NEONModifiedImmShiftLsl(const VRegister& vd,
+                               const int imm8,
+                               const int left_shift,
+                               NEONModifiedImmediateOp op);
+  void NEONModifiedImmShiftMsl(const VRegister& vd,
+                               const int imm8,
+                               const int shift_amount,
+                               NEONModifiedImmediateOp op);
+  void NEONFP2Same(const VRegister& vd,
+                   const VRegister& vn,
+                   Instr vop);
+  void NEON3Same(const VRegister& vd,
+                 const VRegister& vn,
+                 const VRegister& vm,
+                 NEON3SameOp vop);
+  void NEONFP3Same(const VRegister& vd,
+                   const VRegister& vn,
+                   const VRegister& vm,
+                   Instr op);
+  void NEON3DifferentL(const VRegister& vd,
+                       const VRegister& vn,
+                       const VRegister& vm,
+                       NEON3DifferentOp vop);
+  void NEON3DifferentW(const VRegister& vd,
+                       const VRegister& vn,
+                       const VRegister& vm,
+                       NEON3DifferentOp vop);
+  void NEON3DifferentHN(const VRegister& vd,
+                        const VRegister& vn,
+                        const VRegister& vm,
+                        NEON3DifferentOp vop);
+  void NEONFP2RegMisc(const VRegister& vd,
+                      const VRegister& vn,
+                      NEON2RegMiscOp vop,
+                      double value = 0.0);
+  void NEON2RegMisc(const VRegister& vd,
+                    const VRegister& vn,
+                    NEON2RegMiscOp vop,
+                    int value = 0);
+  void NEONFP2RegMisc(const VRegister& vd,
+                      const VRegister& vn,
+                      Instr op);
+  void NEONAddlp(const VRegister& vd,
+                 const VRegister& vn,
+                 NEON2RegMiscOp op);
+  void NEONPerm(const VRegister& vd,
+                const VRegister& vn,
+                const VRegister& vm,
+                NEONPermOp op);
+  void NEONFPByElement(const VRegister& vd,
+                       const VRegister& vn,
+                       const VRegister& vm,
+                       int vm_index,
+                       NEONByIndexedElementOp op);
+  void NEONByElement(const VRegister& vd,
+                     const VRegister& vn,
+                     const VRegister& vm,
+                     int vm_index,
+                     NEONByIndexedElementOp op);
+  void NEONByElementL(const VRegister& vd,
+                      const VRegister& vn,
+                      const VRegister& vm,
+                      int vm_index,
+                      NEONByIndexedElementOp op);
+  void NEONShiftImmediate(const VRegister& vd,
+                          const VRegister& vn,
+                          NEONShiftImmediateOp op,
+                          int immh_immb);
+  void NEONShiftLeftImmediate(const VRegister& vd,
+                              const VRegister& vn,
+                              int shift,
+                              NEONShiftImmediateOp op);
+  void NEONShiftRightImmediate(const VRegister& vd,
+                               const VRegister& vn,
+                               int shift,
+                               NEONShiftImmediateOp op);
+  void NEONShiftImmediateL(const VRegister& vd,
+                           const VRegister& vn,
+                           int shift,
+                           NEONShiftImmediateOp op);
+  void NEONShiftImmediateN(const VRegister& vd,
+                           const VRegister& vn,
+                           int shift,
+                           NEONShiftImmediateOp op);
+  void NEONXtn(const VRegister& vd,
+               const VRegister& vn,
+               NEON2RegMiscOp vop);
+
+  Instr LoadStoreStructAddrModeField(const MemOperand& addr);
+
+  // Encode the specified MemOperand for the specified access size and scaling
+  // preference.
+  Instr LoadStoreMemOperand(const MemOperand& addr,
+                            unsigned access_size,
+                            LoadStoreScalingOption option);
+
+  // Link the current (not-yet-emitted) instruction to the specified label, then
+  // return an offset to be encoded in the instruction. If the label is not yet
+  // bound, an offset of 0 is returned.
+  ptrdiff_t LinkAndGetByteOffsetTo(Label * label);
+  ptrdiff_t LinkAndGetInstructionOffsetTo(Label * label);
+  ptrdiff_t LinkAndGetPageOffsetTo(Label * label);
+
+  // A common implementation for the LinkAndGet<Type>OffsetTo helpers.
+  template <int element_shift>
+  ptrdiff_t LinkAndGetOffsetTo(Label* label);
+
+  // Literal load offsets are in words (32-bit).
+  ptrdiff_t LinkAndGetWordOffsetTo(RawLiteral* literal);
+
+  // Emit the instruction in buffer_.
+  void Emit(Instr instruction) {
+    VIXL_STATIC_ASSERT(sizeof(instruction) == kInstructionSize);
+    VIXL_ASSERT(buffer_monitor_ > 0);
+    buffer_->Emit32(instruction);
+  }
+
+  // Buffer where the code is emitted.
+  CodeBuffer* buffer_;
+  PositionIndependentCodeOption pic_;
+
+#ifdef VIXL_DEBUG
+  int64_t buffer_monitor_;
+#endif
+};
+
+
+// All Assembler emits MUST acquire/release the underlying code buffer. The
+// helper scope below will do so and optionally ensure the buffer is big
+// enough to receive the emit. It is possible to request that the scope not
+// perform any checks (kNoCheck) if, for example, it is known in advance that
+// the buffer size is adequate or some other size-checking mechanism is in
+// place.
+class CodeBufferCheckScope {
+ public:
+  // Tell whether or not the scope needs to ensure the associated CodeBuffer
+  // has enough space for the requested size.
+  enum CheckPolicy {
+    kNoCheck,
+    kCheck
+  };
+
+  // Tell whether or not the scope should assert the amount of code emitted
+  // within the scope is consistent with the requested amount.
+  enum AssertPolicy {
+    kNoAssert,    // No assert required.
+    kExactSize,   // The code emitted must be exactly size bytes.
+    kMaximumSize  // The code emitted must be at most size bytes.
+  };
+
+  CodeBufferCheckScope(Assembler* assm,
+                       size_t size,
+                       CheckPolicy check_policy = kCheck,
+                       AssertPolicy assert_policy = kMaximumSize)
+      : assm_(assm) {
+    if (check_policy == kCheck) assm->EnsureSpaceFor(size);
+#ifdef VIXL_DEBUG
+    assm->bind(&start_);
+    size_ = size;
+    assert_policy_ = assert_policy;
+    assm->AcquireBuffer();
+#else
+    USE(assert_policy);
+#endif
+  }
+
+  // This is a shortcut for CodeBufferCheckScope(assm, 0, kNoCheck, kNoAssert).
+  explicit CodeBufferCheckScope(Assembler* assm) : assm_(assm) {
+#ifdef VIXL_DEBUG
+    size_ = 0;
+    assert_policy_ = kNoAssert;
+    assm->AcquireBuffer();
+#endif
+  }
+
+  ~CodeBufferCheckScope() {
+#ifdef VIXL_DEBUG
+    assm_->ReleaseBuffer();
+    switch (assert_policy_) {
+      case kNoAssert: break;
+      case kExactSize:
+        VIXL_ASSERT(assm_->SizeOfCodeGeneratedSince(&start_) == size_);
+        break;
+      case kMaximumSize:
+        VIXL_ASSERT(assm_->SizeOfCodeGeneratedSince(&start_) <= size_);
+        break;
+      default:
+        VIXL_UNREACHABLE();
+    }
+#endif
+  }
+
+ protected:
+  Assembler* assm_;
+#ifdef VIXL_DEBUG
+  Label start_;
+  size_t size_;
+  AssertPolicy assert_policy_;
+#endif
+};
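+// A minimal usage sketch (assuming an Assembler instance named `assm` and the
+// usual vixl register aliases):
+//
+//   {
+//     CodeBufferCheckScope scope(&assm, 2 * kInstructionSize,
+//                                CodeBufferCheckScope::kCheck,
+//                                CodeBufferCheckScope::kExactSize);
+//     assm.add(x0, x1, Operand(x2));
+//     assm.ret();
+//   }
+//
+// On scope exit, debug builds assert that exactly the requested two
+// instructions were emitted.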
+
+
+template <typename T>
+void Literal<T>::UpdateValue(T new_value, const Assembler* assembler) {
+  return UpdateValue(new_value, assembler->GetStartAddress<uint8_t*>());
+}
+
+
+template <typename T>
+void Literal<T>::UpdateValue(T high64, T low64, const Assembler* assembler) {
+  return UpdateValue(high64, low64, assembler->GetStartAddress<uint8_t*>());
+}
+
+
+}  // namespace vixl
+
+#endif  // VIXL_A64_ASSEMBLER_A64_H_
diff --git a/disas/libvixl/a64/constants-a64.h b/disas/libvixl/vixl/a64/constants-a64.h
index bc1a2c4b9b..2caa73af87 100644
--- a/disas/libvixl/a64/constants-a64.h
+++ b/disas/libvixl/vixl/a64/constants-a64.h
@@ -1,4 +1,4 @@
-// Copyright 2013, ARM Limited
+// Copyright 2015, ARM Limited
 // All rights reserved.
 //
 // Redistribution and use in source and binary forms, with or without
@@ -30,7 +30,14 @@
 namespace vixl {
 
 const unsigned kNumberOfRegisters = 32;
-const unsigned kNumberOfFPRegisters = 32;
+const unsigned kNumberOfVRegisters = 32;
+const unsigned kNumberOfFPRegisters = kNumberOfVRegisters;
+// Callee-saved registers are x21-x30 (lr).
+const int kNumberOfCalleeSavedRegisters = 10;
+const int kFirstCalleeSavedRegisterIndex = 21;
+// Callee-saved FP registers are d8-d15.
+const int kNumberOfCalleeSavedFPRegisters = 8;
+const int kFirstCalleeSavedFPRegisterIndex = 8;
 
 #define REGISTER_CODE_LIST(R)                                                  \
 R(0)  R(1)  R(2)  R(3)  R(4)  R(5)  R(6)  R(7)                                 \
@@ -100,8 +107,10 @@ V_(FPScale, 15, 10, Bits)                                                      \
 V_(ImmLS, 20, 12, SignedBits)                                                  \
 V_(ImmLSUnsigned, 21, 10, Bits)                                                \
 V_(ImmLSPair, 21, 15, SignedBits)                                              \
-V_(SizeLS, 31, 30, Bits)                                                       \
 V_(ImmShiftLS, 12, 12, Bits)                                                   \
+V_(LSOpc, 23, 22, Bits)                                                        \
+V_(LSVector, 26, 26, Bits)                                                     \
+V_(LSSize, 31, 30, Bits)                                                       \
 V_(ImmPrefetchOperation, 4, 0, Bits)                                           \
 V_(PrefetchHint, 4, 3, Bits)                                                   \
 V_(PrefetchTarget, 2, 1, Bits)                                                 \
@@ -116,9 +125,10 @@ V_(ImmHint, 11, 5, Bits)                                                       \
 V_(ImmBarrierDomain, 11, 10, Bits)                                             \
 V_(ImmBarrierType, 9, 8, Bits)                                                 \
                                                                                \
-/* System (MRS, MSR) */                                                        \
+/* System (MRS, MSR, SYS) */                                                   \
 V_(ImmSystemRegister, 19, 5, Bits)                                             \
 V_(SysO0, 19, 19, Bits)                                                        \
+V_(SysOp, 18, 5, Bits)                                                         \
 V_(SysOp1, 18, 16, Bits)                                                       \
 V_(SysOp2, 7, 5, Bits)                                                         \
 V_(CRn, 15, 12, Bits)                                                          \
@@ -130,7 +140,29 @@ V_(LdStXNotExclusive, 23, 23, Bits)                                            \
 V_(LdStXAcquireRelease, 15, 15, Bits)                                          \
 V_(LdStXSizeLog2, 31, 30, Bits)                                                \
 V_(LdStXPair, 21, 21, Bits)                                                    \
-
+                                                                               \
+/* NEON generic fields */                                                      \
+V_(NEONQ, 30, 30, Bits)                                                        \
+V_(NEONSize, 23, 22, Bits)                                                     \
+V_(NEONLSSize, 11, 10, Bits)                                                   \
+V_(NEONS, 12, 12, Bits)                                                        \
+V_(NEONL, 21, 21, Bits)                                                        \
+V_(NEONM, 20, 20, Bits)                                                        \
+V_(NEONH, 11, 11, Bits)                                                        \
+V_(ImmNEONExt, 14, 11, Bits)                                                   \
+V_(ImmNEON5, 20, 16, Bits)                                                     \
+V_(ImmNEON4, 14, 11, Bits)                                                     \
+                                                                               \
+/* NEON Modified Immediate fields */                                           \
+V_(ImmNEONabc, 18, 16, Bits)                                                   \
+V_(ImmNEONdefgh, 9, 5, Bits)                                                   \
+V_(NEONModImmOp, 29, 29, Bits)                                                 \
+V_(NEONCmode, 15, 12, Bits)                                                    \
+                                                                               \
+/* NEON Shift Immediate fields */                                              \
+V_(ImmNEONImmhImmb, 22, 16, Bits)                                              \
+V_(ImmNEONImmh, 22, 19, Bits)                                                  \
+V_(ImmNEONImmb, 18, 16, Bits)
 
 #define SYSTEM_REGISTER_FIELDS_LIST(V_, M_)                                    \
 /* NZCV */                                                                     \
@@ -140,7 +172,6 @@ V_(Z, 30, 30, Bits)                                                            \
 V_(C, 29, 29, Bits)                                                            \
 V_(V, 28, 28, Bits)                                                            \
 M_(NZCV, Flags_mask)                                                           \
-                                                                               \
 /* FPCR */                                                                     \
 V_(AHP, 26, 26, Bits)                                                          \
 V_(DN, 25, 25, Bits)                                                           \
@@ -148,7 +179,6 @@ V_(FZ, 24, 24, Bits)                                                           \
 V_(RMode, 23, 22, Bits)                                                        \
 M_(FPCR, AHP_mask | DN_mask | FZ_mask | RMode_mask)
 
-
 // Fields offsets.
 #define DECLARE_FIELDS_OFFSETS(Name, HighBit, LowBit, X)                       \
 const int Name##_offset = LowBit;                                              \
@@ -166,22 +196,26 @@ const int ImmPCRel_mask = ImmPCRelLo_mask | ImmPCRelHi_mask;
 
 // Condition codes.
 enum Condition {
-  eq = 0,
-  ne = 1,
-  hs = 2,
-  lo = 3,
-  mi = 4,
-  pl = 5,
-  vs = 6,
-  vc = 7,
-  hi = 8,
-  ls = 9,
-  ge = 10,
-  lt = 11,
-  gt = 12,
-  le = 13,
-  al = 14,
-  nv = 15  // Behaves as always/al.
+  eq = 0,   // Z set            Equal.
+  ne = 1,   // Z clear          Not equal.
+  cs = 2,   // C set            Carry set.
+  cc = 3,   // C clear          Carry clear.
+  mi = 4,   // N set            Negative.
+  pl = 5,   // N clear          Positive or zero.
+  vs = 6,   // V set            Overflow.
+  vc = 7,   // V clear          No overflow.
+  hi = 8,   // C set, Z clear   Unsigned higher.
+  ls = 9,   // C clear or Z set Unsigned lower or same.
+  ge = 10,  // N == V           Greater or equal.
+  lt = 11,  // N != V           Less than.
+  gt = 12,  // Z clear, N == V  Greater than.
+  le = 13,  // Z set or N != V  Less than or equal.
+  al = 14,  //                  Always.
+  nv = 15,  // Behaves as always/al.
+
+  // Aliases.
+  hs = cs,  // C set            Unsigned higher or same.
+  lo = cc   // C clear          Unsigned lower.
 };
 
 inline Condition InvertCondition(Condition cond) {
@@ -191,6 +225,11 @@ inline Condition InvertCondition(Condition cond) {
   return static_cast<Condition>(cond ^ 1);
 }
 
+enum FPTrapFlags {
+  EnableTrap   = 1,
+  DisableTrap = 0
+};
+
 enum FlagsUpdate {
   SetFlags   = 1,
   LeaveFlags = 0
@@ -228,7 +267,8 @@ enum Shift {
   LSL = 0x0,
   LSR = 0x1,
   ASR = 0x2,
-  ROR = 0x3
+  ROR = 0x3,
+  MSL = 0x4
 };
 
 enum Extend {
@@ -305,6 +345,32 @@ enum SystemRegister {
           (0x0 << SysOp2_offset)) >> ImmSystemRegister_offset
 };
 
+enum InstructionCacheOp {
+  IVAU = ((0x3 << SysOp1_offset) |
+          (0x7 << CRn_offset) |
+          (0x5 << CRm_offset) |
+          (0x1 << SysOp2_offset)) >> SysOp_offset
+};
+
+enum DataCacheOp {
+  CVAC  = ((0x3 << SysOp1_offset) |
+           (0x7 << CRn_offset) |
+           (0xa << CRm_offset) |
+           (0x1 << SysOp2_offset)) >> SysOp_offset,
+  CVAU  = ((0x3 << SysOp1_offset) |
+           (0x7 << CRn_offset) |
+           (0xb << CRm_offset) |
+           (0x1 << SysOp2_offset)) >> SysOp_offset,
+  CIVAC = ((0x3 << SysOp1_offset) |
+           (0x7 << CRn_offset) |
+           (0xe << CRm_offset) |
+           (0x1 << SysOp2_offset)) >> SysOp_offset,
+  ZVA   = ((0x3 << SysOp1_offset) |
+           (0x7 << CRn_offset) |
+           (0x4 << CRm_offset) |
+           (0x1 << SysOp2_offset)) >> SysOp_offset
+};
+
 // Instruction enumerations.
 //
 // These are the masks that define a class of instructions, and the list of
@@ -333,6 +399,47 @@ enum GenericInstrField {
   FP64                 = 0x00400000
 };
 
+enum NEONFormatField {
+  NEONFormatFieldMask   = 0x40C00000,
+  NEON_Q                = 0x40000000,
+  NEON_8B               = 0x00000000,
+  NEON_16B              = NEON_8B | NEON_Q,
+  NEON_4H               = 0x00400000,
+  NEON_8H               = NEON_4H | NEON_Q,
+  NEON_2S               = 0x00800000,
+  NEON_4S               = NEON_2S | NEON_Q,
+  NEON_1D               = 0x00C00000,
+  NEON_2D               = 0x00C00000 | NEON_Q
+};
+
+enum NEONFPFormatField {
+  NEONFPFormatFieldMask = 0x40400000,
+  NEON_FP_2S            = FP32,
+  NEON_FP_4S            = FP32 | NEON_Q,
+  NEON_FP_2D            = FP64 | NEON_Q
+};
+
+enum NEONLSFormatField {
+  NEONLSFormatFieldMask = 0x40000C00,
+  LS_NEON_8B            = 0x00000000,
+  LS_NEON_16B           = LS_NEON_8B | NEON_Q,
+  LS_NEON_4H            = 0x00000400,
+  LS_NEON_8H            = LS_NEON_4H | NEON_Q,
+  LS_NEON_2S            = 0x00000800,
+  LS_NEON_4S            = LS_NEON_2S | NEON_Q,
+  LS_NEON_1D            = 0x00000C00,
+  LS_NEON_2D            = LS_NEON_1D | NEON_Q
+};
+
+enum NEONScalarFormatField {
+  NEONScalarFormatFieldMask = 0x00C00000,
+  NEONScalar                = 0x10000000,
+  NEON_B                    = 0x00000000,
+  NEON_H                    = 0x00400000,
+  NEON_S                    = 0x00800000,
+  NEON_D                    = 0x00C00000
+};
+
 // PC relative addressing.
 enum PCRelAddressingOp {
   PCRelAddressingFixed = 0x10000000,
@@ -588,6 +695,13 @@ enum SystemHintOp {
   HINT            = SystemHintFixed | 0x00000000
 };
 
+enum SystemSysOp {
+  SystemSysFixed  = 0xD5080000,
+  SystemSysFMask  = 0xFFF80000,
+  SystemSysMask   = 0xFFF80000,
+  SYS             = SystemSysFixed | 0x00000000
+};
+
 // Exception.
 enum ExceptionOp {
   ExceptionFixed = 0xD4000000,
@@ -640,7 +754,9 @@ enum LoadStorePairAnyOp {
   V(STP, s,   0x04000000),          \
   V(LDP, s,   0x04400000),          \
   V(STP, d,   0x44000000),          \
-  V(LDP, d,   0x44400000)
+  V(LDP, d,   0x44400000),          \
+  V(STP, q,   0x84000000),          \
+  V(LDP, q,   0x84400000)
 
 // Load/store pair (post, pre and offset.)
 enum LoadStorePairOp {
@@ -686,6 +802,7 @@ enum LoadStorePairNonTemporalOp {
   LoadStorePairNonTemporalFixed = 0x28000000,
   LoadStorePairNonTemporalFMask = 0x3B800000,
   LoadStorePairNonTemporalMask  = 0xFFC00000,
+  LoadStorePairNonTemporalLBit = 1 << 22,
   STNP_w = LoadStorePairNonTemporalFixed | STP_w,
   LDNP_w = LoadStorePairNonTemporalFixed | LDP_w,
   STNP_x = LoadStorePairNonTemporalFixed | STP_x,
@@ -693,7 +810,9 @@ enum LoadStorePairNonTemporalOp {
   STNP_s = LoadStorePairNonTemporalFixed | STP_s,
   LDNP_s = LoadStorePairNonTemporalFixed | LDP_s,
   STNP_d = LoadStorePairNonTemporalFixed | STP_d,
-  LDNP_d = LoadStorePairNonTemporalFixed | LDP_d
+  LDNP_d = LoadStorePairNonTemporalFixed | LDP_d,
+  STNP_q = LoadStorePairNonTemporalFixed | STP_q,
+  LDNP_q = LoadStorePairNonTemporalFixed | LDP_q
 };
 
 // Load literal.
@@ -706,7 +825,8 @@ enum LoadLiteralOp {
   LDRSW_x_lit      = LoadLiteralFixed | 0x80000000,
   PRFM_lit         = LoadLiteralFixed | 0xC0000000,
   LDR_s_lit        = LoadLiteralFixed | 0x04000000,
-  LDR_d_lit        = LoadLiteralFixed | 0x44000000
+  LDR_d_lit        = LoadLiteralFixed | 0x44000000,
+  LDR_q_lit        = LoadLiteralFixed | 0x84000000
 };
 
 #define LOAD_STORE_OP_LIST(V)     \
@@ -723,15 +843,21 @@ enum LoadLiteralOp {
   V(LD, RSW, x, 0x80800000),  \
   V(LD, RSB, w, 0x00C00000),  \
   V(LD, RSH, w, 0x40C00000),  \
+  V(ST, R, b,   0x04000000),  \
+  V(ST, R, h,   0x44000000),  \
   V(ST, R, s,   0x84000000),  \
   V(ST, R, d,   0xC4000000),  \
+  V(ST, R, q,   0x04800000),  \
+  V(LD, R, b,   0x04400000),  \
+  V(LD, R, h,   0x44400000),  \
   V(LD, R, s,   0x84400000),  \
-  V(LD, R, d,   0xC4400000)
-
+  V(LD, R, d,   0xC4400000),  \
+  V(LD, R, q,   0x04C00000)
 
 // Load/store (post, pre, offset and unsigned.)
 enum LoadStoreOp {
-  LoadStoreOpMask = 0xC4C00000,
+  LoadStoreMask = 0xC4C00000,
+  LoadStoreVMask = 0x04000000,
   #define LOAD_STORE(A, B, C, D)  \
   A##B##_##C = D
   LOAD_STORE_OP_LIST(LOAD_STORE),
@@ -971,8 +1097,10 @@ enum FPCompareOp {
   FCMP_zero      = FCMP_s_zero,
   FCMPE_s        = FPCompareFixed | 0x00000010,
   FCMPE_d        = FPCompareFixed | FP64 | 0x00000010,
+  FCMPE          = FCMPE_s,
   FCMPE_s_zero   = FPCompareFixed | 0x00000018,
-  FCMPE_d_zero   = FPCompareFixed | FP64 | 0x00000018
+  FCMPE_d_zero   = FPCompareFixed | FP64 | 0x00000018,
+  FCMPE_zero     = FCMPE_s_zero
 };
 
 // Floating point conditional compare.
@@ -1026,6 +1154,10 @@ enum FPDataProcessing1SourceOp {
   FSQRT    = FSQRT_s,
   FCVT_ds  = FPDataProcessing1SourceFixed | 0x00028000,
   FCVT_sd  = FPDataProcessing1SourceFixed | FP64 | 0x00020000,
+  FCVT_hs  = FPDataProcessing1SourceFixed | 0x00038000,
+  FCVT_hd  = FPDataProcessing1SourceFixed | FP64 | 0x00038000,
+  FCVT_sh  = FPDataProcessing1SourceFixed | 0x00C20000,
+  FCVT_dh  = FPDataProcessing1SourceFixed | 0x00C28000,
   FRINTN_s = FPDataProcessing1SourceFixed | 0x00040000,
   FRINTN_d = FPDataProcessing1SourceFixed | FP64 | 0x00040000,
   FRINTN   = FRINTN_s,
@@ -1166,7 +1298,9 @@ enum FPIntegerConvertOp {
   FMOV_ws   = FPIntegerConvertFixed | 0x00060000,
   FMOV_sw   = FPIntegerConvertFixed | 0x00070000,
   FMOV_xd   = FMOV_ws | SixtyFourBits | FP64,
-  FMOV_dx   = FMOV_sw | SixtyFourBits | FP64
+  FMOV_dx   = FMOV_sw | SixtyFourBits | FP64,
+  FMOV_d1_x = FPIntegerConvertFixed | SixtyFourBits | 0x008F0000,
+  FMOV_x_d1 = FPIntegerConvertFixed | SixtyFourBits | 0x008E0000
 };
 
 // Conversion between fixed point and floating point.
@@ -1196,6 +1330,775 @@ enum FPFixedPointConvertOp {
   UCVTF_dx_fixed  = UCVTF_fixed | SixtyFourBits | FP64
 };
 
+// Crypto - two register SHA.
+enum Crypto2RegSHAOp {
+  Crypto2RegSHAFixed = 0x5E280800,
+  Crypto2RegSHAFMask = 0xFF3E0C00
+};
+
+// Crypto - three register SHA.
+enum Crypto3RegSHAOp {
+  Crypto3RegSHAFixed = 0x5E000000,
+  Crypto3RegSHAFMask = 0xFF208C00
+};
+
+// Crypto - AES.
+enum CryptoAESOp {
+  CryptoAESFixed = 0x4E280800,
+  CryptoAESFMask = 0xFF3E0C00
+};
+
+// NEON instructions with two register operands.
+enum NEON2RegMiscOp {
+  NEON2RegMiscFixed = 0x0E200800,
+  NEON2RegMiscFMask = 0x9F3E0C00,
+  NEON2RegMiscMask  = 0xBF3FFC00,
+  NEON2RegMiscUBit  = 0x20000000,
+  NEON_REV64     = NEON2RegMiscFixed | 0x00000000,
+  NEON_REV32     = NEON2RegMiscFixed | 0x20000000,
+  NEON_REV16     = NEON2RegMiscFixed | 0x00001000,
+  NEON_SADDLP    = NEON2RegMiscFixed | 0x00002000,
+  NEON_UADDLP    = NEON_SADDLP | NEON2RegMiscUBit,
+  NEON_SUQADD    = NEON2RegMiscFixed | 0x00003000,
+  NEON_USQADD    = NEON_SUQADD | NEON2RegMiscUBit,
+  NEON_CLS       = NEON2RegMiscFixed | 0x00004000,
+  NEON_CLZ       = NEON2RegMiscFixed | 0x20004000,
+  NEON_CNT       = NEON2RegMiscFixed | 0x00005000,
+  NEON_RBIT_NOT  = NEON2RegMiscFixed | 0x20005000,
+  NEON_SADALP    = NEON2RegMiscFixed | 0x00006000,
+  NEON_UADALP    = NEON_SADALP | NEON2RegMiscUBit,
+  NEON_SQABS     = NEON2RegMiscFixed | 0x00007000,
+  NEON_SQNEG     = NEON2RegMiscFixed | 0x20007000,
+  NEON_CMGT_zero = NEON2RegMiscFixed | 0x00008000,
+  NEON_CMGE_zero = NEON2RegMiscFixed | 0x20008000,
+  NEON_CMEQ_zero = NEON2RegMiscFixed | 0x00009000,
+  NEON_CMLE_zero = NEON2RegMiscFixed | 0x20009000,
+  NEON_CMLT_zero = NEON2RegMiscFixed | 0x0000A000,
+  NEON_ABS       = NEON2RegMiscFixed | 0x0000B000,
+  NEON_NEG       = NEON2RegMiscFixed | 0x2000B000,
+  NEON_XTN       = NEON2RegMiscFixed | 0x00012000,
+  NEON_SQXTUN    = NEON2RegMiscFixed | 0x20012000,
+  NEON_SHLL      = NEON2RegMiscFixed | 0x20013000,
+  NEON_SQXTN     = NEON2RegMiscFixed | 0x00014000,
+  NEON_UQXTN     = NEON_SQXTN | NEON2RegMiscUBit,
+
+  NEON2RegMiscOpcode = 0x0001F000,
+  NEON_RBIT_NOT_opcode = NEON_RBIT_NOT & NEON2RegMiscOpcode,
+  NEON_NEG_opcode = NEON_NEG & NEON2RegMiscOpcode,
+  NEON_XTN_opcode = NEON_XTN & NEON2RegMiscOpcode,
+  NEON_UQXTN_opcode = NEON_UQXTN & NEON2RegMiscOpcode,
+
+  // These instructions use only one bit of the size field. The other bit is
+  // used to distinguish between instructions.
+  NEON2RegMiscFPMask = NEON2RegMiscMask | 0x00800000,
+  NEON_FABS   = NEON2RegMiscFixed | 0x0080F000,
+  NEON_FNEG   = NEON2RegMiscFixed | 0x2080F000,
+  NEON_FCVTN  = NEON2RegMiscFixed | 0x00016000,
+  NEON_FCVTXN = NEON2RegMiscFixed | 0x20016000,
+  NEON_FCVTL  = NEON2RegMiscFixed | 0x00017000,
+  NEON_FRINTN = NEON2RegMiscFixed | 0x00018000,
+  NEON_FRINTA = NEON2RegMiscFixed | 0x20018000,
+  NEON_FRINTP = NEON2RegMiscFixed | 0x00818000,
+  NEON_FRINTM = NEON2RegMiscFixed | 0x00019000,
+  NEON_FRINTX = NEON2RegMiscFixed | 0x20019000,
+  NEON_FRINTZ = NEON2RegMiscFixed | 0x00819000,
+  NEON_FRINTI = NEON2RegMiscFixed | 0x20819000,
+  NEON_FCVTNS = NEON2RegMiscFixed | 0x0001A000,
+  NEON_FCVTNU = NEON_FCVTNS | NEON2RegMiscUBit,
+  NEON_FCVTPS = NEON2RegMiscFixed | 0x0081A000,
+  NEON_FCVTPU = NEON_FCVTPS | NEON2RegMiscUBit,
+  NEON_FCVTMS = NEON2RegMiscFixed | 0x0001B000,
+  NEON_FCVTMU = NEON_FCVTMS | NEON2RegMiscUBit,
+  NEON_FCVTZS = NEON2RegMiscFixed | 0x0081B000,
+  NEON_FCVTZU = NEON_FCVTZS | NEON2RegMiscUBit,
+  NEON_FCVTAS = NEON2RegMiscFixed | 0x0001C000,
+  NEON_FCVTAU = NEON_FCVTAS | NEON2RegMiscUBit,
+  NEON_FSQRT  = NEON2RegMiscFixed | 0x2081F000,
+  NEON_SCVTF  = NEON2RegMiscFixed | 0x0001D000,
+  NEON_UCVTF  = NEON_SCVTF | NEON2RegMiscUBit,
+  NEON_URSQRTE = NEON2RegMiscFixed | 0x2081C000,
+  NEON_URECPE  = NEON2RegMiscFixed | 0x0081C000,
+  NEON_FRSQRTE = NEON2RegMiscFixed | 0x2081D000,
+  NEON_FRECPE  = NEON2RegMiscFixed | 0x0081D000,
+  NEON_FCMGT_zero = NEON2RegMiscFixed | 0x0080C000,
+  NEON_FCMGE_zero = NEON2RegMiscFixed | 0x2080C000,
+  NEON_FCMEQ_zero = NEON2RegMiscFixed | 0x0080D000,
+  NEON_FCMLE_zero = NEON2RegMiscFixed | 0x2080D000,
+  NEON_FCMLT_zero = NEON2RegMiscFixed | 0x0080E000,
+
+  NEON_FCVTL_opcode = NEON_FCVTL & NEON2RegMiscOpcode,
+  NEON_FCVTN_opcode = NEON_FCVTN & NEON2RegMiscOpcode
+};
+
+// NEON instructions with three same-type operands.
+enum NEON3SameOp {
+  NEON3SameFixed = 0x0E200400,
+  NEON3SameFMask = 0x9F200400,
+  NEON3SameMask = 0xBF20FC00,
+  NEON3SameUBit = 0x20000000,
+  NEON_ADD    = NEON3SameFixed | 0x00008000,
+  NEON_ADDP   = NEON3SameFixed | 0x0000B800,
+  NEON_SHADD  = NEON3SameFixed | 0x00000000,
+  NEON_SHSUB  = NEON3SameFixed | 0x00002000,
+  NEON_SRHADD = NEON3SameFixed | 0x00001000,
+  NEON_CMEQ   = NEON3SameFixed | NEON3SameUBit | 0x00008800,
+  NEON_CMGE   = NEON3SameFixed | 0x00003800,
+  NEON_CMGT   = NEON3SameFixed | 0x00003000,
+  NEON_CMHI   = NEON3SameFixed | NEON3SameUBit | NEON_CMGT,
+  NEON_CMHS   = NEON3SameFixed | NEON3SameUBit | NEON_CMGE,
+  NEON_CMTST  = NEON3SameFixed | 0x00008800,
+  NEON_MLA    = NEON3SameFixed | 0x00009000,
+  NEON_MLS    = NEON3SameFixed | 0x20009000,
+  NEON_MUL    = NEON3SameFixed | 0x00009800,
+  NEON_PMUL   = NEON3SameFixed | 0x20009800,
+  NEON_SRSHL  = NEON3SameFixed | 0x00005000,
+  NEON_SQSHL  = NEON3SameFixed | 0x00004800,
+  NEON_SQRSHL = NEON3SameFixed | 0x00005800,
+  NEON_SSHL   = NEON3SameFixed | 0x00004000,
+  NEON_SMAX   = NEON3SameFixed | 0x00006000,
+  NEON_SMAXP  = NEON3SameFixed | 0x0000A000,
+  NEON_SMIN   = NEON3SameFixed | 0x00006800,
+  NEON_SMINP  = NEON3SameFixed | 0x0000A800,
+  NEON_SABD   = NEON3SameFixed | 0x00007000,
+  NEON_SABA   = NEON3SameFixed | 0x00007800,
+  NEON_UABD   = NEON3SameFixed | NEON3SameUBit | NEON_SABD,
+  NEON_UABA   = NEON3SameFixed | NEON3SameUBit | NEON_SABA,
+  NEON_SQADD  = NEON3SameFixed | 0x00000800,
+  NEON_SQSUB  = NEON3SameFixed | 0x00002800,
+  NEON_SUB    = NEON3SameFixed | NEON3SameUBit | 0x00008000,
+  NEON_UHADD  = NEON3SameFixed | NEON3SameUBit | NEON_SHADD,
+  NEON_UHSUB  = NEON3SameFixed | NEON3SameUBit | NEON_SHSUB,
+  NEON_URHADD = NEON3SameFixed | NEON3SameUBit | NEON_SRHADD,
+  NEON_UMAX   = NEON3SameFixed | NEON3SameUBit | NEON_SMAX,
+  NEON_UMAXP  = NEON3SameFixed | NEON3SameUBit | NEON_SMAXP,
+  NEON_UMIN   = NEON3SameFixed | NEON3SameUBit | NEON_SMIN,
+  NEON_UMINP  = NEON3SameFixed | NEON3SameUBit | NEON_SMINP,
+  NEON_URSHL  = NEON3SameFixed | NEON3SameUBit | NEON_SRSHL,
+  NEON_UQADD  = NEON3SameFixed | NEON3SameUBit | NEON_SQADD,
+  NEON_UQRSHL = NEON3SameFixed | NEON3SameUBit | NEON_SQRSHL,
+  NEON_UQSHL  = NEON3SameFixed | NEON3SameUBit | NEON_SQSHL,
+  NEON_UQSUB  = NEON3SameFixed | NEON3SameUBit | NEON_SQSUB,
+  NEON_USHL   = NEON3SameFixed | NEON3SameUBit | NEON_SSHL,
+  NEON_SQDMULH  = NEON3SameFixed | 0x0000B000,
+  NEON_SQRDMULH = NEON3SameFixed | 0x2000B000,
+
+  // NEON floating point instructions with three same-type operands.
+  NEON3SameFPFixed = NEON3SameFixed | 0x0000C000,
+  NEON3SameFPFMask = NEON3SameFMask | 0x0000C000,
+  NEON3SameFPMask = NEON3SameMask | 0x00800000,
+  NEON_FADD    = NEON3SameFixed | 0x0000D000,
+  NEON_FSUB    = NEON3SameFixed | 0x0080D000,
+  NEON_FMUL    = NEON3SameFixed | 0x2000D800,
+  NEON_FDIV    = NEON3SameFixed | 0x2000F800,
+  NEON_FMAX    = NEON3SameFixed | 0x0000F000,
+  NEON_FMAXNM  = NEON3SameFixed | 0x0000C000,
+  NEON_FMAXP   = NEON3SameFixed | 0x2000F000,
+  NEON_FMAXNMP = NEON3SameFixed | 0x2000C000,
+  NEON_FMIN    = NEON3SameFixed | 0x0080F000,
+  NEON_FMINNM  = NEON3SameFixed | 0x0080C000,
+  NEON_FMINP   = NEON3SameFixed | 0x2080F000,
+  NEON_FMINNMP = NEON3SameFixed | 0x2080C000,
+  NEON_FMLA    = NEON3SameFixed | 0x0000C800,
+  NEON_FMLS    = NEON3SameFixed | 0x0080C800,
+  NEON_FMULX   = NEON3SameFixed | 0x0000D800,
+  NEON_FRECPS  = NEON3SameFixed | 0x0000F800,
+  NEON_FRSQRTS = NEON3SameFixed | 0x0080F800,
+  NEON_FABD    = NEON3SameFixed | 0x2080D000,
+  NEON_FADDP   = NEON3SameFixed | 0x2000D000,
+  NEON_FCMEQ   = NEON3SameFixed | 0x0000E000,
+  NEON_FCMGE   = NEON3SameFixed | 0x2000E000,
+  NEON_FCMGT   = NEON3SameFixed | 0x2080E000,
+  NEON_FACGE   = NEON3SameFixed | 0x2000E800,
+  NEON_FACGT   = NEON3SameFixed | 0x2080E800,
+
+  // NEON logical instructions with three same-type operands.
+  NEON3SameLogicalFixed = NEON3SameFixed | 0x00001800,
+  NEON3SameLogicalFMask = NEON3SameFMask | 0x0000F800,
+  NEON3SameLogicalMask = 0xBFE0FC00,
+  NEON3SameLogicalFormatMask = NEON_Q,
+  NEON_AND = NEON3SameLogicalFixed | 0x00000000,
+  NEON_ORR = NEON3SameLogicalFixed | 0x00A00000,
+  NEON_ORN = NEON3SameLogicalFixed | 0x00C00000,
+  NEON_EOR = NEON3SameLogicalFixed | 0x20000000,
+  NEON_BIC = NEON3SameLogicalFixed | 0x00400000,
+  NEON_BIF = NEON3SameLogicalFixed | 0x20C00000,
+  NEON_BIT = NEON3SameLogicalFixed | 0x20800000,
+  NEON_BSL = NEON3SameLogicalFixed | 0x20400000
+};
+
+// NEON instructions with three different-type operands.
+enum NEON3DifferentOp {
+  NEON3DifferentFixed = 0x0E200000,
+  NEON3DifferentFMask = 0x9F200C00,
+  NEON3DifferentMask  = 0xFF20FC00,
+  NEON_ADDHN    = NEON3DifferentFixed | 0x00004000,
+  NEON_ADDHN2   = NEON_ADDHN | NEON_Q,
+  NEON_PMULL    = NEON3DifferentFixed | 0x0000E000,
+  NEON_PMULL2   = NEON_PMULL | NEON_Q,
+  NEON_RADDHN   = NEON3DifferentFixed | 0x20004000,
+  NEON_RADDHN2  = NEON_RADDHN | NEON_Q,
+  NEON_RSUBHN   = NEON3DifferentFixed | 0x20006000,
+  NEON_RSUBHN2  = NEON_RSUBHN | NEON_Q,
+  NEON_SABAL    = NEON3DifferentFixed | 0x00005000,
+  NEON_SABAL2   = NEON_SABAL | NEON_Q,
+  NEON_SABDL    = NEON3DifferentFixed | 0x00007000,
+  NEON_SABDL2   = NEON_SABDL | NEON_Q,
+  NEON_SADDL    = NEON3DifferentFixed | 0x00000000,
+  NEON_SADDL2   = NEON_SADDL | NEON_Q,
+  NEON_SADDW    = NEON3DifferentFixed | 0x00001000,
+  NEON_SADDW2   = NEON_SADDW | NEON_Q,
+  NEON_SMLAL    = NEON3DifferentFixed | 0x00008000,
+  NEON_SMLAL2   = NEON_SMLAL | NEON_Q,
+  NEON_SMLSL    = NEON3DifferentFixed | 0x0000A000,
+  NEON_SMLSL2   = NEON_SMLSL | NEON_Q,
+  NEON_SMULL    = NEON3DifferentFixed | 0x0000C000,
+  NEON_SMULL2   = NEON_SMULL | NEON_Q,
+  NEON_SSUBL    = NEON3DifferentFixed | 0x00002000,
+  NEON_SSUBL2   = NEON_SSUBL | NEON_Q,
+  NEON_SSUBW    = NEON3DifferentFixed | 0x00003000,
+  NEON_SSUBW2   = NEON_SSUBW | NEON_Q,
+  NEON_SQDMLAL  = NEON3DifferentFixed | 0x00009000,
+  NEON_SQDMLAL2 = NEON_SQDMLAL | NEON_Q,
+  NEON_SQDMLSL  = NEON3DifferentFixed | 0x0000B000,
+  NEON_SQDMLSL2 = NEON_SQDMLSL | NEON_Q,
+  NEON_SQDMULL  = NEON3DifferentFixed | 0x0000D000,
+  NEON_SQDMULL2 = NEON_SQDMULL | NEON_Q,
+  NEON_SUBHN    = NEON3DifferentFixed | 0x00006000,
+  NEON_SUBHN2   = NEON_SUBHN | NEON_Q,
+  NEON_UABAL    = NEON_SABAL | NEON3SameUBit,
+  NEON_UABAL2   = NEON_UABAL | NEON_Q,
+  NEON_UABDL    = NEON_SABDL | NEON3SameUBit,
+  NEON_UABDL2   = NEON_UABDL | NEON_Q,
+  NEON_UADDL    = NEON_SADDL | NEON3SameUBit,
+  NEON_UADDL2   = NEON_UADDL | NEON_Q,
+  NEON_UADDW    = NEON_SADDW | NEON3SameUBit,
+  NEON_UADDW2   = NEON_UADDW | NEON_Q,
+  NEON_UMLAL    = NEON_SMLAL | NEON3SameUBit,
+  NEON_UMLAL2   = NEON_UMLAL | NEON_Q,
+  NEON_UMLSL    = NEON_SMLSL | NEON3SameUBit,
+  NEON_UMLSL2   = NEON_UMLSL | NEON_Q,
+  NEON_UMULL    = NEON_SMULL | NEON3SameUBit,
+  NEON_UMULL2   = NEON_UMULL | NEON_Q,
+  NEON_USUBL    = NEON_SSUBL | NEON3SameUBit,
+  NEON_USUBL2   = NEON_USUBL | NEON_Q,
+  NEON_USUBW    = NEON_SSUBW | NEON3SameUBit,
+  NEON_USUBW2   = NEON_USUBW | NEON_Q
+};
+
+// NEON instructions operating across vectors.
+enum NEONAcrossLanesOp {
+  NEONAcrossLanesFixed = 0x0E300800,
+  NEONAcrossLanesFMask = 0x9F3E0C00,
+  NEONAcrossLanesMask  = 0xBF3FFC00,
+  NEON_ADDV   = NEONAcrossLanesFixed | 0x0001B000,
+  NEON_SADDLV = NEONAcrossLanesFixed | 0x00003000,
+  NEON_UADDLV = NEONAcrossLanesFixed | 0x20003000,
+  NEON_SMAXV  = NEONAcrossLanesFixed | 0x0000A000,
+  NEON_SMINV  = NEONAcrossLanesFixed | 0x0001A000,
+  NEON_UMAXV  = NEONAcrossLanesFixed | 0x2000A000,
+  NEON_UMINV  = NEONAcrossLanesFixed | 0x2001A000,
+
+  // NEON floating point across-lanes instructions.
+  NEONAcrossLanesFPFixed = NEONAcrossLanesFixed | 0x0000C000,
+  NEONAcrossLanesFPFMask = NEONAcrossLanesFMask | 0x0000C000,
+  NEONAcrossLanesFPMask  = NEONAcrossLanesMask  | 0x00800000,
+
+  NEON_FMAXV   = NEONAcrossLanesFPFixed | 0x2000F000,
+  NEON_FMINV   = NEONAcrossLanesFPFixed | 0x2080F000,
+  NEON_FMAXNMV = NEONAcrossLanesFPFixed | 0x2000C000,
+  NEON_FMINNMV = NEONAcrossLanesFPFixed | 0x2080C000
+};
+
+// NEON instructions with indexed element operand.
+enum NEONByIndexedElementOp {
+  NEONByIndexedElementFixed = 0x0F000000,
+  NEONByIndexedElementFMask = 0x9F000400,
+  NEONByIndexedElementMask  = 0xBF00F400,
+  NEON_MUL_byelement   = NEONByIndexedElementFixed | 0x00008000,
+  NEON_MLA_byelement   = NEONByIndexedElementFixed | 0x20000000,
+  NEON_MLS_byelement   = NEONByIndexedElementFixed | 0x20004000,
+  NEON_SMULL_byelement = NEONByIndexedElementFixed | 0x0000A000,
+  NEON_SMLAL_byelement = NEONByIndexedElementFixed | 0x00002000,
+  NEON_SMLSL_byelement = NEONByIndexedElementFixed | 0x00006000,
+  NEON_UMULL_byelement = NEONByIndexedElementFixed | 0x2000A000,
+  NEON_UMLAL_byelement = NEONByIndexedElementFixed | 0x20002000,
+  NEON_UMLSL_byelement = NEONByIndexedElementFixed | 0x20006000,
+  NEON_SQDMULL_byelement = NEONByIndexedElementFixed | 0x0000B000,
+  NEON_SQDMLAL_byelement = NEONByIndexedElementFixed | 0x00003000,
+  NEON_SQDMLSL_byelement = NEONByIndexedElementFixed | 0x00007000,
+  NEON_SQDMULH_byelement  = NEONByIndexedElementFixed | 0x0000C000,
+  NEON_SQRDMULH_byelement = NEONByIndexedElementFixed | 0x0000D000,
+
+  // Floating point instructions.
+  NEONByIndexedElementFPFixed = NEONByIndexedElementFixed | 0x00800000,
+  NEONByIndexedElementFPMask = NEONByIndexedElementMask | 0x00800000,
+  NEON_FMLA_byelement  = NEONByIndexedElementFPFixed | 0x00001000,
+  NEON_FMLS_byelement  = NEONByIndexedElementFPFixed | 0x00005000,
+  NEON_FMUL_byelement  = NEONByIndexedElementFPFixed | 0x00009000,
+  NEON_FMULX_byelement = NEONByIndexedElementFPFixed | 0x20009000
+};
+
+// NEON register copy.
+enum NEONCopyOp {
+  NEONCopyFixed = 0x0E000400,
+  NEONCopyFMask = 0x9FE08400,
+  NEONCopyMask  = 0x3FE08400,
+  NEONCopyInsElementMask = NEONCopyMask | 0x40000000,
+  NEONCopyInsGeneralMask = NEONCopyMask | 0x40007800,
+  NEONCopyDupElementMask = NEONCopyMask | 0x20007800,
+  NEONCopyDupGeneralMask = NEONCopyDupElementMask,
+  NEONCopyUmovMask       = NEONCopyMask | 0x20007800,
+  NEONCopySmovMask       = NEONCopyMask | 0x20007800,
+  NEON_INS_ELEMENT       = NEONCopyFixed | 0x60000000,
+  NEON_INS_GENERAL       = NEONCopyFixed | 0x40001800,
+  NEON_DUP_ELEMENT       = NEONCopyFixed | 0x00000000,
+  NEON_DUP_GENERAL       = NEONCopyFixed | 0x00000800,
+  NEON_SMOV              = NEONCopyFixed | 0x00002800,
+  NEON_UMOV              = NEONCopyFixed | 0x00003800
+};
+
+// NEON extract.
+enum NEONExtractOp {
+  NEONExtractFixed = 0x2E000000,
+  NEONExtractFMask = 0xBF208400,
+  NEONExtractMask = 0xBFE08400,
+  NEON_EXT = NEONExtractFixed | 0x00000000
+};
+
+enum NEONLoadStoreMultiOp {
+  NEONLoadStoreMultiL    = 0x00400000,
+  NEONLoadStoreMulti1_1v = 0x00007000,
+  NEONLoadStoreMulti1_2v = 0x0000A000,
+  NEONLoadStoreMulti1_3v = 0x00006000,
+  NEONLoadStoreMulti1_4v = 0x00002000,
+  NEONLoadStoreMulti2    = 0x00008000,
+  NEONLoadStoreMulti3    = 0x00004000,
+  NEONLoadStoreMulti4    = 0x00000000
+};
+
+// NEON load/store multiple structures.
+enum NEONLoadStoreMultiStructOp {
+  NEONLoadStoreMultiStructFixed = 0x0C000000,
+  NEONLoadStoreMultiStructFMask = 0xBFBF0000,
+  NEONLoadStoreMultiStructMask  = 0xBFFFF000,
+  NEONLoadStoreMultiStructStore = NEONLoadStoreMultiStructFixed,
+  NEONLoadStoreMultiStructLoad  = NEONLoadStoreMultiStructFixed |
+                                  NEONLoadStoreMultiL,
+  NEON_LD1_1v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_1v,
+  NEON_LD1_2v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_2v,
+  NEON_LD1_3v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_3v,
+  NEON_LD1_4v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_4v,
+  NEON_LD2    = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti2,
+  NEON_LD3    = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti3,
+  NEON_LD4    = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti4,
+  NEON_ST1_1v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_1v,
+  NEON_ST1_2v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_2v,
+  NEON_ST1_3v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_3v,
+  NEON_ST1_4v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_4v,
+  NEON_ST2    = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti2,
+  NEON_ST3    = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti3,
+  NEON_ST4    = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti4
+};
+
+// NEON load/store multiple structures with post-index addressing.
+enum NEONLoadStoreMultiStructPostIndexOp {
+  NEONLoadStoreMultiStructPostIndexFixed = 0x0C800000,
+  NEONLoadStoreMultiStructPostIndexFMask = 0xBFA00000,
+  NEONLoadStoreMultiStructPostIndexMask  = 0xBFE0F000,
+  NEONLoadStoreMultiStructPostIndex = 0x00800000,
+  NEON_LD1_1v_post = NEON_LD1_1v | NEONLoadStoreMultiStructPostIndex,
+  NEON_LD1_2v_post = NEON_LD1_2v | NEONLoadStoreMultiStructPostIndex,
+  NEON_LD1_3v_post = NEON_LD1_3v | NEONLoadStoreMultiStructPostIndex,
+  NEON_LD1_4v_post = NEON_LD1_4v | NEONLoadStoreMultiStructPostIndex,
+  NEON_LD2_post = NEON_LD2 | NEONLoadStoreMultiStructPostIndex,
+  NEON_LD3_post = NEON_LD3 | NEONLoadStoreMultiStructPostIndex,
+  NEON_LD4_post = NEON_LD4 | NEONLoadStoreMultiStructPostIndex,
+  NEON_ST1_1v_post = NEON_ST1_1v | NEONLoadStoreMultiStructPostIndex,
+  NEON_ST1_2v_post = NEON_ST1_2v | NEONLoadStoreMultiStructPostIndex,
+  NEON_ST1_3v_post = NEON_ST1_3v | NEONLoadStoreMultiStructPostIndex,
+  NEON_ST1_4v_post = NEON_ST1_4v | NEONLoadStoreMultiStructPostIndex,
+  NEON_ST2_post = NEON_ST2 | NEONLoadStoreMultiStructPostIndex,
+  NEON_ST3_post = NEON_ST3 | NEONLoadStoreMultiStructPostIndex,
+  NEON_ST4_post = NEON_ST4 | NEONLoadStoreMultiStructPostIndex
+};
+
+enum NEONLoadStoreSingleOp {
+  NEONLoadStoreSingle1        = 0x00000000,
+  NEONLoadStoreSingle2        = 0x00200000,
+  NEONLoadStoreSingle3        = 0x00002000,
+  NEONLoadStoreSingle4        = 0x00202000,
+  NEONLoadStoreSingleL        = 0x00400000,
+  NEONLoadStoreSingle_b       = 0x00000000,
+  NEONLoadStoreSingle_h       = 0x00004000,
+  NEONLoadStoreSingle_s       = 0x00008000,
+  NEONLoadStoreSingle_d       = 0x00008400,
+  NEONLoadStoreSingleAllLanes = 0x0000C000,
+  NEONLoadStoreSingleLenMask  = 0x00202000
+};
+
+// NEON load/store single structure.
+enum NEONLoadStoreSingleStructOp {
+  NEONLoadStoreSingleStructFixed = 0x0D000000,
+  NEONLoadStoreSingleStructFMask = 0xBF9F0000,
+  NEONLoadStoreSingleStructMask  = 0xBFFFE000,
+  NEONLoadStoreSingleStructStore = NEONLoadStoreSingleStructFixed,
+  NEONLoadStoreSingleStructLoad  = NEONLoadStoreSingleStructFixed |
+                                   NEONLoadStoreSingleL,
+  NEONLoadStoreSingleStructLoad1 = NEONLoadStoreSingle1 |
+                                   NEONLoadStoreSingleStructLoad,
+  NEONLoadStoreSingleStructLoad2 = NEONLoadStoreSingle2 |
+                                   NEONLoadStoreSingleStructLoad,
+  NEONLoadStoreSingleStructLoad3 = NEONLoadStoreSingle3 |
+                                   NEONLoadStoreSingleStructLoad,
+  NEONLoadStoreSingleStructLoad4 = NEONLoadStoreSingle4 |
+                                   NEONLoadStoreSingleStructLoad,
+  NEONLoadStoreSingleStructStore1 = NEONLoadStoreSingle1 |
+                                    NEONLoadStoreSingleStructFixed,
+  NEONLoadStoreSingleStructStore2 = NEONLoadStoreSingle2 |
+                                    NEONLoadStoreSingleStructFixed,
+  NEONLoadStoreSingleStructStore3 = NEONLoadStoreSingle3 |
+                                    NEONLoadStoreSingleStructFixed,
+  NEONLoadStoreSingleStructStore4 = NEONLoadStoreSingle4 |
+                                    NEONLoadStoreSingleStructFixed,
+  NEON_LD1_b = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_b,
+  NEON_LD1_h = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_h,
+  NEON_LD1_s = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_s,
+  NEON_LD1_d = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_d,
+  NEON_LD1R  = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingleAllLanes,
+  NEON_ST1_b = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_b,
+  NEON_ST1_h = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_h,
+  NEON_ST1_s = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_s,
+  NEON_ST1_d = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_d,
+
+  NEON_LD2_b = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_b,
+  NEON_LD2_h = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_h,
+  NEON_LD2_s = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_s,
+  NEON_LD2_d = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_d,
+  NEON_LD2R  = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingleAllLanes,
+  NEON_ST2_b = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_b,
+  NEON_ST2_h = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_h,
+  NEON_ST2_s = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_s,
+  NEON_ST2_d = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_d,
+
+  NEON_LD3_b = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_b,
+  NEON_LD3_h = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_h,
+  NEON_LD3_s = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_s,
+  NEON_LD3_d = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_d,
+  NEON_LD3R  = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingleAllLanes,
+  NEON_ST3_b = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_b,
+  NEON_ST3_h = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_h,
+  NEON_ST3_s = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_s,
+  NEON_ST3_d = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_d,
+
+  NEON_LD4_b = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_b,
+  NEON_LD4_h = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_h,
+  NEON_LD4_s = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_s,
+  NEON_LD4_d = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_d,
+  NEON_LD4R  = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingleAllLanes,
+  NEON_ST4_b = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_b,
+  NEON_ST4_h = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_h,
+  NEON_ST4_s = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_s,
+  NEON_ST4_d = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_d
+};
+
+// NEON load/store single structure with post-index addressing.
+enum NEONLoadStoreSingleStructPostIndexOp {
+  NEONLoadStoreSingleStructPostIndexFixed = 0x0D800000,
+  NEONLoadStoreSingleStructPostIndexFMask = 0xBF800000,
+  NEONLoadStoreSingleStructPostIndexMask  = 0xBFE0E000,
+  NEONLoadStoreSingleStructPostIndex = 0x00800000,
+  NEON_LD1_b_post = NEON_LD1_b | NEONLoadStoreSingleStructPostIndex,
+  NEON_LD1_h_post = NEON_LD1_h | NEONLoadStoreSingleStructPostIndex,
+  NEON_LD1_s_post = NEON_LD1_s | NEONLoadStoreSingleStructPostIndex,
+  NEON_LD1_d_post = NEON_LD1_d | NEONLoadStoreSingleStructPostIndex,
+  NEON_LD1R_post  = NEON_LD1R | NEONLoadStoreSingleStructPostIndex,
+  NEON_ST1_b_post = NEON_ST1_b | NEONLoadStoreSingleStructPostIndex,
+  NEON_ST1_h_post = NEON_ST1_h | NEONLoadStoreSingleStructPostIndex,
+  NEON_ST1_s_post = NEON_ST1_s | NEONLoadStoreSingleStructPostIndex,
+  NEON_ST1_d_post = NEON_ST1_d | NEONLoadStoreSingleStructPostIndex,
+
+  NEON_LD2_b_post = NEON_LD2_b | NEONLoadStoreSingleStructPostIndex,
+  NEON_LD2_h_post = NEON_LD2_h | NEONLoadStoreSingleStructPostIndex,
+  NEON_LD2_s_post = NEON_LD2_s | NEONLoadStoreSingleStructPostIndex,
+  NEON_LD2_d_post = NEON_LD2_d | NEONLoadStoreSingleStructPostIndex,
+  NEON_LD2R_post  = NEON_LD2R | NEONLoadStoreSingleStructPostIndex,
+  NEON_ST2_b_post = NEON_ST2_b | NEONLoadStoreSingleStructPostIndex,
+  NEON_ST2_h_post = NEON_ST2_h | NEONLoadStoreSingleStructPostIndex,
+  NEON_ST2_s_post = NEON_ST2_s | NEONLoadStoreSingleStructPostIndex,
+  NEON_ST2_d_post = NEON_ST2_d | NEONLoadStoreSingleStructPostIndex,
+
+  NEON_LD3_b_post = NEON_LD3_b | NEONLoadStoreSingleStructPostIndex,
+  NEON_LD3_h_post = NEON_LD3_h | NEONLoadStoreSingleStructPostIndex,
+  NEON_LD3_s_post = NEON_LD3_s | NEONLoadStoreSingleStructPostIndex,
+  NEON_LD3_d_post = NEON_LD3_d | NEONLoadStoreSingleStructPostIndex,
+  NEON_LD3R_post  = NEON_LD3R | NEONLoadStoreSingleStructPostIndex,
+  NEON_ST3_b_post = NEON_ST3_b | NEONLoadStoreSingleStructPostIndex,
+  NEON_ST3_h_post = NEON_ST3_h | NEONLoadStoreSingleStructPostIndex,
+  NEON_ST3_s_post = NEON_ST3_s | NEONLoadStoreSingleStructPostIndex,
+  NEON_ST3_d_post = NEON_ST3_d | NEONLoadStoreSingleStructPostIndex,
+
+  NEON_LD4_b_post = NEON_LD4_b | NEONLoadStoreSingleStructPostIndex,
+  NEON_LD4_h_post = NEON_LD4_h | NEONLoadStoreSingleStructPostIndex,
+  NEON_LD4_s_post = NEON_LD4_s | NEONLoadStoreSingleStructPostIndex,
+  NEON_LD4_d_post = NEON_LD4_d | NEONLoadStoreSingleStructPostIndex,
+  NEON_LD4R_post  = NEON_LD4R | NEONLoadStoreSingleStructPostIndex,
+  NEON_ST4_b_post = NEON_ST4_b | NEONLoadStoreSingleStructPostIndex,
+  NEON_ST4_h_post = NEON_ST4_h | NEONLoadStoreSingleStructPostIndex,
+  NEON_ST4_s_post = NEON_ST4_s | NEONLoadStoreSingleStructPostIndex,
+  NEON_ST4_d_post = NEON_ST4_d | NEONLoadStoreSingleStructPostIndex
+};
+
+// NEON modified immediate.
+enum NEONModifiedImmediateOp {
+  NEONModifiedImmediateFixed = 0x0F000400,
+  NEONModifiedImmediateFMask = 0x9FF80400,
+  NEONModifiedImmediateOpBit = 0x20000000,
+  NEONModifiedImmediate_MOVI = NEONModifiedImmediateFixed | 0x00000000,
+  NEONModifiedImmediate_MVNI = NEONModifiedImmediateFixed | 0x20000000,
+  NEONModifiedImmediate_ORR  = NEONModifiedImmediateFixed | 0x00001000,
+  NEONModifiedImmediate_BIC  = NEONModifiedImmediateFixed | 0x20001000
+};
+
+// NEON shift immediate.
+enum NEONShiftImmediateOp {
+  NEONShiftImmediateFixed = 0x0F000400,
+  NEONShiftImmediateFMask = 0x9F800400,
+  NEONShiftImmediateMask  = 0xBF80FC00,
+  NEONShiftImmediateUBit  = 0x20000000,
+  NEON_SHL      = NEONShiftImmediateFixed | 0x00005000,
+  NEON_SSHLL    = NEONShiftImmediateFixed | 0x0000A000,
+  NEON_USHLL    = NEONShiftImmediateFixed | 0x2000A000,
+  NEON_SLI      = NEONShiftImmediateFixed | 0x20005000,
+  NEON_SRI      = NEONShiftImmediateFixed | 0x20004000,
+  NEON_SHRN     = NEONShiftImmediateFixed | 0x00008000,
+  NEON_RSHRN    = NEONShiftImmediateFixed | 0x00008800,
+  NEON_UQSHRN   = NEONShiftImmediateFixed | 0x20009000,
+  NEON_UQRSHRN  = NEONShiftImmediateFixed | 0x20009800,
+  NEON_SQSHRN   = NEONShiftImmediateFixed | 0x00009000,
+  NEON_SQRSHRN  = NEONShiftImmediateFixed | 0x00009800,
+  NEON_SQSHRUN  = NEONShiftImmediateFixed | 0x20008000,
+  NEON_SQRSHRUN = NEONShiftImmediateFixed | 0x20008800,
+  NEON_SSHR     = NEONShiftImmediateFixed | 0x00000000,
+  NEON_SRSHR    = NEONShiftImmediateFixed | 0x00002000,
+  NEON_USHR     = NEONShiftImmediateFixed | 0x20000000,
+  NEON_URSHR    = NEONShiftImmediateFixed | 0x20002000,
+  NEON_SSRA     = NEONShiftImmediateFixed | 0x00001000,
+  NEON_SRSRA    = NEONShiftImmediateFixed | 0x00003000,
+  NEON_USRA     = NEONShiftImmediateFixed | 0x20001000,
+  NEON_URSRA    = NEONShiftImmediateFixed | 0x20003000,
+  NEON_SQSHLU   = NEONShiftImmediateFixed | 0x20006000,
+  NEON_SCVTF_imm = NEONShiftImmediateFixed | 0x0000E000,
+  NEON_UCVTF_imm = NEONShiftImmediateFixed | 0x2000E000,
+  NEON_FCVTZS_imm = NEONShiftImmediateFixed | 0x0000F800,
+  NEON_FCVTZU_imm = NEONShiftImmediateFixed | 0x2000F800,
+  NEON_SQSHL_imm = NEONShiftImmediateFixed | 0x00007000,
+  NEON_UQSHL_imm = NEONShiftImmediateFixed | 0x20007000
+};
+
+// NEON table.
+enum NEONTableOp {
+  NEONTableFixed = 0x0E000000,
+  NEONTableFMask = 0xBF208C00,
+  NEONTableExt   = 0x00001000,
+  NEONTableMask  = 0xBF20FC00,
+  NEON_TBL_1v    = NEONTableFixed | 0x00000000,
+  NEON_TBL_2v    = NEONTableFixed | 0x00002000,
+  NEON_TBL_3v    = NEONTableFixed | 0x00004000,
+  NEON_TBL_4v    = NEONTableFixed | 0x00006000,
+  NEON_TBX_1v    = NEON_TBL_1v | NEONTableExt,
+  NEON_TBX_2v    = NEON_TBL_2v | NEONTableExt,
+  NEON_TBX_3v    = NEON_TBL_3v | NEONTableExt,
+  NEON_TBX_4v    = NEON_TBL_4v | NEONTableExt
+};
+
+// NEON perm.
+enum NEONPermOp {
+  NEONPermFixed = 0x0E000800,
+  NEONPermFMask = 0xBF208C00,
+  NEONPermMask  = 0x3F20FC00,
+  NEON_UZP1 = NEONPermFixed | 0x00001000,
+  NEON_TRN1 = NEONPermFixed | 0x00002000,
+  NEON_ZIP1 = NEONPermFixed | 0x00003000,
+  NEON_UZP2 = NEONPermFixed | 0x00005000,
+  NEON_TRN2 = NEONPermFixed | 0x00006000,
+  NEON_ZIP2 = NEONPermFixed | 0x00007000
+};
+
+// NEON scalar instructions with two register operands.
+enum NEONScalar2RegMiscOp {
+  NEONScalar2RegMiscFixed = 0x5E200800,
+  NEONScalar2RegMiscFMask = 0xDF3E0C00,
+  NEONScalar2RegMiscMask = NEON_Q | NEONScalar | NEON2RegMiscMask,
+  NEON_CMGT_zero_scalar = NEON_Q | NEONScalar | NEON_CMGT_zero,
+  NEON_CMEQ_zero_scalar = NEON_Q | NEONScalar | NEON_CMEQ_zero,
+  NEON_CMLT_zero_scalar = NEON_Q | NEONScalar | NEON_CMLT_zero,
+  NEON_CMGE_zero_scalar = NEON_Q | NEONScalar | NEON_CMGE_zero,
+  NEON_CMLE_zero_scalar = NEON_Q | NEONScalar | NEON_CMLE_zero,
+  NEON_ABS_scalar       = NEON_Q | NEONScalar | NEON_ABS,
+  NEON_SQABS_scalar     = NEON_Q | NEONScalar | NEON_SQABS,
+  NEON_NEG_scalar       = NEON_Q | NEONScalar | NEON_NEG,
+  NEON_SQNEG_scalar     = NEON_Q | NEONScalar | NEON_SQNEG,
+  NEON_SQXTN_scalar     = NEON_Q | NEONScalar | NEON_SQXTN,
+  NEON_UQXTN_scalar     = NEON_Q | NEONScalar | NEON_UQXTN,
+  NEON_SQXTUN_scalar    = NEON_Q | NEONScalar | NEON_SQXTUN,
+  NEON_SUQADD_scalar    = NEON_Q | NEONScalar | NEON_SUQADD,
+  NEON_USQADD_scalar    = NEON_Q | NEONScalar | NEON_USQADD,
+
+  NEONScalar2RegMiscOpcode = NEON2RegMiscOpcode,
+  NEON_NEG_scalar_opcode = NEON_NEG_scalar & NEONScalar2RegMiscOpcode,
+
+  NEONScalar2RegMiscFPMask  = NEONScalar2RegMiscMask | 0x00800000,
+  NEON_FRSQRTE_scalar    = NEON_Q | NEONScalar | NEON_FRSQRTE,
+  NEON_FRECPE_scalar     = NEON_Q | NEONScalar | NEON_FRECPE,
+  NEON_SCVTF_scalar      = NEON_Q | NEONScalar | NEON_SCVTF,
+  NEON_UCVTF_scalar      = NEON_Q | NEONScalar | NEON_UCVTF,
+  NEON_FCMGT_zero_scalar = NEON_Q | NEONScalar | NEON_FCMGT_zero,
+  NEON_FCMEQ_zero_scalar = NEON_Q | NEONScalar | NEON_FCMEQ_zero,
+  NEON_FCMLT_zero_scalar = NEON_Q | NEONScalar | NEON_FCMLT_zero,
+  NEON_FCMGE_zero_scalar = NEON_Q | NEONScalar | NEON_FCMGE_zero,
+  NEON_FCMLE_zero_scalar = NEON_Q | NEONScalar | NEON_FCMLE_zero,
+  NEON_FRECPX_scalar     = NEONScalar2RegMiscFixed | 0x0081F000,
+  NEON_FCVTNS_scalar     = NEON_Q | NEONScalar | NEON_FCVTNS,
+  NEON_FCVTNU_scalar     = NEON_Q | NEONScalar | NEON_FCVTNU,
+  NEON_FCVTPS_scalar     = NEON_Q | NEONScalar | NEON_FCVTPS,
+  NEON_FCVTPU_scalar     = NEON_Q | NEONScalar | NEON_FCVTPU,
+  NEON_FCVTMS_scalar     = NEON_Q | NEONScalar | NEON_FCVTMS,
+  NEON_FCVTMU_scalar     = NEON_Q | NEONScalar | NEON_FCVTMU,
+  NEON_FCVTZS_scalar     = NEON_Q | NEONScalar | NEON_FCVTZS,
+  NEON_FCVTZU_scalar     = NEON_Q | NEONScalar | NEON_FCVTZU,
+  NEON_FCVTAS_scalar     = NEON_Q | NEONScalar | NEON_FCVTAS,
+  NEON_FCVTAU_scalar     = NEON_Q | NEONScalar | NEON_FCVTAU,
+  NEON_FCVTXN_scalar     = NEON_Q | NEONScalar | NEON_FCVTXN
+};
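Every *_scalar entry above is the matching vector opcode with NEON_Q and NEONScalar OR-ed in, so the scalar encodings land on the NEONScalar2RegMiscFixed pattern. A compile-time check of that composition, assuming NEON_Q = 0x40000000, NEONScalar = 0x10000000, and NEON_ABS's vector encoding 0x0E20B800, all defined earlier in this header (outside this hunk):

    #include <cstdint>

    // Assumed values from earlier in this header (not shown in this hunk).
    const uint32_t kNeonQ      = 0x40000000;  // NEON_Q
    const uint32_t kNeonScalar = 0x10000000;  // NEONScalar
    const uint32_t kNeonAbs    = 0x0E20B800;  // NEON_ABS (vector form)

    // NEON_ABS_scalar == NEON_Q | NEONScalar | NEON_ABS, and its fixed bits
    // match NEONScalar2RegMiscFixed under NEONScalar2RegMiscFMask.
    static_assert((kNeonQ | kNeonScalar | kNeonAbs) == 0x5E20B800,
                  "scalar abs encoding");
    static_assert(((kNeonQ | kNeonScalar | kNeonAbs) & 0xDF3E0C00) == 0x5E200800,
                  "fixed bits of the scalar 2-reg-misc class");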
+
+// NEON scalar instructions with three same-type operands.
+enum NEONScalar3SameOp {
+  NEONScalar3SameFixed = 0x5E200400,
+  NEONScalar3SameFMask = 0xDF200400,
+  NEONScalar3SameMask  = 0xFF20FC00,
+  NEON_ADD_scalar    = NEON_Q | NEONScalar | NEON_ADD,
+  NEON_CMEQ_scalar   = NEON_Q | NEONScalar | NEON_CMEQ,
+  NEON_CMGE_scalar   = NEON_Q | NEONScalar | NEON_CMGE,
+  NEON_CMGT_scalar   = NEON_Q | NEONScalar | NEON_CMGT,
+  NEON_CMHI_scalar   = NEON_Q | NEONScalar | NEON_CMHI,
+  NEON_CMHS_scalar   = NEON_Q | NEONScalar | NEON_CMHS,
+  NEON_CMTST_scalar  = NEON_Q | NEONScalar | NEON_CMTST,
+  NEON_SUB_scalar    = NEON_Q | NEONScalar | NEON_SUB,
+  NEON_UQADD_scalar  = NEON_Q | NEONScalar | NEON_UQADD,
+  NEON_SQADD_scalar  = NEON_Q | NEONScalar | NEON_SQADD,
+  NEON_UQSUB_scalar  = NEON_Q | NEONScalar | NEON_UQSUB,
+  NEON_SQSUB_scalar  = NEON_Q | NEONScalar | NEON_SQSUB,
+  NEON_USHL_scalar   = NEON_Q | NEONScalar | NEON_USHL,
+  NEON_SSHL_scalar   = NEON_Q | NEONScalar | NEON_SSHL,
+  NEON_UQSHL_scalar  = NEON_Q | NEONScalar | NEON_UQSHL,
+  NEON_SQSHL_scalar  = NEON_Q | NEONScalar | NEON_SQSHL,
+  NEON_URSHL_scalar  = NEON_Q | NEONScalar | NEON_URSHL,
+  NEON_SRSHL_scalar  = NEON_Q | NEONScalar | NEON_SRSHL,
+  NEON_UQRSHL_scalar = NEON_Q | NEONScalar | NEON_UQRSHL,
+  NEON_SQRSHL_scalar = NEON_Q | NEONScalar | NEON_SQRSHL,
+  NEON_SQDMULH_scalar = NEON_Q | NEONScalar | NEON_SQDMULH,
+  NEON_SQRDMULH_scalar = NEON_Q | NEONScalar | NEON_SQRDMULH,
+
+  // NEON floating point scalar instructions with three same-type operands.
+  NEONScalar3SameFPFixed = NEONScalar3SameFixed | 0x0000C000,
+  NEONScalar3SameFPFMask = NEONScalar3SameFMask | 0x0000C000,
+  NEONScalar3SameFPMask  = NEONScalar3SameMask | 0x00800000,
+  NEON_FACGE_scalar   = NEON_Q | NEONScalar | NEON_FACGE,
+  NEON_FACGT_scalar   = NEON_Q | NEONScalar | NEON_FACGT,
+  NEON_FCMEQ_scalar   = NEON_Q | NEONScalar | NEON_FCMEQ,
+  NEON_FCMGE_scalar   = NEON_Q | NEONScalar | NEON_FCMGE,
+  NEON_FCMGT_scalar   = NEON_Q | NEONScalar | NEON_FCMGT,
+  NEON_FMULX_scalar   = NEON_Q | NEONScalar | NEON_FMULX,
+  NEON_FRECPS_scalar  = NEON_Q | NEONScalar | NEON_FRECPS,
+  NEON_FRSQRTS_scalar = NEON_Q | NEONScalar | NEON_FRSQRTS,
+  NEON_FABD_scalar    = NEON_Q | NEONScalar | NEON_FABD
+};
+
+// NEON scalar instructions with three different-type operands.
+enum NEONScalar3DiffOp {
+  NEONScalar3DiffFixed = 0x5E200000,
+  NEONScalar3DiffFMask = 0xDF200C00,
+  NEONScalar3DiffMask  = NEON_Q | NEONScalar | NEON3DifferentMask,
+  NEON_SQDMLAL_scalar  = NEON_Q | NEONScalar | NEON_SQDMLAL,
+  NEON_SQDMLSL_scalar  = NEON_Q | NEONScalar | NEON_SQDMLSL,
+  NEON_SQDMULL_scalar  = NEON_Q | NEONScalar | NEON_SQDMULL
+};
+
+// NEON scalar instructions with indexed element operand.
+enum NEONScalarByIndexedElementOp {
+  NEONScalarByIndexedElementFixed = 0x5F000000,
+  NEONScalarByIndexedElementFMask = 0xDF000400,
+  NEONScalarByIndexedElementMask  = 0xFF00F400,
+  NEON_SQDMLAL_byelement_scalar  = NEON_Q | NEONScalar | NEON_SQDMLAL_byelement,
+  NEON_SQDMLSL_byelement_scalar  = NEON_Q | NEONScalar | NEON_SQDMLSL_byelement,
+  NEON_SQDMULL_byelement_scalar  = NEON_Q | NEONScalar | NEON_SQDMULL_byelement,
+  NEON_SQDMULH_byelement_scalar  = NEON_Q | NEONScalar | NEON_SQDMULH_byelement,
+  NEON_SQRDMULH_byelement_scalar
+    = NEON_Q | NEONScalar | NEON_SQRDMULH_byelement,
+
+  // Floating point instructions.
+  NEONScalarByIndexedElementFPFixed
+    = NEONScalarByIndexedElementFixed | 0x00800000,
+  NEONScalarByIndexedElementFPMask
+    = NEONScalarByIndexedElementMask | 0x00800000,
+  NEON_FMLA_byelement_scalar  = NEON_Q | NEONScalar | NEON_FMLA_byelement,
+  NEON_FMLS_byelement_scalar  = NEON_Q | NEONScalar | NEON_FMLS_byelement,
+  NEON_FMUL_byelement_scalar  = NEON_Q | NEONScalar | NEON_FMUL_byelement,
+  NEON_FMULX_byelement_scalar = NEON_Q | NEONScalar | NEON_FMULX_byelement
+};
+
+// NEON scalar register copy.
+enum NEONScalarCopyOp {
+  NEONScalarCopyFixed = 0x5E000400,
+  NEONScalarCopyFMask = 0xDFE08400,
+  NEONScalarCopyMask  = 0xFFE0FC00,
+  NEON_DUP_ELEMENT_scalar = NEON_Q | NEONScalar | NEON_DUP_ELEMENT
+};
+
+// NEON scalar pairwise instructions.
+enum NEONScalarPairwiseOp {
+  NEONScalarPairwiseFixed = 0x5E300800,
+  NEONScalarPairwiseFMask = 0xDF3E0C00,
+  NEONScalarPairwiseMask  = 0xFFB1F800,
+  NEON_ADDP_scalar    = NEONScalarPairwiseFixed | 0x0081B000,
+  NEON_FMAXNMP_scalar = NEONScalarPairwiseFixed | 0x2000C000,
+  NEON_FMINNMP_scalar = NEONScalarPairwiseFixed | 0x2080C000,
+  NEON_FADDP_scalar   = NEONScalarPairwiseFixed | 0x2000D000,
+  NEON_FMAXP_scalar   = NEONScalarPairwiseFixed | 0x2000F000,
+  NEON_FMINP_scalar   = NEONScalarPairwiseFixed | 0x2080F000
+};
+
+// NEON scalar shift immediate.
+enum NEONScalarShiftImmediateOp {
+  NEONScalarShiftImmediateFixed = 0x5F000400,
+  NEONScalarShiftImmediateFMask = 0xDF800400,
+  NEONScalarShiftImmediateMask  = 0xFF80FC00,
+  NEON_SHL_scalar  = NEON_Q | NEONScalar | NEON_SHL,
+  NEON_SLI_scalar  = NEON_Q | NEONScalar | NEON_SLI,
+  NEON_SRI_scalar  = NEON_Q | NEONScalar | NEON_SRI,
+  NEON_SSHR_scalar = NEON_Q | NEONScalar | NEON_SSHR,
+  NEON_USHR_scalar = NEON_Q | NEONScalar | NEON_USHR,
+  NEON_SRSHR_scalar = NEON_Q | NEONScalar | NEON_SRSHR,
+  NEON_URSHR_scalar = NEON_Q | NEONScalar | NEON_URSHR,
+  NEON_SSRA_scalar = NEON_Q | NEONScalar | NEON_SSRA,
+  NEON_USRA_scalar = NEON_Q | NEONScalar | NEON_USRA,
+  NEON_SRSRA_scalar = NEON_Q | NEONScalar | NEON_SRSRA,
+  NEON_URSRA_scalar = NEON_Q | NEONScalar | NEON_URSRA,
+  NEON_UQSHRN_scalar = NEON_Q | NEONScalar | NEON_UQSHRN,
+  NEON_UQRSHRN_scalar = NEON_Q | NEONScalar | NEON_UQRSHRN,
+  NEON_SQSHRN_scalar = NEON_Q | NEONScalar | NEON_SQSHRN,
+  NEON_SQRSHRN_scalar = NEON_Q | NEONScalar | NEON_SQRSHRN,
+  NEON_SQSHRUN_scalar = NEON_Q | NEONScalar | NEON_SQSHRUN,
+  NEON_SQRSHRUN_scalar = NEON_Q | NEONScalar | NEON_SQRSHRUN,
+  NEON_SQSHLU_scalar = NEON_Q | NEONScalar | NEON_SQSHLU,
+  NEON_SQSHL_imm_scalar  = NEON_Q | NEONScalar | NEON_SQSHL_imm,
+  NEON_UQSHL_imm_scalar  = NEON_Q | NEONScalar | NEON_UQSHL_imm,
+  NEON_SCVTF_imm_scalar = NEON_Q | NEONScalar | NEON_SCVTF_imm,
+  NEON_UCVTF_imm_scalar = NEON_Q | NEONScalar | NEON_UCVTF_imm,
+  NEON_FCVTZS_imm_scalar = NEON_Q | NEONScalar | NEON_FCVTZS_imm,
+  NEON_FCVTZU_imm_scalar = NEON_Q | NEONScalar | NEON_FCVTZU_imm
+};
+
 // Unimplemented and unallocated instructions. These are defined to make fixed
 // bit assertion easier.
 enum UnimplementedOp {
diff --git a/disas/libvixl/a64/cpu-a64.h b/disas/libvixl/vixl/a64/cpu-a64.h
index 59b7974a19..cdf09a6af1 100644
--- a/disas/libvixl/a64/cpu-a64.h
+++ b/disas/libvixl/vixl/a64/cpu-a64.h
@@ -1,4 +1,4 @@
-// Copyright 2013, ARM Limited
+// Copyright 2014, ARM Limited
 // All rights reserved.
 //
 // Redistribution and use in source and binary forms, with or without
@@ -27,8 +27,8 @@
 #ifndef VIXL_CPU_A64_H
 #define VIXL_CPU_A64_H
 
-#include "globals.h"
-#include "instructions-a64.h"
+#include "vixl/globals.h"
+#include "vixl/a64/instructions-a64.h"
 
 namespace vixl {
 
diff --git a/disas/libvixl/a64/decoder-a64.cc b/disas/libvixl/vixl/a64/decoder-a64.cc
index 82591ca309..5ba2d3ce04 100644
--- a/disas/libvixl/a64/decoder-a64.cc
+++ b/disas/libvixl/vixl/a64/decoder-a64.cc
@@ -1,4 +1,4 @@
-// Copyright 2013, ARM Limited
+// Copyright 2014, ARM Limited
 // All rights reserved.
 //
 // Redistribution and use in source and binary forms, with or without
@@ -24,9 +24,9 @@
 // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include "globals.h"
-#include "utils.h"
-#include "a64/decoder-a64.h"
+#include "vixl/globals.h"
+#include "vixl/utils.h"
+#include "vixl/a64/decoder-a64.h"
 
 namespace vixl {
 
@@ -271,6 +271,11 @@ void Decoder::DecodeLoadStore(const Instruction* instr) {
               (instr->Bits(27, 24) == 0x9) ||
               (instr->Bits(27, 24) == 0xC) ||
               (instr->Bits(27, 24) == 0xD) );
+  // TODO(all): rearrange the tree to integrate this branch.
+  if ((instr->Bit(28) == 0) && (instr->Bit(29) == 0) && (instr->Bit(26) == 1)) {
+    DecodeNEONLoadStore(instr);
+    return;
+  }
 
   if (instr->Bit(24) == 0) {
     if (instr->Bit(28) == 0) {
@@ -278,7 +283,7 @@ void Decoder::DecodeLoadStore(const Instruction* instr) {
         if (instr->Bit(26) == 0) {
           VisitLoadStoreExclusive(instr);
         } else {
-          DecodeAdvSIMDLoadStore(instr);
+          VIXL_UNREACHABLE();
         }
       } else {
         if ((instr->Bits(31, 30) == 0x3) ||
@@ -483,6 +488,7 @@ void Decoder::DecodeDataProcessing(const Instruction* instr) {
         case 6: {
           if (instr->Bit(29) == 0x1) {
             VisitUnallocated(instr);
+            VIXL_FALLTHROUGH();
           } else {
             if (instr->Bit(30) == 0) {
               if ((instr->Bit(15) == 0x1) ||
@@ -556,18 +562,15 @@ void Decoder::DecodeDataProcessing(const Instruction* instr) {
 void Decoder::DecodeFP(const Instruction* instr) {
   VIXL_ASSERT((instr->Bits(27, 24) == 0xE) ||
               (instr->Bits(27, 24) == 0xF));
-
   if (instr->Bit(28) == 0) {
-    DecodeAdvSIMDDataProcessing(instr);
+    DecodeNEONVectorDataProcessing(instr);
   } else {
-    if (instr->Bit(29) == 1) {
+    if (instr->Bits(31, 30) == 0x3) {
       VisitUnallocated(instr);
+    } else if (instr->Bits(31, 30) == 0x1) {
+      DecodeNEONScalarDataProcessing(instr);
     } else {
-      if (instr->Bits(31, 30) == 0x3) {
-        VisitUnallocated(instr);
-      } else if (instr->Bits(31, 30) == 0x1) {
-        DecodeAdvSIMDDataProcessing(instr);
-      } else {
+      if (instr->Bit(29) == 0) {
         if (instr->Bit(24) == 0) {
           if (instr->Bit(21) == 0) {
             if ((instr->Bit(23) == 1) ||
@@ -674,23 +677,190 @@ void Decoder::DecodeFP(const Instruction* instr) {
             VisitFPDataProcessing3Source(instr);
           }
         }
+      } else {
+        VisitUnallocated(instr);
       }
     }
   }
 }
 
 
-void Decoder::DecodeAdvSIMDLoadStore(const Instruction* instr) {
-  // TODO: Implement Advanced SIMD load/store instruction decode.
+void Decoder::DecodeNEONLoadStore(const Instruction* instr) {
   VIXL_ASSERT(instr->Bits(29, 25) == 0x6);
-  VisitUnimplemented(instr);
+  if (instr->Bit(31) == 0) {
+    if ((instr->Bit(24) == 0) && (instr->Bit(21) == 1)) {
+      VisitUnallocated(instr);
+      return;
+    }
+
+    if (instr->Bit(23) == 0) {
+      if (instr->Bits(20, 16) == 0) {
+        if (instr->Bit(24) == 0) {
+          VisitNEONLoadStoreMultiStruct(instr);
+        } else {
+          VisitNEONLoadStoreSingleStruct(instr);
+        }
+      } else {
+        VisitUnallocated(instr);
+      }
+    } else {
+      if (instr->Bit(24) == 0) {
+        VisitNEONLoadStoreMultiStructPostIndex(instr);
+      } else {
+        VisitNEONLoadStoreSingleStructPostIndex(instr);
+      }
+    }
+  } else {
+    VisitUnallocated(instr);
+  }
+}
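As a worked example of the tree above, here is the path a plain ld1 load takes down to its visitor. The Bits() helper mirrors the Instruction accessors; the encoding is assumed from the ARM ARM:

    #include <cstdint>
    #include <cassert>

    // Mirrors Instruction::Bits(hi, lo) for a raw instruction word.
    static uint32_t Bits(uint32_t v, int hi, int lo) {
      return (v >> lo) & ((1u << (hi - lo + 1)) - 1);
    }

    int main() {
      const uint32_t ld1 = 0x4C407000;    // assumed: ld1 {v0.16b}, [x0]
      assert(Bits(ld1, 29, 25) == 0x6);   // entry condition of DecodeNEONLoadStore
      assert(Bits(ld1, 31, 31) == 0);     // top-level allocated check passes
      assert(Bits(ld1, 21, 21) == 0);     // avoids the Bit(24)==0 && Bit(21)==1 hole
      assert(Bits(ld1, 23, 23) == 0);     // no post-index addressing
      assert(Bits(ld1, 20, 16) == 0);     // Rm field must be zero here
      assert(Bits(ld1, 24, 24) == 0);     // => VisitNEONLoadStoreMultiStruct(instr)
      return 0;
    }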
+
+
+void Decoder::DecodeNEONVectorDataProcessing(const Instruction* instr) {
+  VIXL_ASSERT(instr->Bits(28, 25) == 0x7);
+  if (instr->Bit(31) == 0) {
+    if (instr->Bit(24) == 0) {
+      if (instr->Bit(21) == 0) {
+        if (instr->Bit(15) == 0) {
+          if (instr->Bit(10) == 0) {
+            if (instr->Bit(29) == 0) {
+              if (instr->Bit(11) == 0) {
+                VisitNEONTable(instr);
+              } else {
+                VisitNEONPerm(instr);
+              }
+            } else {
+              VisitNEONExtract(instr);
+            }
+          } else {
+            if (instr->Bits(23, 22) == 0) {
+              VisitNEONCopy(instr);
+            } else {
+              VisitUnallocated(instr);
+            }
+          }
+        } else {
+          VisitUnallocated(instr);
+        }
+      } else {
+        if (instr->Bit(10) == 0) {
+          if (instr->Bit(11) == 0) {
+            VisitNEON3Different(instr);
+          } else {
+            if (instr->Bits(18, 17) == 0) {
+              if (instr->Bit(20) == 0) {
+                if (instr->Bit(19) == 0) {
+                  VisitNEON2RegMisc(instr);
+                } else {
+                  if (instr->Bits(30, 29) == 0x2) {
+                    VisitCryptoAES(instr);
+                  } else {
+                    VisitUnallocated(instr);
+                  }
+                }
+              } else {
+                if (instr->Bit(19) == 0) {
+                  VisitNEONAcrossLanes(instr);
+                } else {
+                  VisitUnallocated(instr);
+                }
+              }
+            } else {
+              VisitUnallocated(instr);
+            }
+          }
+        } else {
+          VisitNEON3Same(instr);
+        }
+      }
+    } else {
+      if (instr->Bit(10) == 0) {
+        VisitNEONByIndexedElement(instr);
+      } else {
+        if (instr->Bit(23) == 0) {
+          if (instr->Bits(22, 19) == 0) {
+            VisitNEONModifiedImmediate(instr);
+          } else {
+            VisitNEONShiftImmediate(instr);
+          }
+        } else {
+          VisitUnallocated(instr);
+        }
+      }
+    }
+  } else {
+    VisitUnallocated(instr);
+  }
 }
 
 
-void Decoder::DecodeAdvSIMDDataProcessing(const Instruction* instr) {
-  // TODO: Implement Advanced SIMD data processing instruction decode.
-  VIXL_ASSERT(instr->Bits(27, 25) == 0x7);
-  VisitUnimplemented(instr);
+void Decoder::DecodeNEONScalarDataProcessing(const Instruction* instr) {
+  VIXL_ASSERT(instr->Bits(28, 25) == 0xF);
+  if (instr->Bit(24) == 0) {
+    if (instr->Bit(21) == 0) {
+      if (instr->Bit(15) == 0) {
+        if (instr->Bit(10) == 0) {
+          if (instr->Bit(29) == 0) {
+            if (instr->Bit(11) == 0) {
+              VisitCrypto3RegSHA(instr);
+            } else {
+              VisitUnallocated(instr);
+            }
+          } else {
+            VisitUnallocated(instr);
+          }
+        } else {
+          if (instr->Bits(23, 22) == 0) {
+            VisitNEONScalarCopy(instr);
+          } else {
+            VisitUnallocated(instr);
+          }
+        }
+      } else {
+        VisitUnallocated(instr);
+      }
+    } else {
+      if (instr->Bit(10) == 0) {
+        if (instr->Bit(11) == 0) {
+          VisitNEONScalar3Diff(instr);
+        } else {
+          if (instr->Bits(18, 17) == 0) {
+            if (instr->Bit(20) == 0) {
+              if (instr->Bit(19) == 0) {
+                VisitNEONScalar2RegMisc(instr);
+              } else {
+                if (instr->Bit(29) == 0) {
+                  VisitCrypto2RegSHA(instr);
+                } else {
+                  VisitUnallocated(instr);
+                }
+              }
+            } else {
+              if (instr->Bit(19) == 0) {
+                VisitNEONScalarPairwise(instr);
+              } else {
+                VisitUnallocated(instr);
+              }
+            }
+          } else {
+            VisitUnallocated(instr);
+          }
+        }
+      } else {
+        VisitNEONScalar3Same(instr);
+      }
+    }
+  } else {
+    if (instr->Bit(10) == 0) {
+      VisitNEONScalarByIndexedElement(instr);
+    } else {
+      if (instr->Bit(23) == 0) {
+        VisitNEONScalarShiftImmediate(instr);
+      } else {
+        VisitUnallocated(instr);
+      }
+    }
+  }
 }
 
 
diff --git a/disas/libvixl/a64/decoder-a64.h b/disas/libvixl/vixl/a64/decoder-a64.h
index fd08d6c1f4..b3f04f68fc 100644
--- a/disas/libvixl/a64/decoder-a64.h
+++ b/disas/libvixl/vixl/a64/decoder-a64.h
@@ -1,4 +1,4 @@
-// Copyright 2013, ARM Limited
+// Copyright 2014, ARM Limited
 // All rights reserved.
 //
 // Redistribution and use in source and binary forms, with or without
@@ -29,13 +29,13 @@
 
 #include <list>
 
-#include "globals.h"
-#include "a64/instructions-a64.h"
+#include "vixl/globals.h"
+#include "vixl/a64/instructions-a64.h"
 
 
 // List macro containing all visitors needed by the decoder class.
 
-#define VISITOR_LIST(V)             \
+#define VISITOR_LIST_THAT_RETURN(V) \
   V(PCRelAddressing)                \
   V(AddSubImmediate)                \
   V(LogicalImmediate)               \
@@ -79,8 +79,39 @@
   V(FPDataProcessing3Source)        \
   V(FPIntegerConvert)               \
   V(FPFixedPointConvert)            \
-  V(Unallocated)                    \
-  V(Unimplemented)
+  V(Crypto2RegSHA)                  \
+  V(Crypto3RegSHA)                  \
+  V(CryptoAES)                      \
+  V(NEON2RegMisc)                   \
+  V(NEON3Different)                 \
+  V(NEON3Same)                      \
+  V(NEONAcrossLanes)                \
+  V(NEONByIndexedElement)           \
+  V(NEONCopy)                       \
+  V(NEONExtract)                    \
+  V(NEONLoadStoreMultiStruct)       \
+  V(NEONLoadStoreMultiStructPostIndex)  \
+  V(NEONLoadStoreSingleStruct)      \
+  V(NEONLoadStoreSingleStructPostIndex) \
+  V(NEONModifiedImmediate)          \
+  V(NEONScalar2RegMisc)             \
+  V(NEONScalar3Diff)                \
+  V(NEONScalar3Same)                \
+  V(NEONScalarByIndexedElement)     \
+  V(NEONScalarCopy)                 \
+  V(NEONScalarPairwise)             \
+  V(NEONScalarShiftImmediate)       \
+  V(NEONShiftImmediate)             \
+  V(NEONTable)                      \
+  V(NEONPerm)                       \
+
+#define VISITOR_LIST_THAT_DONT_RETURN(V)  \
+  V(Unallocated)                          \
+  V(Unimplemented)                        \
+
+#define VISITOR_LIST(V)             \
+  VISITOR_LIST_THAT_RETURN(V)       \
+  VISITOR_LIST_THAT_DONT_RETURN(V)  \
 
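The list is split so the Unallocated/Unimplemented visitors can be told apart from the ones that return normally, while VISITOR_LIST still stamps out everything. The X-macro pattern works as in this stand-alone sketch (MINI_LIST and DECLARE are illustrative names, not vixl's; the real consumer applies a DECLARE-style macro to VISITOR_LIST elsewhere in this header):

    #include <cstdio>

    #define MINI_LIST(V) V(PCRelAddressing) V(Unallocated)

    // Each V(Name) stamps out one member.
    #define DECLARE(A) void Visit##A() { printf("Visit%s called\n", #A); }
    struct MiniVisitor {
      MINI_LIST(DECLARE)
    };
    #undef DECLARE

    int main() {
      MiniVisitor v;
      v.VisitPCRelAddressing();  // prints: VisitPCRelAddressing called
      v.VisitUnallocated();      // prints: VisitUnallocated called
      return 0;
    }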
 namespace vixl {
 
@@ -222,12 +253,17 @@ class Decoder {
   // Decode the Advanced SIMD (NEON) load/store part of the instruction tree,
   // and call the corresponding visitors.
   // On entry, instruction bits 29:25 = 0x6.
-  void DecodeAdvSIMDLoadStore(const Instruction* instr);
+  void DecodeNEONLoadStore(const Instruction* instr);
 
-  // Decode the Advanced SIMD (NEON) data processing part of the instruction
-  // tree, and call the corresponding visitors.
-  // On entry, instruction bits 27:25 = 0x7.
-  void DecodeAdvSIMDDataProcessing(const Instruction* instr);
+  // Decode the Advanced SIMD (NEON) vector data processing part of the
+  // instruction tree, and call the corresponding visitors.
+  // On entry, instruction bits 28:25 = 0x7.
+  void DecodeNEONVectorDataProcessing(const Instruction* instr);
+
+  // Decode the Advanced SIMD (NEON) scalar data processing part of the
+  // instruction tree, and call the corresponding visitors.
+  // On entry, instruction bits 28:25 = 0xF.
+  void DecodeNEONScalarDataProcessing(const Instruction* instr);
 
  private:
   // Visitors are registered in a list.
diff --git a/disas/libvixl/vixl/a64/disasm-a64.cc b/disas/libvixl/vixl/a64/disasm-a64.cc
new file mode 100644
index 0000000000..20caba4317
--- /dev/null
+++ b/disas/libvixl/vixl/a64/disasm-a64.cc
@@ -0,0 +1,3487 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+//   * Redistributions of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//   * Redistributions in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other materials provided with the distribution.
+//   * Neither the name of ARM Limited nor the names of its contributors may be
+//     used to endorse or promote products derived from this software without
+//     specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <cstdlib>
+#include "vixl/a64/disasm-a64.h"
+
+namespace vixl {
+
+Disassembler::Disassembler() {
+  buffer_size_ = 256;
+  buffer_ = reinterpret_cast<char*>(malloc(buffer_size_));
+  buffer_pos_ = 0;
+  own_buffer_ = true;
+  code_address_offset_ = 0;
+}
+
+
+Disassembler::Disassembler(char* text_buffer, int buffer_size) {
+  buffer_size_ = buffer_size;
+  buffer_ = text_buffer;
+  buffer_pos_ = 0;
+  own_buffer_ = false;
+  code_address_offset_ = 0;
+}
+
+
+Disassembler::~Disassembler() {
+  if (own_buffer_) {
+    free(buffer_);
+  }
+}
+
+
+char* Disassembler::GetOutput() {
+  return buffer_;
+}
+
+
+void Disassembler::VisitAddSubImmediate(const Instruction* instr) {
+  bool rd_is_zr = RdIsZROrSP(instr);
+  bool stack_op = (rd_is_zr || RnIsZROrSP(instr)) &&
+                  (instr->ImmAddSub() == 0);
+  const char *mnemonic = "";
+  const char *form = "'Rds, 'Rns, 'IAddSub";
+  const char *form_cmp = "'Rns, 'IAddSub";
+  const char *form_mov = "'Rds, 'Rns";
+
+  switch (instr->Mask(AddSubImmediateMask)) {
+    case ADD_w_imm:
+    case ADD_x_imm: {
+      mnemonic = "add";
+      if (stack_op) {
+        mnemonic = "mov";
+        form = form_mov;
+      }
+      break;
+    }
+    case ADDS_w_imm:
+    case ADDS_x_imm: {
+      mnemonic = "adds";
+      if (rd_is_zr) {
+        mnemonic = "cmn";
+        form = form_cmp;
+      }
+      break;
+    }
+    case SUB_w_imm:
+    case SUB_x_imm: mnemonic = "sub"; break;
+    case SUBS_w_imm:
+    case SUBS_x_imm: {
+      mnemonic = "subs";
+      if (rd_is_zr) {
+        mnemonic = "cmp";
+        form = form_cmp;
+      }
+      break;
+    }
+    default: VIXL_UNREACHABLE();
+  }
+  Format(instr, mnemonic, form);
+}
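The mnemonic/form pairs feed Format(), which walks the form string and expands the 'R.../'I... placeholders via the Substitute helpers later in this file (outside this hunk). A toy version of that walk, handling just two placeholders:

    #include <cstdio>
    #include <cstring>

    // Toy substituter: handles only 'Rd and 'Rn; vixl's real Substitute()
    // understands the full placeholder vocabulary ('Rds, 'IAddSub, ...).
    static void MiniFormat(const char* mnemonic, const char* form,
                           unsigned rd, unsigned rn) {
      printf("%s ", mnemonic);
      for (const char* p = form; *p != '\0'; p++) {
        if (strncmp(p, "'Rd", 3) == 0)      { printf("w%u", rd); p += 2; }
        else if (strncmp(p, "'Rn", 3) == 0) { printf("w%u", rn); p += 2; }
        else                                { putchar(*p); }
      }
      putchar('\n');
    }

    int main() {
      MiniFormat("mov", "'Rd, 'Rn", 1, 2);  // prints: mov w1, w2
      return 0;
    }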
+
+
+void Disassembler::VisitAddSubShifted(const Instruction* instr) {
+  bool rd_is_zr = RdIsZROrSP(instr);
+  bool rn_is_zr = RnIsZROrSP(instr);
+  const char *mnemonic = "";
+  const char *form = "'Rd, 'Rn, 'Rm'NDP";
+  const char *form_cmp = "'Rn, 'Rm'NDP";
+  const char *form_neg = "'Rd, 'Rm'NDP";
+
+  switch (instr->Mask(AddSubShiftedMask)) {
+    case ADD_w_shift:
+    case ADD_x_shift: mnemonic = "add"; break;
+    case ADDS_w_shift:
+    case ADDS_x_shift: {
+      mnemonic = "adds";
+      if (rd_is_zr) {
+        mnemonic = "cmn";
+        form = form_cmp;
+      }
+      break;
+    }
+    case SUB_w_shift:
+    case SUB_x_shift: {
+      mnemonic = "sub";
+      if (rn_is_zr) {
+        mnemonic = "neg";
+        form = form_neg;
+      }
+      break;
+    }
+    case SUBS_w_shift:
+    case SUBS_x_shift: {
+      mnemonic = "subs";
+      if (rd_is_zr) {
+        mnemonic = "cmp";
+        form = form_cmp;
+      } else if (rn_is_zr) {
+        mnemonic = "negs";
+        form = form_neg;
+      }
+      break;
+    }
+    default: VIXL_UNREACHABLE();
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitAddSubExtended(const Instruction* instr) {
+  bool rd_is_zr = RdIsZROrSP(instr);
+  const char *mnemonic = "";
+  Extend mode = static_cast<Extend>(instr->ExtendMode());
+  const char *form = ((mode == UXTX) || (mode == SXTX)) ?
+                     "'Rds, 'Rns, 'Xm'Ext" : "'Rds, 'Rns, 'Wm'Ext";
+  const char *form_cmp = ((mode == UXTX) || (mode == SXTX)) ?
+                         "'Rns, 'Xm'Ext" : "'Rns, 'Wm'Ext";
+
+  switch (instr->Mask(AddSubExtendedMask)) {
+    case ADD_w_ext:
+    case ADD_x_ext: mnemonic = "add"; break;
+    case ADDS_w_ext:
+    case ADDS_x_ext: {
+      mnemonic = "adds";
+      if (rd_is_zr) {
+        mnemonic = "cmn";
+        form = form_cmp;
+      }
+      break;
+    }
+    case SUB_w_ext:
+    case SUB_x_ext: mnemonic = "sub"; break;
+    case SUBS_w_ext:
+    case SUBS_x_ext: {
+      mnemonic = "subs";
+      if (rd_is_zr) {
+        mnemonic = "cmp";
+        form = form_cmp;
+      }
+      break;
+    }
+    default: VIXL_UNREACHABLE();
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitAddSubWithCarry(const Instruction* instr) {
+  bool rn_is_zr = RnIsZROrSP(instr);
+  const char *mnemonic = "";
+  const char *form = "'Rd, 'Rn, 'Rm";
+  const char *form_neg = "'Rd, 'Rm";
+
+  switch (instr->Mask(AddSubWithCarryMask)) {
+    case ADC_w:
+    case ADC_x: mnemonic = "adc"; break;
+    case ADCS_w:
+    case ADCS_x: mnemonic = "adcs"; break;
+    case SBC_w:
+    case SBC_x: {
+      mnemonic = "sbc";
+      if (rn_is_zr) {
+        mnemonic = "ngc";
+        form = form_neg;
+      }
+      break;
+    }
+    case SBCS_w:
+    case SBCS_x: {
+      mnemonic = "sbcs";
+      if (rn_is_zr) {
+        mnemonic = "ngcs";
+        form = form_neg;
+      }
+      break;
+    }
+    default: VIXL_UNREACHABLE();
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLogicalImmediate(const Instruction* instr) {
+  bool rd_is_zr = RdIsZROrSP(instr);
+  bool rn_is_zr = RnIsZROrSP(instr);
+  const char *mnemonic = "";
+  const char *form = "'Rds, 'Rn, 'ITri";
+
+  if (instr->ImmLogical() == 0) {
+    // The immediate encoded in the instruction is not in the expected format.
+    Format(instr, "unallocated", "(LogicalImmediate)");
+    return;
+  }
+
+  switch (instr->Mask(LogicalImmediateMask)) {
+    case AND_w_imm:
+    case AND_x_imm: mnemonic = "and"; break;
+    case ORR_w_imm:
+    case ORR_x_imm: {
+      mnemonic = "orr";
+      unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSize
+                                                        : kWRegSize;
+      if (rn_is_zr && !IsMovzMovnImm(reg_size, instr->ImmLogical())) {
+        mnemonic = "mov";
+        form = "'Rds, 'ITri";
+      }
+      break;
+    }
+    case EOR_w_imm:
+    case EOR_x_imm: mnemonic = "eor"; break;
+    case ANDS_w_imm:
+    case ANDS_x_imm: {
+      mnemonic = "ands";
+      if (rd_is_zr) {
+        mnemonic = "tst";
+        form = "'Rn, 'ITri";
+      }
+      break;
+    }
+    default: VIXL_UNREACHABLE();
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+bool Disassembler::IsMovzMovnImm(unsigned reg_size, uint64_t value) {
+  VIXL_ASSERT((reg_size == kXRegSize) ||
+              ((reg_size == kWRegSize) && (value <= 0xffffffff)));
+
+  // Test for movz: 16 bits set at positions 0, 16, 32 or 48.
+  if (((value & UINT64_C(0xffffffffffff0000)) == 0) ||
+      ((value & UINT64_C(0xffffffff0000ffff)) == 0) ||
+      ((value & UINT64_C(0xffff0000ffffffff)) == 0) ||
+      ((value & UINT64_C(0x0000ffffffffffff)) == 0)) {
+    return true;
+  }
+
+  // Test for movn: NOT(16 bits set at positions 0, 16, 32 or 48).
+  if ((reg_size == kXRegSize) &&
+      (((~value & UINT64_C(0xffffffffffff0000)) == 0) ||
+       ((~value & UINT64_C(0xffffffff0000ffff)) == 0) ||
+       ((~value & UINT64_C(0xffff0000ffffffff)) == 0) ||
+       ((~value & UINT64_C(0x0000ffffffffffff)) == 0))) {
+    return true;
+  }
+  if ((reg_size == kWRegSize) &&
+      (((value & 0xffff0000) == 0xffff0000) ||
+       ((value & 0x0000ffff) == 0x0000ffff))) {
+    return true;
+  }
+  return false;
+}
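In other words, a value is a movz candidate when its set bits fit in a single aligned halfword, and a movn candidate when the complement's do. Two hand-picked 64-bit examples (mine, not from the source):

    #include <cstdint>
    #include <cassert>

    int main() {
      // 16 set bits at position 32: movz x0, #0xffff, lsl #32.
      uint64_t a = UINT64_C(0x0000ffff00000000);
      assert((a & UINT64_C(0xffff0000ffffffff)) == 0);  // passes the movz test

      // All-ones except the low halfword: movn x0, #0xedcb.
      uint64_t b = UINT64_C(0xffffffffffff1234);
      assert((~b & UINT64_C(0xffffffffffff0000)) == 0);  // passes the movn test
      return 0;
    }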
+
+
+void Disassembler::VisitLogicalShifted(const Instruction* instr) {
+  bool rd_is_zr = RdIsZROrSP(instr);
+  bool rn_is_zr = RnIsZROrSP(instr);
+  const char *mnemonic = "";
+  const char *form = "'Rd, 'Rn, 'Rm'NLo";
+
+  switch (instr->Mask(LogicalShiftedMask)) {
+    case AND_w:
+    case AND_x: mnemonic = "and"; break;
+    case BIC_w:
+    case BIC_x: mnemonic = "bic"; break;
+    case EOR_w:
+    case EOR_x: mnemonic = "eor"; break;
+    case EON_w:
+    case EON_x: mnemonic = "eon"; break;
+    case BICS_w:
+    case BICS_x: mnemonic = "bics"; break;
+    case ANDS_w:
+    case ANDS_x: {
+      mnemonic = "ands";
+      if (rd_is_zr) {
+        mnemonic = "tst";
+        form = "'Rn, 'Rm'NLo";
+      }
+      break;
+    }
+    case ORR_w:
+    case ORR_x: {
+      mnemonic = "orr";
+      if (rn_is_zr && (instr->ImmDPShift() == 0) && (instr->ShiftDP() == LSL)) {
+        mnemonic = "mov";
+        form = "'Rd, 'Rm";
+      }
+      break;
+    }
+    case ORN_w:
+    case ORN_x: {
+      mnemonic = "orn";
+      if (rn_is_zr) {
+        mnemonic = "mvn";
+        form = "'Rd, 'Rm'NLo";
+      }
+      break;
+    }
+    default: VIXL_UNREACHABLE();
+  }
+
+  Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitConditionalCompareRegister(const Instruction* instr) {
+  const char *mnemonic = "";
+  const char *form = "'Rn, 'Rm, 'INzcv, 'Cond";
+
+  switch (instr->Mask(ConditionalCompareRegisterMask)) {
+    case CCMN_w:
+    case CCMN_x: mnemonic = "ccmn"; break;
+    case CCMP_w:
+    case CCMP_x: mnemonic = "ccmp"; break;
+    default: VIXL_UNREACHABLE();
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitConditionalCompareImmediate(const Instruction* instr) {
+  const char *mnemonic = "";
+  const char *form = "'Rn, 'IP, 'INzcv, 'Cond";
+
+  switch (instr->Mask(ConditionalCompareImmediateMask)) {
+    case CCMN_w_imm:
+    case CCMN_x_imm: mnemonic = "ccmn"; break;
+    case CCMP_w_imm:
+    case CCMP_x_imm: mnemonic = "ccmp"; break;
+    default: VIXL_UNREACHABLE();
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitConditionalSelect(const Instruction* instr) {
+  bool rnm_is_zr = (RnIsZROrSP(instr) && RmIsZROrSP(instr));
+  bool rn_is_rm = (instr->Rn() == instr->Rm());
+  const char *mnemonic = "";
+  const char *form = "'Rd, 'Rn, 'Rm, 'Cond";
+  const char *form_test = "'Rd, 'CInv";
+  const char *form_update = "'Rd, 'Rn, 'CInv";
+
+  Condition cond = static_cast<Condition>(instr->Condition());
+  bool invertible_cond = (cond != al) && (cond != nv);
+
+  switch (instr->Mask(ConditionalSelectMask)) {
+    case CSEL_w:
+    case CSEL_x: mnemonic = "csel"; break;
+    case CSINC_w:
+    case CSINC_x: {
+      mnemonic = "csinc";
+      if (rnm_is_zr && invertible_cond) {
+        mnemonic = "cset";
+        form = form_test;
+      } else if (rn_is_rm && invertible_cond) {
+        mnemonic = "cinc";
+        form = form_update;
+      }
+      break;
+    }
+    case CSINV_w:
+    case CSINV_x: {
+      mnemonic = "csinv";
+      if (rnm_is_zr && invertible_cond) {
+        mnemonic = "csetm";
+        form = form_test;
+      } else if (rn_is_rm && invertible_cond) {
+        mnemonic = "cinv";
+        form = form_update;
+      }
+      break;
+    }
+    case CSNEG_w:
+    case CSNEG_x: {
+      mnemonic = "csneg";
+      if (rn_is_rm && invertible_cond) {
+        mnemonic = "cneg";
+        form = form_update;
+      }
+      break;
+    }
+    default: VIXL_UNREACHABLE();
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitBitfield(const Instruction* instr) {
+  unsigned s = instr->ImmS();
+  unsigned r = instr->ImmR();
+  unsigned rd_size_minus_1 =
+    ((instr->SixtyFourBits() == 1) ? kXRegSize : kWRegSize) - 1;
+  const char *mnemonic = "";
+  const char *form = "";
+  const char *form_shift_right = "'Rd, 'Rn, 'IBr";
+  const char *form_extend = "'Rd, 'Wn";
+  const char *form_bfiz = "'Rd, 'Rn, 'IBZ-r, 'IBs+1";
+  const char *form_bfx = "'Rd, 'Rn, 'IBr, 'IBs-r+1";
+  const char *form_lsl = "'Rd, 'Rn, 'IBZ-r";
+
+  switch (instr->Mask(BitfieldMask)) {
+    case SBFM_w:
+    case SBFM_x: {
+      mnemonic = "sbfx";
+      form = form_bfx;
+      if (r == 0) {
+        form = form_extend;
+        if (s == 7) {
+          mnemonic = "sxtb";
+        } else if (s == 15) {
+          mnemonic = "sxth";
+        } else if ((s == 31) && (instr->SixtyFourBits() == 1)) {
+          mnemonic = "sxtw";
+        } else {
+          form = form_bfx;
+        }
+      } else if (s == rd_size_minus_1) {
+        mnemonic = "asr";
+        form = form_shift_right;
+      } else if (s < r) {
+        mnemonic = "sbfiz";
+        form = form_bfiz;
+      }
+      break;
+    }
+    case UBFM_w:
+    case UBFM_x: {
+      mnemonic = "ubfx";
+      form = form_bfx;
+      if (r == 0) {
+        form = form_extend;
+        if (s == 7) {
+          mnemonic = "uxtb";
+        } else if (s == 15) {
+          mnemonic = "uxth";
+        } else {
+          form = form_bfx;
+        }
+      }
+      if (s == rd_size_minus_1) {
+        mnemonic = "lsr";
+        form = form_shift_right;
+      } else if (r == s + 1) {
+        mnemonic = "lsl";
+        form = form_lsl;
+      } else if (s < r) {
+        mnemonic = "ubfiz";
+        form = form_bfiz;
+      }
+      break;
+    }
+    case BFM_w:
+    case BFM_x: {
+      mnemonic = "bfxil";
+      form = form_bfx;
+      if (s < r) {
+        mnemonic = "bfi";
+        form = form_bfiz;
+      }
+    }
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitExtract(const Instruction* instr) {
+  const char *mnemonic = "";
+  const char *form = "'Rd, 'Rn, 'Rm, 'IExtract";
+
+  switch (instr->Mask(ExtractMask)) {
+    case EXTR_w:
+    case EXTR_x: {
+      if (instr->Rn() == instr->Rm()) {
+        mnemonic = "ror";
+        form = "'Rd, 'Rn, 'IExtract";
+      } else {
+        mnemonic = "extr";
+      }
+      break;
+    }
+    default: VIXL_UNREACHABLE();
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitPCRelAddressing(const Instruction* instr) {
+  switch (instr->Mask(PCRelAddressingMask)) {
+    case ADR: Format(instr, "adr", "'Xd, 'AddrPCRelByte"); break;
+    case ADRP: Format(instr, "adrp", "'Xd, 'AddrPCRelPage"); break;
+    default: Format(instr, "unimplemented", "(PCRelAddressing)");
+  }
+}
+
+
+void Disassembler::VisitConditionalBranch(const Instruction* instr) {
+  switch (instr->Mask(ConditionalBranchMask)) {
+    case B_cond: Format(instr, "b.'CBrn", "'TImmCond"); break;
+    default: VIXL_UNREACHABLE();
+  }
+}
+
+
+void Disassembler::VisitUnconditionalBranchToRegister(
+    const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "'Xn";
+
+  switch (instr->Mask(UnconditionalBranchToRegisterMask)) {
+    case BR: mnemonic = "br"; break;
+    case BLR: mnemonic = "blr"; break;
+    case RET: {
+      mnemonic = "ret";
+      if (instr->Rn() == kLinkRegCode) {
+        form = NULL;
+      }
+      break;
+    }
+    default: form = "(UnconditionalBranchToRegister)";
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitUnconditionalBranch(const Instruction* instr) {
+  const char *mnemonic = "";
+  const char *form = "'TImmUncn";
+
+  switch (instr->Mask(UnconditionalBranchMask)) {
+    case B: mnemonic = "b"; break;
+    case BL: mnemonic = "bl"; break;
+    default: VIXL_UNREACHABLE();
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitDataProcessing1Source(const Instruction* instr) {
+  const char *mnemonic = "";
+  const char *form = "'Rd, 'Rn";
+
+  switch (instr->Mask(DataProcessing1SourceMask)) {
+    #define FORMAT(A, B)  \
+    case A##_w:           \
+    case A##_x: mnemonic = B; break;
+    FORMAT(RBIT, "rbit");
+    FORMAT(REV16, "rev16");
+    FORMAT(REV, "rev");
+    FORMAT(CLZ, "clz");
+    FORMAT(CLS, "cls");
+    #undef FORMAT
+    case REV32_x: mnemonic = "rev32"; break;
+    default: VIXL_UNREACHABLE();
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitDataProcessing2Source(const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "'Rd, 'Rn, 'Rm";
+  const char *form_wwx = "'Wd, 'Wn, 'Xm";
+
+  switch (instr->Mask(DataProcessing2SourceMask)) {
+    #define FORMAT(A, B)  \
+    case A##_w:           \
+    case A##_x: mnemonic = B; break;
+    FORMAT(UDIV, "udiv");
+    FORMAT(SDIV, "sdiv");
+    FORMAT(LSLV, "lsl");
+    FORMAT(LSRV, "lsr");
+    FORMAT(ASRV, "asr");
+    FORMAT(RORV, "ror");
+    #undef FORMAT
+    case CRC32B: mnemonic = "crc32b"; break;
+    case CRC32H: mnemonic = "crc32h"; break;
+    case CRC32W: mnemonic = "crc32w"; break;
+    case CRC32X: mnemonic = "crc32x"; form = form_wwx; break;
+    case CRC32CB: mnemonic = "crc32cb"; break;
+    case CRC32CH: mnemonic = "crc32ch"; break;
+    case CRC32CW: mnemonic = "crc32cw"; break;
+    case CRC32CX: mnemonic = "crc32cx"; form = form_wwx; break;
+    default: form = "(DataProcessing2Source)";
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitDataProcessing3Source(const Instruction* instr) {
+  bool ra_is_zr = RaIsZROrSP(instr);
+  const char *mnemonic = "";
+  const char *form = "'Xd, 'Wn, 'Wm, 'Xa";
+  const char *form_rrr = "'Rd, 'Rn, 'Rm";
+  const char *form_rrrr = "'Rd, 'Rn, 'Rm, 'Ra";
+  const char *form_xww = "'Xd, 'Wn, 'Wm";
+  const char *form_xxx = "'Xd, 'Xn, 'Xm";
+
+  switch (instr->Mask(DataProcessing3SourceMask)) {
+    case MADD_w:
+    case MADD_x: {
+      mnemonic = "madd";
+      form = form_rrrr;
+      if (ra_is_zr) {
+        mnemonic = "mul";
+        form = form_rrr;
+      }
+      break;
+    }
+    case MSUB_w:
+    case MSUB_x: {
+      mnemonic = "msub";
+      form = form_rrrr;
+      if (ra_is_zr) {
+        mnemonic = "mneg";
+        form = form_rrr;
+      }
+      break;
+    }
+    case SMADDL_x: {
+      mnemonic = "smaddl";
+      if (ra_is_zr) {
+        mnemonic = "smull";
+        form = form_xww;
+      }
+      break;
+    }
+    case SMSUBL_x: {
+      mnemonic = "smsubl";
+      if (ra_is_zr) {
+        mnemonic = "smnegl";
+        form = form_xww;
+      }
+      break;
+    }
+    case UMADDL_x: {
+      mnemonic = "umaddl";
+      if (ra_is_zr) {
+        mnemonic = "umull";
+        form = form_xww;
+      }
+      break;
+    }
+    case UMSUBL_x: {
+      mnemonic = "umsubl";
+      if (ra_is_zr) {
+        mnemonic = "umnegl";
+        form = form_xww;
+      }
+      break;
+    }
+    case SMULH_x: {
+      mnemonic = "smulh";
+      form = form_xxx;
+      break;
+    }
+    case UMULH_x: {
+      mnemonic = "umulh";
+      form = form_xxx;
+      break;
+    }
+    default: VIXL_UNREACHABLE();
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitCompareBranch(const Instruction* instr) {
+  const char *mnemonic = "";
+  const char *form = "'Rt, 'TImmCmpa";
+
+  switch (instr->Mask(CompareBranchMask)) {
+    case CBZ_w:
+    case CBZ_x: mnemonic = "cbz"; break;
+    case CBNZ_w:
+    case CBNZ_x: mnemonic = "cbnz"; break;
+    default: VIXL_UNREACHABLE();
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitTestBranch(const Instruction* instr) {
+  const char *mnemonic = "";
+  // If the top bit of the immediate is clear, the tested register is
+  // disassembled as Wt, otherwise Xt. As the top bit of the immediate is
+  // encoded in bit 31 of the instruction, we can reuse the Rt form, which
+  // uses bit 31 (normally "sf") to choose the register size.
+  const char *form = "'Rt, 'IS, 'TImmTest";
+
+  switch (instr->Mask(TestBranchMask)) {
+    case TBZ: mnemonic = "tbz"; break;
+    case TBNZ: mnemonic = "tbnz"; break;
+    default: VIXL_UNREACHABLE();
+  }
+  Format(instr, mnemonic, form);
+}
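Concretely: tbz testing bit 3 leaves instruction bit 31 (the b5 field) clear, so 'Rt prints a W register, while testing bit 40 sets it and 'Rt prints an X register. A one-line check of that relationship:

    #include <cstdio>

    int main() {
      // TBZ/TBNZ encode the bit number as b5:b40, with b5 in instruction
      // bit 31 -- the same bit 'Rt consults to pick the register width.
      unsigned bits[] = {3, 40};
      for (unsigned i = 0; i < 2; i++) {
        printf("tbz bit %2u -> %c register\n", bits[i],
               (bits[i] >> 5) ? 'X' : 'W');
      }
      return 0;
    }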
+
+
+void Disassembler::VisitMoveWideImmediate(const Instruction* instr) {
+  const char *mnemonic = "";
+  const char *form = "'Rd, 'IMoveImm";
+
+  // Print the shift separately for movk, to make it clear which halfword will
+  // be overwritten. Movn and movz print the computed immediate, which includes
+  // the shift calculation.
+  switch (instr->Mask(MoveWideImmediateMask)) {
+    case MOVN_w:
+    case MOVN_x:
+      if ((instr->ImmMoveWide()) || (instr->ShiftMoveWide() == 0)) {
+        if ((instr->SixtyFourBits() == 0) && (instr->ImmMoveWide() == 0xffff)) {
+          mnemonic = "movn";
+        } else {
+          mnemonic = "mov";
+          form = "'Rd, 'IMoveNeg";
+        }
+      } else {
+        mnemonic = "movn";
+      }
+      break;
+    case MOVZ_w:
+    case MOVZ_x:
+      if ((instr->ImmMoveWide()) || (instr->ShiftMoveWide() == 0)) {
+        mnemonic = "mov";
+      } else {
+        mnemonic = "movz";
+      }
+      break;
+    case MOVK_w:
+    case MOVK_x: mnemonic = "movk"; form = "'Rd, 'IMoveLSL"; break;
+    default: VIXL_UNREACHABLE();
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+#define LOAD_STORE_LIST(V)    \
+  V(STRB_w, "strb", "'Wt")    \
+  V(STRH_w, "strh", "'Wt")    \
+  V(STR_w, "str", "'Wt")      \
+  V(STR_x, "str", "'Xt")      \
+  V(LDRB_w, "ldrb", "'Wt")    \
+  V(LDRH_w, "ldrh", "'Wt")    \
+  V(LDR_w, "ldr", "'Wt")      \
+  V(LDR_x, "ldr", "'Xt")      \
+  V(LDRSB_x, "ldrsb", "'Xt")  \
+  V(LDRSH_x, "ldrsh", "'Xt")  \
+  V(LDRSW_x, "ldrsw", "'Xt")  \
+  V(LDRSB_w, "ldrsb", "'Wt")  \
+  V(LDRSH_w, "ldrsh", "'Wt")  \
+  V(STR_b, "str", "'Bt")      \
+  V(STR_h, "str", "'Ht")      \
+  V(STR_s, "str", "'St")      \
+  V(STR_d, "str", "'Dt")      \
+  V(LDR_b, "ldr", "'Bt")      \
+  V(LDR_h, "ldr", "'Ht")      \
+  V(LDR_s, "ldr", "'St")      \
+  V(LDR_d, "ldr", "'Dt")      \
+  V(STR_q, "str", "'Qt")      \
+  V(LDR_q, "ldr", "'Qt")
+
+void Disassembler::VisitLoadStorePreIndex(const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "(LoadStorePreIndex)";
+
+  switch (instr->Mask(LoadStorePreIndexMask)) {
+    #define LS_PREINDEX(A, B, C) \
+    case A##_pre: mnemonic = B; form = C ", ['Xns'ILS]!"; break;
+    LOAD_STORE_LIST(LS_PREINDEX)
+    #undef LS_PREINDEX
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStorePostIndex(const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "(LoadStorePostIndex)";
+
+  switch (instr->Mask(LoadStorePostIndexMask)) {
+    #define LS_POSTINDEX(A, B, C) \
+    case A##_post: mnemonic = B; form = C ", ['Xns]'ILS"; break;
+    LOAD_STORE_LIST(LS_POSTINDEX)
+    #undef LS_POSTINDEX
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStoreUnsignedOffset(const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "(LoadStoreUnsignedOffset)";
+
+  switch (instr->Mask(LoadStoreUnsignedOffsetMask)) {
+    #define LS_UNSIGNEDOFFSET(A, B, C) \
+    case A##_unsigned: mnemonic = B; form = C ", ['Xns'ILU]"; break;
+    LOAD_STORE_LIST(LS_UNSIGNEDOFFSET)
+    #undef LS_UNSIGNEDOFFSET
+    case PRFM_unsigned: mnemonic = "prfm"; form = "'PrefOp, ['Xns'ILU]";
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStoreRegisterOffset(const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "(LoadStoreRegisterOffset)";
+
+  switch (instr->Mask(LoadStoreRegisterOffsetMask)) {
+    #define LS_REGISTEROFFSET(A, B, C) \
+    case A##_reg: mnemonic = B; form = C ", ['Xns, 'Offsetreg]"; break;
+    LOAD_STORE_LIST(LS_REGISTEROFFSET)
+    #undef LS_REGISTEROFFSET
+    case PRFM_reg: mnemonic = "prfm"; form = "'PrefOp, ['Xns, 'Offsetreg]";
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStoreUnscaledOffset(const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "'Wt, ['Xns'ILS]";
+  const char *form_x = "'Xt, ['Xns'ILS]";
+  const char *form_b = "'Bt, ['Xns'ILS]";
+  const char *form_h = "'Ht, ['Xns'ILS]";
+  const char *form_s = "'St, ['Xns'ILS]";
+  const char *form_d = "'Dt, ['Xns'ILS]";
+  const char *form_q = "'Qt, ['Xns'ILS]";
+  const char *form_prefetch = "'PrefOp, ['Xns'ILS]";
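+  // All of these forms take a signed 9-bit unscaled byte offset, e.g.
+  // "ldur x0, [x1, #-1]", which the scaled ldr/str encodings cannot express.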
+
+  switch (instr->Mask(LoadStoreUnscaledOffsetMask)) {
+    case STURB_w:  mnemonic = "sturb"; break;
+    case STURH_w:  mnemonic = "sturh"; break;
+    case STUR_w:   mnemonic = "stur"; break;
+    case STUR_x:   mnemonic = "stur"; form = form_x; break;
+    case STUR_b:   mnemonic = "stur"; form = form_b; break;
+    case STUR_h:   mnemonic = "stur"; form = form_h; break;
+    case STUR_s:   mnemonic = "stur"; form = form_s; break;
+    case STUR_d:   mnemonic = "stur"; form = form_d; break;
+    case STUR_q:   mnemonic = "stur"; form = form_q; break;
+    case LDURB_w:  mnemonic = "ldurb"; break;
+    case LDURH_w:  mnemonic = "ldurh"; break;
+    case LDUR_w:   mnemonic = "ldur"; break;
+    case LDUR_x:   mnemonic = "ldur"; form = form_x; break;
+    case LDUR_b:   mnemonic = "ldur"; form = form_b; break;
+    case LDUR_h:   mnemonic = "ldur"; form = form_h; break;
+    case LDUR_s:   mnemonic = "ldur"; form = form_s; break;
+    case LDUR_d:   mnemonic = "ldur"; form = form_d; break;
+    case LDUR_q:   mnemonic = "ldur"; form = form_q; break;
+    case LDURSB_x: form = form_x; VIXL_FALLTHROUGH();
+    case LDURSB_w: mnemonic = "ldursb"; break;
+    case LDURSH_x: form = form_x; VIXL_FALLTHROUGH();
+    case LDURSH_w: mnemonic = "ldursh"; break;
+    case LDURSW_x: mnemonic = "ldursw"; form = form_x; break;
+    case PRFUM:    mnemonic = "prfum"; form = form_prefetch; break;
+    default: form = "(LoadStoreUnscaledOffset)";
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadLiteral(const Instruction* instr) {
+  const char *mnemonic = "ldr";
+  const char *form = "(LoadLiteral)";
+
+  switch (instr->Mask(LoadLiteralMask)) {
+    case LDR_w_lit: form = "'Wt, 'ILLiteral 'LValue"; break;
+    case LDR_x_lit: form = "'Xt, 'ILLiteral 'LValue"; break;
+    case LDR_s_lit: form = "'St, 'ILLiteral 'LValue"; break;
+    case LDR_d_lit: form = "'Dt, 'ILLiteral 'LValue"; break;
+    case LDR_q_lit: form = "'Qt, 'ILLiteral 'LValue"; break;
+    case LDRSW_x_lit: {
+      mnemonic = "ldrsw";
+      form = "'Xt, 'ILLiteral 'LValue";
+      break;
+    }
+    case PRFM_lit: {
+      mnemonic = "prfm";
+      form = "'PrefOp, 'ILLiteral 'LValue";
+      break;
+    }
+    default: mnemonic = "unimplemented";
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+#define LOAD_STORE_PAIR_LIST(V)         \
+  V(STP_w, "stp", "'Wt, 'Wt2", "2")     \
+  V(LDP_w, "ldp", "'Wt, 'Wt2", "2")     \
+  V(LDPSW_x, "ldpsw", "'Xt, 'Xt2", "2") \
+  V(STP_x, "stp", "'Xt, 'Xt2", "3")     \
+  V(LDP_x, "ldp", "'Xt, 'Xt2", "3")     \
+  V(STP_s, "stp", "'St, 'St2", "2")     \
+  V(LDP_s, "ldp", "'St, 'St2", "2")     \
+  V(STP_d, "stp", "'Dt, 'Dt2", "3")     \
+  V(LDP_d, "ldp", "'Dt, 'Dt2", "3")     \
+  V(LDP_q, "ldp", "'Qt, 'Qt2", "4")     \
+  V(STP_q, "stp", "'Qt, 'Qt2", "4")
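+// The last argument is the offset scale: the 7-bit immediate is multiplied
+// by 2^n, i.e. 4 for W/S pairs, 8 for X/D pairs and 16 for Q pairs.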
+
+void Disassembler::VisitLoadStorePairPostIndex(const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "(LoadStorePairPostIndex)";
+
+  switch (instr->Mask(LoadStorePairPostIndexMask)) {
+    #define LSP_POSTINDEX(A, B, C, D) \
+    case A##_post: mnemonic = B; form = C ", ['Xns]'ILP" D; break;
+    LOAD_STORE_PAIR_LIST(LSP_POSTINDEX)
+    #undef LSP_POSTINDEX
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStorePairPreIndex(const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "(LoadStorePairPreIndex)";
+
+  switch (instr->Mask(LoadStorePairPreIndexMask)) {
+    #define LSP_PREINDEX(A, B, C, D) \
+    case A##_pre: mnemonic = B; form = C ", ['Xns'ILP" D "]!"; break;
+    LOAD_STORE_PAIR_LIST(LSP_PREINDEX)
+    #undef LSP_PREINDEX
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStorePairOffset(const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "(LoadStorePairOffset)";
+
+  switch (instr->Mask(LoadStorePairOffsetMask)) {
+    #define LSP_OFFSET(A, B, C, D) \
+    case A##_off: mnemonic = B; form = C ", ['Xns'ILP" D "]"; break;
+    LOAD_STORE_PAIR_LIST(LSP_OFFSET)
+    #undef LSP_OFFSET
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStorePairNonTemporal(const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form;
+
+  switch (instr->Mask(LoadStorePairNonTemporalMask)) {
+    case STNP_w: mnemonic = "stnp"; form = "'Wt, 'Wt2, ['Xns'ILP2]"; break;
+    case LDNP_w: mnemonic = "ldnp"; form = "'Wt, 'Wt2, ['Xns'ILP2]"; break;
+    case STNP_x: mnemonic = "stnp"; form = "'Xt, 'Xt2, ['Xns'ILP3]"; break;
+    case LDNP_x: mnemonic = "ldnp"; form = "'Xt, 'Xt2, ['Xns'ILP3]"; break;
+    case STNP_s: mnemonic = "stnp"; form = "'St, 'St2, ['Xns'ILP2]"; break;
+    case LDNP_s: mnemonic = "ldnp"; form = "'St, 'St2, ['Xns'ILP2]"; break;
+    case STNP_d: mnemonic = "stnp"; form = "'Dt, 'Dt2, ['Xns'ILP3]"; break;
+    case LDNP_d: mnemonic = "ldnp"; form = "'Dt, 'Dt2, ['Xns'ILP3]"; break;
+    case STNP_q: mnemonic = "stnp"; form = "'Qt, 'Qt2, ['Xns'ILP4]"; break;
+    case LDNP_q: mnemonic = "ldnp"; form = "'Qt, 'Qt2, ['Xns'ILP4]"; break;
+    default: form = "(LoadStorePairNonTemporal)";
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStoreExclusive(const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form;
+
+  switch (instr->Mask(LoadStoreExclusiveMask)) {
+    case STXRB_w: mnemonic = "stxrb"; form = "'Ws, 'Wt, ['Xns]"; break;
+    case STXRH_w: mnemonic = "stxrh"; form = "'Ws, 'Wt, ['Xns]"; break;
+    case STXR_w: mnemonic = "stxr"; form = "'Ws, 'Wt, ['Xns]"; break;
+    case STXR_x: mnemonic = "stxr"; form = "'Ws, 'Xt, ['Xns]"; break;
+    case LDXRB_w: mnemonic = "ldxrb"; form = "'Wt, ['Xns]"; break;
+    case LDXRH_w: mnemonic = "ldxrh"; form = "'Wt, ['Xns]"; break;
+    case LDXR_w: mnemonic = "ldxr"; form = "'Wt, ['Xns]"; break;
+    case LDXR_x: mnemonic = "ldxr"; form = "'Xt, ['Xns]"; break;
+    case STXP_w: mnemonic = "stxp"; form = "'Ws, 'Wt, 'Wt2, ['Xns]"; break;
+    case STXP_x: mnemonic = "stxp"; form = "'Ws, 'Xt, 'Xt2, ['Xns]"; break;
+    case LDXP_w: mnemonic = "ldxp"; form = "'Wt, 'Wt2, ['Xns]"; break;
+    case LDXP_x: mnemonic = "ldxp"; form = "'Xt, 'Xt2, ['Xns]"; break;
+    case STLXRB_w: mnemonic = "stlxrb"; form = "'Ws, 'Wt, ['Xns]"; break;
+    case STLXRH_w: mnemonic = "stlxrh"; form = "'Ws, 'Wt, ['Xns]"; break;
+    case STLXR_w: mnemonic = "stlxr"; form = "'Ws, 'Wt, ['Xns]"; break;
+    case STLXR_x: mnemonic = "stlxr"; form = "'Ws, 'Xt, ['Xns]"; break;
+    case LDAXRB_w: mnemonic = "ldaxrb"; form = "'Wt, ['Xns]"; break;
+    case LDAXRH_w: mnemonic = "ldaxrh"; form = "'Wt, ['Xns]"; break;
+    case LDAXR_w: mnemonic = "ldaxr"; form = "'Wt, ['Xns]"; break;
+    case LDAXR_x: mnemonic = "ldaxr"; form = "'Xt, ['Xns]"; break;
+    case STLXP_w: mnemonic = "stlxp"; form = "'Ws, 'Wt, 'Wt2, ['Xns]"; break;
+    case STLXP_x: mnemonic = "stlxp"; form = "'Ws, 'Xt, 'Xt2, ['Xns]"; break;
+    case LDAXP_w: mnemonic = "ldaxp"; form = "'Wt, 'Wt2, ['Xns]"; break;
+    case LDAXP_x: mnemonic = "ldaxp"; form = "'Xt, 'Xt2, ['Xns]"; break;
+    case STLRB_w: mnemonic = "stlrb"; form = "'Wt, ['Xns]"; break;
+    case STLRH_w: mnemonic = "stlrh"; form = "'Wt, ['Xns]"; break;
+    case STLR_w: mnemonic = "stlr"; form = "'Wt, ['Xns]"; break;
+    case STLR_x: mnemonic = "stlr"; form = "'Xt, ['Xns]"; break;
+    case LDARB_w: mnemonic = "ldarb"; form = "'Wt, ['Xns]"; break;
+    case LDARH_w: mnemonic = "ldarh"; form = "'Wt, ['Xns]"; break;
+    case LDAR_w: mnemonic = "ldar"; form = "'Wt, ['Xns]"; break;
+    case LDAR_x: mnemonic = "ldar"; form = "'Xt, ['Xns]"; break;
+    default: form = "(LoadStoreExclusive)";
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPCompare(const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "'Fn, 'Fm";
+  const char *form_zero = "'Fn, #0.0";
+
+  switch (instr->Mask(FPCompareMask)) {
+    case FCMP_s_zero:
+    case FCMP_d_zero: form = form_zero; VIXL_FALLTHROUGH();
+    case FCMP_s:
+    case FCMP_d: mnemonic = "fcmp"; break;
+    case FCMPE_s_zero:
+    case FCMPE_d_zero: form = form_zero; VIXL_FALLTHROUGH();
+    case FCMPE_s:
+    case FCMPE_d: mnemonic = "fcmpe"; break;
+    default: form = "(FPCompare)";
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPConditionalCompare(const Instruction* instr) {
+  const char *mnemonic = "unmplemented";
+  const char *form = "'Fn, 'Fm, 'INzcv, 'Cond";
+
+  switch (instr->Mask(FPConditionalCompareMask)) {
+    case FCCMP_s:
+    case FCCMP_d: mnemonic = "fccmp"; break;
+    case FCCMPE_s:
+    case FCCMPE_d: mnemonic = "fccmpe"; break;
+    default: form = "(FPConditionalCompare)";
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPConditionalSelect(const Instruction* instr) {
+  const char *mnemonic = "";
+  const char *form = "'Fd, 'Fn, 'Fm, 'Cond";
+
+  switch (instr->Mask(FPConditionalSelectMask)) {
+    case FCSEL_s:
+    case FCSEL_d: mnemonic = "fcsel"; break;
+    default: VIXL_UNREACHABLE();
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPDataProcessing1Source(const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "'Fd, 'Fn";
+
+  switch (instr->Mask(FPDataProcessing1SourceMask)) {
+    #define FORMAT(A, B)  \
+    case A##_s:           \
+    case A##_d: mnemonic = B; break;
+    FORMAT(FMOV, "fmov");
+    FORMAT(FABS, "fabs");
+    FORMAT(FNEG, "fneg");
+    FORMAT(FSQRT, "fsqrt");
+    FORMAT(FRINTN, "frintn");
+    FORMAT(FRINTP, "frintp");
+    FORMAT(FRINTM, "frintm");
+    FORMAT(FRINTZ, "frintz");
+    FORMAT(FRINTA, "frinta");
+    FORMAT(FRINTX, "frintx");
+    FORMAT(FRINTI, "frinti");
+    #undef FORMAT
+    case FCVT_ds: mnemonic = "fcvt"; form = "'Dd, 'Sn"; break;
+    case FCVT_sd: mnemonic = "fcvt"; form = "'Sd, 'Dn"; break;
+    case FCVT_hs: mnemonic = "fcvt"; form = "'Hd, 'Sn"; break;
+    case FCVT_sh: mnemonic = "fcvt"; form = "'Sd, 'Hn"; break;
+    case FCVT_dh: mnemonic = "fcvt"; form = "'Dd, 'Hn"; break;
+    case FCVT_hd: mnemonic = "fcvt"; form = "'Hd, 'Dn"; break;
+    default: form = "(FPDataProcessing1Source)";
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPDataProcessing2Source(const Instruction* instr) {
+  const char *mnemonic = "";
+  const char *form = "'Fd, 'Fn, 'Fm";
+
+  switch (instr->Mask(FPDataProcessing2SourceMask)) {
+    #define FORMAT(A, B)  \
+    case A##_s:           \
+    case A##_d: mnemonic = B; break;
+    FORMAT(FMUL, "fmul");
+    FORMAT(FDIV, "fdiv");
+    FORMAT(FADD, "fadd");
+    FORMAT(FSUB, "fsub");
+    FORMAT(FMAX, "fmax");
+    FORMAT(FMIN, "fmin");
+    FORMAT(FMAXNM, "fmaxnm");
+    FORMAT(FMINNM, "fminnm");
+    FORMAT(FNMUL, "fnmul");
+    #undef FORMAT
+    default: VIXL_UNREACHABLE();
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPDataProcessing3Source(const Instruction* instr) {
+  const char *mnemonic = "";
+  const char *form = "'Fd, 'Fn, 'Fm, 'Fa";
+
+  switch (instr->Mask(FPDataProcessing3SourceMask)) {
+    #define FORMAT(A, B)  \
+    case A##_s:           \
+    case A##_d: mnemonic = B; break;
+    FORMAT(FMADD, "fmadd");
+    FORMAT(FMSUB, "fmsub");
+    FORMAT(FNMADD, "fnmadd");
+    FORMAT(FNMSUB, "fnmsub");
+    #undef FORMAT
+    default: VIXL_UNREACHABLE();
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPImmediate(const Instruction* instr) {
+  const char *mnemonic = "";
+  const char *form = "(FPImmediate)";
+
+  switch (instr->Mask(FPImmediateMask)) {
+    case FMOV_s_imm: mnemonic = "fmov"; form = "'Sd, 'IFPSingle"; break;
+    case FMOV_d_imm: mnemonic = "fmov"; form = "'Dd, 'IFPDouble"; break;
+    default: VIXL_UNREACHABLE();
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPIntegerConvert(const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "(FPIntegerConvert)";
+  const char *form_rf = "'Rd, 'Fn";
+  const char *form_fr = "'Fd, 'Rn";
+
+  switch (instr->Mask(FPIntegerConvertMask)) {
+    case FMOV_ws:
+    case FMOV_xd: mnemonic = "fmov"; form = form_rf; break;
+    case FMOV_sw:
+    case FMOV_dx: mnemonic = "fmov"; form = form_fr; break;
+    case FMOV_d1_x: mnemonic = "fmov"; form = "'Vd.D[1], 'Rn"; break;
+    case FMOV_x_d1: mnemonic = "fmov"; form = "'Rd, 'Vn.D[1]"; break;
+    case FCVTAS_ws:
+    case FCVTAS_xs:
+    case FCVTAS_wd:
+    case FCVTAS_xd: mnemonic = "fcvtas"; form = form_rf; break;
+    case FCVTAU_ws:
+    case FCVTAU_xs:
+    case FCVTAU_wd:
+    case FCVTAU_xd: mnemonic = "fcvtau"; form = form_rf; break;
+    case FCVTMS_ws:
+    case FCVTMS_xs:
+    case FCVTMS_wd:
+    case FCVTMS_xd: mnemonic = "fcvtms"; form = form_rf; break;
+    case FCVTMU_ws:
+    case FCVTMU_xs:
+    case FCVTMU_wd:
+    case FCVTMU_xd: mnemonic = "fcvtmu"; form = form_rf; break;
+    case FCVTNS_ws:
+    case FCVTNS_xs:
+    case FCVTNS_wd:
+    case FCVTNS_xd: mnemonic = "fcvtns"; form = form_rf; break;
+    case FCVTNU_ws:
+    case FCVTNU_xs:
+    case FCVTNU_wd:
+    case FCVTNU_xd: mnemonic = "fcvtnu"; form = form_rf; break;
+    case FCVTZU_xd:
+    case FCVTZU_ws:
+    case FCVTZU_wd:
+    case FCVTZU_xs: mnemonic = "fcvtzu"; form = form_rf; break;
+    case FCVTZS_xd:
+    case FCVTZS_wd:
+    case FCVTZS_xs:
+    case FCVTZS_ws: mnemonic = "fcvtzs"; form = form_rf; break;
+    case FCVTPU_xd:
+    case FCVTPU_ws:
+    case FCVTPU_wd:
+    case FCVTPU_xs: mnemonic = "fcvtpu"; form = form_rf; break;
+    case FCVTPS_xd:
+    case FCVTPS_wd:
+    case FCVTPS_xs:
+    case FCVTPS_ws: mnemonic = "fcvtps"; form = form_rf; break;
+    case SCVTF_sw:
+    case SCVTF_sx:
+    case SCVTF_dw:
+    case SCVTF_dx: mnemonic = "scvtf"; form = form_fr; break;
+    case UCVTF_sw:
+    case UCVTF_sx:
+    case UCVTF_dw:
+    case UCVTF_dx: mnemonic = "ucvtf"; form = form_fr; break;
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPFixedPointConvert(const Instruction* instr) {
+  const char *mnemonic = "";
+  const char *form = "'Rd, 'Fn, 'IFPFBits";
+  const char *form_fr = "'Fd, 'Rn, 'IFPFBits";
+
+  switch (instr->Mask(FPFixedPointConvertMask)) {
+    case FCVTZS_ws_fixed:
+    case FCVTZS_xs_fixed:
+    case FCVTZS_wd_fixed:
+    case FCVTZS_xd_fixed: mnemonic = "fcvtzs"; break;
+    case FCVTZU_ws_fixed:
+    case FCVTZU_xs_fixed:
+    case FCVTZU_wd_fixed:
+    case FCVTZU_xd_fixed: mnemonic = "fcvtzu"; break;
+    case SCVTF_sw_fixed:
+    case SCVTF_sx_fixed:
+    case SCVTF_dw_fixed:
+    case SCVTF_dx_fixed: mnemonic = "scvtf"; form = form_fr; break;
+    case UCVTF_sw_fixed:
+    case UCVTF_sx_fixed:
+    case UCVTF_dw_fixed:
+    case UCVTF_dx_fixed: mnemonic = "ucvtf"; form = form_fr; break;
+    default: VIXL_UNREACHABLE();
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitSystem(const Instruction* instr) {
+  // Some system instructions hijack their Op and Cp fields to represent a
+  // range of immediates instead of indicating a different instruction. This
+  // makes the decoding tricky.
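+  // For example, CLREX uses CRm as a 4-bit immediate, so one fixed encoding
+  // covers both "clrex" (CRm == 0xf) and "clrex #n".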
+  const char *mnemonic = "unimplemented";
+  const char *form = "(System)";
+
+  if (instr->Mask(SystemExclusiveMonitorFMask) == SystemExclusiveMonitorFixed) {
+    switch (instr->Mask(SystemExclusiveMonitorMask)) {
+      case CLREX: {
+        mnemonic = "clrex";
+        form = (instr->CRm() == 0xf) ? NULL : "'IX";
+        break;
+      }
+    }
+  } else if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) {
+    switch (instr->Mask(SystemSysRegMask)) {
+      case MRS: {
+        mnemonic = "mrs";
+        switch (instr->ImmSystemRegister()) {
+          case NZCV: form = "'Xt, nzcv"; break;
+          case FPCR: form = "'Xt, fpcr"; break;
+          default: form = "'Xt, (unknown)"; break;
+        }
+        break;
+      }
+      case MSR: {
+        mnemonic = "msr";
+        switch (instr->ImmSystemRegister()) {
+          case NZCV: form = "nzcv, 'Xt"; break;
+          case FPCR: form = "fpcr, 'Xt"; break;
+          default: form = "(unknown), 'Xt"; break;
+        }
+        break;
+      }
+    }
+  } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
+    switch (instr->ImmHint()) {
+      case NOP: {
+        mnemonic = "nop";
+        form = NULL;
+        break;
+      }
+    }
+  } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
+    switch (instr->Mask(MemBarrierMask)) {
+      case DMB: {
+        mnemonic = "dmb";
+        form = "'M";
+        break;
+      }
+      case DSB: {
+        mnemonic = "dsb";
+        form = "'M";
+        break;
+      }
+      case ISB: {
+        mnemonic = "isb";
+        form = NULL;
+        break;
+      }
+    }
+  } else if (instr->Mask(SystemSysFMask) == SystemSysFixed) {
+    switch (instr->SysOp()) {
+      case IVAU:
+        mnemonic = "ic";
+        form = "ivau, 'Xt";
+        break;
+      case CVAC:
+        mnemonic = "dc";
+        form = "cvac, 'Xt";
+        break;
+      case CVAU:
+        mnemonic = "dc";
+        form = "cvau, 'Xt";
+        break;
+      case CIVAC:
+        mnemonic = "dc";
+        form = "civac, 'Xt";
+        break;
+      case ZVA:
+        mnemonic = "dc";
+        form = "zva, 'Xt";
+        break;
+      default:
+        mnemonic = "sys";
+        if (instr->Rt() == 31) {
+          form = "'G1, 'Kn, 'Km, 'G2";
+        } else {
+          form = "'G1, 'Kn, 'Km, 'G2, 'Xt";
+        }
+        break;
+    }
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitException(const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "'IDebug";
+
+  switch (instr->Mask(ExceptionMask)) {
+    case HLT: mnemonic = "hlt"; break;
+    case BRK: mnemonic = "brk"; break;
+    case SVC: mnemonic = "svc"; break;
+    case HVC: mnemonic = "hvc"; break;
+    case SMC: mnemonic = "smc"; break;
+    case DCPS1: mnemonic = "dcps1"; form = "{'IDebug}"; break;
+    case DCPS2: mnemonic = "dcps2"; form = "{'IDebug}"; break;
+    case DCPS3: mnemonic = "dcps3"; form = "{'IDebug}"; break;
+    default: form = "(Exception)";
+  }
+  Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitCrypto2RegSHA(const Instruction* instr) {
+  VisitUnimplemented(instr);
+}
+
+
+void Disassembler::VisitCrypto3RegSHA(const Instruction* instr) {
+  VisitUnimplemented(instr);
+}
+
+
+void Disassembler::VisitCryptoAES(const Instruction* instr) {
+  VisitUnimplemented(instr);
+}
+
+
+void Disassembler::VisitNEON2RegMisc(const Instruction* instr) {
+  const char *mnemonic       = "unimplemented";
+  const char *form           = "'Vd.%s, 'Vn.%s";
+  const char *form_cmp_zero  = "'Vd.%s, 'Vn.%s, #0";
+  const char *form_fcmp_zero = "'Vd.%s, 'Vn.%s, #0.0";
+  NEONFormatDecoder nfd(instr);
+
+  static const NEONFormatMap map_lp_ta = {
+    {23, 22, 30}, {NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}
+  };
+
+  static const NEONFormatMap map_cvt_ta = {
+    {22}, {NF_4S, NF_2D}
+  };
+
+  static const NEONFormatMap map_cvt_tb = {
+    {22, 30}, {NF_4H, NF_8H, NF_2S, NF_4S}
+  };
+
+  if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_opcode) {
+    // These instructions all use a two bit size field, except NOT and RBIT,
+    // which use the field to encode the operation.
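+    // For example, the NEON_RBIT_NOT encoding below is disassembled as "mvn"
+    // when the size field is 0 and as "rbit" when it is 1.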
+    switch (instr->Mask(NEON2RegMiscMask)) {
+      case NEON_REV64:     mnemonic = "rev64"; break;
+      case NEON_REV32:     mnemonic = "rev32"; break;
+      case NEON_REV16:     mnemonic = "rev16"; break;
+      case NEON_SADDLP:
+        mnemonic = "saddlp";
+        nfd.SetFormatMap(0, &map_lp_ta);
+        break;
+      case NEON_UADDLP:
+        mnemonic = "uaddlp";
+        nfd.SetFormatMap(0, &map_lp_ta);
+        break;
+      case NEON_SUQADD:    mnemonic = "suqadd"; break;
+      case NEON_USQADD:    mnemonic = "usqadd"; break;
+      case NEON_CLS:       mnemonic = "cls"; break;
+      case NEON_CLZ:       mnemonic = "clz"; break;
+      case NEON_CNT:       mnemonic = "cnt"; break;
+      case NEON_SADALP:
+        mnemonic = "sadalp";
+        nfd.SetFormatMap(0, &map_lp_ta);
+        break;
+      case NEON_UADALP:
+        mnemonic = "uadalp";
+        nfd.SetFormatMap(0, &map_lp_ta);
+        break;
+      case NEON_SQABS:     mnemonic = "sqabs"; break;
+      case NEON_SQNEG:     mnemonic = "sqneg"; break;
+      case NEON_CMGT_zero: mnemonic = "cmgt"; form = form_cmp_zero; break;
+      case NEON_CMGE_zero: mnemonic = "cmge"; form = form_cmp_zero; break;
+      case NEON_CMEQ_zero: mnemonic = "cmeq"; form = form_cmp_zero; break;
+      case NEON_CMLE_zero: mnemonic = "cmle"; form = form_cmp_zero; break;
+      case NEON_CMLT_zero: mnemonic = "cmlt"; form = form_cmp_zero; break;
+      case NEON_ABS:       mnemonic = "abs"; break;
+      case NEON_NEG:       mnemonic = "neg"; break;
+      case NEON_RBIT_NOT:
+        switch (instr->FPType()) {
+          case 0: mnemonic = "mvn"; break;
+          case 1: mnemonic = "rbit"; break;
+          default: form = "(NEON2RegMisc)";
+        }
+        nfd.SetFormatMaps(nfd.LogicalFormatMap());
+        break;
+    }
+  } else {
+    // These instructions all use a one bit size field, except XTN, SQXTUN,
+    // SHLL, SQXTN and UQXTN, which use a two bit size field.
+    nfd.SetFormatMaps(nfd.FPFormatMap());
+    switch (instr->Mask(NEON2RegMiscFPMask)) {
+      case NEON_FABS:   mnemonic = "fabs"; break;
+      case NEON_FNEG:   mnemonic = "fneg"; break;
+      case NEON_FCVTN:
+        mnemonic = instr->Mask(NEON_Q) ? "fcvtn2" : "fcvtn";
+        nfd.SetFormatMap(0, &map_cvt_tb);
+        nfd.SetFormatMap(1, &map_cvt_ta);
+        break;
+      case NEON_FCVTXN:
+        mnemonic = instr->Mask(NEON_Q) ? "fcvtxn2" : "fcvtxn";
+        nfd.SetFormatMap(0, &map_cvt_tb);
+        nfd.SetFormatMap(1, &map_cvt_ta);
+        break;
+      case NEON_FCVTL:
+        mnemonic = instr->Mask(NEON_Q) ? "fcvtl2" : "fcvtl";
+        nfd.SetFormatMap(0, &map_cvt_ta);
+        nfd.SetFormatMap(1, &map_cvt_tb);
+        break;
+      case NEON_FRINTN: mnemonic = "frintn"; break;
+      case NEON_FRINTA: mnemonic = "frinta"; break;
+      case NEON_FRINTP: mnemonic = "frintp"; break;
+      case NEON_FRINTM: mnemonic = "frintm"; break;
+      case NEON_FRINTX: mnemonic = "frintx"; break;
+      case NEON_FRINTZ: mnemonic = "frintz"; break;
+      case NEON_FRINTI: mnemonic = "frinti"; break;
+      case NEON_FCVTNS: mnemonic = "fcvtns"; break;
+      case NEON_FCVTNU: mnemonic = "fcvtnu"; break;
+      case NEON_FCVTPS: mnemonic = "fcvtps"; break;
+      case NEON_FCVTPU: mnemonic = "fcvtpu"; break;
+      case NEON_FCVTMS: mnemonic = "fcvtms"; break;
+      case NEON_FCVTMU: mnemonic = "fcvtmu"; break;
+      case NEON_FCVTZS: mnemonic = "fcvtzs"; break;
+      case NEON_FCVTZU: mnemonic = "fcvtzu"; break;
+      case NEON_FCVTAS: mnemonic = "fcvtas"; break;
+      case NEON_FCVTAU: mnemonic = "fcvtau"; break;
+      case NEON_FSQRT:  mnemonic = "fsqrt"; break;
+      case NEON_SCVTF:  mnemonic = "scvtf"; break;
+      case NEON_UCVTF:  mnemonic = "ucvtf"; break;
+      case NEON_URSQRTE: mnemonic = "ursqrte"; break;
+      case NEON_URECPE:  mnemonic = "urecpe";  break;
+      case NEON_FRSQRTE: mnemonic = "frsqrte"; break;
+      case NEON_FRECPE:  mnemonic = "frecpe";  break;
+      case NEON_FCMGT_zero: mnemonic = "fcmgt"; form = form_fcmp_zero; break;
+      case NEON_FCMGE_zero: mnemonic = "fcmge"; form = form_fcmp_zero; break;
+      case NEON_FCMEQ_zero: mnemonic = "fcmeq"; form = form_fcmp_zero; break;
+      case NEON_FCMLE_zero: mnemonic = "fcmle"; form = form_fcmp_zero; break;
+      case NEON_FCMLT_zero: mnemonic = "fcmlt"; form = form_fcmp_zero; break;
+      default:
+        if ((NEON_XTN_opcode <= instr->Mask(NEON2RegMiscOpcode)) &&
+            (instr->Mask(NEON2RegMiscOpcode) <= NEON_UQXTN_opcode)) {
+          nfd.SetFormatMap(0, nfd.IntegerFormatMap());
+          nfd.SetFormatMap(1, nfd.LongIntegerFormatMap());
+
+          switch (instr->Mask(NEON2RegMiscMask)) {
+            case NEON_XTN:    mnemonic = "xtn"; break;
+            case NEON_SQXTN:  mnemonic = "sqxtn"; break;
+            case NEON_UQXTN:  mnemonic = "uqxtn"; break;
+            case NEON_SQXTUN: mnemonic = "sqxtun"; break;
+            case NEON_SHLL:
+              mnemonic = "shll";
+              nfd.SetFormatMap(0, nfd.LongIntegerFormatMap());
+              nfd.SetFormatMap(1, nfd.IntegerFormatMap());
+              switch (instr->NEONSize()) {
+                case 0: form = "'Vd.%s, 'Vn.%s, #8"; break;
+                case 1: form = "'Vd.%s, 'Vn.%s, #16"; break;
+                case 2: form = "'Vd.%s, 'Vn.%s, #32"; break;
+                default: form = "(NEON2RegMisc)";
+              }
+          }
+          Format(instr, nfd.Mnemonic(mnemonic), nfd.Substitute(form));
+          return;
+        } else {
+          form = "(NEON2RegMisc)";
+        }
+    }
+  }
+  Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEON3Same(const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "'Vd.%s, 'Vn.%s, 'Vm.%s";
+  NEONFormatDecoder nfd(instr);
+
+  if (instr->Mask(NEON3SameLogicalFMask) == NEON3SameLogicalFixed) {
+    switch (instr->Mask(NEON3SameLogicalMask)) {
+      case NEON_AND: mnemonic = "and"; break;
+      case NEON_ORR:
+        mnemonic = "orr";
+        if (instr->Rm() == instr->Rn()) {
+          mnemonic = "mov";
+          form = "'Vd.%s, 'Vn.%s";
+        }
+        break;
+      case NEON_ORN: mnemonic = "orn"; break;
+      case NEON_EOR: mnemonic = "eor"; break;
+      case NEON_BIC: mnemonic = "bic"; break;
+      case NEON_BIF: mnemonic = "bif"; break;
+      case NEON_BIT: mnemonic = "bit"; break;
+      case NEON_BSL: mnemonic = "bsl"; break;
+      default: form = "(NEON3Same)";
+    }
+    nfd.SetFormatMaps(nfd.LogicalFormatMap());
+  } else {
+    static const char *mnemonics[] = {
+        "shadd", "uhadd", "shadd", "uhadd",
+        "sqadd", "uqadd", "sqadd", "uqadd",
+        "srhadd", "urhadd", "srhadd", "urhadd",
+        NULL, NULL, NULL, NULL,  // Handled by logical cases above.
+        "shsub", "uhsub", "shsub", "uhsub",
+        "sqsub", "uqsub", "sqsub", "uqsub",
+        "cmgt", "cmhi", "cmgt", "cmhi",
+        "cmge", "cmhs", "cmge", "cmhs",
+        "sshl", "ushl", "sshl", "ushl",
+        "sqshl", "uqshl", "sqshl", "uqshl",
+        "srshl", "urshl", "srshl", "urshl",
+        "sqrshl", "uqrshl", "sqrshl", "uqrshl",
+        "smax", "umax", "smax", "umax",
+        "smin", "umin", "smin", "umin",
+        "sabd", "uabd", "sabd", "uabd",
+        "saba", "uaba", "saba", "uaba",
+        "add", "sub", "add", "sub",
+        "cmtst", "cmeq", "cmtst", "cmeq",
+        "mla", "mls", "mla", "mls",
+        "mul", "pmul", "mul", "pmul",
+        "smaxp", "umaxp", "smaxp", "umaxp",
+        "sminp", "uminp", "sminp", "uminp",
+        "sqdmulh", "sqrdmulh", "sqdmulh", "sqrdmulh",
+        "addp", "unallocated", "addp", "unallocated",
+        "fmaxnm", "fmaxnmp", "fminnm", "fminnmp",
+        "fmla", "unallocated", "fmls", "unallocated",
+        "fadd", "faddp", "fsub", "fabd",
+        "fmulx", "fmul", "unallocated", "unallocated",
+        "fcmeq", "fcmge", "unallocated", "fcmgt",
+        "unallocated", "facge", "unallocated", "facgt",
+        "fmax", "fmaxp", "fmin", "fminp",
+        "frecps", "fdiv", "frsqrts", "unallocated"};
+
+    // Operation is determined by the opcode bits (15-11), the top bit of
+    // size (23) and the U bit (29).
+    unsigned index = (instr->Bits(15, 11) << 2) | (instr->Bit(23) << 1) |
+                     instr->Bit(29);
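+    // For example, FADD has opcode 0b11010 (26), size<1> == 0 and U == 0,
+    // giving index 104, the "fadd" entry; U == 1 selects "faddp" and
+    // size<1> == 1 selects "fsub" or "fabd" from the same row.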
+    VIXL_ASSERT(index < (sizeof(mnemonics) / sizeof(mnemonics[0])));
+    mnemonic = mnemonics[index];
+    // Assert that index is not one of the previously handled logical
+    // instructions.
+    VIXL_ASSERT(mnemonic != NULL);
+
+    if (instr->Mask(NEON3SameFPFMask) == NEON3SameFPFixed) {
+      nfd.SetFormatMaps(nfd.FPFormatMap());
+    }
+  }
+  Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEON3Different(const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "'Vd.%s, 'Vn.%s, 'Vm.%s";
+
+  NEONFormatDecoder nfd(instr);
+  nfd.SetFormatMap(0, nfd.LongIntegerFormatMap());
+
+  // Ignore the Q bit. Appending a "2" suffix is handled later.
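+  // For example, "saddl" with the Q bit set is printed as "saddl2".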
+  switch (instr->Mask(NEON3DifferentMask) & ~NEON_Q) {
+    case NEON_PMULL:    mnemonic = "pmull";   break;
+    case NEON_SABAL:    mnemonic = "sabal";   break;
+    case NEON_SABDL:    mnemonic = "sabdl";   break;
+    case NEON_SADDL:    mnemonic = "saddl";   break;
+    case NEON_SMLAL:    mnemonic = "smlal";   break;
+    case NEON_SMLSL:    mnemonic = "smlsl";   break;
+    case NEON_SMULL:    mnemonic = "smull";   break;
+    case NEON_SSUBL:    mnemonic = "ssubl";   break;
+    case NEON_SQDMLAL:  mnemonic = "sqdmlal"; break;
+    case NEON_SQDMLSL:  mnemonic = "sqdmlsl"; break;
+    case NEON_SQDMULL:  mnemonic = "sqdmull"; break;
+    case NEON_UABAL:    mnemonic = "uabal";   break;
+    case NEON_UABDL:    mnemonic = "uabdl";   break;
+    case NEON_UADDL:    mnemonic = "uaddl";   break;
+    case NEON_UMLAL:    mnemonic = "umlal";   break;
+    case NEON_UMLSL:    mnemonic = "umlsl";   break;
+    case NEON_UMULL:    mnemonic = "umull";   break;
+    case NEON_USUBL:    mnemonic = "usubl";   break;
+    case NEON_SADDW:
+      mnemonic = "saddw";
+      nfd.SetFormatMap(1, nfd.LongIntegerFormatMap());
+      break;
+    case NEON_SSUBW:
+      mnemonic = "ssubw";
+      nfd.SetFormatMap(1, nfd.LongIntegerFormatMap());
+      break;
+    case NEON_UADDW:
+      mnemonic = "uaddw";
+      nfd.SetFormatMap(1, nfd.LongIntegerFormatMap());
+      break;
+    case NEON_USUBW:
+      mnemonic = "usubw";
+      nfd.SetFormatMap(1, nfd.LongIntegerFormatMap());
+      break;
+    case NEON_ADDHN:
+      mnemonic = "addhn";
+      nfd.SetFormatMaps(nfd.LongIntegerFormatMap());
+      nfd.SetFormatMap(0, nfd.IntegerFormatMap());
+      break;
+    case NEON_RADDHN:
+      mnemonic = "raddhn";
+      nfd.SetFormatMaps(nfd.LongIntegerFormatMap());
+      nfd.SetFormatMap(0, nfd.IntegerFormatMap());
+      break;
+    case NEON_RSUBHN:
+      mnemonic = "rsubhn";
+      nfd.SetFormatMaps(nfd.LongIntegerFormatMap());
+      nfd.SetFormatMap(0, nfd.IntegerFormatMap());
+      break;
+    case NEON_SUBHN:
+      mnemonic = "subhn";
+      nfd.SetFormatMaps(nfd.LongIntegerFormatMap());
+      nfd.SetFormatMap(0, nfd.IntegerFormatMap());
+      break;
+    default: form = "(NEON3Different)";
+  }
+  Format(instr, nfd.Mnemonic(mnemonic), nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEONAcrossLanes(const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "%sd, 'Vn.%s";
+
+  NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap(),
+                               NEONFormatDecoder::IntegerFormatMap());
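+  // The destination is a scalar whose size depends on the operation, e.g.
+  // "addv s0, v1.4s" but "saddlv d0, v1.4s" for the lengthening form.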
+
+  if (instr->Mask(NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) {
+    nfd.SetFormatMap(0, nfd.FPScalarFormatMap());
+    nfd.SetFormatMap(1, nfd.FPFormatMap());
+    switch (instr->Mask(NEONAcrossLanesFPMask)) {
+      case NEON_FMAXV: mnemonic = "fmaxv"; break;
+      case NEON_FMINV: mnemonic = "fminv"; break;
+      case NEON_FMAXNMV: mnemonic = "fmaxnmv"; break;
+      case NEON_FMINNMV: mnemonic = "fminnmv"; break;
+      default: form = "(NEONAcrossLanes)"; break;
+    }
+  } else if (instr->Mask(NEONAcrossLanesFMask) == NEONAcrossLanesFixed) {
+    switch (instr->Mask(NEONAcrossLanesMask)) {
+      case NEON_ADDV:  mnemonic = "addv"; break;
+      case NEON_SMAXV: mnemonic = "smaxv"; break;
+      case NEON_SMINV: mnemonic = "sminv"; break;
+      case NEON_UMAXV: mnemonic = "umaxv"; break;
+      case NEON_UMINV: mnemonic = "uminv"; break;
+      case NEON_SADDLV:
+        mnemonic = "saddlv";
+        nfd.SetFormatMap(0, nfd.LongScalarFormatMap());
+        break;
+      case NEON_UADDLV:
+        mnemonic = "uaddlv";
+        nfd.SetFormatMap(0, nfd.LongScalarFormatMap());
+        break;
+      default: form = "(NEONAcrossLanes)"; break;
+    }
+  }
+  Format(instr, mnemonic, nfd.Substitute(form,
+      NEONFormatDecoder::kPlaceholder, NEONFormatDecoder::kFormat));
+}
+
+
+void Disassembler::VisitNEONByIndexedElement(const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  bool l_instr = false;
+  bool fp_instr = false;
+
+  const char *form = "'Vd.%s, 'Vn.%s, 'Ve.%s['IVByElemIndex]";
+
+  static const NEONFormatMap map_ta = {
+    {23, 22}, {NF_UNDEF, NF_4S, NF_2D}
+  };
+  NEONFormatDecoder nfd(instr, &map_ta,
+                        NEONFormatDecoder::IntegerFormatMap(),
+                        NEONFormatDecoder::ScalarFormatMap());
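+  // The indexed element is printed with a lane index, e.g.
+  // "mul v0.4s, v1.4s, v2.s[3]".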
+
+  switch (instr->Mask(NEONByIndexedElementMask)) {
+    case NEON_SMULL_byelement:    mnemonic = "smull"; l_instr = true; break;
+    case NEON_UMULL_byelement:    mnemonic = "umull"; l_instr = true; break;
+    case NEON_SMLAL_byelement:    mnemonic = "smlal"; l_instr = true; break;
+    case NEON_UMLAL_byelement:    mnemonic = "umlal"; l_instr = true; break;
+    case NEON_SMLSL_byelement:    mnemonic = "smlsl"; l_instr = true; break;
+    case NEON_UMLSL_byelement:    mnemonic = "umlsl"; l_instr = true; break;
+    case NEON_SQDMULL_byelement:  mnemonic = "sqdmull"; l_instr = true; break;
+    case NEON_SQDMLAL_byelement:  mnemonic = "sqdmlal"; l_instr = true; break;
+    case NEON_SQDMLSL_byelement:  mnemonic = "sqdmlsl"; l_instr = true; break;
+    case NEON_MUL_byelement:      mnemonic = "mul"; break;
+    case NEON_MLA_byelement:      mnemonic = "mla"; break;
+    case NEON_MLS_byelement:      mnemonic = "mls"; break;
+    case NEON_SQDMULH_byelement:  mnemonic = "sqdmulh";  break;
+    case NEON_SQRDMULH_byelement: mnemonic = "sqrdmulh"; break;
+    default:
+      switch (instr->Mask(NEONByIndexedElementFPMask)) {
+        case NEON_FMUL_byelement:  mnemonic = "fmul";  fp_instr = true; break;
+        case NEON_FMLA_byelement:  mnemonic = "fmla";  fp_instr = true; break;
+        case NEON_FMLS_byelement:  mnemonic = "fmls";  fp_instr = true; break;
+        case NEON_FMULX_byelement: mnemonic = "fmulx"; fp_instr = true; break;
+      }
+  }
+
+  if (l_instr) {
+    Format(instr, nfd.Mnemonic(mnemonic), nfd.Substitute(form));
+  } else if (fp_instr) {
+    nfd.SetFormatMap(0, nfd.FPFormatMap());
+    Format(instr, mnemonic, nfd.Substitute(form));
+  } else {
+    nfd.SetFormatMap(0, nfd.IntegerFormatMap());
+    Format(instr, mnemonic, nfd.Substitute(form));
+  }
+}
+
+
+void Disassembler::VisitNEONCopy(const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "(NEONCopy)";
+
+  NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularFormatMap(),
+                               NEONFormatDecoder::TriangularScalarFormatMap());
+
+  if (instr->Mask(NEONCopyInsElementMask) == NEON_INS_ELEMENT) {
+    mnemonic = "mov";
+    nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap());
+    form = "'Vd.%s['IVInsIndex1], 'Vn.%s['IVInsIndex2]";
+  } else if (instr->Mask(NEONCopyInsGeneralMask) == NEON_INS_GENERAL) {
+    mnemonic = "mov";
+    nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap());
+    if (nfd.GetVectorFormat() == kFormatD) {
+      form = "'Vd.%s['IVInsIndex1], 'Xn";
+    } else {
+      form = "'Vd.%s['IVInsIndex1], 'Wn";
+    }
+  } else if (instr->Mask(NEONCopyUmovMask) == NEON_UMOV) {
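+    // A umov of a whole 32-bit (S) or 64-bit (D) element has the preferred
+    // alias "mov".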
+    if (instr->Mask(NEON_Q) || ((instr->ImmNEON5() & 7) == 4)) {
+      mnemonic = "mov";
+    } else {
+      mnemonic = "umov";
+    }
+    nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap());
+    if (nfd.GetVectorFormat() == kFormatD) {
+      form = "'Xd, 'Vn.%s['IVInsIndex1]";
+    } else {
+      form = "'Wd, 'Vn.%s['IVInsIndex1]";
+    }
+  } else if (instr->Mask(NEONCopySmovMask) == NEON_SMOV) {
+    mnemonic = "smov";
+    nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap());
+    form = "'Rdq, 'Vn.%s['IVInsIndex1]";
+  } else if (instr->Mask(NEONCopyDupElementMask) == NEON_DUP_ELEMENT) {
+    mnemonic = "dup";
+    form = "'Vd.%s, 'Vn.%s['IVInsIndex1]";
+  } else if (instr->Mask(NEONCopyDupGeneralMask) == NEON_DUP_GENERAL) {
+    mnemonic = "dup";
+    if (nfd.GetVectorFormat() == kFormat2D) {
+      form = "'Vd.%s, 'Xn";
+    } else {
+      form = "'Vd.%s, 'Wn";
+    }
+  }
+  Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEONExtract(const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "(NEONExtract)";
+  NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap());
+  if (instr->Mask(NEONExtractMask) == NEON_EXT) {
+    mnemonic = "ext";
+    form = "'Vd.%s, 'Vn.%s, 'Vm.%s, 'IVExtract";
+  }
+  Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEONLoadStoreMultiStruct(const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "(NEONLoadStoreMultiStruct)";
+  const char *form_1v = "{'Vt.%1$s}, ['Xns]";
+  const char *form_2v = "{'Vt.%1$s, 'Vt2.%1$s}, ['Xns]";
+  const char *form_3v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s}, ['Xns]";
+  const char *form_4v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns]";
+  NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
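+  // The positional "%1$s" repeats one lane format across the register list,
+  // e.g. "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [x0]".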
+
+  switch (instr->Mask(NEONLoadStoreMultiStructMask)) {
+    case NEON_LD1_1v: mnemonic = "ld1"; form = form_1v; break;
+    case NEON_LD1_2v: mnemonic = "ld1"; form = form_2v; break;
+    case NEON_LD1_3v: mnemonic = "ld1"; form = form_3v; break;
+    case NEON_LD1_4v: mnemonic = "ld1"; form = form_4v; break;
+    case NEON_LD2: mnemonic = "ld2"; form = form_2v; break;
+    case NEON_LD3: mnemonic = "ld3"; form = form_3v; break;
+    case NEON_LD4: mnemonic = "ld4"; form = form_4v; break;
+    case NEON_ST1_1v: mnemonic = "st1"; form = form_1v; break;
+    case NEON_ST1_2v: mnemonic = "st1"; form = form_2v; break;
+    case NEON_ST1_3v: mnemonic = "st1"; form = form_3v; break;
+    case NEON_ST1_4v: mnemonic = "st1"; form = form_4v; break;
+    case NEON_ST2: mnemonic = "st2"; form = form_2v; break;
+    case NEON_ST3: mnemonic = "st3"; form = form_3v; break;
+    case NEON_ST4: mnemonic = "st4"; form = form_4v; break;
+    default: break;
+  }
+
+  Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEONLoadStoreMultiStructPostIndex(
+    const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "(NEONLoadStoreMultiStructPostIndex)";
+  const char *form_1v = "{'Vt.%1$s}, ['Xns], 'Xmr1";
+  const char *form_2v = "{'Vt.%1$s, 'Vt2.%1$s}, ['Xns], 'Xmr2";
+  const char *form_3v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s}, ['Xns], 'Xmr3";
+  const char *form_4v =
+      "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns], 'Xmr4";
+  NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
+
+  switch (instr->Mask(NEONLoadStoreMultiStructPostIndexMask)) {
+    case NEON_LD1_1v_post: mnemonic = "ld1"; form = form_1v; break;
+    case NEON_LD1_2v_post: mnemonic = "ld1"; form = form_2v; break;
+    case NEON_LD1_3v_post: mnemonic = "ld1"; form = form_3v; break;
+    case NEON_LD1_4v_post: mnemonic = "ld1"; form = form_4v; break;
+    case NEON_LD2_post: mnemonic = "ld2"; form = form_2v; break;
+    case NEON_LD3_post: mnemonic = "ld3"; form = form_3v; break;
+    case NEON_LD4_post: mnemonic = "ld4"; form = form_4v; break;
+    case NEON_ST1_1v_post: mnemonic = "st1"; form = form_1v; break;
+    case NEON_ST1_2v_post: mnemonic = "st1"; form = form_2v; break;
+    case NEON_ST1_3v_post: mnemonic = "st1"; form = form_3v; break;
+    case NEON_ST1_4v_post: mnemonic = "st1"; form = form_4v; break;
+    case NEON_ST2_post: mnemonic = "st2"; form = form_2v; break;
+    case NEON_ST3_post: mnemonic = "st3"; form = form_3v; break;
+    case NEON_ST4_post: mnemonic = "st4"; form = form_4v; break;
+    default: break;
+  }
+
+  Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEONLoadStoreSingleStruct(const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "(NEONLoadStoreSingleStruct)";
+
+  const char *form_1b = "{'Vt.b}['IVLSLane0], ['Xns]";
+  const char *form_1h = "{'Vt.h}['IVLSLane1], ['Xns]";
+  const char *form_1s = "{'Vt.s}['IVLSLane2], ['Xns]";
+  const char *form_1d = "{'Vt.d}['IVLSLane3], ['Xns]";
+  NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
+
+  switch (instr->Mask(NEONLoadStoreSingleStructMask)) {
+    case NEON_LD1_b: mnemonic = "ld1"; form = form_1b; break;
+    case NEON_LD1_h: mnemonic = "ld1"; form = form_1h; break;
+    case NEON_LD1_s:
+      mnemonic = "ld1";
+      VIXL_STATIC_ASSERT((NEON_LD1_s | (1 << NEONLSSize_offset)) == NEON_LD1_d);
+      form = ((instr->NEONLSSize() & 1) == 0) ? form_1s : form_1d;
+      break;
+    case NEON_ST1_b: mnemonic = "st1"; form = form_1b; break;
+    case NEON_ST1_h: mnemonic = "st1"; form = form_1h; break;
+    case NEON_ST1_s:
+      mnemonic = "st1";
+      VIXL_STATIC_ASSERT((NEON_ST1_s | (1 << NEONLSSize_offset)) == NEON_ST1_d);
+      form = ((instr->NEONLSSize() & 1) == 0) ? form_1s : form_1d;
+      break;
+    case NEON_LD1R:
+      mnemonic = "ld1r";
+      form = "{'Vt.%s}, ['Xns]";
+      break;
+    case NEON_LD2_b:
+    case NEON_ST2_b:
+      mnemonic = (instr->LdStXLoad() == 1) ? "ld2" : "st2";
+      form = "{'Vt.b, 'Vt2.b}['IVLSLane0], ['Xns]";
+      break;
+    case NEON_LD2_h:
+    case NEON_ST2_h:
+      mnemonic = (instr->LdStXLoad() == 1) ? "ld2" : "st2";
+      form = "{'Vt.h, 'Vt2.h}['IVLSLane1], ['Xns]";
+      break;
+    case NEON_LD2_s:
+    case NEON_ST2_s:
+      VIXL_STATIC_ASSERT((NEON_ST2_s | (1 << NEONLSSize_offset)) == NEON_ST2_d);
+      VIXL_STATIC_ASSERT((NEON_LD2_s | (1 << NEONLSSize_offset)) == NEON_LD2_d);
+      mnemonic = (instr->LdStXLoad() == 1) ? "ld2" : "st2";
+      if ((instr->NEONLSSize() & 1) == 0) {
+        form = "{'Vt.s, 'Vt2.s}['IVLSLane2], ['Xns]";
+      } else {
+        form = "{'Vt.d, 'Vt2.d}['IVLSLane3], ['Xns]";
+      }
+      break;
+    case NEON_LD2R:
+      mnemonic = "ld2r";
+      form = "{'Vt.%s, 'Vt2.%s}, ['Xns]";
+      break;
+    case NEON_LD3_b:
+    case NEON_ST3_b:
+      mnemonic = (instr->LdStXLoad() == 1) ? "ld3" : "st3";
+      form = "{'Vt.b, 'Vt2.b, 'Vt3.b}['IVLSLane0], ['Xns]";
+      break;
+    case NEON_LD3_h:
+    case NEON_ST3_h:
+      mnemonic = (instr->LdStXLoad() == 1) ? "ld3" : "st3";
+      form = "{'Vt.h, 'Vt2.h, 'Vt3.h}['IVLSLane1], ['Xns]";
+      break;
+    case NEON_LD3_s:
+    case NEON_ST3_s:
+      mnemonic = (instr->LdStXLoad() == 1) ? "ld3" : "st3";
+      if ((instr->NEONLSSize() & 1) == 0) {
+        form = "{'Vt.s, 'Vt2.s, 'Vt3.s}['IVLSLane2], ['Xns]";
+      } else {
+        form = "{'Vt.d, 'Vt2.d, 'Vt3.d}['IVLSLane3], ['Xns]";
+      }
+      break;
+    case NEON_LD3R:
+      mnemonic = "ld3r";
+      form = "{'Vt.%s, 'Vt2.%s, 'Vt3.%s}, ['Xns]";
+      break;
+    case NEON_LD4_b:
+    case NEON_ST4_b:
+      mnemonic = (instr->LdStXLoad() == 1) ? "ld4" : "st4";
+      form = "{'Vt.b, 'Vt2.b, 'Vt3.b, 'Vt4.b}['IVLSLane0], ['Xns]";
+      break;
+    case NEON_LD4_h:
+    case NEON_ST4_h:
+      mnemonic = (instr->LdStXLoad() == 1) ? "ld4" : "st4";
+      form = "{'Vt.h, 'Vt2.h, 'Vt3.h, 'Vt4.h}['IVLSLane1], ['Xns]";
+      break;
+    case NEON_LD4_s:
+    case NEON_ST4_s:
+      VIXL_STATIC_ASSERT((NEON_LD4_s | (1 << NEONLSSize_offset)) == NEON_LD4_d);
+      VIXL_STATIC_ASSERT((NEON_ST4_s | (1 << NEONLSSize_offset)) == NEON_ST4_d);
+      mnemonic = (instr->LdStXLoad() == 1) ? "ld4" : "st4";
+      if ((instr->NEONLSSize() & 1) == 0) {
+        form = "{'Vt.s, 'Vt2.s, 'Vt3.s, 'Vt4.s}['IVLSLane2], ['Xns]";
+      } else {
+        form = "{'Vt.d, 'Vt2.d, 'Vt3.d, 'Vt4.d}['IVLSLane3], ['Xns]";
+      }
+      break;
+    case NEON_LD4R:
+      mnemonic = "ld4r";
+      form = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns]";
+      break;
+    default: break;
+  }
+
+  Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEONLoadStoreSingleStructPostIndex(
+    const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "(NEONLoadStoreSingleStructPostIndex)";
+
+  const char *form_1b = "{'Vt.b}['IVLSLane0], ['Xns], 'Xmb1";
+  const char *form_1h = "{'Vt.h}['IVLSLane1], ['Xns], 'Xmb2";
+  const char *form_1s = "{'Vt.s}['IVLSLane2], ['Xns], 'Xmb4";
+  const char *form_1d = "{'Vt.d}['IVLSLane3], ['Xns], 'Xmb8";
+  NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
+
+  switch (instr->Mask(NEONLoadStoreSingleStructPostIndexMask)) {
+    case NEON_LD1_b_post: mnemonic = "ld1"; form = form_1b; break;
+    case NEON_LD1_h_post: mnemonic = "ld1"; form = form_1h; break;
+    case NEON_LD1_s_post:
+      mnemonic = "ld1";
+      VIXL_STATIC_ASSERT((NEON_LD1_s | (1 << NEONLSSize_offset)) == NEON_LD1_d);
+      form = ((instr->NEONLSSize() & 1) == 0) ? form_1s : form_1d;
+      break;
+    case NEON_ST1_b_post: mnemonic = "st1"; form = form_1b; break;
+    case NEON_ST1_h_post: mnemonic = "st1"; form = form_1h; break;
+    case NEON_ST1_s_post:
+      mnemonic = "st1";
+      VIXL_STATIC_ASSERT((NEON_ST1_s | (1 << NEONLSSize_offset)) == NEON_ST1_d);
+      form = ((instr->NEONLSSize() & 1) == 0) ? form_1s : form_1d;
+      break;
+    case NEON_LD1R_post:
+      mnemonic = "ld1r";
+      form = "{'Vt.%s}, ['Xns], 'Xmz1";
+      break;
+    case NEON_LD2_b_post:
+    case NEON_ST2_b_post:
+      mnemonic = (instr->LdStXLoad() == 1) ? "ld2" : "st2";
+      form = "{'Vt.b, 'Vt2.b}['IVLSLane0], ['Xns], 'Xmb2";
+      break;
+    case NEON_ST2_h_post:
+    case NEON_LD2_h_post:
+      mnemonic = (instr->LdStXLoad() == 1) ? "ld2" : "st2";
+      form = "{'Vt.h, 'Vt2.h}['IVLSLane1], ['Xns], 'Xmb4";
+      break;
+    case NEON_LD2_s_post:
+    case NEON_ST2_s_post:
+      mnemonic = (instr->LdStXLoad() == 1) ? "ld2" : "st2";
+      if ((instr->NEONLSSize() & 1) == 0) {
+        form = "{'Vt.s, 'Vt2.s}['IVLSLane2], ['Xns], 'Xmb8";
+      } else {
+        form = "{'Vt.d, 'Vt2.d}['IVLSLane3], ['Xns], 'Xmb16";
+      }
+      break;
+    case NEON_LD2R_post:
+      mnemonic = "ld2r";
+      form = "{'Vt.%s, 'Vt2.%s}, ['Xns], 'Xmz2";
+      break;
+    case NEON_LD3_b_post:
+    case NEON_ST3_b_post:
+      mnemonic = (instr->LdStXLoad() == 1) ? "ld3" : "st3";
+      form = "{'Vt.b, 'Vt2.b, 'Vt3.b}['IVLSLane0], ['Xns], 'Xmb3";
+      break;
+    case NEON_LD3_h_post:
+    case NEON_ST3_h_post:
+      mnemonic = (instr->LdStXLoad() == 1) ? "ld3" : "st3";
+      form = "{'Vt.h, 'Vt2.h, 'Vt3.h}['IVLSLane1], ['Xns], 'Xmb6";
+      break;
+    case NEON_LD3_s_post:
+    case NEON_ST3_s_post:
+      mnemonic = (instr->LdStXLoad() == 1) ? "ld3" : "st3";
+      if ((instr->NEONLSSize() & 1) == 0) {
+        form = "{'Vt.s, 'Vt2.s, 'Vt3.s}['IVLSLane2], ['Xns], 'Xmb12";
+      } else {
+        form = "{'Vt.d, 'Vt2.d, 'Vt3.d}['IVLSLane3], ['Xns], 'Xmb24";
+      }
+      break;
+    case NEON_LD3R_post:
+      mnemonic = "ld3r";
+      form = "{'Vt.%s, 'Vt2.%s, 'Vt3.%s}, ['Xns], 'Xmz3";
+      break;
+    case NEON_LD4_b_post:
+    case NEON_ST4_b_post:
+      mnemonic = (instr->LdStXLoad() == 1) ? "ld4" : "st4";
+      form = "{'Vt.b, 'Vt2.b, 'Vt3.b, 'Vt4.b}['IVLSLane0], ['Xns], 'Xmb4";
+      break;
+    case NEON_LD4_h_post:
+    case NEON_ST4_h_post:
+      mnemonic = (instr->LdStXLoad() == 1) ? "ld4" : "st4";
+      form = "{'Vt.h, 'Vt2.h, 'Vt3.h, 'Vt4.h}['IVLSLane1], ['Xns], 'Xmb8";
+      break;
+    case NEON_LD4_s_post:
+    case NEON_ST4_s_post:
+      mnemonic = (instr->LdStXLoad() == 1) ? "ld4" : "st4";
+      if ((instr->NEONLSSize() & 1) == 0) {
+        form = "{'Vt.s, 'Vt2.s, 'Vt3.s, 'Vt4.s}['IVLSLane2], ['Xns], 'Xmb16";
+      } else {
+        form = "{'Vt.d, 'Vt2.d, 'Vt3.d, 'Vt4.d}['IVLSLane3], ['Xns], 'Xmb32";
+      }
+      break;
+    case NEON_LD4R_post:
+      mnemonic = "ld4r";
+      form = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns], 'Xmz4";
+      break;
+    default: break;
+  }
+
+  Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEONModifiedImmediate(const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "'Vt.%s, 'IVMIImm8, lsl 'IVMIShiftAmt1";
+
+  int cmode   = instr->NEONCmode();
+  int cmode_3 = (cmode >> 3) & 1;
+  int cmode_2 = (cmode >> 2) & 1;
+  int cmode_1 = (cmode >> 1) & 1;
+  int cmode_0 = cmode & 1;
+  int q = instr->NEONQ();
+  int op = instr->NEONModImmOp();
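+  // For example, cmode == 0b1110 with op == 0 decodes below as a
+  // byte-replicated immediate, e.g. "movi v0.16b, #0x55" (".8b" when Q == 0).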
+
+  static const NEONFormatMap map_b = { {30}, {NF_8B, NF_16B} };
+  static const NEONFormatMap map_h = { {30}, {NF_4H, NF_8H} };
+  static const NEONFormatMap map_s = { {30}, {NF_2S, NF_4S} };
+  NEONFormatDecoder nfd(instr, &map_b);
+
+  if (cmode_3 == 0) {
+    if (cmode_0 == 0) {
+      mnemonic = (op == 1) ? "mvni" : "movi";
+    } else {  // cmode<0> == '1'.
+      mnemonic = (op == 1) ? "bic" : "orr";
+    }
+    nfd.SetFormatMap(0, &map_s);
+  } else {  // cmode<3> == '1'.
+    if (cmode_2 == 0) {
+      if (cmode_0 == 0) {
+        mnemonic = (op == 1) ? "mvni" : "movi";
+      } else {  // cmode<0> == '1'.
+        mnemonic = (op == 1) ? "bic" : "orr";
+      }
+      nfd.SetFormatMap(0, &map_h);
+    } else {  // cmode<2> == '1'.
+      if (cmode_1 == 0) {
+        mnemonic = (op == 1) ? "mvni" : "movi";
+        form = "'Vt.%s, 'IVMIImm8, msl 'IVMIShiftAmt2";
+        nfd.SetFormatMap(0, &map_s);
+      } else {   // cmode<1> == '1'.
+        if (cmode_0 == 0) {
+          mnemonic = "movi";
+          if (op == 0) {
+            form = "'Vt.%s, 'IVMIImm8";
+          } else {
+            form = (q == 0) ? "'Dd, 'IVMIImm" : "'Vt.2d, 'IVMIImm";
+          }
+        } else {  // cmode<0> == '1'
+          mnemonic = "fmov";
+          if (op == 0) {
+            form  = "'Vt.%s, 'IVMIImmFPSingle";
+            nfd.SetFormatMap(0, &map_s);
+          } else {
+            if (q == 1) {
+              form = "'Vt.2d, 'IVMIImmFPDouble";
+            }
+          }
+        }
+      }
+    }
+  }
+  Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEONScalar2RegMisc(const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form     = "%sd, %sn";
+  const char *form_0   = "%sd, %sn, #0";
+  const char *form_fp0 = "%sd, %sn, #0.0";
+
+  NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
+
+  if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_scalar_opcode) {
+    // These instructions all use a two bit size field, except NOT and RBIT,
+    // which use the field to encode the operation.
+    switch (instr->Mask(NEONScalar2RegMiscMask)) {
+      case NEON_CMGT_zero_scalar: mnemonic = "cmgt"; form = form_0; break;
+      case NEON_CMGE_zero_scalar: mnemonic = "cmge"; form = form_0; break;
+      case NEON_CMLE_zero_scalar: mnemonic = "cmle"; form = form_0; break;
+      case NEON_CMLT_zero_scalar: mnemonic = "cmlt"; form = form_0; break;
+      case NEON_CMEQ_zero_scalar: mnemonic = "cmeq"; form = form_0; break;
+      case NEON_NEG_scalar:       mnemonic = "neg";   break;
+      case NEON_SQNEG_scalar:     mnemonic = "sqneg"; break;
+      case NEON_ABS_scalar:       mnemonic = "abs";   break;
+      case NEON_SQABS_scalar:     mnemonic = "sqabs"; break;
+      case NEON_SUQADD_scalar:    mnemonic = "suqadd"; break;
+      case NEON_USQADD_scalar:    mnemonic = "usqadd"; break;
+      default: form = "(NEONScalar2RegMisc)";
+    }
+  } else {
+    // These instructions all use a one bit size field, except SQXTUN, SQXTN
+    // and UQXTN, which use a two bit size field.
+    nfd.SetFormatMaps(nfd.FPScalarFormatMap());
+    switch (instr->Mask(NEONScalar2RegMiscFPMask)) {
+      case NEON_FRSQRTE_scalar:    mnemonic = "frsqrte"; break;
+      case NEON_FRECPE_scalar:     mnemonic = "frecpe";  break;
+      case NEON_SCVTF_scalar:      mnemonic = "scvtf"; break;
+      case NEON_UCVTF_scalar:      mnemonic = "ucvtf"; break;
+      case NEON_FCMGT_zero_scalar: mnemonic = "fcmgt"; form = form_fp0; break;
+      case NEON_FCMGE_zero_scalar: mnemonic = "fcmge"; form = form_fp0; break;
+      case NEON_FCMLE_zero_scalar: mnemonic = "fcmle"; form = form_fp0; break;
+      case NEON_FCMLT_zero_scalar: mnemonic = "fcmlt"; form = form_fp0; break;
+      case NEON_FCMEQ_zero_scalar: mnemonic = "fcmeq"; form = form_fp0; break;
+      case NEON_FRECPX_scalar:     mnemonic = "frecpx"; break;
+      case NEON_FCVTNS_scalar:     mnemonic = "fcvtns"; break;
+      case NEON_FCVTNU_scalar:     mnemonic = "fcvtnu"; break;
+      case NEON_FCVTPS_scalar:     mnemonic = "fcvtps"; break;
+      case NEON_FCVTPU_scalar:     mnemonic = "fcvtpu"; break;
+      case NEON_FCVTMS_scalar:     mnemonic = "fcvtms"; break;
+      case NEON_FCVTMU_scalar:     mnemonic = "fcvtmu"; break;
+      case NEON_FCVTZS_scalar:     mnemonic = "fcvtzs"; break;
+      case NEON_FCVTZU_scalar:     mnemonic = "fcvtzu"; break;
+      case NEON_FCVTAS_scalar:     mnemonic = "fcvtas"; break;
+      case NEON_FCVTAU_scalar:     mnemonic = "fcvtau"; break;
+      case NEON_FCVTXN_scalar:
+        nfd.SetFormatMap(0, nfd.LongScalarFormatMap());
+        mnemonic = "fcvtxn";
+        break;
+      default:
+        nfd.SetFormatMap(0, nfd.ScalarFormatMap());
+        nfd.SetFormatMap(1, nfd.LongScalarFormatMap());
+        switch (instr->Mask(NEONScalar2RegMiscMask)) {
+          case NEON_SQXTN_scalar:  mnemonic = "sqxtn"; break;
+          case NEON_UQXTN_scalar:  mnemonic = "uqxtn"; break;
+          case NEON_SQXTUN_scalar: mnemonic = "sqxtun"; break;
+          default: form = "(NEONScalar2RegMisc)";
+        }
+    }
+  }
+  Format(instr, mnemonic, nfd.SubstitutePlaceholders(form));
+}
+
+
+void Disassembler::VisitNEONScalar3Diff(const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "%sd, %sn, %sm";
+  NEONFormatDecoder nfd(instr, NEONFormatDecoder::LongScalarFormatMap(),
+                               NEONFormatDecoder::ScalarFormatMap());
+
+  switch (instr->Mask(NEONScalar3DiffMask)) {
+    case NEON_SQDMLAL_scalar: mnemonic = "sqdmlal"; break;
+    case NEON_SQDMLSL_scalar: mnemonic = "sqdmlsl"; break;
+    case NEON_SQDMULL_scalar: mnemonic = "sqdmull"; break;
+    default: form = "(NEONScalar3Diff)";
+  }
+  Format(instr, mnemonic, nfd.SubstitutePlaceholders(form));
+}
+
+
+void Disassembler::VisitNEONScalar3Same(const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "%sd, %sn, %sm";
+  NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
+
+  if (instr->Mask(NEONScalar3SameFPFMask) == NEONScalar3SameFPFixed) {
+    nfd.SetFormatMaps(nfd.FPScalarFormatMap());
+    switch (instr->Mask(NEONScalar3SameFPMask)) {
+      case NEON_FACGE_scalar:   mnemonic = "facge"; break;
+      case NEON_FACGT_scalar:   mnemonic = "facgt"; break;
+      case NEON_FCMEQ_scalar:   mnemonic = "fcmeq"; break;
+      case NEON_FCMGE_scalar:   mnemonic = "fcmge"; break;
+      case NEON_FCMGT_scalar:   mnemonic = "fcmgt"; break;
+      case NEON_FMULX_scalar:   mnemonic = "fmulx"; break;
+      case NEON_FRECPS_scalar:  mnemonic = "frecps"; break;
+      case NEON_FRSQRTS_scalar: mnemonic = "frsqrts"; break;
+      case NEON_FABD_scalar:    mnemonic = "fabd"; break;
+      default: form = "(NEONScalar3Same)";
+    }
+  } else {
+    switch (instr->Mask(NEONScalar3SameMask)) {
+      case NEON_ADD_scalar:    mnemonic = "add";    break;
+      case NEON_SUB_scalar:    mnemonic = "sub";    break;
+      case NEON_CMEQ_scalar:   mnemonic = "cmeq";   break;
+      case NEON_CMGE_scalar:   mnemonic = "cmge";   break;
+      case NEON_CMGT_scalar:   mnemonic = "cmgt";   break;
+      case NEON_CMHI_scalar:   mnemonic = "cmhi";   break;
+      case NEON_CMHS_scalar:   mnemonic = "cmhs";   break;
+      case NEON_CMTST_scalar:  mnemonic = "cmtst";  break;
+      case NEON_UQADD_scalar:  mnemonic = "uqadd";  break;
+      case NEON_SQADD_scalar:  mnemonic = "sqadd";  break;
+      case NEON_UQSUB_scalar:  mnemonic = "uqsub";  break;
+      case NEON_SQSUB_scalar:  mnemonic = "sqsub";  break;
+      case NEON_USHL_scalar:   mnemonic = "ushl";   break;
+      case NEON_SSHL_scalar:   mnemonic = "sshl";   break;
+      case NEON_UQSHL_scalar:  mnemonic = "uqshl";  break;
+      case NEON_SQSHL_scalar:  mnemonic = "sqshl";  break;
+      case NEON_URSHL_scalar:  mnemonic = "urshl";  break;
+      case NEON_SRSHL_scalar:  mnemonic = "srshl";  break;
+      case NEON_UQRSHL_scalar: mnemonic = "uqrshl"; break;
+      case NEON_SQRSHL_scalar: mnemonic = "sqrshl"; break;
+      case NEON_SQDMULH_scalar:  mnemonic = "sqdmulh";  break;
+      case NEON_SQRDMULH_scalar: mnemonic = "sqrdmulh"; break;
+      default: form = "(NEONScalar3Same)";
+    }
+  }
+  Format(instr, mnemonic, nfd.SubstitutePlaceholders(form));
+}
+
+
+void Disassembler::VisitNEONScalarByIndexedElement(const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "%sd, %sn, 'Ve.%s['IVByElemIndex]";
+  NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
+  bool long_instr = false;
+
+  switch (instr->Mask(NEONScalarByIndexedElementMask)) {
+    case NEON_SQDMULL_byelement_scalar:
+      mnemonic = "sqdmull";
+      long_instr = true;
+      break;
+    case NEON_SQDMLAL_byelement_scalar:
+      mnemonic = "sqdmlal";
+      long_instr = true;
+      break;
+    case NEON_SQDMLSL_byelement_scalar:
+      mnemonic = "sqdmlsl";
+      long_instr = true;
+      break;
+    case NEON_SQDMULH_byelement_scalar:
+      mnemonic = "sqdmulh";
+      break;
+    case NEON_SQRDMULH_byelement_scalar:
+      mnemonic = "sqrdmulh";
+      break;
+    default:
+      nfd.SetFormatMap(0, nfd.FPScalarFormatMap());
+      switch (instr->Mask(NEONScalarByIndexedElementFPMask)) {
+        case NEON_FMUL_byelement_scalar: mnemonic = "fmul"; break;
+        case NEON_FMLA_byelement_scalar: mnemonic = "fmla"; break;
+        case NEON_FMLS_byelement_scalar: mnemonic = "fmls"; break;
+        case NEON_FMULX_byelement_scalar: mnemonic = "fmulx"; break;
+        default: form = "(NEONScalarByIndexedElement)";
+      }
+  }
+
+  if (long_instr) {
+    nfd.SetFormatMap(0, nfd.LongScalarFormatMap());
+  }
+
+  Format(instr, mnemonic, nfd.Substitute(
+    form, nfd.kPlaceholder, nfd.kPlaceholder, nfd.kFormat));
+}
+
+
+void Disassembler::VisitNEONScalarCopy(const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "(NEONScalarCopy)";
+
+  NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularScalarFormatMap());
+
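+  // DUP (element, scalar) is the only form handled here; it is disassembled
+  // using its preferred alias, mov.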
+  if (instr->Mask(NEONScalarCopyMask) == NEON_DUP_ELEMENT_scalar) {
+    mnemonic = "mov";
+    form = "%sd, 'Vn.%s['IVInsIndex1]";
+  }
+
+  Format(instr, mnemonic, nfd.Substitute(form, nfd.kPlaceholder, nfd.kFormat));
+}
+
+
+void Disassembler::VisitNEONScalarPairwise(const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "%sd, 'Vn.%s";
+  NEONFormatMap map = { {22}, {NF_2S, NF_2D} };
+  NEONFormatDecoder nfd(instr, NEONFormatDecoder::FPScalarFormatMap(), &map);
+
+  switch (instr->Mask(NEONScalarPairwiseMask)) {
+    case NEON_ADDP_scalar:    mnemonic = "addp"; break;
+    case NEON_FADDP_scalar:   mnemonic = "faddp"; break;
+    case NEON_FMAXP_scalar:   mnemonic = "fmaxp"; break;
+    case NEON_FMAXNMP_scalar: mnemonic = "fmaxnmp"; break;
+    case NEON_FMINP_scalar:   mnemonic = "fminp"; break;
+    case NEON_FMINNMP_scalar: mnemonic = "fminnmp"; break;
+    default: form = "(NEONScalarPairwise)";
+  }
+  Format(instr, mnemonic, nfd.Substitute(form,
+      NEONFormatDecoder::kPlaceholder, NEONFormatDecoder::kFormat));
+}
+
+
+void Disassembler::VisitNEONScalarShiftImmediate(const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form   = "%sd, %sn, 'Is1";
+  const char *form_2 = "%sd, %sn, 'Is2";
+
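+  // immh: 0001->B, 001x->H, 01xx->S, 1xxx->D, 0000 undefined.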
+  static const NEONFormatMap map_shift = {
+    {22, 21, 20, 19},
+    {NF_UNDEF, NF_B, NF_H, NF_H, NF_S, NF_S, NF_S, NF_S,
+     NF_D,     NF_D, NF_D, NF_D, NF_D, NF_D, NF_D, NF_D}
+  };
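+  // immh<2:0>: 001->H, 01x->S, 1xx->D, 000 undefined.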
+  static const NEONFormatMap map_shift_narrow = {
+    {21, 20, 19},
+    {NF_UNDEF, NF_H, NF_S, NF_S, NF_D, NF_D, NF_D, NF_D}
+  };
+  NEONFormatDecoder nfd(instr, &map_shift);
+
+  if (instr->ImmNEONImmh()) {  // immh has to be non-zero.
+    switch (instr->Mask(NEONScalarShiftImmediateMask)) {
+      case NEON_FCVTZU_imm_scalar: mnemonic = "fcvtzu"; break;
+      case NEON_FCVTZS_imm_scalar: mnemonic = "fcvtzs"; break;
+      case NEON_SCVTF_imm_scalar:  mnemonic = "scvtf";  break;
+      case NEON_UCVTF_imm_scalar:  mnemonic = "ucvtf";  break;
+      case NEON_SRI_scalar:       mnemonic = "sri";    break;
+      case NEON_SSHR_scalar:      mnemonic = "sshr";   break;
+      case NEON_USHR_scalar:      mnemonic = "ushr";   break;
+      case NEON_SRSHR_scalar:     mnemonic = "srshr";  break;
+      case NEON_URSHR_scalar:     mnemonic = "urshr";  break;
+      case NEON_SSRA_scalar:      mnemonic = "ssra";   break;
+      case NEON_USRA_scalar:      mnemonic = "usra";   break;
+      case NEON_SRSRA_scalar:     mnemonic = "srsra";  break;
+      case NEON_URSRA_scalar:     mnemonic = "ursra";  break;
+      case NEON_SHL_scalar:       mnemonic = "shl";    form = form_2; break;
+      case NEON_SLI_scalar:       mnemonic = "sli";    form = form_2; break;
+      case NEON_SQSHLU_scalar:    mnemonic = "sqshlu"; form = form_2; break;
+      case NEON_SQSHL_imm_scalar: mnemonic = "sqshl";  form = form_2; break;
+      case NEON_UQSHL_imm_scalar: mnemonic = "uqshl";  form = form_2; break;
+      case NEON_UQSHRN_scalar:
+        mnemonic = "uqshrn";
+        nfd.SetFormatMap(1, &map_shift_narrow);
+        break;
+      case NEON_UQRSHRN_scalar:
+        mnemonic = "uqrshrn";
+        nfd.SetFormatMap(1, &map_shift_narrow);
+        break;
+      case NEON_SQSHRN_scalar:
+        mnemonic = "sqshrn";
+        nfd.SetFormatMap(1, &map_shift_narrow);
+        break;
+      case NEON_SQRSHRN_scalar:
+        mnemonic = "sqrshrn";
+        nfd.SetFormatMap(1, &map_shift_narrow);
+        break;
+      case NEON_SQSHRUN_scalar:
+        mnemonic = "sqshrun";
+        nfd.SetFormatMap(1, &map_shift_narrow);
+        break;
+      case NEON_SQRSHRUN_scalar:
+        mnemonic = "sqrshrun";
+        nfd.SetFormatMap(1, &map_shift_narrow);
+        break;
+      default:
+        form = "(NEONScalarShiftImmediate)";
+    }
+  } else {
+    form = "(NEONScalarShiftImmediate)";
+  }
+  Format(instr, mnemonic, nfd.SubstitutePlaceholders(form));
+}
+
+
+void Disassembler::VisitNEONShiftImmediate(const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form         = "'Vd.%s, 'Vn.%s, 'Is1";
+  const char *form_shift_2 = "'Vd.%s, 'Vn.%s, 'Is2";
+  const char *form_xtl     = "'Vd.%s, 'Vn.%s";
+
+  // 0001->8H, 001x->4S, 01xx->2D, all others undefined.
+  static const NEONFormatMap map_shift_ta = {
+    {22, 21, 20, 19},
+    {NF_UNDEF, NF_8H, NF_4S, NF_4S, NF_2D, NF_2D, NF_2D, NF_2D}
+  };
+
+  // 00010->8B, 00011->16B, 001x0->4H, 001x1->8H,
+  // 01xx0->2S, 01xx1->4S, 1xxx1->2D, all others undefined.
+  static const NEONFormatMap map_shift_tb = {
+    {22, 21, 20, 19, 30},
+    {NF_UNDEF, NF_UNDEF, NF_8B,    NF_16B, NF_4H,    NF_8H, NF_4H,    NF_8H,
+     NF_2S,    NF_4S,    NF_2S,    NF_4S,  NF_2S,    NF_4S, NF_2S,    NF_4S,
+     NF_UNDEF, NF_2D,    NF_UNDEF, NF_2D,  NF_UNDEF, NF_2D, NF_UNDEF, NF_2D,
+     NF_UNDEF, NF_2D,    NF_UNDEF, NF_2D,  NF_UNDEF, NF_2D, NF_UNDEF, NF_2D}
+  };
+
+  NEONFormatDecoder nfd(instr, &map_shift_tb);
+
+  if (instr->ImmNEONImmh()) {  // immh has to be non-zero.
+    switch (instr->Mask(NEONShiftImmediateMask)) {
+      case NEON_SQSHLU:     mnemonic = "sqshlu"; form = form_shift_2; break;
+      case NEON_SQSHL_imm:  mnemonic = "sqshl";  form = form_shift_2; break;
+      case NEON_UQSHL_imm:  mnemonic = "uqshl";  form = form_shift_2; break;
+      case NEON_SHL:        mnemonic = "shl";    form = form_shift_2; break;
+      case NEON_SLI:        mnemonic = "sli";    form = form_shift_2; break;
+      case NEON_SCVTF_imm:  mnemonic = "scvtf";  break;
+      case NEON_UCVTF_imm:  mnemonic = "ucvtf";  break;
+      case NEON_FCVTZU_imm: mnemonic = "fcvtzu"; break;
+      case NEON_FCVTZS_imm: mnemonic = "fcvtzs"; break;
+      case NEON_SRI:        mnemonic = "sri";    break;
+      case NEON_SSHR:       mnemonic = "sshr";   break;
+      case NEON_USHR:       mnemonic = "ushr";   break;
+      case NEON_SRSHR:      mnemonic = "srshr";  break;
+      case NEON_URSHR:      mnemonic = "urshr";  break;
+      case NEON_SSRA:       mnemonic = "ssra";   break;
+      case NEON_USRA:       mnemonic = "usra";   break;
+      case NEON_SRSRA:      mnemonic = "srsra";  break;
+      case NEON_URSRA:      mnemonic = "ursra";  break;
+      case NEON_SHRN:
+        mnemonic = instr->Mask(NEON_Q) ? "shrn2" : "shrn";
+        nfd.SetFormatMap(1, &map_shift_ta);
+        break;
+      case NEON_RSHRN:
+        mnemonic = instr->Mask(NEON_Q) ? "rshrn2" : "rshrn";
+        nfd.SetFormatMap(1, &map_shift_ta);
+        break;
+      case NEON_UQSHRN:
+        mnemonic = instr->Mask(NEON_Q) ? "uqshrn2" : "uqshrn";
+        nfd.SetFormatMap(1, &map_shift_ta);
+        break;
+      case NEON_UQRSHRN:
+        mnemonic = instr->Mask(NEON_Q) ? "uqrshrn2" : "uqrshrn";
+        nfd.SetFormatMap(1, &map_shift_ta);
+        break;
+      case NEON_SQSHRN:
+        mnemonic = instr->Mask(NEON_Q) ? "sqshrn2" : "sqshrn";
+        nfd.SetFormatMap(1, &map_shift_ta);
+        break;
+      case NEON_SQRSHRN:
+        mnemonic = instr->Mask(NEON_Q) ? "sqrshrn2" : "sqrshrn";
+        nfd.SetFormatMap(1, &map_shift_ta);
+        break;
+      case NEON_SQSHRUN:
+        mnemonic = instr->Mask(NEON_Q) ? "sqshrun2" : "sqshrun";
+        nfd.SetFormatMap(1, &map_shift_ta);
+        break;
+      case NEON_SQRSHRUN:
+        mnemonic = instr->Mask(NEON_Q) ? "sqrshrun2" : "sqrshrun";
+        nfd.SetFormatMap(1, &map_shift_ta);
+        break;
+      case NEON_SSHLL:
+        nfd.SetFormatMap(0, &map_shift_ta);
+        if (instr->ImmNEONImmb() == 0 &&
+            CountSetBits(instr->ImmNEONImmh(), 32) == 1) {  // sxtl variant.
+          form = form_xtl;
+          mnemonic = instr->Mask(NEON_Q) ? "sxtl2" : "sxtl";
+        } else {  // sshll variant.
+          form = form_shift_2;
+          mnemonic = instr->Mask(NEON_Q) ? "sshll2" : "sshll";
+        }
+        break;
+      case NEON_USHLL:
+        nfd.SetFormatMap(0, &map_shift_ta);
+        if (instr->ImmNEONImmb() == 0 &&
+            CountSetBits(instr->ImmNEONImmh(), 32) == 1) {  // uxtl variant.
+          form = form_xtl;
+          mnemonic = instr->Mask(NEON_Q) ? "uxtl2" : "uxtl";
+        } else {  // ushll variant.
+          form = form_shift_2;
+          mnemonic = instr->Mask(NEON_Q) ? "ushll2" : "ushll";
+        }
+        break;
+      default: form = "(NEONShiftImmediate)";
+    }
+  } else {
+    form = "(NEONShiftImmediate)";
+  }
+  Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEONTable(const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "(NEONTable)";
+  const char form_1v[] = "'Vd.%%s, {'Vn.16b}, 'Vm.%%s";
+  const char form_2v[] = "'Vd.%%s, {'Vn.16b, v%d.16b}, 'Vm.%%s";
+  const char form_3v[] = "'Vd.%%s, {'Vn.16b, v%d.16b, v%d.16b}, 'Vm.%%s";
+  const char form_4v[] =
+      "'Vd.%%s, {'Vn.16b, v%d.16b, v%d.16b, v%d.16b}, 'Vm.%%s";
+  static const NEONFormatMap map_b = { {30}, {NF_8B, NF_16B} };
+  NEONFormatDecoder nfd(instr, &map_b);
+
+  switch (instr->Mask(NEONTableMask)) {
+    case NEON_TBL_1v: mnemonic = "tbl"; form = form_1v; break;
+    case NEON_TBL_2v: mnemonic = "tbl"; form = form_2v; break;
+    case NEON_TBL_3v: mnemonic = "tbl"; form = form_3v; break;
+    case NEON_TBL_4v: mnemonic = "tbl"; form = form_4v; break;
+    case NEON_TBX_1v: mnemonic = "tbx"; form = form_1v; break;
+    case NEON_TBX_2v: mnemonic = "tbx"; form = form_2v; break;
+    case NEON_TBX_3v: mnemonic = "tbx"; form = form_3v; break;
+    case NEON_TBX_4v: mnemonic = "tbx"; form = form_4v; break;
+    default: break;
+  }
+
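+  // Patch the consecutive register numbers into the selected list form.
+  // Forms with fewer registers have fewer "%d" fields, and snprintf ignores
+  // the surplus arguments; the "%%s" placeholders survive as "%s" for
+  // nfd.Substitute() below.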
+  char re_form[sizeof(form_4v) + 6];
+  int reg_num = instr->Rn();
+  snprintf(re_form, sizeof(re_form), form,
+           (reg_num + 1) % kNumberOfVRegisters,
+           (reg_num + 2) % kNumberOfVRegisters,
+           (reg_num + 3) % kNumberOfVRegisters);
+
+  Format(instr, mnemonic, nfd.Substitute(re_form));
+}
+
+
+void Disassembler::VisitNEONPerm(const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form = "'Vd.%s, 'Vn.%s, 'Vm.%s";
+  NEONFormatDecoder nfd(instr);
+
+  switch (instr->Mask(NEONPermMask)) {
+    case NEON_TRN1: mnemonic = "trn1"; break;
+    case NEON_TRN2: mnemonic = "trn2"; break;
+    case NEON_UZP1: mnemonic = "uzp1"; break;
+    case NEON_UZP2: mnemonic = "uzp2"; break;
+    case NEON_ZIP1: mnemonic = "zip1"; break;
+    case NEON_ZIP2: mnemonic = "zip2"; break;
+    default: form = "(NEONPerm)";
+  }
+  Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitUnimplemented(const Instruction* instr) {
+  Format(instr, "unimplemented", "(Unimplemented)");
+}
+
+
+void Disassembler::VisitUnallocated(const Instruction* instr) {
+  Format(instr, "unallocated", "(Unallocated)");
+}
+
+
+void Disassembler::ProcessOutput(const Instruction* /*instr*/) {
+  // The base disasm does nothing more than disassembling into a buffer.
+}
+
+
+void Disassembler::AppendRegisterNameToOutput(const Instruction* instr,
+                                              const CPURegister& reg) {
+  USE(instr);
+  VIXL_ASSERT(reg.IsValid());
+  char reg_char;
+
+  if (reg.IsRegister()) {
+    reg_char = reg.Is64Bits() ? 'x' : 'w';
+  } else {
+    VIXL_ASSERT(reg.IsVRegister());
+    switch (reg.SizeInBits()) {
+      case kBRegSize: reg_char = 'b'; break;
+      case kHRegSize: reg_char = 'h'; break;
+      case kSRegSize: reg_char = 's'; break;
+      case kDRegSize: reg_char = 'd'; break;
+      default:
+        VIXL_ASSERT(reg.Is128Bits());
+        reg_char = 'q';
+    }
+  }
+
+  if (reg.IsVRegister() || !(reg.Aliases(sp) || reg.Aliases(xzr))) {
+    // A core or scalar/vector register: [wx]0 - 30, [bhsdq]0 - 31.
+    AppendToOutput("%c%d", reg_char, reg.code());
+  } else if (reg.Aliases(sp)) {
+    // Disassemble w31/x31 as stack pointer wsp/sp.
+    AppendToOutput("%s", reg.Is64Bits() ? "sp" : "wsp");
+  } else {
+    // Disassemble w31/x31 as zero register wzr/xzr.
+    AppendToOutput("%czr", reg_char);
+  }
+}
+
+
+void Disassembler::AppendPCRelativeOffsetToOutput(const Instruction* instr,
+                                                  int64_t offset) {
+  USE(instr);
+  char sign = (offset < 0) ? '-' : '+';
+  AppendToOutput("#%c0x%" PRIx64, sign, std::abs(offset));
+}
+
+
+void Disassembler::AppendAddressToOutput(const Instruction* instr,
+                                         const void* addr) {
+  USE(instr);
+  AppendToOutput("(addr 0x%" PRIxPTR ")", reinterpret_cast<uintptr_t>(addr));
+}
+
+
+void Disassembler::AppendCodeAddressToOutput(const Instruction* instr,
+                                             const void* addr) {
+  AppendAddressToOutput(instr, addr);
+}
+
+
+void Disassembler::AppendDataAddressToOutput(const Instruction* instr,
+                                             const void* addr) {
+  AppendAddressToOutput(instr, addr);
+}
+
+
+void Disassembler::AppendCodeRelativeAddressToOutput(const Instruction* instr,
+                                                     const void* addr) {
+  USE(instr);
+  int64_t rel_addr = CodeRelativeAddress(addr);
+  if (rel_addr >= 0) {
+    AppendToOutput("(addr 0x%" PRIx64 ")", rel_addr);
+  } else {
+    AppendToOutput("(addr -0x%" PRIx64 ")", -rel_addr);
+  }
+}
+
+
+void Disassembler::AppendCodeRelativeCodeAddressToOutput(
+    const Instruction* instr, const void* addr) {
+  AppendCodeRelativeAddressToOutput(instr, addr);
+}
+
+
+void Disassembler::AppendCodeRelativeDataAddressToOutput(
+    const Instruction* instr, const void* addr) {
+  AppendCodeRelativeAddressToOutput(instr, addr);
+}
+
+
+void Disassembler::MapCodeAddress(int64_t base_address,
+                                  const Instruction* instr_address) {
+  set_code_address_offset(
+      base_address - reinterpret_cast<intptr_t>(instr_address));
+}
+
+
+int64_t Disassembler::CodeRelativeAddress(const void* addr) {
+  return reinterpret_cast<intptr_t>(addr) + code_address_offset();
+}
+
+
+void Disassembler::Format(const Instruction* instr, const char* mnemonic,
+                          const char* format) {
+  VIXL_ASSERT(mnemonic != NULL);
+  ResetOutput();
+  Substitute(instr, mnemonic);
+  if (format != NULL) {
+    VIXL_ASSERT(buffer_pos_ < buffer_size_);
+    buffer_[buffer_pos_++] = ' ';
+    Substitute(instr, format);
+  }
+  VIXL_ASSERT(buffer_pos_ < buffer_size_);
+  buffer_[buffer_pos_] = 0;
+  ProcessOutput(instr);
+}
+
+
+void Disassembler::Substitute(const Instruction* instr, const char* string) {
+  char chr = *string++;
+  while (chr != '\0') {
+    if (chr == '\'') {
+      string += SubstituteField(instr, string);
+    } else {
+      VIXL_ASSERT(buffer_pos_ < buffer_size_);
+      buffer_[buffer_pos_++] = chr;
+    }
+    chr = *string++;
+  }
+}
+
+
+int Disassembler::SubstituteField(const Instruction* instr,
+                                  const char* format) {
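+  // Each substitution handler appends its operand text to the output and
+  // returns the number of format characters consumed, so that Substitute()
+  // can advance past the field specifier.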
+  switch (format[0]) {
+    // NB. The remaining substitution prefix characters are: JUYZ.
+    case 'R':  // Register. X or W, selected by sf bit.
+    case 'F':  // FP register. S or D, selected by type field.
+    case 'V':  // Vector register, V, vector format.
+    case 'W':
+    case 'X':
+    case 'B':
+    case 'H':
+    case 'S':
+    case 'D':
+    case 'Q': return SubstituteRegisterField(instr, format);
+    case 'I': return SubstituteImmediateField(instr, format);
+    case 'L': return SubstituteLiteralField(instr, format);
+    case 'N': return SubstituteShiftField(instr, format);
+    case 'P': return SubstitutePrefetchField(instr, format);
+    case 'C': return SubstituteConditionField(instr, format);
+    case 'E': return SubstituteExtendField(instr, format);
+    case 'A': return SubstitutePCRelAddressField(instr, format);
+    case 'T': return SubstituteBranchTargetField(instr, format);
+    case 'O': return SubstituteLSRegOffsetField(instr, format);
+    case 'M': return SubstituteBarrierField(instr, format);
+    case 'K': return SubstituteCrField(instr, format);
+    case 'G': return SubstituteSysOpField(instr, format);
+    default: {
+      VIXL_UNREACHABLE();
+      return 1;
+    }
+  }
+}
+
+
+int Disassembler::SubstituteRegisterField(const Instruction* instr,
+                                          const char* format) {
+  char reg_prefix = format[0];
+  unsigned reg_num = 0;
+  unsigned field_len = 2;
+
+  switch (format[1]) {
+    case 'd':
+      reg_num = instr->Rd();
+      if (format[2] == 'q') {
+        reg_prefix = instr->NEONQ() ? 'X' : 'W';
+        field_len = 3;
+      }
+      break;
+    case 'n': reg_num = instr->Rn(); break;
+    case 'm':
+      reg_num = instr->Rm();
+      switch (format[2]) {
+          // Handle registers tagged with b (bytes), z (element size) or
+          // r (register size), which scale the post-index immediate used
+          // for address updates in NEON load/store instructions.
+        case 'r':
+        case 'b':
+        case 'z': {
+          field_len = 3;
+          char* eimm;
+          int imm = static_cast<int>(strtol(&format[3], &eimm, 10));
+          field_len += eimm - &format[3];
+          if (reg_num == 31) {
+            switch (format[2]) {
+              case 'z':
+                imm *= (1 << instr->NEONLSSize());
+                break;
+              case 'r':
+                imm *= (instr->NEONQ() == 0) ? kDRegSizeInBytes
+                                             : kQRegSizeInBytes;
+                break;
+              case 'b':
+                break;
+            }
+            AppendToOutput("#%d", imm);
+            return field_len;
+          }
+          break;
+        }
+      }
+      break;
+    case 'e':
+      // This is register Rm, but using a 4-bit specifier. Used in NEON
+      // by-element instructions.
+      reg_num = (instr->Rm() & 0xf);
+      break;
+    case 'a': reg_num = instr->Ra(); break;
+    case 's': reg_num = instr->Rs(); break;
+    case 't':
+      reg_num = instr->Rt();
+      if (format[0] == 'V') {
+        if ((format[2] >= '2') && (format[2] <= '4')) {
+          // Handle consecutive vector register specifiers Vt2, Vt3 and Vt4.
+          reg_num = (reg_num + format[2] - '1') % 32;
+          field_len = 3;
+        }
+      } else {
+        if (format[2] == '2') {
+          // Handle register specifier Rt2.
+          reg_num = instr->Rt2();
+          field_len = 3;
+        }
+      }
+      break;
+    default: VIXL_UNREACHABLE();
+  }
+
+  // Increase field length for registers tagged as stack.
+  if (format[2] == 's') {
+    field_len = 3;
+  }
+
+  CPURegister::RegisterType reg_type = CPURegister::kRegister;
+  unsigned reg_size = kXRegSize;
+
+  if (reg_prefix == 'R') {
+    reg_prefix = instr->SixtyFourBits() ? 'X' : 'W';
+  } else if (reg_prefix == 'F') {
+    reg_prefix = ((instr->FPType() & 1) == 0) ? 'S' : 'D';
+  }
+
+  switch (reg_prefix) {
+    case 'W':
+      reg_type = CPURegister::kRegister; reg_size = kWRegSize; break;
+    case 'X':
+      reg_type = CPURegister::kRegister; reg_size = kXRegSize; break;
+    case 'B':
+      reg_type = CPURegister::kVRegister; reg_size = kBRegSize; break;
+    case 'H':
+      reg_type = CPURegister::kVRegister; reg_size = kHRegSize; break;
+    case 'S':
+      reg_type = CPURegister::kVRegister; reg_size = kSRegSize; break;
+    case 'D':
+      reg_type = CPURegister::kVRegister; reg_size = kDRegSize; break;
+    case 'Q':
+      reg_type = CPURegister::kVRegister; reg_size = kQRegSize; break;
+    case 'V':
+      AppendToOutput("v%d", reg_num);
+      return field_len;
+    default:
+      VIXL_UNREACHABLE();
+  }
+
+  if ((reg_type == CPURegister::kRegister) &&
+      (reg_num == kZeroRegCode) && (format[2] == 's')) {
+    reg_num = kSPRegInternalCode;
+  }
+
+  AppendRegisterNameToOutput(instr, CPURegister(reg_num, reg_size, reg_type));
+
+  return field_len;
+}
+
+
+int Disassembler::SubstituteImmediateField(const Instruction* instr,
+                                           const char* format) {
+  VIXL_ASSERT(format[0] == 'I');
+
+  switch (format[1]) {
+    case 'M': {  // IMoveImm, IMoveNeg or IMoveLSL.
+      if (format[5] == 'L') {
+        AppendToOutput("#0x%" PRIx32, instr->ImmMoveWide());
+        if (instr->ShiftMoveWide() > 0) {
+          AppendToOutput(", lsl #%" PRId32, 16 * instr->ShiftMoveWide());
+        }
+      } else {
+        VIXL_ASSERT((format[5] == 'I') || (format[5] == 'N'));
+        uint64_t imm = static_cast<uint64_t>(instr->ImmMoveWide()) <<
+            (16 * instr->ShiftMoveWide());
+        if (format[5] == 'N')
+          imm = ~imm;
+        if (!instr->SixtyFourBits())
+          imm &= UINT64_C(0xffffffff);
+        AppendToOutput("#0x%" PRIx64, imm);
+      }
+      return 8;
+    }
+    case 'L': {
+      switch (format[2]) {
+        case 'L': {  // ILLiteral - Immediate Load Literal.
+          AppendToOutput("pc%+" PRId32,
+                         instr->ImmLLiteral() << kLiteralEntrySizeLog2);
+          return 9;
+        }
+        case 'S': {  // ILS - Immediate Load/Store.
+          if (instr->ImmLS() != 0) {
+            AppendToOutput(", #%" PRId32, instr->ImmLS());
+          }
+          return 3;
+        }
+        case 'P': {  // ILPx - Immediate Load/Store Pair, x = access size.
+          if (instr->ImmLSPair() != 0) {
+            // format[3] is the scale value. Convert to a number.
+            int scale = 1 << (format[3] - '0');
+            AppendToOutput(", #%" PRId32, instr->ImmLSPair() * scale);
+          }
+          return 4;
+        }
+        case 'U': {  // ILU - Immediate Load/Store Unsigned.
+          if (instr->ImmLSUnsigned() != 0) {
+            int shift = instr->SizeLS();
+            AppendToOutput(", #%" PRId32, instr->ImmLSUnsigned() << shift);
+          }
+          return 3;
+        }
+      }
+    }
+    case 'C': {  // ICondB - Immediate Conditional Branch.
+      int64_t offset = instr->ImmCondBranch() << 2;
+      AppendPCRelativeOffsetToOutput(instr, offset);
+      return 6;
+    }
+    case 'A': {  // IAddSub.
+      VIXL_ASSERT(instr->ShiftAddSub() <= 1);
+      int64_t imm = instr->ImmAddSub() << (12 * instr->ShiftAddSub());
+      AppendToOutput("#0x%" PRIx64 " (%" PRId64 ")", imm, imm);
+      return 7;
+    }
+    case 'F': {  // IFPSingle, IFPDouble or IFPFBits.
+      if (format[3] == 'F') {  // IFPFbits.
+        AppendToOutput("#%" PRId32, 64 - instr->FPScale());
+        return 8;
+      } else {
+        AppendToOutput("#0x%" PRIx32 " (%.4f)", instr->ImmFP(),
+                       format[3] == 'S' ? instr->ImmFP32() : instr->ImmFP64());
+        return 9;
+      }
+    }
+    case 'T': {  // ITri - Immediate Triangular Encoded.
+      AppendToOutput("#0x%" PRIx64, instr->ImmLogical());
+      return 4;
+    }
+    case 'N': {  // INzcv.
+      int nzcv = (instr->Nzcv() << Flags_offset);
+      AppendToOutput("#%c%c%c%c", ((nzcv & NFlag) == 0) ? 'n' : 'N',
+                                  ((nzcv & ZFlag) == 0) ? 'z' : 'Z',
+                                  ((nzcv & CFlag) == 0) ? 'c' : 'C',
+                                  ((nzcv & VFlag) == 0) ? 'v' : 'V');
+      return 5;
+    }
+    case 'P': {  // IP - Conditional compare.
+      AppendToOutput("#%" PRId32, instr->ImmCondCmp());
+      return 2;
+    }
+    case 'B': {  // Bitfields.
+      return SubstituteBitfieldImmediateField(instr, format);
+    }
+    case 'E': {  // IExtract.
+      AppendToOutput("#%" PRId32, instr->ImmS());
+      return 8;
+    }
+    case 'S': {  // IS - Test and branch bit.
+      AppendToOutput("#%" PRId32, (instr->ImmTestBranchBit5() << 5) |
+                                  instr->ImmTestBranchBit40());
+      return 2;
+    }
+    case 's': {  // Is - Shift (immediate).
+      switch (format[2]) {
+        case '1': {  // Is1 - SSHR.
+          int shift = 16 << HighestSetBitPosition(instr->ImmNEONImmh());
+          shift -= instr->ImmNEONImmhImmb();
+          AppendToOutput("#%d", shift);
+          return 3;
+        }
+        case '2': {  // Is2 - SLI.
+          int shift = instr->ImmNEONImmhImmb();
+          shift -= 8 << HighestSetBitPosition(instr->ImmNEONImmh());
+          AppendToOutput("#%d", shift);
+          return 3;
+        }
+        default: {
+          VIXL_UNIMPLEMENTED();
+          return 0;
+        }
+      }
+    }
+    case 'D': {  // IDebug - HLT and BRK instructions.
+      AppendToOutput("#0x%" PRIx32, instr->ImmException());
+      return 6;
+    }
+    case 'V': {  // Immediate Vector.
+      switch (format[2]) {
+        case 'E': {  // IVExtract.
+          AppendToOutput("#%" PRId32, instr->ImmNEONExt());
+          return 9;
+        }
+        case 'B': {  // IVByElemIndex.
+          int vm_index = (instr->NEONH() << 1) | instr->NEONL();
+          if (instr->NEONSize() == 1) {
+            vm_index = (vm_index << 1) | instr->NEONM();
+          }
+          AppendToOutput("%d", vm_index);
+          return strlen("IVByElemIndex");
+        }
+        case 'I': {  // INS element.
+          if (strncmp(format, "IVInsIndex", strlen("IVInsIndex")) == 0) {
+            int rd_index, rn_index;
+            int imm5 = instr->ImmNEON5();
+            int imm4 = instr->ImmNEON4();
+            int tz = CountTrailingZeros(imm5, 32);
+            rd_index = imm5 >> (tz + 1);
+            rn_index = imm4 >> tz;
+            if (strncmp(format, "IVInsIndex1", strlen("IVInsIndex1")) == 0) {
+              AppendToOutput("%d", rd_index);
+              return strlen("IVInsIndex1");
+            } else if (strncmp(format, "IVInsIndex2",
+                       strlen("IVInsIndex2")) == 0) {
+              AppendToOutput("%d", rn_index);
+              return strlen("IVInsIndex2");
+            } else {
+              VIXL_UNIMPLEMENTED();
+              return 0;
+            }
+          }
+          VIXL_FALLTHROUGH();
+        }
+        case 'L': {  // IVLSLane[0123] - suffix indicates access size shift.
+          AppendToOutput("%d", instr->NEONLSIndex(format[8] - '0'));
+          return 9;
+        }
+        case 'M': {  // Modified Immediate cases.
+          if (strncmp(format,
+                      "IVMIImmFPSingle",
+                      strlen("IVMIImmFPSingle")) == 0) {
+            AppendToOutput("#0x%" PRIx32 " (%.4f)", instr->ImmNEONabcdefgh(),
+                           instr->ImmNEONFP32());
+            return strlen("IVMIImmFPSingle");
+          } else if (strncmp(format,
+                             "IVMIImmFPDouble",
+                             strlen("IVMIImmFPDouble")) == 0) {
+            AppendToOutput("#0x%" PRIx32 " (%.4f)", instr->ImmNEONabcdefgh(),
+                           instr->ImmNEONFP64());
+            return strlen("IVMIImmFPDouble");
+          } else if (strncmp(format, "IVMIImm8", strlen("IVMIImm8")) == 0) {
+            uint64_t imm8 = instr->ImmNEONabcdefgh();
+            AppendToOutput("#0x%" PRIx64, imm8);
+            return strlen("IVMIImm8");
+          } else if (strncmp(format, "IVMIImm", strlen("IVMIImm")) == 0) {
+            uint64_t imm8 = instr->ImmNEONabcdefgh();
+            uint64_t imm = 0;
+            for (int i = 0; i < 8; ++i) {
+              if (imm8 & (1 << i)) {
+                imm |= (UINT64_C(0xff) << (8 * i));
+              }
+            }
+            AppendToOutput("#0x%" PRIx64, imm);
+            return strlen("IVMIImm");
+          } else if (strncmp(format, "IVMIShiftAmt1",
+                             strlen("IVMIShiftAmt1")) == 0) {
+            int cmode = instr->NEONCmode();
+            int shift_amount = 8 * ((cmode >> 1) & 3);
+            AppendToOutput("#%d", shift_amount);
+            return strlen("IVMIShiftAmt1");
+          } else if (strncmp(format, "IVMIShiftAmt2",
+                             strlen("IVMIShiftAmt2")) == 0) {
+            int cmode = instr->NEONCmode();
+            int shift_amount = 8 << (cmode & 1);
+            AppendToOutput("#%d", shift_amount);
+            return strlen("IVMIShiftAmt2");
+          } else {
+            VIXL_UNIMPLEMENTED();
+            return 0;
+          }
+        }
+        default: {
+          VIXL_UNIMPLEMENTED();
+          return 0;
+        }
+      }
+    }
+    case 'X': {  // IX - CLREX instruction.
+      AppendToOutput("#0x%" PRIx32, instr->CRm());
+      return 2;
+    }
+    default: {
+      VIXL_UNIMPLEMENTED();
+      return 0;
+    }
+  }
+}
+
+
+int Disassembler::SubstituteBitfieldImmediateField(const Instruction* instr,
+                                                   const char* format) {
+  VIXL_ASSERT((format[0] == 'I') && (format[1] == 'B'));
+  unsigned r = instr->ImmR();
+  unsigned s = instr->ImmS();
+
+  switch (format[2]) {
+    case 'r': {  // IBr.
+      AppendToOutput("#%d", r);
+      return 3;
+    }
+    case 's': {  // IBs+1 or IBs-r+1.
+      if (format[3] == '+') {
+        AppendToOutput("#%d", s + 1);
+        return 5;
+      } else {
+        VIXL_ASSERT(format[3] == '-');
+        AppendToOutput("#%d", s - r + 1);
+        return 7;
+      }
+    }
+    case 'Z': {  // IBZ-r.
+      VIXL_ASSERT((format[3] == '-') && (format[4] == 'r'));
+      unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSize : kWRegSize;
+      AppendToOutput("#%d", reg_size - r);
+      return 5;
+    }
+    default: {
+      VIXL_UNREACHABLE();
+      return 0;
+    }
+  }
+}
+
+
+int Disassembler::SubstituteLiteralField(const Instruction* instr,
+                                         const char* format) {
+  VIXL_ASSERT(strncmp(format, "LValue", 6) == 0);
+  USE(format);
+
+  const void * address = instr->LiteralAddress<const void *>();
+  switch (instr->Mask(LoadLiteralMask)) {
+    case LDR_w_lit:
+    case LDR_x_lit:
+    case LDRSW_x_lit:
+    case LDR_s_lit:
+    case LDR_d_lit:
+    case LDR_q_lit:
+      AppendCodeRelativeDataAddressToOutput(instr, address);
+      break;
+    case PRFM_lit: {
+      // Use the prefetch hint to decide how to print the address.
+      switch (instr->PrefetchHint()) {
+        case 0x0:     // PLD: prefetch for load.
+        case 0x2:     // PST: prepare for store.
+          AppendCodeRelativeDataAddressToOutput(instr, address);
+          break;
+        case 0x1:     // PLI: preload instructions.
+          AppendCodeRelativeCodeAddressToOutput(instr, address);
+          break;
+        case 0x3:     // Unallocated hint.
+          AppendCodeRelativeAddressToOutput(instr, address);
+          break;
+      }
+      break;
+    }
+    default:
+      VIXL_UNREACHABLE();
+  }
+
+  return 6;
+}
+
+
+int Disassembler::SubstituteShiftField(const Instruction* instr,
+                                       const char* format) {
+  VIXL_ASSERT(format[0] == 'N');
+  VIXL_ASSERT(instr->ShiftDP() <= 0x3);
+
+  switch (format[1]) {
+    case 'D': {  // HDP.
+      VIXL_ASSERT(instr->ShiftDP() != ROR);
+      VIXL_FALLTHROUGH();
+    }
+    case 'L': {  // HLo.
+      if (instr->ImmDPShift() != 0) {
+        const char* shift_type[] = {"lsl", "lsr", "asr", "ror"};
+        AppendToOutput(", %s #%" PRId32, shift_type[instr->ShiftDP()],
+                       instr->ImmDPShift());
+      }
+      return 3;
+    }
+    default:
+      VIXL_UNIMPLEMENTED();
+      return 0;
+  }
+}
+
+
+int Disassembler::SubstituteConditionField(const Instruction* instr,
+                                           const char* format) {
+  VIXL_ASSERT(format[0] == 'C');
+  const char* condition_code[] = { "eq", "ne", "hs", "lo",
+                                   "mi", "pl", "vs", "vc",
+                                   "hi", "ls", "ge", "lt",
+                                   "gt", "le", "al", "nv" };
+  int cond;
+  switch (format[1]) {
+    case 'B': cond = instr->ConditionBranch(); break;
+    case 'I': {
+      cond = InvertCondition(static_cast<Condition>(instr->Condition()));
+      break;
+    }
+    default: cond = instr->Condition();
+  }
+  AppendToOutput("%s", condition_code[cond]);
+  return 4;
+}
+
+
+int Disassembler::SubstitutePCRelAddressField(const Instruction* instr,
+                                              const char* format) {
+  VIXL_ASSERT((strcmp(format, "AddrPCRelByte") == 0) ||   // Used by `adr`.
+              (strcmp(format, "AddrPCRelPage") == 0));    // Used by `adrp`.
+
+  int64_t offset = instr->ImmPCRel();
+
+  // Compute the target address based on the effective address (after applying
+  // code_address_offset). This is required for correct behaviour of adrp.
+  const Instruction* base = instr + code_address_offset();
+  if (format[9] == 'P') {
+    offset *= kPageSize;
+    base = AlignDown(base, kPageSize);
+  }
+  // Strip code_address_offset before printing, so we can use the
+  // semantically-correct AppendCodeRelativeAddressToOutput.
+  const void* target =
+      reinterpret_cast<const void*>(base + offset - code_address_offset());
+
+  AppendPCRelativeOffsetToOutput(instr, offset);
+  AppendToOutput(" ");
+  AppendCodeRelativeAddressToOutput(instr, target);
+  return 13;
+}
+
+
+int Disassembler::SubstituteBranchTargetField(const Instruction* instr,
+                                              const char* format) {
+  VIXL_ASSERT(strncmp(format, "TImm", 4) == 0);
+
+  int64_t offset = 0;
+  switch (format[5]) {
+    // BImmUncn - unconditional branch immediate.
+    case 'n': offset = instr->ImmUncondBranch(); break;
+    // BImmCond - conditional branch immediate.
+    case 'o': offset = instr->ImmCondBranch(); break;
+    // BImmCmpa - compare and branch immediate.
+    case 'm': offset = instr->ImmCmpBranch(); break;
+    // BImmTest - test and branch immediate.
+    case 'e': offset = instr->ImmTestBranch(); break;
+    default: VIXL_UNIMPLEMENTED();
+  }
+  offset <<= kInstructionSizeLog2;
+  const void* target_address = reinterpret_cast<const void*>(instr + offset);
+  VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
+
+  AppendPCRelativeOffsetToOutput(instr, offset);
+  AppendToOutput(" ");
+  AppendCodeRelativeCodeAddressToOutput(instr, target_address);
+
+  return 8;
+}
+
+
+int Disassembler::SubstituteExtendField(const Instruction* instr,
+                                        const char* format) {
+  VIXL_ASSERT(strncmp(format, "Ext", 3) == 0);
+  VIXL_ASSERT(instr->ExtendMode() <= 7);
+  USE(format);
+
+  const char* extend_mode[] = { "uxtb", "uxth", "uxtw", "uxtx",
+                                "sxtb", "sxth", "sxtw", "sxtx" };
+
+  // If rd or rn is SP, uxtw on 32-bit registers and uxtx on 64-bit
+  // registers becomes lsl.
+  if (((instr->Rd() == kZeroRegCode) || (instr->Rn() == kZeroRegCode)) &&
+      (((instr->ExtendMode() == UXTW) && (instr->SixtyFourBits() == 0)) ||
+       (instr->ExtendMode() == UXTX))) {
+    if (instr->ImmExtendShift() > 0) {
+      AppendToOutput(", lsl #%" PRId32, instr->ImmExtendShift());
+    }
+  } else {
+    AppendToOutput(", %s", extend_mode[instr->ExtendMode()]);
+    if (instr->ImmExtendShift() > 0) {
+      AppendToOutput(" #%" PRId32, instr->ImmExtendShift());
+    }
+  }
+  return 3;
+}
+
+
+int Disassembler::SubstituteLSRegOffsetField(const Instruction* instr,
+                                             const char* format) {
+  VIXL_ASSERT(strncmp(format, "Offsetreg", 9) == 0);
+  const char* extend_mode[] = { "undefined", "undefined", "uxtw", "lsl",
+                                "undefined", "undefined", "sxtw", "sxtx" };
+  USE(format);
+
+  unsigned shift = instr->ImmShiftLS();
+  Extend ext = static_cast<Extend>(instr->ExtendMode());
+  char reg_type = ((ext == UXTW) || (ext == SXTW)) ? 'w' : 'x';
+
+  unsigned rm = instr->Rm();
+  if (rm == kZeroRegCode) {
+    AppendToOutput("%czr", reg_type);
+  } else {
+    AppendToOutput("%c%d", reg_type, rm);
+  }
+
+  // Extend mode UXTX is an alias for shift mode LSL here.
+  if (!((ext == UXTX) && (shift == 0))) {
+    AppendToOutput(", %s", extend_mode[ext]);
+    if (shift != 0) {
+      AppendToOutput(" #%d", instr->SizeLS());
+    }
+  }
+  return 9;
+}
+
+
+int Disassembler::SubstitutePrefetchField(const Instruction* instr,
+                                          const char* format) {
+  VIXL_ASSERT(format[0] == 'P');
+  USE(format);
+
+  static const char* hints[] = {"ld", "li", "st"};
+  static const char* stream_options[] = {"keep", "strm"};
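+  // These compose into operation names such as "pldl1keep" or "pstl3strm".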
+
+  unsigned hint = instr->PrefetchHint();
+  unsigned target = instr->PrefetchTarget() + 1;
+  unsigned stream = instr->PrefetchStream();
+
+  if ((hint >= (sizeof(hints) / sizeof(hints[0]))) || (target > 3)) {
+    // Unallocated prefetch operations.
+    int prefetch_mode = instr->ImmPrefetchOperation();
+    AppendToOutput("#0b%c%c%c%c%c",
+                   (prefetch_mode & (1 << 4)) ? '1' : '0',
+                   (prefetch_mode & (1 << 3)) ? '1' : '0',
+                   (prefetch_mode & (1 << 2)) ? '1' : '0',
+                   (prefetch_mode & (1 << 1)) ? '1' : '0',
+                   (prefetch_mode & (1 << 0)) ? '1' : '0');
+  } else {
+    VIXL_ASSERT(stream < (sizeof(stream_options) / sizeof(stream_options[0])));
+    AppendToOutput("p%sl%d%s", hints[hint], target, stream_options[stream]);
+  }
+  return 6;
+}
+
+
+int Disassembler::SubstituteBarrierField(const Instruction* instr,
+                                         const char* format) {
+  VIXL_ASSERT(format[0] == 'M');
+  USE(format);
+
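+  // Option encodings with type bits 00 have no named form, so they are
+  // printed as "sy" with the raw encoding shown for reference.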
+  static const char* options[4][4] = {
+    { "sy (0b0000)", "oshld", "oshst", "osh" },
+    { "sy (0b0100)", "nshld", "nshst", "nsh" },
+    { "sy (0b1000)", "ishld", "ishst", "ish" },
+    { "sy (0b1100)", "ld", "st", "sy" }
+  };
+  int domain = instr->ImmBarrierDomain();
+  int type = instr->ImmBarrierType();
+
+  AppendToOutput("%s", options[domain][type]);
+  return 1;
+}
+
+
+int Disassembler::SubstituteSysOpField(const Instruction* instr,
+                                       const char* format) {
+  VIXL_ASSERT(format[0] == 'G');
+  int op = -1;
+  switch (format[1]) {
+    case '1': op = instr->SysOp1(); break;
+    case '2': op = instr->SysOp2(); break;
+    default:
+      VIXL_UNREACHABLE();
+  }
+  AppendToOutput("#%d", op);
+  return 2;
+}
+
+
+int Disassembler::SubstituteCrField(const Instruction* instr,
+                                    const char* format) {
+  VIXL_ASSERT(format[0] == 'K');
+  int cr = -1;
+  switch (format[1]) {
+    case 'n': cr = instr->CRn(); break;
+    case 'm': cr = instr->CRm(); break;
+    default:
+      VIXL_UNREACHABLE();
+  }
+  AppendToOutput("C%d", cr);
+  return 2;
+}
+
+
+void Disassembler::ResetOutput() {
+  buffer_pos_ = 0;
+  buffer_[buffer_pos_] = 0;
+}
+
+
+void Disassembler::AppendToOutput(const char* format, ...) {
+  va_list args;
+  va_start(args, format);
+  buffer_pos_ += vsnprintf(&buffer_[buffer_pos_], buffer_size_ - buffer_pos_,
+          format, args);
+  va_end(args);
+}
+
+
+void PrintDisassembler::ProcessOutput(const Instruction* instr) {
+  fprintf(stream_, "0x%016" PRIx64 "  %08" PRIx32 "\t\t%s\n",
+          reinterpret_cast<uint64_t>(instr),
+          instr->InstructionBits(),
+          GetOutput());
+}
+
+}  // namespace vixl
diff --git a/disas/libvixl/a64/disasm-a64.h b/disas/libvixl/vixl/a64/disasm-a64.h
index ddfe98be19..930df6ea6a 100644
--- a/disas/libvixl/a64/disasm-a64.h
+++ b/disas/libvixl/vixl/a64/disasm-a64.h
@@ -1,4 +1,4 @@
-// Copyright 2013, ARM Limited
+// Copyright 2015, ARM Limited
 // All rights reserved.
 //
 // Redistribution and use in source and binary forms, with or without
@@ -27,11 +27,11 @@
 #ifndef VIXL_A64_DISASM_A64_H
 #define VIXL_A64_DISASM_A64_H
 
-#include "globals.h"
-#include "utils.h"
-#include "instructions-a64.h"
-#include "decoder-a64.h"
-#include "assembler-a64.h"
+#include "vixl/globals.h"
+#include "vixl/utils.h"
+#include "vixl/a64/instructions-a64.h"
+#include "vixl/a64/decoder-a64.h"
+#include "vixl/a64/assembler-a64.h"
 
 namespace vixl {
 
@@ -55,6 +55,7 @@ class Disassembler: public DecoderVisitor {
   // customize the disassembly output.
 
   // Prints the name of a register.
+  // TODO: This currently doesn't allow renaming of V registers.
   virtual void AppendRegisterNameToOutput(const Instruction* instr,
                                           const CPURegister& reg);
 
@@ -122,7 +123,8 @@ class Disassembler: public DecoderVisitor {
   int SubstituteLSRegOffsetField(const Instruction* instr, const char* format);
   int SubstitutePrefetchField(const Instruction* instr, const char* format);
   int SubstituteBarrierField(const Instruction* instr, const char* format);
-
+  int SubstituteSysOpField(const Instruction* instr, const char* format);
+  int SubstituteCrField(const Instruction* instr, const char* format);
   bool RdIsZROrSP(const Instruction* instr) const {
     return (instr->Rd() == kZeroRegCode);
   }
@@ -163,7 +165,6 @@ class Disassembler: public DecoderVisitor {
 class PrintDisassembler: public Disassembler {
  public:
   explicit PrintDisassembler(FILE* stream) : stream_(stream) { }
-  virtual ~PrintDisassembler() { }
 
  protected:
   virtual void ProcessOutput(const Instruction* instr);
diff --git a/disas/libvixl/vixl/a64/instructions-a64.cc b/disas/libvixl/vixl/a64/instructions-a64.cc
new file mode 100644
index 0000000000..33992f88a4
--- /dev/null
+++ b/disas/libvixl/vixl/a64/instructions-a64.cc
@@ -0,0 +1,622 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+//   * Redistributions of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//   * Redistributions in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other materials provided with the distribution.
+//   * Neither the name of ARM Limited nor the names of its contributors may be
+//     used to endorse or promote products derived from this software without
+//     specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "vixl/a64/instructions-a64.h"
+#include "vixl/a64/assembler-a64.h"
+
+namespace vixl {
+
+
+// Floating-point infinity values.
+const float16 kFP16PositiveInfinity = 0x7c00;
+const float16 kFP16NegativeInfinity = 0xfc00;
+const float kFP32PositiveInfinity = rawbits_to_float(0x7f800000);
+const float kFP32NegativeInfinity = rawbits_to_float(0xff800000);
+const double kFP64PositiveInfinity =
+    rawbits_to_double(UINT64_C(0x7ff0000000000000));
+const double kFP64NegativeInfinity =
+    rawbits_to_double(UINT64_C(0xfff0000000000000));
+
+
+// The default NaN values (for FPCR.DN=1).
+const double kFP64DefaultNaN = rawbits_to_double(UINT64_C(0x7ff8000000000000));
+const float kFP32DefaultNaN = rawbits_to_float(0x7fc00000);
+const float16 kFP16DefaultNaN = 0x7e00;
+
+
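+// Rotate the least-significant 'width' bits of 'value' right by 'rotate'
+// bits; for example, RotateRight(0b0011, 1, 4) == 0b1001.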
+static uint64_t RotateRight(uint64_t value,
+                            unsigned int rotate,
+                            unsigned int width) {
+  VIXL_ASSERT(width <= 64);
+  rotate &= 63;
+  return ((value & ((UINT64_C(1) << rotate) - 1)) <<
+          (width - rotate)) | (value >> rotate);
+}
+
+
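+// Repeat the least-significant 'width' bits of 'value' across a register of
+// 'reg_size' bits; for example, RepeatBitsAcrossReg(32, 0x0f, 8) gives
+// 0x0f0f0f0f.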
+static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
+                                    uint64_t value,
+                                    unsigned width) {
+  VIXL_ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
+              (width == 32));
+  VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
+  uint64_t result = value & ((UINT64_C(1) << width) - 1);
+  for (unsigned i = width; i < reg_size; i *= 2) {
+    result |= (result << i);
+  }
+  return result;
+}
+
+
+bool Instruction::IsLoad() const {
+  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
+    return false;
+  }
+
+  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
+    return Mask(LoadStorePairLBit) != 0;
+  } else {
+    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
+    switch (op) {
+      case LDRB_w:
+      case LDRH_w:
+      case LDR_w:
+      case LDR_x:
+      case LDRSB_w:
+      case LDRSB_x:
+      case LDRSH_w:
+      case LDRSH_x:
+      case LDRSW_x:
+      case LDR_b:
+      case LDR_h:
+      case LDR_s:
+      case LDR_d:
+      case LDR_q: return true;
+      default: return false;
+    }
+  }
+}
+
+
+bool Instruction::IsStore() const {
+  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
+    return false;
+  }
+
+  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
+    return Mask(LoadStorePairLBit) == 0;
+  } else {
+    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
+    switch (op) {
+      case STRB_w:
+      case STRH_w:
+      case STR_w:
+      case STR_x:
+      case STR_b:
+      case STR_h:
+      case STR_s:
+      case STR_d:
+      case STR_q: return true;
+      default: return false;
+    }
+  }
+}
+
+
+// Logical immediates can't encode zero, so a return value of zero is used to
+// indicate a failure case, specifically where the constraints on imm_s are
+// not met.
+uint64_t Instruction::ImmLogical() const {
+  unsigned reg_size = SixtyFourBits() ? kXRegSize : kWRegSize;
+  int32_t n = BitN();
+  int32_t imm_s = ImmSetBits();
+  int32_t imm_r = ImmRotate();
+
+  // An integer is constructed from the n, imm_s and imm_r bits according to
+  // the following table:
+  //
+  //  N   imms    immr    size        S             R
+  //  1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
+  //  0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
+  //  0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
+  //  0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
+  //  0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
+  //  0  11110s  xxxxxr     2    UInt(s)       UInt(r)
+  // (s bits must not be all set)
+  //
+  // A pattern is constructed of size bits, where the least significant S+1
+  // bits are set. The pattern is rotated right by R, and repeated across a
+  // 32 or 64-bit value, depending on destination register width.
+  //
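+  // For example, n=0, imms=0b110101 and immr=0b000000 select an 8-bit
+  // pattern with its low six bits set (0x3f), which is repeated across a
+  // W register as 0x3f3f3f3f.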
+
+  if (n == 1) {
+    if (imm_s == 0x3f) {
+      return 0;
+    }
+    uint64_t bits = (UINT64_C(1) << (imm_s + 1)) - 1;
+    return RotateRight(bits, imm_r, 64);
+  } else {
+    if ((imm_s >> 1) == 0x1f) {
+      return 0;
+    }
+    for (int width = 0x20; width >= 0x2; width >>= 1) {
+      if ((imm_s & width) == 0) {
+        int mask = width - 1;
+        if ((imm_s & mask) == mask) {
+          return 0;
+        }
+        uint64_t bits = (UINT64_C(1) << ((imm_s & mask) + 1)) - 1;
+        return RepeatBitsAcrossReg(reg_size,
+                                   RotateRight(bits, imm_r & mask, width),
+                                   width);
+      }
+    }
+  }
+  VIXL_UNREACHABLE();
+  return 0;
+}
+
+
+uint32_t Instruction::ImmNEONabcdefgh() const {
+  return ImmNEONabc() << 5 | ImmNEONdefgh();
+}
+
+
+float Instruction::Imm8ToFP32(uint32_t imm8) {
+  //   Imm8: abcdefgh (8 bits)
+  // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
+  // where B is b ^ 1
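+  // For example, the encoding 0x70 (a=0, b=1, cdefgh=110000) expands to
+  // 0x3f800000, which is 1.0f.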
+  uint32_t bits = imm8;
+  uint32_t bit7 = (bits >> 7) & 0x1;
+  uint32_t bit6 = (bits >> 6) & 0x1;
+  uint32_t bit5_to_0 = bits & 0x3f;
+  uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
+
+  return rawbits_to_float(result);
+}
+
+
+float Instruction::ImmFP32() const {
+  return Imm8ToFP32(ImmFP());
+}
+
+
+double Instruction::Imm8ToFP64(uint32_t imm8) {
+  //   Imm8: abcdefgh (8 bits)
+  // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+  //         0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
+  // where B is b ^ 1
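+  // For example, the encoding 0x70 expands to 0x3ff0000000000000, which
+  // is 1.0.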
+  uint32_t bits = imm8;
+  uint64_t bit7 = (bits >> 7) & 0x1;
+  uint64_t bit6 = (bits >> 6) & 0x1;
+  uint64_t bit5_to_0 = bits & 0x3f;
+  uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);
+
+  return rawbits_to_double(result);
+}
+
+
+double Instruction::ImmFP64() const {
+  return Imm8ToFP64(ImmFP());
+}
+
+
+float Instruction::ImmNEONFP32() const {
+  return Imm8ToFP32(ImmNEONabcdefgh());
+}
+
+
+double Instruction::ImmNEONFP64() const {
+  return Imm8ToFP64(ImmNEONabcdefgh());
+}
+
+
+unsigned CalcLSDataSize(LoadStoreOp op) {
+  VIXL_ASSERT((LSSize_offset + LSSize_width) == (kInstructionSize * 8));
+  unsigned size = static_cast<Instr>(op) >> LSSize_offset;
+  if ((op & LSVector_mask) != 0) {
+    // Vector register memory operations encode the access size in the "size"
+    // and "opc" fields.
+    if ((size == 0) && ((op & LSOpc_mask) >> LSOpc_offset) >= 2) {
+      size = kQRegSizeInBytesLog2;
+    }
+  }
+  return size;
+}
+
+
+unsigned CalcLSPairDataSize(LoadStorePairOp op) {
+  VIXL_STATIC_ASSERT(kXRegSizeInBytes == kDRegSizeInBytes);
+  VIXL_STATIC_ASSERT(kWRegSizeInBytes == kSRegSizeInBytes);
+  switch (op) {
+    case STP_q:
+    case LDP_q: return kQRegSizeInBytesLog2;
+    case STP_x:
+    case LDP_x:
+    case STP_d:
+    case LDP_d: return kXRegSizeInBytesLog2;
+    default: return kWRegSizeInBytesLog2;
+  }
+}
+
+
+int Instruction::ImmBranchRangeBitwidth(ImmBranchType branch_type) {
+  switch (branch_type) {
+    case UncondBranchType:
+      return ImmUncondBranch_width;
+    case CondBranchType:
+      return ImmCondBranch_width;
+    case CompareBranchType:
+      return ImmCmpBranch_width;
+    case TestBranchType:
+      return ImmTestBranch_width;
+    default:
+      VIXL_UNREACHABLE();
+      return 0;
+  }
+}
+
+
+int32_t Instruction::ImmBranchForwardRange(ImmBranchType branch_type) {
+  int32_t encoded_max = 1 << (ImmBranchRangeBitwidth(branch_type) - 1);
+  return encoded_max * kInstructionSize;
+}
+
+
+bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
+                                     int64_t offset) {
+  return is_intn(ImmBranchRangeBitwidth(branch_type), offset);
+}
+
+
+const Instruction* Instruction::ImmPCOffsetTarget() const {
+  const Instruction * base = this;
+  ptrdiff_t offset;
+  if (IsPCRelAddressing()) {
+    // ADR and ADRP.
+    offset = ImmPCRel();
+    if (Mask(PCRelAddressingMask) == ADRP) {
+      base = AlignDown(base, kPageSize);
+      offset *= kPageSize;
+    } else {
+      VIXL_ASSERT(Mask(PCRelAddressingMask) == ADR);
+    }
+  } else {
+    // All PC-relative branches.
+    VIXL_ASSERT(BranchType() != UnknownBranchType);
+    // Relative branch offsets are instruction-size-aligned.
+    offset = ImmBranch() << kInstructionSizeLog2;
+  }
+  return base + offset;
+}
+
+
+int Instruction::ImmBranch() const {
+  switch (BranchType()) {
+    case CondBranchType: return ImmCondBranch();
+    case UncondBranchType: return ImmUncondBranch();
+    case CompareBranchType: return ImmCmpBranch();
+    case TestBranchType: return ImmTestBranch();
+    default: VIXL_UNREACHABLE();
+  }
+  return 0;
+}
+
+
+void Instruction::SetImmPCOffsetTarget(const Instruction* target) {
+  if (IsPCRelAddressing()) {
+    SetPCRelImmTarget(target);
+  } else {
+    SetBranchImmTarget(target);
+  }
+}
+
+
+void Instruction::SetPCRelImmTarget(const Instruction* target) {
+  ptrdiff_t imm21;
+  if (Mask(PCRelAddressingMask) == ADR) {
+    imm21 = target - this;
+  } else {
+    VIXL_ASSERT(Mask(PCRelAddressingMask) == ADRP);
+    uintptr_t this_page = reinterpret_cast<uintptr_t>(this) / kPageSize;
+    uintptr_t target_page = reinterpret_cast<uintptr_t>(target) / kPageSize;
+    imm21 = target_page - this_page;
+  }
+  Instr imm = Assembler::ImmPCRelAddress(static_cast<int32_t>(imm21));
+
+  SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
+}
+
+
+void Instruction::SetBranchImmTarget(const Instruction* target) {
+  VIXL_ASSERT(((target - this) & 3) == 0);
+  Instr branch_imm = 0;
+  uint32_t imm_mask = 0;
+  int offset = static_cast<int>((target - this) >> kInstructionSizeLog2);
+  switch (BranchType()) {
+    case CondBranchType: {
+      branch_imm = Assembler::ImmCondBranch(offset);
+      imm_mask = ImmCondBranch_mask;
+      break;
+    }
+    case UncondBranchType: {
+      branch_imm = Assembler::ImmUncondBranch(offset);
+      imm_mask = ImmUncondBranch_mask;
+      break;
+    }
+    case CompareBranchType: {
+      branch_imm = Assembler::ImmCmpBranch(offset);
+      imm_mask = ImmCmpBranch_mask;
+      break;
+    }
+    case TestBranchType: {
+      branch_imm = Assembler::ImmTestBranch(offset);
+      imm_mask = ImmTestBranch_mask;
+      break;
+    }
+    default: VIXL_UNREACHABLE();
+  }
+  SetInstructionBits(Mask(~imm_mask) | branch_imm);
+}
+
+
+void Instruction::SetImmLLiteral(const Instruction* source) {
+  VIXL_ASSERT(IsWordAligned(source));
+  ptrdiff_t offset = (source - this) >> kLiteralEntrySizeLog2;
+  Instr imm = Assembler::ImmLLiteral(static_cast<int>(offset));
+  Instr mask = ImmLLiteral_mask;
+
+  SetInstructionBits(Mask(~mask) | imm);
+}
+
+
+VectorFormat VectorFormatHalfWidth(const VectorFormat vform) {
+  VIXL_ASSERT(vform == kFormat8H || vform == kFormat4S || vform == kFormat2D ||
+              vform == kFormatH || vform == kFormatS || vform == kFormatD);
+  switch (vform) {
+    case kFormat8H: return kFormat8B;
+    case kFormat4S: return kFormat4H;
+    case kFormat2D: return kFormat2S;
+    case kFormatH:  return kFormatB;
+    case kFormatS:  return kFormatH;
+    case kFormatD:  return kFormatS;
+    default: VIXL_UNREACHABLE(); return kFormatUndefined;
+  }
+}
+
+
+VectorFormat VectorFormatDoubleWidth(const VectorFormat vform) {
+  VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S ||
+              vform == kFormatB || vform == kFormatH || vform == kFormatS);
+  switch (vform) {
+    case kFormat8B: return kFormat8H;
+    case kFormat4H: return kFormat4S;
+    case kFormat2S: return kFormat2D;
+    case kFormatB:  return kFormatH;
+    case kFormatH:  return kFormatS;
+    case kFormatS:  return kFormatD;
+    default: VIXL_UNREACHABLE(); return kFormatUndefined;
+  }
+}
+
+
+VectorFormat VectorFormatFillQ(const VectorFormat vform) {
+  switch (vform) {
+    case kFormatB:
+    case kFormat8B:
+    case kFormat16B: return kFormat16B;
+    case kFormatH:
+    case kFormat4H:
+    case kFormat8H:  return kFormat8H;
+    case kFormatS:
+    case kFormat2S:
+    case kFormat4S:  return kFormat4S;
+    case kFormatD:
+    case kFormat1D:
+    case kFormat2D:  return kFormat2D;
+    default: VIXL_UNREACHABLE(); return kFormatUndefined;
+  }
+}
+
+VectorFormat VectorFormatHalfWidthDoubleLanes(const VectorFormat vform) {
+  switch (vform) {
+    case kFormat4H: return kFormat8B;
+    case kFormat8H: return kFormat16B;
+    case kFormat2S: return kFormat4H;
+    case kFormat4S: return kFormat8H;
+    case kFormat1D: return kFormat2S;
+    case kFormat2D: return kFormat4S;
+    default: VIXL_UNREACHABLE(); return kFormatUndefined;
+  }
+}
+
+VectorFormat VectorFormatDoubleLanes(const VectorFormat vform) {
+  VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S);
+  switch (vform) {
+    case kFormat8B: return kFormat16B;
+    case kFormat4H: return kFormat8H;
+    case kFormat2S: return kFormat4S;
+    default: VIXL_UNREACHABLE(); return kFormatUndefined;
+  }
+}
+
+
+VectorFormat VectorFormatHalfLanes(const VectorFormat vform) {
+  VIXL_ASSERT(vform == kFormat16B || vform == kFormat8H || vform == kFormat4S);
+  switch (vform) {
+    case kFormat16B: return kFormat8B;
+    case kFormat8H: return kFormat4H;
+    case kFormat4S: return kFormat2S;
+    default: VIXL_UNREACHABLE(); return kFormatUndefined;
+  }
+}
+
+
+VectorFormat ScalarFormatFromLaneSize(int laneSize) {
+  switch (laneSize) {
+    case 8:  return kFormatB;
+    case 16: return kFormatH;
+    case 32: return kFormatS;
+    case 64: return kFormatD;
+    default: VIXL_UNREACHABLE(); return kFormatUndefined;
+  }
+}
+
+
+unsigned RegisterSizeInBitsFromFormat(VectorFormat vform) {
+  VIXL_ASSERT(vform != kFormatUndefined);
+  switch (vform) {
+    case kFormatB: return kBRegSize;
+    case kFormatH: return kHRegSize;
+    case kFormatS: return kSRegSize;
+    case kFormatD: return kDRegSize;
+    case kFormat8B:
+    case kFormat4H:
+    case kFormat2S:
+    case kFormat1D: return kDRegSize;
+    default: return kQRegSize;
+  }
+}
+
+
+unsigned RegisterSizeInBytesFromFormat(VectorFormat vform) {
+  return RegisterSizeInBitsFromFormat(vform) / 8;
+}
+
+
+unsigned LaneSizeInBitsFromFormat(VectorFormat vform) {
+  VIXL_ASSERT(vform != kFormatUndefined);
+  switch (vform) {
+    case kFormatB:
+    case kFormat8B:
+    case kFormat16B: return 8;
+    case kFormatH:
+    case kFormat4H:
+    case kFormat8H: return 16;
+    case kFormatS:
+    case kFormat2S:
+    case kFormat4S: return 32;
+    case kFormatD:
+    case kFormat1D:
+    case kFormat2D: return 64;
+    default: VIXL_UNREACHABLE(); return 0;
+  }
+}
+
+
+int LaneSizeInBytesFromFormat(VectorFormat vform) {
+  return LaneSizeInBitsFromFormat(vform) / 8;
+}
+
+
+int LaneSizeInBytesLog2FromFormat(VectorFormat vform) {
+  VIXL_ASSERT(vform != kFormatUndefined);
+  switch (vform) {
+    case kFormatB:
+    case kFormat8B:
+    case kFormat16B: return 0;
+    case kFormatH:
+    case kFormat4H:
+    case kFormat8H: return 1;
+    case kFormatS:
+    case kFormat2S:
+    case kFormat4S: return 2;
+    case kFormatD:
+    case kFormat1D:
+    case kFormat2D: return 3;
+    default: VIXL_UNREACHABLE(); return 0;
+  }
+}
+
+
+int LaneCountFromFormat(VectorFormat vform) {
+  VIXL_ASSERT(vform != kFormatUndefined);
+  switch (vform) {
+    case kFormat16B: return 16;
+    case kFormat8B:
+    case kFormat8H: return 8;
+    case kFormat4H:
+    case kFormat4S: return 4;
+    case kFormat2S:
+    case kFormat2D: return 2;
+    case kFormat1D:
+    case kFormatB:
+    case kFormatH:
+    case kFormatS:
+    case kFormatD: return 1;
+    default: VIXL_UNREACHABLE(); return 0;
+  }
+}
+
+
+int MaxLaneCountFromFormat(VectorFormat vform) {
+  VIXL_ASSERT(vform != kFormatUndefined);
+  switch (vform) {
+    case kFormatB:
+    case kFormat8B:
+    case kFormat16B: return 16;
+    case kFormatH:
+    case kFormat4H:
+    case kFormat8H: return 8;
+    case kFormatS:
+    case kFormat2S:
+    case kFormat4S: return 4;
+    case kFormatD:
+    case kFormat1D:
+    case kFormat2D: return 2;
+    default: VIXL_UNREACHABLE(); return 0;
+  }
+}
+
+
+// Does 'vform' indicate a vector format or a scalar format?
+bool IsVectorFormat(VectorFormat vform) {
+  VIXL_ASSERT(vform != kFormatUndefined);
+  switch (vform) {
+    case kFormatB:
+    case kFormatH:
+    case kFormatS:
+    case kFormatD: return false;
+    default: return true;
+  }
+}
+
+
+int64_t MaxIntFromFormat(VectorFormat vform) {
+  return INT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
+}
+
+
+int64_t MinIntFromFormat(VectorFormat vform) {
+  return INT64_MIN >> (64 - LaneSizeInBitsFromFormat(vform));
+}
+
+
+uint64_t MaxUintFromFormat(VectorFormat vform) {
+  return UINT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
+}
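+
+// For example, for kFormat8H the lanes are 16 bits wide, so
+// MaxIntFromFormat() returns 32767, MinIntFromFormat() returns -32768 and
+// MaxUintFromFormat() returns 65535.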
+}  // namespace vixl
+
diff --git a/disas/libvixl/vixl/a64/instructions-a64.h b/disas/libvixl/vixl/a64/instructions-a64.h
new file mode 100644
index 0000000000..7e0dbae36a
--- /dev/null
+++ b/disas/libvixl/vixl/a64/instructions-a64.h
@@ -0,0 +1,757 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+//   * Redistributions of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//   * Redistributions in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other materials provided with the distribution.
+//   * Neither the name of ARM Limited nor the names of its contributors may be
+//     used to endorse or promote products derived from this software without
+//     specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_A64_INSTRUCTIONS_A64_H_
+#define VIXL_A64_INSTRUCTIONS_A64_H_
+
+#include "vixl/globals.h"
+#include "vixl/utils.h"
+#include "vixl/a64/constants-a64.h"
+
+namespace vixl {
+// ISA constants. --------------------------------------------------------------
+
+typedef uint32_t Instr;
+const unsigned kInstructionSize = 4;
+const unsigned kInstructionSizeLog2 = 2;
+const unsigned kLiteralEntrySize = 4;
+const unsigned kLiteralEntrySizeLog2 = 2;
+const unsigned kMaxLoadLiteralRange = 1 * MBytes;
+
+// This is the nominal page size (as used by the adrp instruction); the actual
+// size of the memory pages allocated by the kernel is likely to differ.
+const unsigned kPageSize = 4 * KBytes;
+const unsigned kPageSizeLog2 = 12;
+
+const unsigned kBRegSize = 8;
+const unsigned kBRegSizeLog2 = 3;
+const unsigned kBRegSizeInBytes = kBRegSize / 8;
+const unsigned kBRegSizeInBytesLog2 = kBRegSizeLog2 - 3;
+const unsigned kHRegSize = 16;
+const unsigned kHRegSizeLog2 = 4;
+const unsigned kHRegSizeInBytes = kHRegSize / 8;
+const unsigned kHRegSizeInBytesLog2 = kHRegSizeLog2 - 3;
+const unsigned kWRegSize = 32;
+const unsigned kWRegSizeLog2 = 5;
+const unsigned kWRegSizeInBytes = kWRegSize / 8;
+const unsigned kWRegSizeInBytesLog2 = kWRegSizeLog2 - 3;
+const unsigned kXRegSize = 64;
+const unsigned kXRegSizeLog2 = 6;
+const unsigned kXRegSizeInBytes = kXRegSize / 8;
+const unsigned kXRegSizeInBytesLog2 = kXRegSizeLog2 - 3;
+const unsigned kSRegSize = 32;
+const unsigned kSRegSizeLog2 = 5;
+const unsigned kSRegSizeInBytes = kSRegSize / 8;
+const unsigned kSRegSizeInBytesLog2 = kSRegSizeLog2 - 3;
+const unsigned kDRegSize = 64;
+const unsigned kDRegSizeLog2 = 6;
+const unsigned kDRegSizeInBytes = kDRegSize / 8;
+const unsigned kDRegSizeInBytesLog2 = kDRegSizeLog2 - 3;
+const unsigned kQRegSize = 128;
+const unsigned kQRegSizeLog2 = 7;
+const unsigned kQRegSizeInBytes = kQRegSize / 8;
+const unsigned kQRegSizeInBytesLog2 = kQRegSizeLog2 - 3;
+const uint64_t kWRegMask = UINT64_C(0xffffffff);
+const uint64_t kXRegMask = UINT64_C(0xffffffffffffffff);
+const uint64_t kSRegMask = UINT64_C(0xffffffff);
+const uint64_t kDRegMask = UINT64_C(0xffffffffffffffff);
+const uint64_t kSSignMask = UINT64_C(0x80000000);
+const uint64_t kDSignMask = UINT64_C(0x8000000000000000);
+const uint64_t kWSignMask = UINT64_C(0x80000000);
+const uint64_t kXSignMask = UINT64_C(0x8000000000000000);
+const uint64_t kByteMask = UINT64_C(0xff);
+const uint64_t kHalfWordMask = UINT64_C(0xffff);
+const uint64_t kWordMask = UINT64_C(0xffffffff);
+const uint64_t kXMaxUInt = UINT64_C(0xffffffffffffffff);
+const uint64_t kWMaxUInt = UINT64_C(0xffffffff);
+const int64_t kXMaxInt = INT64_C(0x7fffffffffffffff);
+const int64_t kXMinInt = INT64_C(0x8000000000000000);
+const int32_t kWMaxInt = INT32_C(0x7fffffff);
+const int32_t kWMinInt = INT32_C(0x80000000);
+const unsigned kLinkRegCode = 30;
+const unsigned kZeroRegCode = 31;
+const unsigned kSPRegInternalCode = 63;
+const unsigned kRegCodeMask = 0x1f;
+
+const unsigned kAddressTagOffset = 56;
+const unsigned kAddressTagWidth = 8;
+const uint64_t kAddressTagMask =
+    ((UINT64_C(1) << kAddressTagWidth) - 1) << kAddressTagOffset;
+VIXL_STATIC_ASSERT(kAddressTagMask == UINT64_C(0xff00000000000000));
+
+// AArch64 floating-point specifics. These match IEEE-754.
+const unsigned kDoubleMantissaBits = 52;
+const unsigned kDoubleExponentBits = 11;
+const unsigned kFloatMantissaBits = 23;
+const unsigned kFloatExponentBits = 8;
+const unsigned kFloat16MantissaBits = 10;
+const unsigned kFloat16ExponentBits = 5;
+
+// Floating-point infinity values.
+extern const float16 kFP16PositiveInfinity;
+extern const float16 kFP16NegativeInfinity;
+extern const float kFP32PositiveInfinity;
+extern const float kFP32NegativeInfinity;
+extern const double kFP64PositiveInfinity;
+extern const double kFP64NegativeInfinity;
+
+// The default NaN values (for FPCR.DN=1).
+extern const float16 kFP16DefaultNaN;
+extern const float kFP32DefaultNaN;
+extern const double kFP64DefaultNaN;
+
+unsigned CalcLSDataSize(LoadStoreOp op);
+unsigned CalcLSPairDataSize(LoadStorePairOp op);
+
+enum ImmBranchType {
+  UnknownBranchType = 0,
+  CondBranchType    = 1,
+  UncondBranchType  = 2,
+  CompareBranchType = 3,
+  TestBranchType    = 4
+};
+
+enum AddrMode {
+  Offset,
+  PreIndex,
+  PostIndex
+};
+
+enum FPRounding {
+  // The first four values are encodable directly by FPCR<RMode>.
+  FPTieEven = 0x0,
+  FPPositiveInfinity = 0x1,
+  FPNegativeInfinity = 0x2,
+  FPZero = 0x3,
+
+  // The final rounding modes are only available when explicitly specified by
+  // the instruction (such as with fcvta). They cannot be set in FPCR.
+  FPTieAway,
+  FPRoundOdd
+};
+
+enum Reg31Mode {
+  Reg31IsStackPointer,
+  Reg31IsZeroRegister
+};
+
+// Instructions. ---------------------------------------------------------------
+
+class Instruction {
+ public:
+  Instr InstructionBits() const {
+    return *(reinterpret_cast<const Instr*>(this));
+  }
+
+  void SetInstructionBits(Instr new_instr) {
+    *(reinterpret_cast<Instr*>(this)) = new_instr;
+  }
+
+  int Bit(int pos) const {
+    return (InstructionBits() >> pos) & 1;
+  }
+
+  uint32_t Bits(int msb, int lsb) const {
+    return unsigned_bitextract_32(msb, lsb, InstructionBits());
+  }
+
+  int32_t SignedBits(int msb, int lsb) const {
+    int32_t bits = *(reinterpret_cast<const int32_t*>(this));
+    return signed_bitextract_32(msb, lsb, bits);
+  }
+
+  Instr Mask(uint32_t mask) const {
+    return InstructionBits() & mask;
+  }
+
+  #define DEFINE_GETTER(Name, HighBit, LowBit, Func)             \
+  int32_t Name() const { return Func(HighBit, LowBit); }
+  INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
+  #undef DEFINE_GETTER
+
+  // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
+  // formed from ImmPCRelLo and ImmPCRelHi.
+  int ImmPCRel() const {
+    int offset =
+        static_cast<int>((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
+    int width = ImmPCRelLo_width + ImmPCRelHi_width;
+    return signed_bitextract_32(width - 1, 0, offset);
+  }
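+
+  // For example, adr and adrp split their 21-bit signed immediate into
+  // immlo (ImmPCRelLo, 2 bits) and immhi (ImmPCRelHi, 19 bits); ImmPCRel()
+  // reassembles and sign-extends it.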
+
+  uint64_t ImmLogical() const;
+  unsigned ImmNEONabcdefgh() const;
+  float ImmFP32() const;
+  double ImmFP64() const;
+  float ImmNEONFP32() const;
+  double ImmNEONFP64() const;
+
+  unsigned SizeLS() const {
+    return CalcLSDataSize(static_cast<LoadStoreOp>(Mask(LoadStoreMask)));
+  }
+
+  unsigned SizeLSPair() const {
+    return CalcLSPairDataSize(
+        static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
+  }
+
+  int NEONLSIndex(int access_size_shift) const {
+    int64_t q = NEONQ();
+    int64_t s = NEONS();
+    int64_t size = NEONLSSize();
+    int64_t index = (q << 3) | (s << 2) | size;
+    return static_cast<int>(index >> access_size_shift);
+  }
+
+  // Helpers.
+  bool IsCondBranchImm() const {
+    return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
+  }
+
+  bool IsUncondBranchImm() const {
+    return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
+  }
+
+  bool IsCompareBranch() const {
+    return Mask(CompareBranchFMask) == CompareBranchFixed;
+  }
+
+  bool IsTestBranch() const {
+    return Mask(TestBranchFMask) == TestBranchFixed;
+  }
+
+  bool IsImmBranch() const {
+    return BranchType() != UnknownBranchType;
+  }
+
+  bool IsPCRelAddressing() const {
+    return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
+  }
+
+  bool IsLogicalImmediate() const {
+    return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
+  }
+
+  bool IsAddSubImmediate() const {
+    return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
+  }
+
+  bool IsAddSubExtended() const {
+    return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
+  }
+
+  bool IsLoadOrStore() const {
+    return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
+  }
+
+  bool IsLoad() const;
+  bool IsStore() const;
+
+  bool IsLoadLiteral() const {
+    // This includes PRFM_lit.
+    return Mask(LoadLiteralFMask) == LoadLiteralFixed;
+  }
+
+  bool IsMovn() const {
+    return (Mask(MoveWideImmediateMask) == MOVN_x) ||
+           (Mask(MoveWideImmediateMask) == MOVN_w);
+  }
+
+  static int ImmBranchRangeBitwidth(ImmBranchType branch_type);
+  static int32_t ImmBranchForwardRange(ImmBranchType branch_type);
+  static bool IsValidImmPCOffset(ImmBranchType branch_type, int64_t offset);
+
+  // Indicate whether Rd can be the stack pointer or the zero register. This
+  // does not check that the instruction actually has an Rd field.
+  Reg31Mode RdMode() const {
+    // The following instructions use sp or wsp as Rd:
+    //  Add/sub (immediate) when not setting the flags.
+    //  Add/sub (extended) when not setting the flags.
+    //  Logical (immediate) when not setting the flags.
+    // Otherwise, r31 is the zero register.
+    if (IsAddSubImmediate() || IsAddSubExtended()) {
+      if (Mask(AddSubSetFlagsBit)) {
+        return Reg31IsZeroRegister;
+      } else {
+        return Reg31IsStackPointer;
+      }
+    }
+    if (IsLogicalImmediate()) {
+      // Of the logical (immediate) instructions, only ANDS (and its aliases)
+      // can set the flags. The others can all write into sp.
+      // Note that some logical operations are not available to
+      // immediate-operand instructions, so we have to combine two masks here.
+      if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
+        return Reg31IsZeroRegister;
+      } else {
+        return Reg31IsStackPointer;
+      }
+    }
+    return Reg31IsZeroRegister;
+  }
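+
+  // For example, "add sp, sp, #16" does not set flags, so Rd = 31 names sp;
+  // "adds xzr, x0, #16" (an alias of "cmn x0, #16") sets flags, so Rd = 31
+  // names the zero register.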
+
+  // Indicate whether Rn can be the stack pointer or the zero register. This
+  // does not check that the instruction actually has an Rn field.
+  Reg31Mode RnMode() const {
+    // The following instructions use sp or wsp as Rn:
+    //  All loads and stores.
+    //  Add/sub (immediate).
+    //  Add/sub (extended).
+    // Otherwise, r31 is the zero register.
+    if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) {
+      return Reg31IsStackPointer;
+    }
+    return Reg31IsZeroRegister;
+  }
+
+  ImmBranchType BranchType() const {
+    if (IsCondBranchImm()) {
+      return CondBranchType;
+    } else if (IsUncondBranchImm()) {
+      return UncondBranchType;
+    } else if (IsCompareBranch()) {
+      return CompareBranchType;
+    } else if (IsTestBranch()) {
+      return TestBranchType;
+    } else {
+      return UnknownBranchType;
+    }
+  }
+
+  // Find the target of this instruction. 'this' may be a branch or a
+  // PC-relative addressing instruction.
+  const Instruction* ImmPCOffsetTarget() const;
+
+  // Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
+  // a PC-relative addressing instruction.
+  void SetImmPCOffsetTarget(const Instruction* target);
+  // Patch a literal load instruction to load from 'source'.
+  void SetImmLLiteral(const Instruction* source);
+
+  // The range of a load literal instruction, expressed as 'instr +- range'.
+  // The range is actually the 'positive' range; the load instruction can
+  // target [instr - range - kInstructionSize, instr + range].
+  static const int kLoadLiteralImmBitwidth = 19;
+  static const int kLoadLiteralRange =
+      (1 << kLoadLiteralImmBitwidth) / 2 - kInstructionSize;
+
+  // Calculate the address of a literal referred to by a load-literal
+  // instruction, and return it as the specified type.
+  //
+  // The literal itself is safely mutable only if the backing buffer is safely
+  // mutable.
+  template <typename T>
+  T LiteralAddress() const {
+    uint64_t base_raw = reinterpret_cast<uint64_t>(this);
+    int64_t offset = ImmLLiteral() << kLiteralEntrySizeLog2;
+    uint64_t address_raw = base_raw + offset;
+
+    // Cast the address using a C-style cast. A reinterpret_cast would be
+    // appropriate, but it can't cast one integral type to another.
+    T address = (T)(address_raw);
+
+    // Assert that the address can be represented by the specified type.
+    VIXL_ASSERT((uint64_t)(address) == address_raw);
+
+    return address;
+  }
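+
+  // For example, the literal word loaded by "ldr w0, lbl" can be read via
+  //   instr->LiteralAddress<const uint32_t*>()
+  // as Literal32() below does (through a const void* and memcpy).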
+
+  uint32_t Literal32() const {
+    uint32_t literal;
+    memcpy(&literal, LiteralAddress<const void*>(), sizeof(literal));
+    return literal;
+  }
+
+  uint64_t Literal64() const {
+    uint64_t literal;
+    memcpy(&literal, LiteralAddress<const void*>(), sizeof(literal));
+    return literal;
+  }
+
+  float LiteralFP32() const {
+    return rawbits_to_float(Literal32());
+  }
+
+  double LiteralFP64() const {
+    return rawbits_to_double(Literal64());
+  }
+
+  const Instruction* NextInstruction() const {
+    return this + kInstructionSize;
+  }
+
+  const Instruction* InstructionAtOffset(int64_t offset) const {
+    VIXL_ASSERT(IsWordAligned(this + offset));
+    return this + offset;
+  }
+
+  template<typename T> static Instruction* Cast(T src) {
+    return reinterpret_cast<Instruction*>(src);
+  }
+
+  template<typename T> static const Instruction* CastConst(T src) {
+    return reinterpret_cast<const Instruction*>(src);
+  }
+
+ private:
+  int ImmBranch() const;
+
+  static float Imm8ToFP32(uint32_t imm8);
+  static double Imm8ToFP64(uint32_t imm8);
+
+  void SetPCRelImmTarget(const Instruction* target);
+  void SetBranchImmTarget(const Instruction* target);
+};
+
+
+// Functions for handling NEON vector format information.
+enum VectorFormat {
+  kFormatUndefined = 0xffffffff,
+  kFormat8B  = NEON_8B,
+  kFormat16B = NEON_16B,
+  kFormat4H  = NEON_4H,
+  kFormat8H  = NEON_8H,
+  kFormat2S  = NEON_2S,
+  kFormat4S  = NEON_4S,
+  kFormat1D  = NEON_1D,
+  kFormat2D  = NEON_2D,
+
+  // Scalar formats. We add the scalar bit to distinguish between scalar and
+  // vector enumerations; the bit is always set in the encoding of scalar ops
+  // and always clear for vector ops. Although kFormatD and kFormat1D appear
+  // to be the same, their meaning is subtly different. The first is a scalar
+  // operation, the second a vector operation that only affects one lane.
+  kFormatB = NEON_B | NEONScalar,
+  kFormatH = NEON_H | NEONScalar,
+  kFormatS = NEON_S | NEONScalar,
+  kFormatD = NEON_D | NEONScalar
+};
+
+VectorFormat VectorFormatHalfWidth(const VectorFormat vform);
+VectorFormat VectorFormatDoubleWidth(const VectorFormat vform);
+VectorFormat VectorFormatDoubleLanes(const VectorFormat vform);
+VectorFormat VectorFormatHalfLanes(const VectorFormat vform);
+VectorFormat ScalarFormatFromLaneSize(int lanesize);
+VectorFormat VectorFormatHalfWidthDoubleLanes(const VectorFormat vform);
+VectorFormat VectorFormatFillQ(const VectorFormat vform);
+unsigned RegisterSizeInBitsFromFormat(VectorFormat vform);
+unsigned RegisterSizeInBytesFromFormat(VectorFormat vform);
+// TODO: Make the return types of these functions consistent.
+unsigned LaneSizeInBitsFromFormat(VectorFormat vform);
+int LaneSizeInBytesFromFormat(VectorFormat vform);
+int LaneSizeInBytesLog2FromFormat(VectorFormat vform);
+int LaneCountFromFormat(VectorFormat vform);
+int MaxLaneCountFromFormat(VectorFormat vform);
+bool IsVectorFormat(VectorFormat vform);
+int64_t MaxIntFromFormat(VectorFormat vform);
+int64_t MinIntFromFormat(VectorFormat vform);
+uint64_t MaxUintFromFormat(VectorFormat vform);
+
+
+enum NEONFormat {
+  NF_UNDEF = 0,
+  NF_8B    = 1,
+  NF_16B   = 2,
+  NF_4H    = 3,
+  NF_8H    = 4,
+  NF_2S    = 5,
+  NF_4S    = 6,
+  NF_1D    = 7,
+  NF_2D    = 8,
+  NF_B     = 9,
+  NF_H     = 10,
+  NF_S     = 11,
+  NF_D     = 12
+};
+
+static const unsigned kNEONFormatMaxBits = 6;
+
+struct NEONFormatMap {
+  // The bit positions in the instruction to consider.
+  uint8_t bits[kNEONFormatMaxBits];
+
+  // Mapping from concatenated bits to format.
+  NEONFormat map[1 << kNEONFormatMaxBits];
+};
+
+class NEONFormatDecoder {
+ public:
+  enum SubstitutionMode {
+    kPlaceholder,
+    kFormat
+  };
+
+  // Construct a format decoder with increasingly specific format maps for each
+  // substitution. If no format map is specified, the default is the integer
+  // format map.
+  explicit NEONFormatDecoder(const Instruction* instr) {
+    instrbits_ = instr->InstructionBits();
+    SetFormatMaps(IntegerFormatMap());
+  }
+  NEONFormatDecoder(const Instruction* instr,
+                    const NEONFormatMap* format) {
+    instrbits_ = instr->InstructionBits();
+    SetFormatMaps(format);
+  }
+  NEONFormatDecoder(const Instruction* instr,
+                    const NEONFormatMap* format0,
+                    const NEONFormatMap* format1) {
+    instrbits_ = instr->InstructionBits();
+    SetFormatMaps(format0, format1);
+  }
+  NEONFormatDecoder(const Instruction* instr,
+                    const NEONFormatMap* format0,
+                    const NEONFormatMap* format1,
+                    const NEONFormatMap* format2) {
+    instrbits_ = instr->InstructionBits();
+    SetFormatMaps(format0, format1, format2);
+  }
+
+  // Set the format mapping for all or individual substitutions.
+  void SetFormatMaps(const NEONFormatMap* format0,
+                     const NEONFormatMap* format1 = NULL,
+                     const NEONFormatMap* format2 = NULL) {
+    VIXL_ASSERT(format0 != NULL);
+    formats_[0] = format0;
+    formats_[1] = (format1 == NULL) ? formats_[0] : format1;
+    formats_[2] = (format2 == NULL) ? formats_[1] : format2;
+  }
+  void SetFormatMap(unsigned index, const NEONFormatMap* format) {
+    VIXL_ASSERT(index < (sizeof(formats_) / sizeof(formats_[0])));
+    VIXL_ASSERT(format != NULL);
+    formats_[index] = format;
+  }
+
+  // Substitute %s in the input string with the placeholder string for each
+  // register, i.e. "'B", "'H", etc.
+  const char* SubstitutePlaceholders(const char* string) {
+    return Substitute(string, kPlaceholder, kPlaceholder, kPlaceholder);
+  }
+
+  // Substitute %s in the input string with a new string based on the
+  // substitution mode.
+  const char* Substitute(const char* string,
+                         SubstitutionMode mode0 = kFormat,
+                         SubstitutionMode mode1 = kFormat,
+                         SubstitutionMode mode2 = kFormat) {
+    snprintf(form_buffer_, sizeof(form_buffer_), string,
+             GetSubstitute(0, mode0),
+             GetSubstitute(1, mode1),
+             GetSubstitute(2, mode2));
+    return form_buffer_;
+  }
+
+  // Append a "2" to a mnemonic string based of the state of the Q bit.
+  const char* Mnemonic(const char* mnemonic) {
+    if ((instrbits_ & NEON_Q) != 0) {
+      snprintf(mne_buffer_, sizeof(mne_buffer_), "%s2", mnemonic);
+      return mne_buffer_;
+    }
+    return mnemonic;
+  }
+
+  VectorFormat GetVectorFormat(int format_index = 0) {
+    return GetVectorFormat(formats_[format_index]);
+  }
+
+  VectorFormat GetVectorFormat(const NEONFormatMap* format_map) {
+    static const VectorFormat vform[] = {
+      kFormatUndefined,
+      kFormat8B, kFormat16B, kFormat4H, kFormat8H,
+      kFormat2S, kFormat4S, kFormat1D, kFormat2D,
+      kFormatB, kFormatH, kFormatS, kFormatD
+    };
+    VIXL_ASSERT(GetNEONFormat(format_map) < (sizeof(vform) / sizeof(vform[0])));
+    return vform[GetNEONFormat(format_map)];
+  }
+
+  // Built in mappings for common cases.
+
+  // The integer format map uses three bits (Q, size<1:0>) to encode the
+  // "standard" set of NEON integer vector formats.
+  static const NEONFormatMap* IntegerFormatMap() {
+    static const NEONFormatMap map = {
+      {23, 22, 30},
+      {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_UNDEF, NF_2D}
+    };
+    return &map;
+  }
+
+  // The long integer format map uses two bits (size<1:0>) to encode the
+  // long set of NEON integer vector formats. These are used in narrow, wide
+  // and long operations.
+  static const NEONFormatMap* LongIntegerFormatMap() {
+    static const NEONFormatMap map = {
+      {23, 22}, {NF_8H, NF_4S, NF_2D}
+    };
+    return &map;
+  }
+
+  // The FP format map uses two bits (Q, size<0>) to encode the NEON FP vector
+  // formats: NF_2S, NF_4S, NF_2D.
+  static const NEONFormatMap* FPFormatMap() {
+    static const NEONFormatMap map = {
+      {22, 30}, {NF_2S, NF_4S, NF_UNDEF, NF_2D}
+    };
+    return &map;
+  }
+
+  // The load/store format map uses three bits (Q, 11, 10) to encode the
+  // set of NEON vector formats.
+  static const NEONFormatMap* LoadStoreFormatMap() {
+    static const NEONFormatMap map = {
+      {11, 10, 30},
+      {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}
+    };
+    return &map;
+  }
+
+  // The logical format map uses one bit (Q) to encode the NEON vector format:
+  // NF_8B, NF_16B.
+  static const NEONFormatMap* LogicalFormatMap() {
+    static const NEONFormatMap map = {
+      {30}, {NF_8B, NF_16B}
+    };
+    return &map;
+  }
+
+  // The triangular format map uses between two and five bits to encode the NEON
+  // vector format:
+  // xxx10->8B, xxx11->16B, xx100->4H, xx101->8H
+  // x1000->2S, x1001->4S,  10001->2D, all others undefined.
+  static const NEONFormatMap* TriangularFormatMap() {
+    static const NEONFormatMap map = {
+      {19, 18, 17, 16, 30},
+      {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B, NF_2S,
+       NF_4S, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B, NF_UNDEF, NF_2D,
+       NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B, NF_2S, NF_4S, NF_8B, NF_16B,
+       NF_4H, NF_8H, NF_8B, NF_16B}
+    };
+    return &map;
+  }
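+
+  // For example, NEON shift-by-immediate instructions use this map with
+  // immh (bits 19-16) and Q (bit 30): "sshr v0.16b, v1.16b, #1" has
+  // immh = 0001 and Q = 1, selecting index 0b00011 = 3, i.e. NF_16B.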
+
+  // The scalar format map uses two bits (size<1:0>) to encode the NEON scalar
+  // formats: NF_B, NF_H, NF_S, NF_D.
+  static const NEONFormatMap* ScalarFormatMap() {
+    static const NEONFormatMap map = {
+      {23, 22}, {NF_B, NF_H, NF_S, NF_D}
+    };
+    return &map;
+  }
+
+  // The long scalar format map uses two bits (size<1:0>) to encode the longer
+  // NEON scalar formats: NF_H, NF_S, NF_D.
+  static const NEONFormatMap* LongScalarFormatMap() {
+    static const NEONFormatMap map = {
+      {23, 22}, {NF_H, NF_S, NF_D}
+    };
+    return &map;
+  }
+
+  // The FP scalar format map assumes one bit (size<0>) is used to encode the
+  // NEON FP scalar formats: NF_S, NF_D.
+  static const NEONFormatMap* FPScalarFormatMap() {
+    static const NEONFormatMap map = {
+      {22}, {NF_S, NF_D}
+    };
+    return &map;
+  }
+
+  // The triangular scalar format map uses between one and four bits to encode
+  // the NEON FP scalar formats:
+  // xxx1->B, xx10->H, x100->S, 1000->D, all others undefined.
+  static const NEONFormatMap* TriangularScalarFormatMap() {
+    static const NEONFormatMap map = {
+      {19, 18, 17, 16},
+      {NF_UNDEF, NF_B, NF_H, NF_B, NF_S, NF_B, NF_H, NF_B,
+       NF_D,     NF_B, NF_H, NF_B, NF_S, NF_B, NF_H, NF_B}
+    };
+    return &map;
+  }
+
+ private:
+  // Get a pointer to a string that represents the format or placeholder for
+  // the specified substitution index, based on the format map and instruction.
+  const char* GetSubstitute(int index, SubstitutionMode mode) {
+    if (mode == kFormat) {
+      return NEONFormatAsString(GetNEONFormat(formats_[index]));
+    }
+    VIXL_ASSERT(mode == kPlaceholder);
+    return NEONFormatAsPlaceholder(GetNEONFormat(formats_[index]));
+  }
+
+  // Get the NEONFormat enumerated value for bits obtained from the
+  // instruction based on the specified format mapping.
+  NEONFormat GetNEONFormat(const NEONFormatMap* format_map) {
+    return format_map->map[PickBits(format_map->bits)];
+  }
+
+  // Convert a NEONFormat into a string.
+  static const char* NEONFormatAsString(NEONFormat format) {
+    static const char* formats[] = {
+      "undefined",
+      "8b", "16b", "4h", "8h", "2s", "4s", "1d", "2d",
+      "b", "h", "s", "d"
+    };
+    VIXL_ASSERT(format < (sizeof(formats) / sizeof(formats[0])));
+    return formats[format];
+  }
+
+  // Convert a NEONFormat into a register placeholder string.
+  static const char* NEONFormatAsPlaceholder(NEONFormat format) {
+    VIXL_ASSERT((format == NF_B) || (format == NF_H) ||
+                (format == NF_S) || (format == NF_D) ||
+                (format == NF_UNDEF));
+    static const char* formats[] = {
+      "undefined",
+      "undefined", "undefined", "undefined", "undefined",
+      "undefined", "undefined", "undefined", "undefined",
+      "'B", "'H", "'S", "'D"
+    };
+    return formats[format];
+  }
+
+  // Select bits from instrbits_ defined by the bits array, concatenate them,
+  // and return the value.
+  uint8_t PickBits(const uint8_t bits[]) {
+    uint8_t result = 0;
+    for (unsigned b = 0; b < kNEONFormatMaxBits; b++) {
+      if (bits[b] == 0) break;
+      result <<= 1;
+      result |= ((instrbits_ & (1 << bits[b])) == 0) ? 0 : 1;
+    }
+    return result;
+  }
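+
+  // For example, with IntegerFormatMap() ({23, 22, 30}) and an instruction
+  // such as "add v0.4s, v1.4s, v2.4s" (size = 10, Q = 1), PickBits()
+  // returns 0b101 = 5, which the map translates to NF_4S.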
+
+  Instr instrbits_;
+  const NEONFormatMap* formats_[3];
+  char form_buffer_[64];
+  char mne_buffer_[16];
+};
+}  // namespace vixl
+
+#endif  // VIXL_A64_INSTRUCTIONS_A64_H_
diff --git a/disas/libvixl/code-buffer.h b/disas/libvixl/vixl/code-buffer.h
index da6233dd80..f93ebb6b82 100644
--- a/disas/libvixl/code-buffer.h
+++ b/disas/libvixl/vixl/code-buffer.h
@@ -28,7 +28,7 @@
 #define VIXL_CODE_BUFFER_H
 
 #include <string.h>
-#include "globals.h"
+#include "vixl/globals.h"
 
 namespace vixl {
 
diff --git a/disas/libvixl/utils.cc b/disas/libvixl/vixl/compiler-intrinsics.cc
index 80b132a11e..fd551faeb1 100644
--- a/disas/libvixl/utils.cc
+++ b/disas/libvixl/vixl/compiler-intrinsics.cc
@@ -1,4 +1,4 @@
-// Copyright 2013, ARM Limited
+// Copyright 2015, ARM Limited
 // All rights reserved.
 //
 // Redistribution and use in source and binary forms, with or without
@@ -24,53 +24,13 @@
 // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include "utils.h"
-#include <stdio.h>
+#include "compiler-intrinsics.h"
 
 namespace vixl {
 
-uint32_t float_to_rawbits(float value) {
-  uint32_t bits = 0;
-  memcpy(&bits, &value, 4);
-  return bits;
-}
-
-
-uint64_t double_to_rawbits(double value) {
-  uint64_t bits = 0;
-  memcpy(&bits, &value, 8);
-  return bits;
-}
-
-
-float rawbits_to_float(uint32_t bits) {
-  float value = 0.0;
-  memcpy(&value, &bits, 4);
-  return value;
-}
-
-
-double rawbits_to_double(uint64_t bits) {
-  double value = 0.0;
-  memcpy(&value, &bits, 8);
-  return value;
-}
-
 
-int CountLeadingZeros(uint64_t value, int width) {
-  VIXL_ASSERT((width == 32) || (width == 64));
-  int count = 0;
-  uint64_t bit_test = UINT64_C(1) << (width - 1);
-  while ((count < width) && ((bit_test & value) == 0)) {
-    count++;
-    bit_test >>= 1;
-  }
-  return count;
-}
-
-
-int CountLeadingSignBits(int64_t value, int width) {
-  VIXL_ASSERT((width == 32) || (width == 64));
+int CountLeadingSignBitsFallBack(int64_t value, int width) {
+  VIXL_ASSERT(IsPowerOf2(width) && (width <= 64));
   if (value >= 0) {
     return CountLeadingZeros(value, width) - 1;
   } else {
@@ -79,23 +39,46 @@ int CountLeadingSignBits(int64_t value, int width) {
 }
 
 
-int CountTrailingZeros(uint64_t value, int width) {
-  VIXL_ASSERT((width == 32) || (width == 64));
+int CountLeadingZerosFallBack(uint64_t value, int width) {
+  VIXL_ASSERT(IsPowerOf2(width) && (width <= 64));
+  if (value == 0) {
+    return width;
+  }
   int count = 0;
-  while ((count < width) && (((value >> count) & 1) == 0)) {
-    count++;
+  value = value << (64 - width);
+  if ((value & UINT64_C(0xffffffff00000000)) == 0) {
+    count += 32;
+    value = value << 32;
   }
+  if ((value & UINT64_C(0xffff000000000000)) == 0) {
+    count += 16;
+    value = value << 16;
+  }
+  if ((value & UINT64_C(0xff00000000000000)) == 0) {
+    count += 8;
+    value = value << 8;
+  }
+  if ((value & UINT64_C(0xf000000000000000)) == 0) {
+    count += 4;
+    value = value << 4;
+  }
+  if ((value & UINT64_C(0xc000000000000000)) == 0) {
+    count += 2;
+    value = value << 2;
+  }
+  if ((value & UINT64_C(0x8000000000000000)) == 0) {
+    count += 1;
+  }
+  count += (value == 0);
   return count;
 }
 
 
-int CountSetBits(uint64_t value, int width) {
-  // TODO: Other widths could be added here, as the implementation already
-  // supports them.
-  VIXL_ASSERT((width == 32) || (width == 64));
+int CountSetBitsFallBack(uint64_t value, int width) {
+  VIXL_ASSERT(IsPowerOf2(width) && (width <= 64));
 
   // Mask out unused bits to ensure that they are not counted.
-  value &= (UINT64_C(0xffffffffffffffff) >> (64-width));
+  value &= (UINT64_C(0xffffffffffffffff) >> (64 - width));
 
   // Add up the set bits.
   // The algorithm works by adding pairs of bit fields together iteratively,
@@ -122,30 +105,40 @@ int CountSetBits(uint64_t value, int width) {
     value = ((value >> shift) & kMasks[i]) + (value & kMasks[i]);
   }
 
-  return value;
-}
-
-
-uint64_t LowestSetBit(uint64_t value) {
-  return value & -value;
-}
-
-
-bool IsPowerOf2(int64_t value) {
-  return (value != 0) && ((value & (value - 1)) == 0);
+  return static_cast<int>(value);
 }
 
 
-unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size) {
-  VIXL_ASSERT((reg_size % 8) == 0);
+int CountTrailingZerosFallBack(uint64_t value, int width) {
+  VIXL_ASSERT(IsPowerOf2(width) && (width <= 64));
   int count = 0;
-  for (unsigned i = 0; i < (reg_size / 16); i++) {
-    if ((imm & 0xffff) == 0) {
-      count++;
-    }
-    imm >>= 16;
+  value = value << (64 - width);
+  if ((value & UINT64_C(0xffffffff)) == 0) {
+    count += 32;
+    value = value >> 32;
   }
-  return count;
+  if ((value & 0xffff) == 0) {
+    count += 16;
+    value = value >> 16;
+  }
+  if ((value & 0xff) == 0) {
+    count += 8;
+    value = value >> 8;
+  }
+  if ((value & 0xf) == 0) {
+    count += 4;
+    value = value >> 4;
+  }
+  if ((value & 0x3) == 0) {
+    count += 2;
+    value = value >> 2;
+  }
+  if ((value & 0x1) == 0) {
+    count += 1;
+  }
+  count += (value == 0);
+  return count - (64 - width);
 }
 
+
 }  // namespace vixl
diff --git a/disas/libvixl/vixl/compiler-intrinsics.h b/disas/libvixl/vixl/compiler-intrinsics.h
new file mode 100644
index 0000000000..9431beddb9
--- /dev/null
+++ b/disas/libvixl/vixl/compiler-intrinsics.h
@@ -0,0 +1,155 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+//   * Redistributions of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//   * Redistributions in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other materials provided with the distribution.
+//   * Neither the name of ARM Limited nor the names of its contributors may be
+//     used to endorse or promote products derived from this software without
+//     specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef VIXL_COMPILER_INTRINSICS_H
+#define VIXL_COMPILER_INTRINSICS_H
+
+#include "globals.h"
+
+namespace vixl {
+
+// Helper to check whether the version of GCC used is greater than the specified
+// requirement.
+#define MAJOR 1000000
+#define MINOR 1000
+#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
+#define GCC_VERSION_OR_NEWER(major, minor, patchlevel)                         \
+    ((__GNUC__ * MAJOR + __GNUC_MINOR__ * MINOR + __GNUC_PATCHLEVEL__) >=      \
+     ((major) * MAJOR + (minor) * MINOR + (patchlevel)))
+#elif defined(__GNUC__) && defined(__GNUC_MINOR__)
+#define GCC_VERSION_OR_NEWER(major, minor, patchlevel)                         \
+    ((__GNUC__ * MAJOR + __GNUC_MINOR__ * MINOR) >=                            \
+     ((major) * MAJOR + (minor) * MINOR + (patchlevel)))
+#else
+#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) 0
+#endif
+
+
+#if defined(__clang__) && !defined(VIXL_NO_COMPILER_BUILTINS)
+
+#define COMPILER_HAS_BUILTIN_CLRSB    (__has_builtin(__builtin_clrsb))
+#define COMPILER_HAS_BUILTIN_CLZ      (__has_builtin(__builtin_clz))
+#define COMPILER_HAS_BUILTIN_CTZ      (__has_builtin(__builtin_ctz))
+#define COMPILER_HAS_BUILTIN_FFS      (__has_builtin(__builtin_ffs))
+#define COMPILER_HAS_BUILTIN_POPCOUNT (__has_builtin(__builtin_popcount))
+
+#elif defined(__GNUC__) && !defined(VIXL_NO_COMPILER_BUILTINS)
+// The documentation for these builtins is available at:
+// https://gcc.gnu.org/onlinedocs/gcc-$MAJOR.$MINOR.$PATCHLEVEL/gcc//Other-Builtins.html
+
+# define COMPILER_HAS_BUILTIN_CLRSB    (GCC_VERSION_OR_NEWER(4, 7, 0))
+# define COMPILER_HAS_BUILTIN_CLZ      (GCC_VERSION_OR_NEWER(3, 4, 0))
+# define COMPILER_HAS_BUILTIN_CTZ      (GCC_VERSION_OR_NEWER(3, 4, 0))
+# define COMPILER_HAS_BUILTIN_FFS      (GCC_VERSION_OR_NEWER(3, 4, 0))
+# define COMPILER_HAS_BUILTIN_POPCOUNT (GCC_VERSION_OR_NEWER(3, 4, 0))
+
+#else
+// One can define VIXL_NO_COMPILER_BUILTINS to force using the manually
+// implemented C++ methods.
+
+#define COMPILER_HAS_BUILTIN_BSWAP    false
+#define COMPILER_HAS_BUILTIN_CLRSB    false
+#define COMPILER_HAS_BUILTIN_CLZ      false
+#define COMPILER_HAS_BUILTIN_CTZ      false
+#define COMPILER_HAS_BUILTIN_FFS      false
+#define COMPILER_HAS_BUILTIN_POPCOUNT false
+
+#endif
+
+
+template<typename V>
+inline bool IsPowerOf2(V value) {
+  return (value != 0) && ((value & (value - 1)) == 0);
+}
+
+
+// Declaration of fallback functions.
+int CountLeadingSignBitsFallBack(int64_t value, int width);
+int CountLeadingZerosFallBack(uint64_t value, int width);
+int CountSetBitsFallBack(uint64_t value, int width);
+int CountTrailingZerosFallBack(uint64_t value, int width);
+
+
+// Implementation of intrinsics functions.
+// TODO: The implementations could be improved for sizes different from 32-bit
+// and 64-bit: we could mask the values and call the appropriate builtin.
+
+template<typename V>
+inline int CountLeadingSignBits(V value, int width = (sizeof(V) * 8)) {
+#if COMPILER_HAS_BUILTIN_CLRSB
+  if (width == 32) {
+    return __builtin_clrsb(value);
+  } else if (width == 64) {
+    return __builtin_clrsbll(value);
+  }
+#endif
+  return CountLeadingSignBitsFallBack(value, width);
+}
+
+
+template<typename V>
+inline int CountLeadingZeros(V value, int width = (sizeof(V) * 8)) {
+#if COMPILER_HAS_BUILTIN_CLZ
+  if (width == 32) {
+    return (value == 0) ? 32 : __builtin_clz(static_cast<unsigned>(value));
+  } else if (width == 64) {
+    return (value == 0) ? 64 : __builtin_clzll(value);
+  }
+#endif
+  return CountLeadingZerosFallBack(value, width);
+}
+
+
+template<typename V>
+inline int CountSetBits(V value, int width = (sizeof(V) * 8)) {
+#if COMPILER_HAS_BUILTIN_POPCOUNT
+  if (width == 32) {
+    return __builtin_popcount(static_cast<unsigned>(value));
+  } else if (width == 64) {
+    return __builtin_popcountll(value);
+  }
+#endif
+  return CountSetBitsFallBack(value, width);
+}
+
+
+template<typename V>
+inline int CountTrailingZeros(V value, int width = (sizeof(V) * 8)) {
+#if COMPILER_HAS_BUILTIN_CTZ
+  if (width == 32) {
+    return (value == 0) ? 32 : __builtin_ctz(static_cast<unsigned>(value));
+  } else if (width == 64) {
+    return (value == 0) ? 64 : __builtin_ctzll(value);
+  }
+#endif
+  return CountTrailingZerosFallBack(value, width);
+}
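+
+// Illustrative uses (the width defaults to the operand size in bits):
+//   CountLeadingZeros(static_cast<uint32_t>(1));     // 31
+//   CountTrailingZeros(static_cast<uint32_t>(8));    // 3
+//   CountSetBits(UINT64_C(0xff));                    // 8
+//   CountLeadingSignBits(static_cast<int32_t>(-1));  // 31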
+
+}  // namespace vixl
+
+#endif  // VIXL_COMPILER_INTRINSICS_H
+
diff --git a/disas/libvixl/globals.h b/disas/libvixl/vixl/globals.h
index 0c2493105d..61dc9f7f7e 100644
--- a/disas/libvixl/globals.h
+++ b/disas/libvixl/vixl/globals.h
@@ -1,4 +1,4 @@
-// Copyright 2013, ARM Limited
+// Copyright 2015, ARM Limited
 // All rights reserved.
 //
 // Redistribution and use in source and binary forms, with or without
@@ -49,20 +49,26 @@
 #include <stdint.h>
 #include <stdlib.h>
 #include <stddef.h>
-#include "platform.h"
+#include "vixl/platform.h"
 
 
 typedef uint8_t byte;
 
+// Type for half-precision (16 bit) floating point numbers.
+typedef uint16_t float16;
+
 const int KBytes = 1024;
 const int MBytes = 1024 * KBytes;
 
-#define VIXL_ABORT() printf("in %s, line %i", __FILE__, __LINE__); abort()
+#define VIXL_ABORT() \
+    do { printf("in %s, line %i", __FILE__, __LINE__); abort(); } while (false)
 #ifdef VIXL_DEBUG
   #define VIXL_ASSERT(condition) assert(condition)
   #define VIXL_CHECK(condition) VIXL_ASSERT(condition)
-  #define VIXL_UNIMPLEMENTED() printf("UNIMPLEMENTED\t"); VIXL_ABORT()
-  #define VIXL_UNREACHABLE() printf("UNREACHABLE\t"); VIXL_ABORT()
+  #define VIXL_UNIMPLEMENTED() \
+    do { fprintf(stderr, "UNIMPLEMENTED\t"); VIXL_ABORT(); } while (false)
+  #define VIXL_UNREACHABLE() \
+    do { fprintf(stderr, "UNREACHABLE\t"); VIXL_ABORT(); } while (false)
 #else
   #define VIXL_ASSERT(condition) ((void) 0)
   #define VIXL_CHECK(condition) assert(condition)
@@ -76,10 +82,70 @@ const int MBytes = 1024 * KBytes;
 #define VIXL_STATIC_ASSERT_LINE(line, condition) \
   typedef char VIXL_CONCAT(STATIC_ASSERT_LINE_, line)[(condition) ? 1 : -1] \
   __attribute__((unused))
-#define VIXL_STATIC_ASSERT(condition) VIXL_STATIC_ASSERT_LINE(__LINE__, condition) //NOLINT
+#define VIXL_STATIC_ASSERT(condition) \
+    VIXL_STATIC_ASSERT_LINE(__LINE__, condition)
+
+template <typename T1>
+inline void USE(T1) {}
+
+template <typename T1, typename T2>
+inline void USE(T1, T2) {}
+
+template <typename T1, typename T2, typename T3>
+inline void USE(T1, T2, T3) {}
+
+template <typename T1, typename T2, typename T3, typename T4>
+inline void USE(T1, T2, T3, T4) {}
+
+#define VIXL_ALIGNMENT_EXCEPTION() \
+    do { fprintf(stderr, "ALIGNMENT EXCEPTION\t"); VIXL_ABORT(); } while (0)
+
+// The clang::fallthrough attribute is used along with the Wimplicit-fallthrough
+// argument to annotate intentional fall-through between switch labels.
+// For more information please refer to:
+// http://clang.llvm.org/docs/AttributeReference.html#fallthrough-clang-fallthrough
+#ifndef __has_warning
+  #define __has_warning(x)  0
+#endif
+
+// Note: This option is only available for Clang, and will only be enabled
+// for C++11 (201103L).
+#if __has_warning("-Wimplicit-fallthrough") && __cplusplus >= 201103L
+  #define VIXL_FALLTHROUGH() [[clang::fallthrough]] //NOLINT
+#else
+  #define VIXL_FALLTHROUGH() do {} while (0)
+#endif
+
+#if __cplusplus >= 201103L
+  #define VIXL_NO_RETURN  [[noreturn]] //NOLINT
+#else
+  #define VIXL_NO_RETURN  __attribute__((noreturn))
+#endif
+
+// Some functions might only be marked as "noreturn" for the DEBUG build. This
+// macro should be used for such cases (for more details see what
+// VIXL_UNREACHABLE expands to).
+#ifdef VIXL_DEBUG
+  #define VIXL_DEBUG_NO_RETURN  VIXL_NO_RETURN
+#else
+  #define VIXL_DEBUG_NO_RETURN
+#endif
 
-template <typename T> inline void USE(T) {}
+#ifdef VIXL_INCLUDE_SIMULATOR
+#ifndef VIXL_GENERATE_SIMULATOR_INSTRUCTIONS_VALUE
+  #define VIXL_GENERATE_SIMULATOR_INSTRUCTIONS_VALUE  1
+#endif
+#else
+#ifndef VIXL_GENERATE_SIMULATOR_INSTRUCTIONS_VALUE
+  #define VIXL_GENERATE_SIMULATOR_INSTRUCTIONS_VALUE  0
+#endif
+#if VIXL_GENERATE_SIMULATOR_INSTRUCTIONS_VALUE
+  #warning "Generating Simulator instructions without Simulator support."
+#endif
+#endif
 
-#define VIXL_ALIGNMENT_EXCEPTION() printf("ALIGNMENT EXCEPTION\t"); VIXL_ABORT()
+#ifdef USE_SIMULATOR
+  #error "Please see the release notes for USE_SIMULATOR."
+#endif
 
 #endif  // VIXL_GLOBALS_H
diff --git a/disas/libvixl/vixl/invalset.h b/disas/libvixl/vixl/invalset.h
new file mode 100644
index 0000000000..ffdc0237b4
--- /dev/null
+++ b/disas/libvixl/vixl/invalset.h
@@ -0,0 +1,775 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+//   * Redistributions of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//   * Redistributions in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other materials provided with the distribution.
+//   * Neither the name of ARM Limited nor the names of its contributors may be
+//     used to endorse or promote products derived from this software without
+//     specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_INVALSET_H_
+#define VIXL_INVALSET_H_
+
+#include <string.h>
+
+#include <algorithm>
+#include <vector>
+
+#include "vixl/globals.h"
+
+namespace vixl {
+
+// We define a custom data structure template and its iterator as `std`
+// containers do not fit the performance requirements for some of our use cases.
+//
+// The structure behaves like an iterable unordered set with special properties
+// and restrictions. "InvalSet" stands for "Invalidatable Set".
+//
+// Restrictions and requirements:
+// - Adding an element already present in the set is illegal. In debug mode,
+//   this is checked at insertion time.
+// - The templated class `ElementType` must provide comparison operators so that
+//   `std::sort()` can be used.
+// - A key must be available to represent invalid elements.
+// - Elements with an invalid key must compare higher or equal to any other
+//   element.
+//
+// Use cases and performance considerations:
+// Our use cases present two specificities that allow us to design this
+// structure to provide fast insertion *and* fast search and deletion
+// operations:
+// - Elements are (generally) inserted in order (sorted according to their key).
+// - A key is available to mark elements as invalid (deleted).
+// The backing `std::vector` allows for fast insertions. When
+// searching for an element we ensure the elements are sorted (this is generally
+// the case) and perform a binary search. When deleting an element we do not
+// free the associated memory immediately. Instead, an element to be deleted is
+// marked with the 'invalid' key. Other methods of the container take care of
+// ignoring entries marked as invalid.
+// To avoid the overhead of the `std::vector` container when only a few
+// entries are used, a number of elements are preallocated.
+
+// 'ElementType' and 'KeyType' are respectively the types of the elements and
+// their key.  The structure only reclaims memory when safe to do so, if the
+// number of elements that can be reclaimed is greater than `RECLAIM_FROM` and
+// greater than `<total number of elements> / RECLAIM_FACTOR`.
+#define TEMPLATE_INVALSET_P_DECL                                               \
+  class ElementType,                                                           \
+  unsigned N_PREALLOCATED_ELEMENTS,                                            \
+  class KeyType,                                                               \
+  KeyType INVALID_KEY,                                                         \
+  size_t RECLAIM_FROM,                                                         \
+  unsigned RECLAIM_FACTOR
+
+#define TEMPLATE_INVALSET_P_DEF                                                \
+ElementType, N_PREALLOCATED_ELEMENTS,                                          \
+KeyType, INVALID_KEY, RECLAIM_FROM, RECLAIM_FACTOR
+
+template<class S> class InvalSetIterator;  // Forward declaration.
+
+template<TEMPLATE_INVALSET_P_DECL> class InvalSet {
+ public:
+  InvalSet();
+  ~InvalSet();
+
+  static const size_t kNPreallocatedElements = N_PREALLOCATED_ELEMENTS;
+  static const KeyType kInvalidKey = INVALID_KEY;
+
+  // It is illegal to insert an element already present in the set.
+  void insert(const ElementType& element);
+
+  // Looks for the specified element in the set and, if found, deletes it.
+  void erase(const ElementType& element);
+
+  // This indicates the number of (valid) elements stored in this set.
+  size_t size() const;
+
+  // Returns true if no elements are stored in the set.
+  // Note that this does not mean that the backing storage is empty: it can
+  // still contain invalid elements.
+  bool empty() const;
+
+  void clear();
+
+  const ElementType min_element();
+
+  // This returns the key of the minimum element in the set.
+  KeyType min_element_key();
+
+  static bool IsValid(const ElementType& element);
+  static KeyType Key(const ElementType& element);
+  static void SetKey(ElementType* element, KeyType key);
+
+ protected:
+  // Returns a pointer to the element in vector_ if it was found, or NULL
+  // otherwise.
+  ElementType* Search(const ElementType& element);
+
+  // The argument *must* point to an element stored in *this* set.
+  // This function is not allowed to move elements in the backing vector
+  // storage.
+  void EraseInternal(ElementType* element);
+
+  // The elements in the range searched must be sorted.
+  ElementType* BinarySearch(const ElementType& element,
+                            ElementType* start,
+                            ElementType* end) const;
+
+  // Sort the elements.
+  enum SortType {
+    // The 'hard' version guarantees that invalid elements are moved to the end
+    // of the container.
+    kHardSort,
+    // The 'soft' version only guarantees that the elements will be sorted.
+    // Invalid elements may still be present anywhere in the set.
+    kSoftSort
+  };
+  void Sort(SortType sort_type);
+
+  // Delete the elements that have an invalid key. The complexity is linear
+  // with the size of the vector.
+  void Clean();
+
+  const ElementType Front() const;
+  const ElementType Back() const;
+
+  // Delete invalid trailing elements and return the last valid element in the
+  // set.
+  const ElementType CleanBack();
+
+  // Returns a pointer to the start or end of the backing storage.
+  const ElementType* StorageBegin() const;
+  const ElementType* StorageEnd() const;
+  ElementType* StorageBegin();
+  ElementType* StorageEnd();
+
+  // Returns the index of the element within the backing storage. The element
+  // must belong to the backing storage.
+  size_t ElementIndex(const ElementType* element) const;
+
+  // Returns the element at the specified index in the backing storage.
+  const ElementType* ElementAt(size_t index) const;
+  ElementType* ElementAt(size_t index);
+
+  static const ElementType* FirstValidElement(const ElementType* from,
+                                              const ElementType* end);
+
+  void CacheMinElement();
+  const ElementType CachedMinElement() const;
+
+  bool ShouldReclaimMemory() const;
+  void ReclaimMemory();
+
+  bool IsUsingVector() const { return vector_ != NULL; }
+  void set_sorted(bool sorted) { sorted_ = sorted; }
+
+  // We cache some data commonly required by users to improve performance.
+  // We cannot cache pointers to elements as we do not control the backing
+  // storage.
+  bool valid_cached_min_;
+  size_t cached_min_index_;   // Valid iff `valid_cached_min_` is true.
+  KeyType cached_min_key_;    // Valid iff `valid_cached_min_` is true.
+
+  // Indicates whether the elements are sorted.
+  bool sorted_;
+
+  // The number of (valid) elements in this set.
+  size_t size_;
+
+  // The backing storage is either the array of preallocated elements or the
+  // vector. The structure starts by using the preallocated elements, and
+  // transitions (permanently) to using the vector once more than
+  // kNPreallocatedElements are used.
+  // Elements are only invalidated when using the vector. The preallocated
+  // storage always only contains valid elements.
+  ElementType preallocated_[kNPreallocatedElements];
+  std::vector<ElementType>* vector_;
+
+#ifdef VIXL_DEBUG
+  // Iterators acquire and release this monitor. While a set is acquired,
+  // certain operations are illegal to ensure that the iterator will
+  // correctly iterate over the elements in the set.
+  int monitor_;
+  int monitor() const { return monitor_; }
+  void Acquire() { monitor_++; }
+  void Release() {
+    monitor_--;
+    VIXL_ASSERT(monitor_ >= 0);
+  }
+#endif
+
+  friend class InvalSetIterator<InvalSet<TEMPLATE_INVALSET_P_DEF> >;
+  typedef ElementType _ElementType;
+  typedef KeyType _KeyType;
+};
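+
+
+// A minimal usage sketch (illustrative only; `ExampleSet` and the parameter
+// values are hypothetical, not part of VIXL). Elements are plain int64_t
+// values acting as their own key, with -1 marking an invalidated entry.
+// Users must define the Key() and SetKey() specialisations for their
+// instantiation:
+//
+//   typedef InvalSet<int64_t, 8, int64_t, -1, 8, 4> ExampleSet;
+//
+//   template<>
+//   inline int64_t InvalSet<int64_t, 8, int64_t, -1, 8, 4>::Key(
+//       const int64_t& element) {
+//     return element;
+//   }
+//
+//   template<>
+//   inline void InvalSet<int64_t, 8, int64_t, -1, 8, 4>::SetKey(
+//       int64_t* element, int64_t key) {
+//     *element = key;
+//   }
+//
+//   ExampleSet set;
+//   set.insert(42);
+//   set.insert(7);
+//   // set.min_element() == 7. Once the std::vector backing storage is in
+//   // use, erase() invalidates the entry rather than shifting elements.
+//   set.erase(42);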
+
+
+template<class S> class InvalSetIterator {
+ private:
+  // Redefine types to mirror the associated set types.
+  typedef typename S::_ElementType ElementType;
+  typedef typename S::_KeyType KeyType;
+
+ public:
+  explicit InvalSetIterator(S* inval_set);
+  ~InvalSetIterator();
+
+  ElementType* Current() const;
+  void Advance();
+  bool Done() const;
+
+  // Mark this iterator as 'done'.
+  void Finish();
+
+  // Delete the current element and advance the iterator to point to the next
+  // element.
+  void DeleteCurrentAndAdvance();
+
+  static bool IsValid(const ElementType& element);
+  static KeyType Key(const ElementType& element);
+
+ protected:
+  void MoveToValidElement();
+
+  // Indicates if the iterator is looking at the vector or at the preallocated
+  // elements.
+  const bool using_vector_;
+  // Used when looking at the preallocated elements; in debug mode it also
+  // tracks how many times the iterator has advanced when using the vector.
+  size_t index_;
+  typename std::vector<ElementType>::iterator iterator_;
+  S* inval_set_;
+};
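+
+
+// Iteration sketch (illustrative only, reusing the hypothetical `ExampleSet`
+// above). The iterator transparently skips invalidated entries:
+//
+//   for (InvalSetIterator<ExampleSet> it(&set); !it.Done(); it.Advance()) {
+//     int64_t* element = it.Current();
+//     // ... use *element ...
+//   }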
+
+
+template<TEMPLATE_INVALSET_P_DECL>
+InvalSet<TEMPLATE_INVALSET_P_DEF>::InvalSet()
+  : valid_cached_min_(false),
+    sorted_(true), size_(0), vector_(NULL) {
+#ifdef VIXL_DEBUG
+  monitor_ = 0;
+#endif
+}
+
+
+template<TEMPLATE_INVALSET_P_DECL>
+InvalSet<TEMPLATE_INVALSET_P_DEF>::~InvalSet() {
+  VIXL_ASSERT(monitor_ == 0);
+  delete vector_;
+}
+
+
+template<TEMPLATE_INVALSET_P_DECL>
+void InvalSet<TEMPLATE_INVALSET_P_DEF>::insert(const ElementType& element) {
+  VIXL_ASSERT(monitor() == 0);
+  VIXL_ASSERT(IsValid(element));
+  VIXL_ASSERT(Search(element) == NULL);
+  set_sorted(empty() || (sorted_ && (element > CleanBack())));
+  if (IsUsingVector()) {
+    vector_->push_back(element);
+  } else {
+    if (size_ < kNPreallocatedElements) {
+      preallocated_[size_] = element;
+    } else {
+      // Transition to using the vector.
+      vector_ = new std::vector<ElementType>(preallocated_,
+                                             preallocated_ + size_);
+      vector_->push_back(element);
+    }
+  }
+  size_++;
+
+  if (valid_cached_min_ && (element < min_element())) {
+    cached_min_index_ = IsUsingVector() ? vector_->size() - 1 : size_ - 1;
+    cached_min_key_ = Key(element);
+    valid_cached_min_ = true;
+  }
+
+  if (ShouldReclaimMemory()) {
+    ReclaimMemory();
+  }
+}
+
+
+template<TEMPLATE_INVALSET_P_DECL>
+void InvalSet<TEMPLATE_INVALSET_P_DEF>::erase(const ElementType& element) {
+  VIXL_ASSERT(monitor() == 0);
+  VIXL_ASSERT(IsValid(element));
+  ElementType* local_element = Search(element);
+  if (local_element != NULL) {
+    EraseInternal(local_element);
+  }
+}
+
+
+template<TEMPLATE_INVALSET_P_DECL>
+ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::Search(
+    const ElementType& element) {
+  VIXL_ASSERT(monitor() == 0);
+  if (empty()) {
+    return NULL;
+  }
+  if (ShouldReclaimMemory()) {
+    ReclaimMemory();
+  }
+  if (!sorted_) {
+    Sort(kHardSort);
+  }
+  if (!valid_cached_min_) {
+    CacheMinElement();
+  }
+  return BinarySearch(element, ElementAt(cached_min_index_), StorageEnd());
+}
+
+
+template<TEMPLATE_INVALSET_P_DECL>
+size_t InvalSet<TEMPLATE_INVALSET_P_DEF>::size() const {
+  return size_;
+}
+
+
+template<TEMPLATE_INVALSET_P_DECL>
+bool InvalSet<TEMPLATE_INVALSET_P_DEF>::empty() const {
+  return size_ == 0;
+}
+
+
+template<TEMPLATE_INVALSET_P_DECL>
+void InvalSet<TEMPLATE_INVALSET_P_DEF>::clear() {
+  VIXL_ASSERT(monitor() == 0);
+  size_ = 0;
+  if (IsUsingVector()) {
+    vector_->clear();
+  }
+  set_sorted(true);
+  valid_cached_min_ = false;
+}
+
+
+template<TEMPLATE_INVALSET_P_DECL>
+const ElementType InvalSet<TEMPLATE_INVALSET_P_DEF>::min_element() {
+  VIXL_ASSERT(monitor() == 0);
+  VIXL_ASSERT(!empty());
+  CacheMinElement();
+  return *ElementAt(cached_min_index_);
+}
+
+
+template<TEMPLATE_INVALSET_P_DECL>
+KeyType InvalSet<TEMPLATE_INVALSET_P_DEF>::min_element_key() {
+  VIXL_ASSERT(monitor() == 0);
+  if (valid_cached_min_) {
+    return cached_min_key_;
+  } else {
+    return Key(min_element());
+  }
+}
+
+
+template<TEMPLATE_INVALSET_P_DECL>
+bool InvalSet<TEMPLATE_INVALSET_P_DEF>::IsValid(const ElementType& element) {
+  return Key(element) != kInvalidKey;
+}
+
+
+template<TEMPLATE_INVALSET_P_DECL>
+void InvalSet<TEMPLATE_INVALSET_P_DEF>::EraseInternal(ElementType* element) {
+  // Note that this function must be safe even while an iterator has acquired
+  // this set.
+  VIXL_ASSERT(element != NULL);
+  size_t deleted_index = ElementIndex(element);
+  if (IsUsingVector()) {
+    VIXL_ASSERT((&(vector_->front()) <= element) &&
+                (element <= &(vector_->back())));
+    SetKey(element, kInvalidKey);
+  } else {
+    VIXL_ASSERT((preallocated_ <= element) &&
+                (element < (preallocated_ + kNPreallocatedElements)));
+    ElementType* end = preallocated_ + kNPreallocatedElements;
+    size_t copy_size = sizeof(*element) * (end - element - 1);
+    memmove(element, element + 1, copy_size);
+  }
+  size_--;
+
+  if (valid_cached_min_ &&
+      (deleted_index == cached_min_index_)) {
+    if (sorted_ && !empty()) {
+      const ElementType* min = FirstValidElement(element, StorageEnd());
+      cached_min_index_ = ElementIndex(min);
+      cached_min_key_ = Key(*min);
+      valid_cached_min_ = true;
+    } else {
+      valid_cached_min_ = false;
+    }
+  }
+}
+
+
+template<TEMPLATE_INVALSET_P_DECL>
+ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::BinarySearch(
+    const ElementType& element, ElementType* start, ElementType* end) const {
+  if (start == end) {
+    return NULL;
+  }
+  VIXL_ASSERT(sorted_);
+  VIXL_ASSERT(start < end);
+  VIXL_ASSERT(!empty());
+
+  // Perform a binary search through the elements while ignoring invalid
+  // elements.
+  ElementType* elements = start;
+  size_t low = 0;
+  size_t high = (end - start) - 1;
+  while (low < high) {
+    // Find valid bounds.
+    while (!IsValid(elements[low]) && (low < high)) ++low;
+    while (!IsValid(elements[high]) && (low < high)) --high;
+    VIXL_ASSERT(low <= high);
+    // Compute the middle index as low / 2 + high / 2 + (low & high & 1),
+    // which equals (low + high) / 2 but avoids overflow for large indices.
+    size_t middle = low / 2 + high / 2 + (low & high & 1);
+    if ((middle == low) || (middle == high)) {
+      break;
+    }
+    while (!IsValid(elements[middle]) && (middle < high - 1)) ++middle;
+    while (!IsValid(elements[middle]) && (low + 1 < middle)) --middle;
+    if (!IsValid(elements[middle])) {
+      break;
+    }
+    if (elements[middle] < element) {
+      low = middle;
+    } else {
+      high = middle;
+    }
+  }
+
+  if (elements[low] == element) return &elements[low];
+  if (elements[high] == element) return &elements[high];
+  return NULL;
+}
+
+
+template<TEMPLATE_INVALSET_P_DECL>
+void InvalSet<TEMPLATE_INVALSET_P_DEF>::Sort(SortType sort_type) {
+  VIXL_ASSERT(monitor() == 0);
+  if (sort_type == kSoftSort) {
+    if (sorted_) {
+      return;
+    }
+  }
+  if (empty()) {
+    return;
+  }
+
+  Clean();
+  std::sort(StorageBegin(), StorageEnd());
+
+  set_sorted(true);
+  cached_min_index_ = 0;
+  cached_min_key_ = Key(Front());
+  valid_cached_min_ = true;
+}
+
+
+template<TEMPLATE_INVALSET_P_DECL>
+void InvalSet<TEMPLATE_INVALSET_P_DEF>::Clean() {
+  VIXL_ASSERT(monitor() == 0);
+  if (empty() || !IsUsingVector()) {
+    return;
+  }
+  // Manually iterate through the vector storage to discard invalid elements.
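+  // For example, [v0, v1, X, X, v2, v3, X] is compacted in place to
+  // [v0, v1, v2, v3], where X denotes an invalidated element.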
+  ElementType* start = &(vector_->front());
+  ElementType* end = start + vector_->size();
+  ElementType* c = start;
+  ElementType* first_invalid;
+  ElementType* first_valid;
+  ElementType* next_invalid;
+
+  while (c < end && IsValid(*c)) { c++; }
+  first_invalid = c;
+
+  while (c < end) {
+    while (c < end && !IsValid(*c)) { c++; }
+    first_valid = c;
+    while (c < end && IsValid(*c)) { c++; }
+    next_invalid = c;
+
+    ptrdiff_t n_moved_elements = (next_invalid - first_valid);
+    memmove(first_invalid, first_valid, n_moved_elements * sizeof(*c));
+    first_invalid = first_invalid + n_moved_elements;
+    c = next_invalid;
+  }
+
+  // Delete the trailing invalid elements.
+  vector_->erase(vector_->begin() + (first_invalid - start), vector_->end());
+  VIXL_ASSERT(vector_->size() == size_);
+
+  if (sorted_) {
+    valid_cached_min_ = true;
+    cached_min_index_ = 0;
+    cached_min_key_ = Key(*ElementAt(0));
+  } else {
+    valid_cached_min_ = false;
+  }
+}
+
+
+template<TEMPLATE_INVALSET_P_DECL>
+const ElementType InvalSet<TEMPLATE_INVALSET_P_DEF>::Front() const {
+  VIXL_ASSERT(!empty());
+  return IsUsingVector() ? vector_->front() : preallocated_[0];
+}
+
+
+template<TEMPLATE_INVALSET_P_DECL>
+const ElementType InvalSet<TEMPLATE_INVALSET_P_DEF>::Back() const {
+  VIXL_ASSERT(!empty());
+  return IsUsingVector() ? vector_->back() : preallocated_[size_ - 1];
+}
+
+
+template<TEMPLATE_INVALSET_P_DECL>
+const ElementType InvalSet<TEMPLATE_INVALSET_P_DEF>::CleanBack() {
+  VIXL_ASSERT(monitor() == 0);
+  if (IsUsingVector()) {
+    // Delete the invalid trailing elements.
+    typename std::vector<ElementType>::reverse_iterator it = vector_->rbegin();
+    while (!IsValid(*it)) {
+      it++;
+    }
+    vector_->erase(it.base(), vector_->end());
+  }
+  return Back();
+}
+
+
+template<TEMPLATE_INVALSET_P_DECL>
+const ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::StorageBegin() const {
+  return IsUsingVector() ? &(vector_->front()) : preallocated_;
+}
+
+
+template<TEMPLATE_INVALSET_P_DECL>
+const ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::StorageEnd() const {
+  return IsUsingVector() ? &(vector_->back()) + 1 : preallocated_ + size_;
+}
+
+
+template<TEMPLATE_INVALSET_P_DECL>
+ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::StorageBegin() {
+  return IsUsingVector() ? &(vector_->front()) : preallocated_;
+}
+
+
+template<TEMPLATE_INVALSET_P_DECL>
+ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::StorageEnd() {
+  return IsUsingVector() ? &(vector_->back()) + 1 : preallocated_ + size_;
+}
+
+
+template<TEMPLATE_INVALSET_P_DECL>
+size_t InvalSet<TEMPLATE_INVALSET_P_DEF>::ElementIndex(
+    const ElementType* element) const {
+  VIXL_ASSERT((StorageBegin() <= element) && (element < StorageEnd()));
+  return element - StorageBegin();
+}
+
+
+template<TEMPLATE_INVALSET_P_DECL>
+const ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::ElementAt(
+    size_t index) const {
+  VIXL_ASSERT(
+      (IsUsingVector() && (index < vector_->size())) || (index < size_));
+  return StorageBegin() + index;
+}
+
+template<TEMPLATE_INVALSET_P_DECL>
+ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::ElementAt(size_t index) {
+  VIXL_ASSERT(
+      (IsUsingVector() && (index < vector_->size())) || (index < size_));
+  return StorageBegin() + index;
+}
+
+template<TEMPLATE_INVALSET_P_DECL>
+const ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::FirstValidElement(
+    const ElementType* from, const ElementType* end) {
+  while ((from < end) && !IsValid(*from)) {
+    from++;
+  }
+  return from;
+}
+
+
+template<TEMPLATE_INVALSET_P_DECL>
+void InvalSet<TEMPLATE_INVALSET_P_DEF>::CacheMinElement() {
+  VIXL_ASSERT(monitor() == 0);
+  VIXL_ASSERT(!empty());
+
+  if (valid_cached_min_) {
+    return;
+  }
+
+  if (sorted_) {
+    const ElementType* min = FirstValidElement(StorageBegin(), StorageEnd());
+    cached_min_index_ = ElementIndex(min);
+    cached_min_key_ = Key(*min);
+    valid_cached_min_ = true;
+  } else {
+    Sort(kHardSort);
+  }
+  VIXL_ASSERT(valid_cached_min_);
+}
+
+
+template<TEMPLATE_INVALSET_P_DECL>
+bool InvalSet<TEMPLATE_INVALSET_P_DEF>::ShouldReclaimMemory() const {
+  if (!IsUsingVector()) {
+    return false;
+  }
+  size_t n_invalid_elements = vector_->size() - size_;
+  return (n_invalid_elements > RECLAIM_FROM) &&
+         (n_invalid_elements > vector_->size() / RECLAIM_FACTOR);
+}
+
+
+template<TEMPLATE_INVALSET_P_DECL>
+void InvalSet<TEMPLATE_INVALSET_P_DEF>::ReclaimMemory() {
+  VIXL_ASSERT(monitor() == 0);
+  Clean();
+}
+
+
+template<class S>
+InvalSetIterator<S>::InvalSetIterator(S* inval_set)
+    : using_vector_((inval_set != NULL) && inval_set->IsUsingVector()),
+      index_(0),
+      inval_set_(inval_set) {
+  if (inval_set != NULL) {
+    inval_set->Sort(S::kSoftSort);
+#ifdef VIXL_DEBUG
+    inval_set->Acquire();
+#endif
+    if (using_vector_) {
+      iterator_ = typename std::vector<ElementType>::iterator(
+          inval_set_->vector_->begin());
+    }
+    MoveToValidElement();
+  }
+}
+
+
+template<class S>
+InvalSetIterator<S>::~InvalSetIterator() {
+#ifdef VIXL_DEBUG
+  if (inval_set_ != NULL) {
+    inval_set_->Release();
+  }
+#endif
+}
+
+
+template<class S>
+typename S::_ElementType* InvalSetIterator<S>::Current() const {
+  VIXL_ASSERT(!Done());
+  if (using_vector_) {
+    return &(*iterator_);
+  } else {
+    return &(inval_set_->preallocated_[index_]);
+  }
+}
+
+
+template<class S>
+void InvalSetIterator<S>::Advance() {
+  VIXL_ASSERT(!Done());
+  if (using_vector_) {
+    iterator_++;
+#ifdef VIXL_DEBUG
+    index_++;
+#endif
+    MoveToValidElement();
+  } else {
+    index_++;
+  }
+}
+
+
+template<class S>
+bool InvalSetIterator<S>::Done() const {
+  if (using_vector_) {
+    bool done = (iterator_ == inval_set_->vector_->end());
+    VIXL_ASSERT(done == (index_ == inval_set_->size()));
+    return done;
+  } else {
+    return index_ == inval_set_->size();
+  }
+}
+
+
+template<class S>
+void InvalSetIterator<S>::Finish() {
+  VIXL_ASSERT(inval_set_->sorted_);
+  if (using_vector_) {
+    iterator_ = inval_set_->vector_->end();
+  }
+  index_ = inval_set_->size();
+}
+
+
+template<class S>
+void InvalSetIterator<S>::DeleteCurrentAndAdvance() {
+  if (using_vector_) {
+    inval_set_->EraseInternal(&(*iterator_));
+    MoveToValidElement();
+  } else {
+    inval_set_->EraseInternal(inval_set_->preallocated_ + index_);
+  }
+}
+
+
+template<class S>
+bool InvalSetIterator<S>::IsValid(const ElementType& element) {
+  return S::IsValid(element);
+}
+
+
+template<class S>
+typename S::_KeyType InvalSetIterator<S>::Key(const ElementType& element) {
+  return S::Key(element);
+}
+
+
+template<class S>
+void InvalSetIterator<S>::MoveToValidElement() {
+  if (using_vector_) {
+    while ((iterator_ != inval_set_->vector_->end()) && !IsValid(*iterator_)) {
+      iterator_++;
+    }
+  } else {
+    VIXL_ASSERT(inval_set_->empty() || IsValid(inval_set_->preallocated_[0]));
+    // Nothing to do.
+  }
+}
+
+#undef TEMPLATE_INVALSET_P_DECL
+#undef TEMPLATE_INVALSET_P_DEF
+
+}  // namespace vixl
+
+#endif  // VIXL_INVALSET_H_
diff --git a/disas/libvixl/platform.h b/disas/libvixl/vixl/platform.h
index de2b110cca..ab588f07f5 100644
--- a/disas/libvixl/platform.h
+++ b/disas/libvixl/vixl/platform.h
@@ -1,4 +1,4 @@
-// Copyright 2013, ARM Limited
+// Copyright 2014, ARM Limited
 // All rights reserved.
 //
 // Redistribution and use in source and binary forms, with or without
diff --git a/disas/libvixl/vixl/utils.cc b/disas/libvixl/vixl/utils.cc
new file mode 100644
index 0000000000..3b8bd75fba
--- /dev/null
+++ b/disas/libvixl/vixl/utils.cc
@@ -0,0 +1,142 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+//   * Redistributions of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//   * Redistributions in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other materials provided with the distribution.
+//   * Neither the name of ARM Limited nor the names of its contributors may be
+//     used to endorse or promote products derived from this software without
+//     specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "vixl/utils.h"
+#include <stdio.h>
+
+namespace vixl {
+
+uint32_t float_to_rawbits(float value) {
+  uint32_t bits = 0;
+  memcpy(&bits, &value, 4);
+  return bits;
+}
+
+
+uint64_t double_to_rawbits(double value) {
+  uint64_t bits = 0;
+  memcpy(&bits, &value, 8);
+  return bits;
+}
+
+
+float rawbits_to_float(uint32_t bits) {
+  float value = 0.0;
+  memcpy(&value, &bits, 4);
+  return value;
+}
+
+
+double rawbits_to_double(uint64_t bits) {
+  double value = 0.0;
+  memcpy(&value, &bits, 8);
+  return value;
+}
+
+
+uint32_t float_sign(float val) {
+  uint32_t rawbits = float_to_rawbits(val);
+  return unsigned_bitextract_32(31, 31, rawbits);
+}
+
+
+uint32_t float_exp(float val) {
+  uint32_t rawbits = float_to_rawbits(val);
+  return unsigned_bitextract_32(30, 23, rawbits);
+}
+
+
+uint32_t float_mantissa(float val) {
+  uint32_t rawbits = float_to_rawbits(val);
+  return unsigned_bitextract_32(22, 0, rawbits);
+}
+
+
+uint32_t double_sign(double val) {
+  uint64_t rawbits = double_to_rawbits(val);
+  return static_cast<uint32_t>(unsigned_bitextract_64(63, 63, rawbits));
+}
+
+
+uint32_t double_exp(double val) {
+  uint64_t rawbits = double_to_rawbits(val);
+  return static_cast<uint32_t>(unsigned_bitextract_64(62, 52, rawbits));
+}
+
+
+uint64_t double_mantissa(double val) {
+  uint64_t rawbits = double_to_rawbits(val);
+  return unsigned_bitextract_64(51, 0, rawbits);
+}
+
+
+float float_pack(uint32_t sign, uint32_t exp, uint32_t mantissa) {
+  uint32_t bits = (sign << 31) | (exp << 23) | mantissa;
+  return rawbits_to_float(bits);
+}
+
+
+double double_pack(uint64_t sign, uint64_t exp, uint64_t mantissa) {
+  uint64_t bits = (sign << 63) | (exp << 52) | mantissa;
+  return rawbits_to_double(bits);
+}
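+
+// For example, float_pack(0, 127, 0) reconstructs 1.0f (raw bits 0x3f800000)
+// and double_pack(1, 1023, 0) reconstructs -1.0 (raw 0xbff0000000000000).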
+
+
+int float16classify(float16 value) {
+  uint16_t exponent_max = (1 << 5) - 1;
+  uint16_t exponent_mask = exponent_max << 10;
+  uint16_t mantissa_mask = (1 << 10) - 1;
+
+  uint16_t exponent = (value & exponent_mask) >> 10;
+  uint16_t mantissa = value & mantissa_mask;
+  if (exponent == 0) {
+    if (mantissa == 0) {
+      return FP_ZERO;
+    }
+    return FP_SUBNORMAL;
+  } else if (exponent == exponent_max) {
+    if (mantissa == 0) {
+      return FP_INFINITE;
+    }
+    return FP_NAN;
+  }
+  return FP_NORMAL;
+}
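+
+// For example, float16classify(0x3c00) (half-precision 1.0) returns
+// FP_NORMAL, and float16classify(0x7c00) (infinity) returns FP_INFINITE.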
+
+
+unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size) {
+  VIXL_ASSERT((reg_size % 8) == 0);
+  int count = 0;
+  for (unsigned i = 0; i < (reg_size / 16); i++) {
+    if ((imm & 0xffff) == 0) {
+      count++;
+    }
+    imm >>= 16;
+  }
+  return count;
+}
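+
+// For example, CountClearHalfWords(UINT64_C(0x0000ffff00000000), 64) returns
+// 3, since three of the four 16-bit halfwords of the immediate are zero.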
+
+}  // namespace vixl
diff --git a/disas/libvixl/utils.h b/disas/libvixl/vixl/utils.h
index b4406263ac..5ab134e240 100644
--- a/disas/libvixl/utils.h
+++ b/disas/libvixl/vixl/utils.h
@@ -1,4 +1,4 @@
-// Copyright 2013, ARM Limited
+// Copyright 2015, ARM Limited
 // All rights reserved.
 //
 // Redistribution and use in source and binary forms, with or without
@@ -27,16 +27,17 @@
 #ifndef VIXL_UTILS_H
 #define VIXL_UTILS_H
 
-#include <math.h>
 #include <string.h>
-#include "globals.h"
+#include <cmath>
+#include "vixl/globals.h"
+#include "vixl/compiler-intrinsics.h"
 
 namespace vixl {
 
 // Macros for compile-time format checking.
-#if defined(__GNUC__)
+#if GCC_VERSION_OR_NEWER(4, 4, 0)
 #define PRINTF_CHECK(format_index, varargs_index) \
-  __attribute__((format(printf, format_index, varargs_index)))
+  __attribute__((format(gnu_printf, format_index, varargs_index)))
 #else
 #define PRINTF_CHECK(format_index, varargs_index)
 #endif
@@ -53,9 +54,9 @@ inline bool is_uintn(unsigned n, int64_t x) {
   return !(x >> n);
 }
 
-inline unsigned truncate_to_intn(unsigned n, int64_t x) {
+inline uint32_t truncate_to_intn(unsigned n, int64_t x) {
   VIXL_ASSERT((0 < n) && (n < 64));
-  return (x & ((INT64_C(1) << n) - 1));
+  return static_cast<uint32_t>(x & ((INT64_C(1) << n) - 1));
 }
 
 #define INT_1_TO_63_LIST(V)                                                    \
@@ -73,7 +74,7 @@ inline bool is_int##N(int64_t x) { return is_intn(N, x); }
 #define DECLARE_IS_UINT_N(N)                                                   \
 inline bool is_uint##N(int64_t x) { return is_uintn(N, x); }
 #define DECLARE_TRUNCATE_TO_INT_N(N)                                           \
-inline int truncate_to_int##N(int x) { return truncate_to_intn(N, x); }
+inline uint32_t truncate_to_int##N(int x) { return truncate_to_intn(N, x); }
 INT_1_TO_63_LIST(DECLARE_IS_INT_N)
 INT_1_TO_63_LIST(DECLARE_IS_UINT_N)
 INT_1_TO_63_LIST(DECLARE_TRUNCATE_TO_INT_N)
@@ -104,12 +105,24 @@ uint64_t double_to_rawbits(double value);
 float rawbits_to_float(uint32_t bits);
 double rawbits_to_double(uint64_t bits);
 
+uint32_t float_sign(float val);
+uint32_t float_exp(float val);
+uint32_t float_mantissa(float val);
+uint32_t double_sign(double val);
+uint32_t double_exp(double val);
+uint64_t double_mantissa(double val);
+
+float float_pack(uint32_t sign, uint32_t exp, uint32_t mantissa);
+double double_pack(uint64_t sign, uint64_t exp, uint64_t mantissa);
+
+// An fpclassify() function for 16-bit half-precision floats.
+int float16classify(float16 value);
 
 // NaN tests.
 inline bool IsSignallingNaN(double num) {
   const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000);
   uint64_t raw = double_to_rawbits(num);
-  if (isnan(num) && ((raw & kFP64QuietNaNMask) == 0)) {
+  if (std::isnan(num) && ((raw & kFP64QuietNaNMask) == 0)) {
     return true;
   }
   return false;
@@ -119,30 +132,37 @@ inline bool IsSignallingNaN(double num) {
 inline bool IsSignallingNaN(float num) {
   const uint32_t kFP32QuietNaNMask = 0x00400000;
   uint32_t raw = float_to_rawbits(num);
-  if (isnan(num) && ((raw & kFP32QuietNaNMask) == 0)) {
+  if (std::isnan(num) && ((raw & kFP32QuietNaNMask) == 0)) {
     return true;
   }
   return false;
 }
 
 
+inline bool IsSignallingNaN(float16 num) {
+  const uint16_t kFP16QuietNaNMask = 0x0200;
+  return (float16classify(num) == FP_NAN) &&
+         ((num & kFP16QuietNaNMask) == 0);
+}
+
+
 template <typename T>
 inline bool IsQuietNaN(T num) {
-  return isnan(num) && !IsSignallingNaN(num);
+  return std::isnan(num) && !IsSignallingNaN(num);
 }
 
 
 // Convert the NaN in 'num' to a quiet NaN.
 inline double ToQuietNaN(double num) {
   const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000);
-  VIXL_ASSERT(isnan(num));
+  VIXL_ASSERT(std::isnan(num));
   return rawbits_to_double(double_to_rawbits(num) | kFP64QuietNaNMask);
 }
 
 
 inline float ToQuietNaN(float num) {
   const uint32_t kFP32QuietNaNMask = 0x00400000;
-  VIXL_ASSERT(isnan(num));
+  VIXL_ASSERT(std::isnan(num));
   return rawbits_to_float(float_to_rawbits(num) | kFP32QuietNaNMask);
 }
 
@@ -158,16 +178,71 @@ inline float FusedMultiplyAdd(float op1, float op2, float a) {
 }
 
 
-// Bit counting.
-int CountLeadingZeros(uint64_t value, int width);
-int CountLeadingSignBits(int64_t value, int width);
-int CountTrailingZeros(uint64_t value, int width);
-int CountSetBits(uint64_t value, int width);
-uint64_t LowestSetBit(uint64_t value);
-bool IsPowerOf2(int64_t value);
+inline uint64_t LowestSetBit(uint64_t value) {
+  return value & -value;
+}
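+
+// For example, LowestSetBit(0x18) returns 0x8: in two's complement, -value
+// flips every bit above the lowest set bit, so the AND isolates that bit.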
+
+
+template<typename T>
+inline int HighestSetBitPosition(T value) {
+  VIXL_ASSERT(value != 0);
+  return (sizeof(value) * 8 - 1) - CountLeadingZeros(value);
+}
+
+
+template<typename V>
+inline int WhichPowerOf2(V value) {
+  VIXL_ASSERT(IsPowerOf2(value));
+  return CountTrailingZeros(value);
+}
+
 
 unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);
 
+
+template <typename T>
+T ReverseBits(T value) {
+  VIXL_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
+              (sizeof(value) == 4) || (sizeof(value) == 8));
+  T result = 0;
+  for (unsigned i = 0; i < (sizeof(value) * 8); i++) {
+    result = (result << 1) | (value & 1);
+    value >>= 1;
+  }
+  return result;
+}
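+
+// For example, ReverseBits<uint8_t>(0xb0) returns 0x0d.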
+
+
+template <typename T>
+T ReverseBytes(T value, int block_bytes_log2) {
+  VIXL_ASSERT((sizeof(value) == 4) || (sizeof(value) == 8));
+  VIXL_ASSERT((1U << block_bytes_log2) <= sizeof(value));
+  // Split the 64-bit value into an 8-bit array, where b[0] is the least
+  // significant byte, and b[7] is the most significant.
+  uint8_t bytes[8];
+  uint64_t mask = UINT64_C(0xff00000000000000);
+  for (int i = 7; i >= 0; i--) {
+    bytes[i] = (static_cast<uint64_t>(value) & mask) >> (i * 8);
+    mask >>= 8;
+  }
+
+  // Permutation tables for REV instructions.
+  //  permute_table[0] is used by REV16_x, REV16_w
+  //  permute_table[1] is used by REV32_x, REV_w
+  //  permute_table[2] is used by REV_x
+  VIXL_ASSERT((0 < block_bytes_log2) && (block_bytes_log2 < 4));
+  static const uint8_t permute_table[3][8] = { {6, 7, 4, 5, 2, 3, 0, 1},
+                                               {4, 5, 6, 7, 0, 1, 2, 3},
+                                               {0, 1, 2, 3, 4, 5, 6, 7} };
+  T result = 0;
+  for (int i = 0; i < 8; i++) {
+    result <<= 8;
+    result |= bytes[permute_table[block_bytes_log2 - 1][i]];
+  }
+  return result;
+}
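+
+// For example, ReverseBytes<uint32_t>(0x11223344, 2) returns 0x44332211 (the
+// REV_w byte order), and ReverseBytes<uint32_t>(0x11223344, 1) returns
+// 0x22114433 (REV16_w: bytes swapped within each halfword).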
+
+
 // Pointer alignment
 // TODO: rename/refactor to make it specific to instructions.
 template<typename T>