author     Anthony Liguori <anthony@codemonkey.ws>  2013-10-09 07:50:37 -0700
committer  Anthony Liguori <anthony@codemonkey.ws>  2013-10-09 07:51:23 -0700
commit     ce079abb410d685d48c1285bc6749d9b23c78c5c
tree       3947e0b5939a542437995a711a182ee4311bde29
parent     0e19885e736938c3f6bd8c139eca00728bb24384
parent     3df2b8fde949be86d8a78923c992fdd698d4ea4c
Merge remote-tracking branch 'sweil/tci' into staging
# By Stefan Weil
# Via Stefan Weil
* sweil/tci:
  misc: Use new rotate functions
  bitops: Add rotate functions (rol8, ror8, ...)
  tci: Add implementation of rotl_i64, rotr_i64

Message-id: 1380137693-3729-1-git-send-email-sw@weilnetz.de
Signed-off-by: Anthony Liguori <anthony@codemonkey.ws>
-rw-r--r--  include/qemu/bitops.h       | 80
-rw-r--r--  target-arm/iwmmxt_helper.c  |  2
-rw-r--r--  tcg/optimize.c              | 12
-rw-r--r--  tcg/tci/tcg-target.c        |  1
-rw-r--r--  tci.c                       | 14
5 files changed, 96 insertions, 13 deletions
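
The patch below adds rol/ror helpers for 8-, 16-, 32- and 64-bit values to include/qemu/bitops.h and switches the open-coded rotate expressions in target-arm/iwmmxt_helper.c, tcg/optimize.c and tci.c over to them; it also lets TCI implement the previously missing rotl_i64/rotr_i64 opcodes. As a quick sanity check (not part of the commit), the standalone sketch below copies the 32-bit helpers verbatim and exercises the usual rotate identities. Like the expressions they replace, the 32- and 64-bit helpers assume a shift count strictly between 0 and the operand width, since the (width - shift) term would otherwise be a full-width shift, which C leaves undefined.

/* Standalone sketch, not QEMU code: rol32/ror32 are copied from the new
 * bitops.h definitions so the example builds without a QEMU tree. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static inline uint32_t rol32(uint32_t word, unsigned int shift)
{
    return (word << shift) | (word >> (32 - shift));
}

static inline uint32_t ror32(uint32_t word, unsigned int shift)
{
    return (word >> shift) | (word << (32 - shift));
}

int main(void)
{
    uint32_t x = 0x80000001u;
    unsigned int n;

    /* Keep n in 1..31; n == 0 would make (32 - n) a full-width shift. */
    for (n = 1; n < 32; n++) {
        assert(ror32(rol32(x, n), n) == x);      /* rotates are inverses */
        assert(rol32(x, n) == ror32(x, 32 - n)); /* left by n == right by 32-n */
    }
    printf("rol32(0x%08x, 4) = 0x%08x\n", x, rol32(x, 4));
    return 0;
}
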
diff --git a/include/qemu/bitops.h b/include/qemu/bitops.h
index 06e2e6f0ee..304c90c2b4 100644
--- a/include/qemu/bitops.h
+++ b/include/qemu/bitops.h
@@ -184,6 +184,86 @@ static inline unsigned long hweight_long(unsigned long w)
 }
 
 /**
+ * rol8 - rotate an 8-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline uint8_t rol8(uint8_t word, unsigned int shift)
+{
+    return (word << shift) | (word >> (8 - shift));
+}
+
+/**
+ * ror8 - rotate an 8-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline uint8_t ror8(uint8_t word, unsigned int shift)
+{
+    return (word >> shift) | (word << (8 - shift));
+}
+
+/**
+ * rol16 - rotate a 16-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline uint16_t rol16(uint16_t word, unsigned int shift)
+{
+    return (word << shift) | (word >> (16 - shift));
+}
+
+/**
+ * ror16 - rotate a 16-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline uint16_t ror16(uint16_t word, unsigned int shift)
+{
+    return (word >> shift) | (word << (16 - shift));
+}
+
+/**
+ * rol32 - rotate a 32-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline uint32_t rol32(uint32_t word, unsigned int shift)
+{
+    return (word << shift) | (word >> (32 - shift));
+}
+
+/**
+ * ror32 - rotate a 32-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline uint32_t ror32(uint32_t word, unsigned int shift)
+{
+    return (word >> shift) | (word << (32 - shift));
+}
+
+/**
+ * rol64 - rotate a 64-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline uint64_t rol64(uint64_t word, unsigned int shift)
+{
+    return (word << shift) | (word >> (64 - shift));
+}
+
+/**
+ * ror64 - rotate a 64-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline uint64_t ror64(uint64_t word, unsigned int shift)
+{
+    return (word >> shift) | (word << (64 - shift));
+}
+
+/**
  * extract32:
  * @value: the value to extract the bit field from
  * @start: the lowest bit in the bit field (numbered from 0)
diff --git a/target-arm/iwmmxt_helper.c b/target-arm/iwmmxt_helper.c
index 7953b53f7e..e6cfa62da8 100644
--- a/target-arm/iwmmxt_helper.c
+++ b/target-arm/iwmmxt_helper.c
@@ -577,7 +577,7 @@ uint64_t HELPER(iwmmxt_rorl)(CPUARMState *env, uint64_t x, uint32_t n)
 
 uint64_t HELPER(iwmmxt_rorq)(CPUARMState *env, uint64_t x, uint32_t n)
 {
-    x = (x >> n) | (x << (64 - n));
+    x = ror64(x, n);
     env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x);
     return x;
 }
diff --git a/tcg/optimize.c b/tcg/optimize.c
index b29bf25b67..89e2d6a3b3 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -238,20 +238,16 @@ static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y)
         return (int64_t)x >> (int64_t)y;
 
     case INDEX_op_rotr_i32:
-        x = ((uint32_t)x << (32 - y)) | ((uint32_t)x >> y);
-        return x;
+        return ror32(x, y);
 
     case INDEX_op_rotr_i64:
-        x = ((uint64_t)x << (64 - y)) | ((uint64_t)x >> y);
-        return x;
+        return ror64(x, y);
 
     case INDEX_op_rotl_i32:
-        x = ((uint32_t)x << y) | ((uint32_t)x >> (32 - y));
-        return x;
+        return rol32(x, y);
 
     case INDEX_op_rotl_i64:
-        x = ((uint64_t)x << y) | ((uint64_t)x >> (64 - y));
-        return x;
+        return rol64(x, y);
 
     CASE_OP_32_64(not):
         return ~x;
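
With the helpers in place, the rotate cases in do_constant_folding_2() above reduce to single calls: when both operands of a rotate are constant at translation time, the optimizer folds the operation to the rotated constant. A small worked example (again standalone, not QEMU code) of the value the rotr_i32 case computes:

#include <stdint.h>
#include <stdio.h>

/* Copy of the new ror32 helper for a standalone build. */
static inline uint32_t ror32(uint32_t word, unsigned int shift)
{
    return (word >> shift) | (word << (32 - shift));
}

int main(void)
{
    /* A rotr_i32 of constant 0x12345678 by 8 folds to 0x78123456. */
    printf("0x%08x\n", ror32(0x12345678u, 8));
    return 0;
}
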
diff --git a/tcg/tci/tcg-target.c b/tcg/tci/tcg-target.c
index 233ab3bf35..4976becbe7 100644
--- a/tcg/tci/tcg-target.c
+++ b/tcg/tci/tcg-target.c
@@ -670,7 +670,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
     case INDEX_op_shl_i64:
     case INDEX_op_shr_i64:
     case INDEX_op_sar_i64:
-        /* TODO: Implementation of rotl_i64, rotr_i64 missing in tci.c. */
     case INDEX_op_rotl_i64:     /* Optional (TCG_TARGET_HAS_rot_i64). */
     case INDEX_op_rotr_i64:     /* Optional (TCG_TARGET_HAS_rot_i64). */
         tcg_out_r(s, args[0]);
diff --git a/tci.c b/tci.c
index 6d64891557..0202ed97d1 100644
--- a/tci.c
+++ b/tci.c
@@ -688,13 +688,13 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
             t0 = *tb_ptr++;
             t1 = tci_read_ri32(&tb_ptr);
             t2 = tci_read_ri32(&tb_ptr);
-            tci_write_reg32(t0, (t1 << t2) | (t1 >> (32 - t2)));
+            tci_write_reg32(t0, rol32(t1, t2));
             break;
         case INDEX_op_rotr_i32:
             t0 = *tb_ptr++;
             t1 = tci_read_ri32(&tb_ptr);
             t2 = tci_read_ri32(&tb_ptr);
-            tci_write_reg32(t0, (t1 >> t2) | (t1 << (32 - t2)));
+            tci_write_reg32(t0, ror32(t1, t2));
             break;
 #endif
 #if TCG_TARGET_HAS_deposit_i32
@@ -952,8 +952,16 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
             break;
 #if TCG_TARGET_HAS_rot_i64
         case INDEX_op_rotl_i64:
+            t0 = *tb_ptr++;
+            t1 = tci_read_ri64(&tb_ptr);
+            t2 = tci_read_ri64(&tb_ptr);
+            tci_write_reg64(t0, rol64(t1, t2));
+            break;
         case INDEX_op_rotr_i64:
-            TODO();
+            t0 = *tb_ptr++;
+            t1 = tci_read_ri64(&tb_ptr);
+            t2 = tci_read_ri64(&tb_ptr);
+            tci_write_reg64(t0, ror64(t1, t2));
             break;
 #endif
 #if TCG_TARGET_HAS_deposit_i64