| author | Anthony Liguori <aliguori@us.ibm.com> | 2012-03-14 16:47:49 -0500 |
|---|---|---|
| committer | Anthony Liguori <aliguori@us.ibm.com> | 2012-03-14 16:47:49 -0500 |
| commit | aea6ff7fa07b046fb9f43d6262d6e34b77e8437e (patch) | |
| tree | dd3043d1742273a95fa7fc5e99b8d5ffe0c710e5 /tcg/arm/tcg-target.c | |
| parent | 9e4dd565b46749d5e6d5cf87bfd84f1917c68319 (diff) | |
| parent | dd83b06ae61cfa2dc4381ab49f365bd0995fc930 (diff) | |
Merge remote-tracking branch 'afaerber/qom-cpu.v5' into staging
* afaerber/qom-cpu.v5: (43 commits)
  - qom: Introduce CPU class
  - Rename CPUState -> CPUArchState
  - xtensa hw/: Don't use CPUState
  - sparc hw/: Don't use CPUState
  - sh4 hw/: Don't use CPUState
  - s390x hw/: Don't use CPUState
  - ppc hw/: Don't use CPUState
  - mips hw/: Don't use CPUState
  - microblaze hw/: Don't use CPUState
  - m68k hw/: Don't use CPUState
  - lm32 hw/: Don't use CPUState
  - i386 hw/: Don't use CPUState
  - cris hw/: Don't use CPUState
  - arm hw/: Don't use CPUState
  - alpha hw/: Don't use CPUState
  - xtensa-semi: Don't use CPUState
  - m68k-semi: Don't use CPUState
  - arm-semi: Don't use CPUState
  - target-xtensa: Don't overuse CPUState
  - target-unicore32: Don't overuse CPUState
  - ...
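The central change in this series is a naming split: the per-target architectural register state that TCG and the softmmu TLB code operate on is now called CPUArchState, while the name CPUState is freed up for the common QOM CPU class introduced by "qom: Introduce CPU class". A minimal sketch of what that split looks like conceptually (the struct contents and the typedef/#define mechanics below are illustrative assumptions, not QEMU's real definitions):

```c
#include <stdint.h>

/* Sketch only: the per-target architectural state (registers, softmmu TLB,
 * ...) that TCG generates code against.  Field names are illustrative. */
typedef struct CPUARMState {
    uint32_t regs[16];              /* general-purpose registers */
    /* ... tlb_table, temp_buf, and other per-target fields ... */
} CPUARMState;

/* After the rename, target code refers to this state as CPUArchState
 * (previously it was exposed under the name CPUState). */
#define CPUArchState CPUARMState

/* The name CPUState is now reserved for the common, target-independent
 * QOM CPU object introduced by "qom: Introduce CPU class". */
typedef struct CPUState {
    /* Object parent_obj;  (QOM base class, omitted in this sketch) */
    int cpu_index;
    /* ... other common, target-independent fields ... */
} CPUState;
```

Because this is a pure rename of the per-target state, the diff below only touches offsetof() expressions and comments in the ARM TCG backend; the structure layout itself is unchanged.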
Diffstat (limited to 'tcg/arm/tcg-target.c')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | tcg/arm/tcg-target.c | 22 |
1 file changed, 11 insertions, 11 deletions
```diff
diff --git a/tcg/arm/tcg-target.c b/tcg/arm/tcg-target.c
index 5b233f564c..5af21b3f5d 100644
--- a/tcg/arm/tcg-target.c
+++ b/tcg/arm/tcg-target.c
@@ -990,10 +990,10 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
     tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_AREG0,
                     TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
     /* In the
-     *  ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_read))]
+     *  ldr r1 [r0, #(offsetof(CPUArchState, tlb_table[mem_index][0].addr_read))]
      * below, the offset is likely to exceed 12 bits if mem_index != 0 and
      * not exceed otherwise, so use an
-     *  add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
+     *  add r0, r0, #(mem_index * sizeof *CPUArchState.tlb_table)
      * before.
      */
     if (mem_index)
@@ -1001,7 +1001,7 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
                         (mem_index << (TLB_SHIFT & 1)) |
                         ((16 - (TLB_SHIFT >> 1)) << 8));
     tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R0,
-                    offsetof(CPUState, tlb_table[0][0].addr_read));
+                    offsetof(CPUArchState, tlb_table[0][0].addr_read));
     tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
                     TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
     /* Check alignment.  */
@@ -1012,12 +1012,12 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
     /* XXX: possibly we could use a block data load or writeback in
      * the first access.  */
     tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
-                    offsetof(CPUState, tlb_table[0][0].addr_read) + 4);
+                    offsetof(CPUArchState, tlb_table[0][0].addr_read) + 4);
     tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                     TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
 # endif
     tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
-                    offsetof(CPUState, tlb_table[0][0].addend));
+                    offsetof(CPUArchState, tlb_table[0][0].addend));
 
     switch (opc) {
     case 0:
@@ -1210,10 +1210,10 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
     tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_AREG0,
                     TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
     /* In the
-     *  ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_write))]
+     *  ldr r1 [r0, #(offsetof(CPUArchState, tlb_table[mem_index][0].addr_write))]
      * below, the offset is likely to exceed 12 bits if mem_index != 0 and
      * not exceed otherwise, so use an
-     *  add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
+     *  add r0, r0, #(mem_index * sizeof *CPUArchState.tlb_table)
      * before.
      */
     if (mem_index)
@@ -1221,7 +1221,7 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
                         (mem_index << (TLB_SHIFT & 1)) |
                         ((16 - (TLB_SHIFT >> 1)) << 8));
     tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R0,
-                    offsetof(CPUState, tlb_table[0][0].addr_write));
+                    offsetof(CPUArchState, tlb_table[0][0].addr_write));
     tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
                     TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
     /* Check alignment.  */
@@ -1232,12 +1232,12 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
     /* XXX: possibly we could use a block data load or writeback in
      * the first access.  */
     tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
-                    offsetof(CPUState, tlb_table[0][0].addr_write) + 4);
+                    offsetof(CPUArchState, tlb_table[0][0].addr_write) + 4);
     tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                     TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
 # endif
     tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
-                    offsetof(CPUState, tlb_table[0][0].addend));
+                    offsetof(CPUArchState, tlb_table[0][0].addend));
 
     switch (opc) {
     case 0:
@@ -1797,7 +1797,7 @@ static void tcg_target_init(TCGContext *s)
     tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);
 
     tcg_add_target_add_op_defs(arm_op_defs);
-    tcg_set_frame(s, TCG_AREG0, offsetof(CPUState, temp_buf),
+    tcg_set_frame(s, TCG_AREG0, offsetof(CPUArchState, temp_buf),
                   CPU_TEMP_BUF_NLONGS * sizeof(long));
 }
```
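The comment kept in both hunks records the instruction-selection constraint behind this code: the ARM ldr/str immediate-offset encoding carries only 12 bits (0-4095), which is why the helper is named tcg_out_ld32_12. The offset offsetof(CPUArchState, tlb_table[mem_index][0].addr_read) therefore fits in a single load only for mem_index == 0; for other MMU modes the backend first emits an add of mem_index * sizeof *tlb_table. A standalone sketch of the arithmetic, using assumed TLB sizes and an assumed entry layout rather than QEMU's actual ones:

```c
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Sketch only: shows why offsetof(env, tlb_table[mem_index][0].addr_read)
 * can overflow ARM's 12-bit ldr/str immediate once mem_index != 0.
 * The sizes below are assumptions for illustration, not QEMU's layout. */

#define CPU_TLB_BITS 8      /* assumed: 256 TLB entries per MMU mode */
#define NB_MMU_MODES 3      /* assumed number of MMU modes */

typedef struct CPUTLBEntry {
    uint32_t addr_read;
    uint32_t addr_write;
    uint32_t addr_code;
    uint32_t addend;
} CPUTLBEntry;              /* 16 bytes in this sketch */

typedef struct CPUArchStateSketch {
    CPUTLBEntry tlb_table[NB_MMU_MODES][1 << CPU_TLB_BITS];
    /* ... other per-target fields ... */
} CPUArchStateSketch;

int main(void)
{
    for (int mem_index = 0; mem_index < NB_MMU_MODES; mem_index++) {
        /* Total displacement from env if a single ldr were used:
         * fixed offset of tlb_table[0][0].addr_read plus one full
         * row of tlb_table per MMU mode (mirrors "sizeof *tlb_table"). */
        size_t off = offsetof(CPUArchStateSketch, tlb_table[0][0].addr_read)
                     + mem_index * sizeof(((CPUArchStateSketch *)0)->tlb_table[0]);
        /* ARM ldr/str immediate offsets are 12 bits: 0..4095. */
        printf("mem_index=%d offset=%zu fits_in_12_bits=%s\n",
               mem_index, off, off <= 4095 ? "yes" : "no");
    }
    return 0;
}
```

With the assumed 16-byte entry and 256 entries per mode, each row of tlb_table is exactly 4096 bytes, so any mem_index other than 0 pushes the combined offset past the 12-bit limit, matching the "likely to exceed 12 bits if mem_index != 0" note in the comment above.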