This code should now build the x86_64-softmmu target, part 2.
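Aside (not part of the diff): the changes below lean on a small set of portability macros, QEMU_UNUSED_VAR, QEMU_UNUSED_FUNC, QEMU_ALIGN, QEMU_PACK and INITIALIZER, which wrap GCC attributes so the same source also compiles under MSVC. Their real definitions live in the project's compiler headers; what follows is only a hedged sketch of what they plausibly expand to (QEMU_PACK and INITIALIZER are sketched further down, next to the hunks that use them).

#include <stdint.h>

/* Hedged sketch only -- the project's actual macro definitions may differ. */
#ifdef _MSC_VER
# define QEMU_UNUSED_VAR                  /* MSVC has no unused-variable attribute */
# define QEMU_UNUSED_FUNC
# define QEMU_ALIGN(n, decl)  __declspec(align(n)) decl
#else
# define QEMU_UNUSED_VAR      __attribute__((unused))
# define QEMU_UNUSED_FUNC     __attribute__((unused))
# define QEMU_ALIGN(n, decl)  decl __attribute__((aligned(n)))
#endif

/* Example use, mirroring the declarations changed in this commit: */
typedef struct {
    uint64_t QEMU_ALIGN(16, lo);   /* 16-byte aligned on both compilers */
    uint64_t hi;
} insn_unit_example;

static QEMU_UNUSED_FUNC void example(void)
{
    int scratch QEMU_UNUSED_VAR = 0;   /* no "unused variable" warning on GCC */
    (void)scratch;
}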
@@ -1464,7 +1464,7 @@ static inline void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc,

static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
{
    TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
    TCGReg addrlo, datalo, datahi, addrhi QEMU_UNUSED_VAR;
    TCGMemOp opc;
#ifdef CONFIG_SOFTMMU
    int mem_index;

@@ -1593,7 +1593,7 @@ static inline void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc,

static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
{
    TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
    TCGReg addrlo, datalo, datahi, addrhi QEMU_UNUSED_VAR;
    TCGMemOp opc;
#ifdef CONFIG_SOFTMMU
    int mem_index;
@@ -77,7 +77,13 @@ static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_R8,
    TCG_REG_R9,
#else
    /* 32 bit mode uses stack based calling convention (GCC default). */
    /* 32 bit mode uses stack based calling convention (GCC default).
       We add a dummy value here for MSVC compatibility for the error:
       "error C2466: cannot allocate an array of constant size 0"
       The "tcg_target_call_iarg_regs" array is not accessed when
       TCG_TARGET_REG_BITS == 32
    */
    0,
#endif
};
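Aside (not from the commit): the comment added above exists because MSVC rejects arrays whose initializer leaves them with zero elements, while GCC accepts the empty list as a zero-length array. A minimal reproduction of the problem and of the dummy-element workaround, assuming only the 64-bit branch ever populates the table:

/* Standalone sketch, not project code. */
#define REG_BITS 32   /* pretend we are building the 32-bit target */

static const int call_iarg_regs[] = {
#if REG_BITS == 64
    1, 2, 3,
#else
    0,   /* dummy entry: without it the array has size 0 and MSVC
            reports "error C2466: cannot allocate an array of constant size 0" */
#endif
};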
@@ -108,8 +114,19 @@ static const int tcg_target_call_oarg_regs[] = {
   detection, as we're not going to go so far as our own inline assembly.
   If not available, default values will be assumed. */
#if defined(CONFIG_CPUID_H)
#ifdef _MSC_VER
#include <intrin.h>
/* %ecx */
#define bit_MOVBE (1 << 22)
/* %edx */
#define bit_CMOV (1 << 15)
/* Extended Features (%eax == 7) */
#define bit_BMI (1 << 3)
#define bit_BMI2 (1 << 8)
#else
#include <cpuid.h>
#endif
#endif

/* For 32-bit, we are going to attempt to determine at runtime whether cmov
   is available. */
@@ -393,7 +410,25 @@ static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
#define JCC_JG 0xf

static const uint8_t tcg_cond_to_jcc[] = {
    [TCG_COND_EQ] = JCC_JE,
#ifdef _MSC_VER
    0,        // TCG_COND_NEVER
    0,        // TCG_COND_ALWAYS
    JCC_JL,   // TCG_COND_LT
    JCC_JGE,  // TCG_COND_GE
    JCC_JB,   // TCG_COND_LTU
    JCC_JAE,  // TCG_COND_GEU
    0,        // n/a
    0,        // n/a
    JCC_JE,   // TCG_COND_EQ
    JCC_JNE,  // TCG_COND_NE
    JCC_JLE,  // TCG_COND_LE
    JCC_JG,   // TCG_COND_GT
    JCC_JBE,  // TCG_COND_LEU
    JCC_JA,   // TCG_COND_GTU
    0,        // n/a
    0,        // n/a
#else
    [TCG_COND_EQ] = JCC_JE,
    [TCG_COND_NE] = JCC_JNE,
    [TCG_COND_LT] = JCC_JL,
    [TCG_COND_GE] = JCC_JGE,

@@ -403,6 +438,7 @@ static const uint8_t tcg_cond_to_jcc[] = {
    [TCG_COND_GEU] = JCC_JAE,
    [TCG_COND_LEU] = JCC_JBE,
    [TCG_COND_GTU] = JCC_JA,
#endif
};

#if TCG_TARGET_REG_BITS == 64
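Aside (not from the commit): the positional table added under _MSC_VER exists because the MSVC C compiler targeted here does not accept C99 designated initializers, so every slot has to be written out in TCGCond declaration order. A toy illustration of the two equivalent spellings (the enum values are made up for the sketch):

/* Standalone sketch, not project code. */
enum cond { COND_NEVER, COND_ALWAYS, COND_LT, COND_GE };

/* C99 / GCC style: order in the source does not matter. */
static const unsigned char jcc_designated[] = {
    [COND_GE] = 0xd,
    [COND_LT] = 0xc,
};

/* Positional style used for MSVC: must follow the enum order exactly. */
static const unsigned char jcc_positional[] = {
    0,     /* COND_NEVER  */
    0,     /* COND_ALWAYS */
    0xc,   /* COND_LT     */
    0xd,   /* COND_GE     */
};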
@@ -843,7 +879,7 @@ static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
}

/* Use SMALL != 0 to force a short forward branch. */
static void tcg_out_jxx(TCGContext *s, int opc, int label_index, int small)
static void tcg_out_jxx(TCGContext *s, int opc, int label_index, int smallflag)
{
    int32_t val, val1;
    TCGLabel *l = &s->labels[label_index];

@@ -859,7 +895,7 @@ static void tcg_out_jxx(TCGContext *s, int opc, int label_index, int small)
            }
            tcg_out8(s, val1);
        } else {
            if (small) {
            if (smallflag) {
                tcg_abort();
            }
            if (opc == -1) {

@@ -870,7 +906,7 @@ static void tcg_out_jxx(TCGContext *s, int opc, int label_index, int small)
                tcg_out32(s, val - 6);
            }
        }
    } else if (small) {
    } else if (smallflag) {
        if (opc == -1) {
            tcg_out8(s, OPC_JMP_short);
        } else {

@@ -906,25 +942,25 @@ static void tcg_out_cmp(TCGContext *s, TCGArg arg1, TCGArg arg2,

static void tcg_out_brcond32(TCGContext *s, TCGCond cond,
                             TCGArg arg1, TCGArg arg2, int const_arg2,
                             int label_index, int small)
                             int label_index, int smallflag)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2, 0);
    tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, small);
    tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, smallflag);
}

#if TCG_TARGET_REG_BITS == 64
static void tcg_out_brcond64(TCGContext *s, TCGCond cond,
                             TCGArg arg1, TCGArg arg2, int const_arg2,
                             int label_index, int small)
                             int label_index, int smallflag)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW);
    tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, small);
    tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, smallflag);
}
#else
/* XXX: we implement it at the target level to avoid having to
   handle cross basic blocks temporaries */
static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
                            const int *const_args, int small)
                            const int *const_args, int smallflag)
{
    int label_next;
    label_next = gen_new_label(s);

@@ -933,69 +969,69 @@ static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
        tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
                         label_next, 1);
        tcg_out_brcond32(s, TCG_COND_EQ, args[1], args[3], const_args[3],
                         args[5], small);
                         args[5], smallflag);
        break;
    case TCG_COND_NE:
        tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
                         args[5], small);
                         args[5], smallflag);
        tcg_out_brcond32(s, TCG_COND_NE, args[1], args[3], const_args[3],
                         args[5], small);
                         args[5], smallflag);
        break;
    case TCG_COND_LT:
        tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
                         args[5], small);
                         args[5], smallflag);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],
                         args[5], small);
                         args[5], smallflag);
        break;
    case TCG_COND_LE:
        tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
                         args[5], small);
                         args[5], smallflag);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],
                         args[5], small);
                         args[5], smallflag);
        break;
    case TCG_COND_GT:
        tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
                         args[5], small);
                         args[5], smallflag);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],
                         args[5], small);
                         args[5], smallflag);
        break;
    case TCG_COND_GE:
        tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
                         args[5], small);
                         args[5], smallflag);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],
                         args[5], small);
                         args[5], smallflag);
        break;
    case TCG_COND_LTU:
        tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
                         args[5], small);
                         args[5], smallflag);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],
                         args[5], small);
                         args[5], smallflag);
        break;
    case TCG_COND_LEU:
        tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
                         args[5], small);
                         args[5], smallflag);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],
                         args[5], small);
                         args[5], smallflag);
        break;
    case TCG_COND_GTU:
        tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
                         args[5], small);
                         args[5], smallflag);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],
                         args[5], small);
                         args[5], smallflag);
        break;
    case TCG_COND_GEU:
        tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
                         args[5], small);
                         args[5], smallflag);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],
                         args[5], small);
                         args[5], smallflag);
        break;
    default:
        tcg_abort();
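Aside (not from the commit): the rename of the "small" parameter to "smallflag" throughout these functions is presumably needed because a Windows SDK header (rpcndr.h) defines "small" as a macro for "char", which breaks any declaration that uses it as an identifier once windows.h is included. A minimal illustration of the clash:

/* Standalone sketch, not project code. */
#define small char   /* effectively what <rpcndr.h> does on Windows */

/* OK: the renamed parameter does not collide with the macro. */
static void out_jxx(int opc, int label_index, int smallflag)
{
    (void)opc; (void)label_index; (void)smallflag;
}

/* Broken under MSVC headers: "int small" would expand to "int char".
static void out_jxx_old(int opc, int label_index, int small);
*/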
@@ -1118,19 +1154,94 @@ static void tcg_out_jmp(TCGContext *s, tcg_insn_unit *dest)
 *                                     int mmu_idx, uintptr_t ra)
 */
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
#ifdef _MSC_VER
    helper_ret_ldub_mmu,  // MO_UB
# ifdef HOST_WORDS_BIGENDIAN
    helper_be_lduw_mmu,   // MO_BEUW
    helper_be_ldul_mmu,   // MO_BEUL
    helper_be_ldq_mmu,    // MO_BEQ
    0,                    // MO_SB
    0,                    // MO_BESW
    0,                    // MO_BESL
    0,                    // n/a
    0,                    // n/a
    helper_le_lduw_mmu,   // MO_LEUW
    helper_le_ldul_mmu,   // MO_LEUL
    helper_le_ldq_mmu,    // MO_LEQ
    0,                    // n/a
    0,                    // MO_LESW
    0,                    // MO_LESL
    0,                    // n/a
# else // !HOST_WORDS_BIGENDIAN
    helper_le_lduw_mmu,   // MO_LEUW
    helper_le_ldul_mmu,   // MO_LEUL
    helper_le_ldq_mmu,    // MO_LEQ
    0,                    // MO_SB
    0,                    // MO_LESW
    0,                    // MO_LESL
    0,                    // n/a
    0,                    // n/a
    helper_be_lduw_mmu,   // MO_BEUW
    helper_be_ldul_mmu,   // MO_BEUL
    helper_be_ldq_mmu,    // MO_BEQ
    0,                    // n/a
    0,                    // MO_BESW
    0,                    // MO_BESL
    0,                    // n/a
# endif // HOST_WORDS_BIGENDIAN

#else //_MSC_VER
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BEQ]  = helper_be_ldq_mmu,
#endif // _MSC_VER
};

/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
 *                                     uintxx_t val, int mmu_idx, uintptr_t ra)
 */
static void * const qemu_st_helpers[16] = {
#ifdef _MSC_VER
    helper_ret_stb_mmu,   // MO_UB
# ifdef HOST_WORDS_BIGENDIAN
    helper_be_stw_mmu,    // MO_BEUW
    helper_be_stl_mmu,    // MO_BEUL
    helper_be_stq_mmu,    // MO_BEQ
    0,                    // MO_SB
    0,                    // MO_BESW
    0,                    // MO_BESL
    0,                    // n/a
    0,                    // n/a
    helper_le_stw_mmu,    // MO_LEUW
    helper_le_stl_mmu,    // MO_LEUL
    helper_le_stq_mmu,    // MO_LEQ
    0,                    // n/a
    0,                    // MO_LESW
    0,                    // MO_LESL
    0,                    // n/a
# else // !HOST_WORDS_BIGENDIAN
    helper_le_stw_mmu,    // MO_LEUW
    helper_le_stl_mmu,    // MO_LEUL
    helper_le_stq_mmu,    // MO_LEQ
    0,                    // MO_SB
    0,                    // MO_LESW
    0,                    // MO_LESL
    0,                    // n/a
    0,                    // n/a
    helper_be_stw_mmu,    // MO_BEUW
    helper_be_stl_mmu,    // MO_BEUL
    helper_be_stq_mmu,    // MO_BEQ
    0,                    // n/a
    0,                    // MO_BESW
    0,                    // MO_BESL
    0,                    // n/a
# endif // HOST_WORDS_BIGENDIAN

#else //_MSC_VER
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,

@@ -1138,6 +1249,7 @@ static void * const qemu_st_helpers[16] = {
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
#endif // _MSC_VER
};
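Aside (not from the commit): the slot comments in the two positional tables above (MO_UB at index 0, MO_BEUW at index 1 on a big-endian host, and so on) follow from how TCGMemOp values are composed. The sketch below restates that layout as an assumption derived from those comments; the authoritative enum lives in tcg.h.

/* Standalone sketch, not project code. */
enum {
    MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3,   /* access size */
    MO_SIGN  = 4,                                 /* sign-extending load */
    MO_BSWAP = 8,                                 /* opposite of host endianness */
};

/* e.g. on a little-endian host:
 *   MO_LEUW = MO_16            = 1
 *   MO_BEUW = MO_BSWAP | MO_16 = 9
 * which matches the index comments in the positional tables above. */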
/* Perform the TLB load and compare.

@@ -1521,7 +1633,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
{
    TCGReg datalo, datahi, addrlo;
    TCGReg addrhi __attribute__((unused));
    TCGReg addrhi QEMU_UNUSED_VAR;
    TCGMemOp opc;
#if defined(CONFIG_SOFTMMU)
    int mem_index;

@@ -1652,7 +1764,7 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
{
    TCGReg datalo, datahi, addrlo;
    TCGReg addrhi __attribute__((unused));
    TCGReg addrhi QEMU_UNUSED_VAR;
    TCGMemOp opc;
#if defined(CONFIG_SOFTMMU)
    int mem_index;

@@ -2059,9 +2171,9 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
}

static const TCGTargetOpDef x86_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_br, { } },
    { INDEX_op_exit_tb, { NULL } },
    { INDEX_op_goto_tb, { NULL } },
    { INDEX_op_br, { NULL } },
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
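Aside (not from the commit): "{ INDEX_op_exit_tb, { } }" relies on an empty brace initializer, which GCC accepts but MSVC rejects; supplying a single NULL element keeps the aggregate non-empty without changing its meaning, since remaining members are zero-filled anyway. Reduced example:

/* Standalone sketch, not project code. */
typedef struct {
    int op;
    const char *args[4];
} op_def_example;

static const op_def_example defs[] = {
    /* { 1, { } },          GCC-only: empty initializer list           */
    { 1, { NULL } },     /* portable: remaining slots are zero-filled */
};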
@@ -2275,10 +2387,26 @@ static void tcg_target_init(TCGContext *s)
{
#ifdef CONFIG_CPUID_H
    unsigned a, b, c, d;
    int max = __get_cpuid_max(0, 0);
    int max;

#ifdef _MSC_VER
    int cpu_info[4];
    __cpuid(cpu_info, 0);
    max = cpu_info[0];
#else
    max = __get_cpuid_max(0, 0);
#endif

    if (max >= 1) {
#ifdef _MSC_VER
        __cpuid(cpu_info, 1);
        a = cpu_info[0];
        b = cpu_info[1];
        c = cpu_info[2];
        d = cpu_info[3];
#else
        __cpuid(1, a, b, c, d);
#endif
#ifndef have_cmov
        /* For 32-bit, 99% certainty that we're running on hardware that
           supports cmov, but we still need to check.  In case cmov is not

@@ -2294,7 +2422,11 @@ static void tcg_target_init(TCGContext *s)

    if (max >= 7) {
        /* BMI1 is available on AMD Piledriver and Intel Haswell CPUs. */
#ifdef _MSC_VER
        __cpuidex(cpu_info, 7, 0);
#else
        __cpuid_count(7, 0, a, b, c, d);
#endif
#ifdef bit_BMI
        have_bmi1 = (b & bit_BMI) != 0;
#endif
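Aside (not from the commit): on MSVC the GNU <cpuid.h> helpers (__get_cpuid_max, __cpuid_count) are replaced with the <intrin.h> intrinsics __cpuid and __cpuidex, which write EAX..EDX into a four-element array. A self-contained, MSVC-only sketch of the same feature probe, reusing the bit mask defined near the top of the diff:

/* Standalone MSVC-only sketch, not project code. */
#include <intrin.h>
#include <stdio.h>

#define bit_CMOV (1 << 15)   /* EDX bit, as defined earlier in the diff */

int main(void)
{
    int info[4];              /* info[0..3] receive EAX, EBX, ECX, EDX */
    __cpuid(info, 0);         /* leaf 0: highest supported standard leaf */
    if (info[0] >= 1) {
        __cpuid(info, 1);     /* leaf 1: feature flags */
        printf("cmov supported: %s\n", (info[3] & bit_CMOV) ? "yes" : "no");
    }
    return 0;
}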
@@ -27,7 +27,7 @@

#define TCG_TARGET_INSN_UNIT_SIZE 16
typedef struct {
    uint64_t lo __attribute__((aligned(16)));
    uint64_t QEMU_ALIGN(16, lo);
    uint64_t hi;
} tcg_insn_unit;
@@ -1150,7 +1150,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,

static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg addr_regl, addr_regh __attribute__((unused));
    TCGReg addr_regl, addr_regh QEMU_UNUSED_VAR;
    TCGReg data_regl, data_regh;
    TCGMemOp opc;
#if defined(CONFIG_SOFTMMU)

@@ -1279,7 +1279,7 @@ static void tcg_out_addsub2(TCGContext *s, TCGReg rl, TCGReg rh, TCGReg al,

static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg addr_regl, addr_regh __attribute__((unused));
    TCGReg addr_regl, addr_regh QEMU_UNUSED_VAR;
    TCGReg data_regl, data_regh, base;
    TCGMemOp opc;
#if defined(CONFIG_SOFTMMU)
@@ -105,13 +105,13 @@ static TCGArg find_better_copy(TCGContext *s, TCGArg temp)
    TCGArg i;

    /* If this is already a global, we can't do better. */
    if (temp < s->nb_globals) {
    if (temp < (unsigned int)s->nb_globals) {
        return temp;
    }

    /* Search for a global first. */
    for (i = temps[temp].next_copy ; i != temp ; i = temps[i].next_copy) {
        if (i < s->nb_globals) {
        if (i < (unsigned int)s->nb_globals) {
            return i;
        }
    }

@@ -257,19 +257,19 @@ static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y)
        return ror32(x, y & 31);

    case INDEX_op_rotr_i64:
        return ror64(x, y & 63);
        return (TCGArg)ror64(x, y & 63);

    case INDEX_op_rotl_i32:
        return rol32(x, y & 31);

    case INDEX_op_rotl_i64:
        return rol64(x, y & 63);
        return (TCGArg)rol64(x, y & 63);

    CASE_OP_32_64(not):
        return ~x;

    CASE_OP_32_64(neg):
        return -x;
        return 0-x;

    CASE_OP_32_64(andc):
        return x & ~y;

@@ -311,29 +311,29 @@ static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y)

    case INDEX_op_muluh_i64:
        mulu64(&l64, &h64, x, y);
        return h64;
        return (TCGArg)h64;
    case INDEX_op_mulsh_i64:
        muls64(&l64, &h64, x, y);
        return h64;
        return (TCGArg)h64;

    case INDEX_op_div_i32:
        /* Avoid crashing on divide by zero, otherwise undefined. */
        return (int32_t)x / ((int32_t)y ? : 1);
        return (int32_t)x / ((int32_t)y ? (int32_t)y : 1);
    case INDEX_op_divu_i32:
        return (uint32_t)x / ((uint32_t)y ? : 1);
        return (uint32_t)x / ((uint32_t)y ? (uint32_t)y : 1);
    case INDEX_op_div_i64:
        return (int64_t)x / ((int64_t)y ? : 1);
        return (int64_t)x / ((int64_t)y ? (int64_t)y : 1);
    case INDEX_op_divu_i64:
        return (uint64_t)x / ((uint64_t)y ? : 1);
        return (uint64_t)x / ((uint64_t)y ? (uint64_t)y : 1);

    case INDEX_op_rem_i32:
        return (int32_t)x % ((int32_t)y ? : 1);
        return (int32_t)x % ((int32_t)y ? (int32_t)y : 1);
    case INDEX_op_remu_i32:
        return (uint32_t)x % ((uint32_t)y ? : 1);
        return (uint32_t)x % ((uint32_t)y ? (uint32_t)y : 1);
    case INDEX_op_rem_i64:
        return (int64_t)x % ((int64_t)y ? : 1);
        return (int64_t)x % ((int64_t)y ? (int64_t)y : 1);
    case INDEX_op_remu_i64:
        return (uint64_t)x % ((uint64_t)y ? : 1);
        return (uint64_t)x % ((uint64_t)y ? (uint64_t)y : 1);

    default:
        fprintf(stderr,

@@ -867,11 +867,11 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,

        CASE_OP_32_64(neg):
            /* Set to 1 all bits to the left of the rightmost. */
            mask = -(temps[args[1]].mask & -temps[args[1]].mask);
            mask = 0-(temps[args[1]].mask & (0-temps[args[1]].mask));
            break;

        CASE_OP_32_64(deposit):
            mask = deposit64(temps[args[1]].mask, args[3], args[4],
            mask = (tcg_target_ulong)deposit64(temps[args[1]].mask, args[3], args[4],
                             temps[args[2]].mask);
            break;

@@ -1088,7 +1088,7 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
        CASE_OP_32_64(deposit):
            if (temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[2]].state == TCG_TEMP_CONST) {
                tmp = deposit64(temps[args[1]].val, args[3], args[4],
                tmp = (TCGArg)deposit64(temps[args[1]].val, args[3], args[4],
                                temps[args[2]].val);
                tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp);
                gen_args += 2;
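Aside (not from the commit): "y ?: 1" is the GNU "Elvis" conditional, which reuses the tested value as the result; MSVC does not implement it, hence the expanded "y ? y : 1" spelling above. The "-x" to "0-x" rewrites likewise sidestep MSVC's warning about applying unary minus to unsigned values. A tiny equivalence sketch:

/* Standalone sketch, not project code. */
unsigned safe_divu(unsigned x, unsigned y)
{
#ifdef __GNUC__
    return x / (y ?: 1);      /* GNU extension: y is evaluated once */
#else
    return x / (y ? y : 1);   /* portable form used throughout optimize.c */
#endif
}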
@@ -1576,7 +1576,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg datalo, datahi, addrlo, rbase;
    TCGReg addrhi __attribute__((unused));
    TCGReg addrhi QEMU_UNUSED_VAR;
    TCGMemOp opc, s_bits;
#ifdef CONFIG_SOFTMMU
    int mem_index;

@@ -1649,7 +1649,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg datalo, datahi, addrlo, rbase;
    TCGReg addrhi __attribute__((unused));
    TCGReg addrhi QEMU_UNUSED_VAR;
    TCGMemOp opc, s_bits;
#ifdef CONFIG_SOFTMMU
    int mem_index;

@@ -2603,14 +2603,14 @@ void flush_icache_range(uintptr_t start, uintptr_t stop)
#if defined _AIX
#include <sys/systemcfg.h>

static void __attribute__((constructor)) tcg_cache_init(void)
INITIALIZER(tcg_cache_init)
{
    icache_bsize = _system_configuration.icache_line;
    dcache_bsize = _system_configuration.dcache_line;
}

#elif defined __linux__
static void __attribute__((constructor)) tcg_cache_init(void)
INITIALIZER(tcg_cache_init)
{
    unsigned long dsize = qemu_getauxval(AT_DCACHEBSIZE);
    unsigned long isize = qemu_getauxval(AT_ICACHEBSIZE);

@@ -2633,7 +2633,7 @@ static void __attribute__((constructor)) tcg_cache_init(void)
#include <sys/types.h>
#include <sys/sysctl.h>

static void __attribute__((constructor)) tcg_cache_init(void)
INITIALIZER(tcg_cache_init)
{
    size_t len;
    unsigned cacheline;

@@ -2656,7 +2656,7 @@ static void __attribute__((constructor)) tcg_cache_init(void)
#include <sys/types.h>
#include <sys/sysctl.h>

static void __attribute__((constructor)) tcg_cache_init(void)
INITIALIZER(tcg_cache_init)
{
    size_t len = 4;
    unsigned cacheline;
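Aside (not from the commit): INITIALIZER(tcg_cache_init) replaces __attribute__((constructor)), which MSVC lacks. One common way to emulate a constructor under MSVC is to plant a function pointer in the CRT's user-initializer section; the project's actual macro is assumed to do something along these lines.

/* Hedged sketch only -- the project's real INITIALIZER macro may differ. */
#ifdef _MSC_VER
# pragma section(".CRT$XCU", read)
# define INITIALIZER(fn)                                                      \
    static void fn(void);                                                     \
    __declspec(allocate(".CRT$XCU")) static void (*fn##_ptr)(void) = fn;      \
    static void fn(void)
#else
# define INITIALIZER(fn)                                                      \
    static void __attribute__((constructor)) fn(void)
#endif

INITIALIZER(example_init)
{
    /* runs before main() with either toolchain */
}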
@@ -148,12 +148,20 @@ extern bool use_vis3_instructions;

#define TCG_AREG0 TCG_REG_I0

#ifdef _MSC_VER
#include <windows.h>
static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
{
    uintptr_t p;
    FlushInstructionCache(GetCurrentProcess(), (const void*)start, stop-start);
}
#else
static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
{
    uintptr_t p;
    for (p = start & -8; p < ((stop + 7) & -8); p += 8) {
        __asm__ __volatile__("flush\t%0" : : "r" (p));
    }
}
#endif

#endif
@@ -20,6 +20,8 @@
 * THE SOFTWARE.
 */

#include "tcg.h"

typedef struct TCGBackendData {
    /* Empty */
    char dummy;
@@ -747,7 +747,7 @@ static inline void tcg_gen_mov_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg)

static inline void tcg_gen_movi_i64(TCGContext *s, TCGv_i64 ret, int64_t arg)
{
    tcg_gen_movi_i32(s, TCGV_LOW(ret), arg);
    tcg_gen_movi_i32(s, TCGV_LOW(ret), (int32_t)arg);
    tcg_gen_movi_i32(s, TCGV_HIGH(ret), arg >> 32);
}

@@ -863,7 +863,7 @@ static inline void tcg_gen_and_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, T

static inline void tcg_gen_andi_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    tcg_gen_andi_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1), arg2);
    tcg_gen_andi_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1), (uint32_t)arg2);
    tcg_gen_andi_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32);
}

@@ -875,7 +875,7 @@ static inline void tcg_gen_or_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TC

static inline void tcg_gen_ori_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    tcg_gen_ori_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1), arg2);
    tcg_gen_ori_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1), (uint32_t)arg2);
    tcg_gen_ori_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32);
}

@@ -887,7 +887,7 @@ static inline void tcg_gen_xor_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, T

static inline void tcg_gen_xori_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    tcg_gen_xori_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1), arg2);
    tcg_gen_xori_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1), (int32_t)arg2);
    tcg_gen_xori_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32);
}

@@ -900,7 +900,7 @@ static inline void tcg_gen_shl_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, T

static inline void tcg_gen_shli_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    tcg_gen_shifti_i64(s, ret, arg1, arg2, 0, 0);
    tcg_gen_shifti_i64(s, ret, arg1, (int)arg2, 0, 0);
}

static inline void tcg_gen_shr_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)

@@ -910,7 +910,7 @@ static inline void tcg_gen_shr_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, T

static inline void tcg_gen_shri_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    tcg_gen_shifti_i64(s, ret, arg1, arg2, 1, 0);
    tcg_gen_shifti_i64(s, ret, arg1, (int)arg2, 1, 0);
}

static inline void tcg_gen_sar_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)

@@ -920,7 +920,7 @@ static inline void tcg_gen_sar_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, T

static inline void tcg_gen_sari_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
    tcg_gen_shifti_i64(s, ret, arg1, arg2, 1, 1);
    tcg_gen_shifti_i64(s, ret, arg1, (int)arg2, 1, 1);
}

static inline void tcg_gen_brcond_i64(TCGContext *s, TCGCond cond, TCGv_i64 arg1,
@@ -44,7 +44,12 @@ DEF(call, 0, 0, 3, TCG_OPF_CALL_CLOBBER | TCG_OPF_NOT_PRESENT)

DEF(br, 0, 0, 1, TCG_OPF_BB_END)

#ifdef _MSC_VER
#define IMPL(X) ((0 && !(X)) ? TCG_OPF_NOT_PRESENT : 0)
#else
#define IMPL(X) (__builtin_constant_p(X) && !(X) ? TCG_OPF_NOT_PRESENT : 0)
#endif

#if TCG_TARGET_REG_BITS == 32
# define IMPL64  TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT
#else
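Aside (not from the commit): GCC's __builtin_constant_p(X) lets IMPL(X) collapse to TCG_OPF_NOT_PRESENT when an opcode is statically known to be unavailable. MSVC has no such builtin, and the substitute "(0 && !(X))" is always false, so under MSVC no opcode is ever flagged as not-present at compile time, which is the conservative fallback. Reduced sketch (the flag value is a placeholder):

/* Standalone sketch, not project code. */
#define OPF_NOT_PRESENT 0x10   /* placeholder flag value for the sketch */

#ifdef __GNUC__
# define IMPL(X) (__builtin_constant_p(X) && !(X) ? OPF_NOT_PRESENT : 0)
#else
# define IMPL(X) ((0 && !(X)) ? OPF_NOT_PRESENT : 0)
#endif

/* GCC:  missing == OPF_NOT_PRESENT, present == 0
 * MSVC: both are 0 (the flag is simply never set) */
enum { missing = IMPL(0), present = IMPL(1) };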
qemu/tcg/tcg.c
@@ -69,7 +69,8 @@ static void patch_reloc(tcg_insn_unit *code_ptr, int type,

/* The CIE and FDE header definitions will be common to all hosts. */
typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    //uint32_t QEMU_ALIGN(sizeof(void *), len);
    uint32_t QEMU_ALIGN(8, len);
    uint32_t id;
    uint8_t version;
    char augmentation[1];

@@ -78,17 +79,18 @@ typedef struct {
    uint8_t return_column;
} DebugFrameCIE;

typedef struct QEMU_PACKED {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
QEMU_PACK( typedef struct {
    // uint32_t QEMU_ALIGN(sizeof(void *), len);
    uint32_t QEMU_ALIGN(8, len);
    uint32_t cie_offset;
    uintptr_t func_start;
    uintptr_t func_len;
} DebugFrameFDEHeader;
}) DebugFrameFDEHeader;

typedef struct QEMU_PACKED {
QEMU_PACK( typedef struct {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
} DebugFrameHeader;
}) DebugFrameHeader;

/* Forward declarations for functions declared and used in tcg-target.c. */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str);
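Aside (not from the commit): QEMU_PACKED expands to GCC's __attribute__((packed)) and has no direct MSVC equivalent, so the structs are re-wrapped in a function-like QEMU_PACK(...) macro that can push a pack pragma around the whole declaration. A hedged sketch of such a macro, mirroring the usage in the hunk above:

/* Hedged sketch only -- the project's real QEMU_PACK macro may differ. */
#include <stdint.h>

#ifdef _MSC_VER
# define QEMU_PACK(decl) __pragma(pack(push, 1)) decl __pragma(pack(pop))
#else
# define QEMU_PACK(decl) decl __attribute__((packed))
#endif

QEMU_PACK( typedef struct {
    uint32_t cie_offset;
    uint16_t func_len;
}) packed_example;

/* sizeof(packed_example) is 6 with either compiler, instead of 8. */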
@@ -115,12 +117,12 @@ TCGOpDef tcg_op_defs_org[] = {
};

#if TCG_TARGET_INSN_UNIT_SIZE == 1
static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
static QEMU_UNUSED_FUNC inline void tcg_out8(TCGContext *s, uint8_t v)
{
    *s->code_ptr++ = v;
}

static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
static QEMU_UNUSED_FUNC inline void tcg_patch8(tcg_insn_unit *p,
                                                uint8_t v)
{
    *p = v;

@@ -128,10 +130,10 @@ static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 2
static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
static QEMU_UNUSED_FUNC inline void tcg_out16(TCGContext *s, uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *s->code_ptr++ = v;
        *s->code_ptr++ = (tcg_insn_unit)v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));

@@ -139,11 +141,11 @@ static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
    }
}

static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
static QEMU_UNUSED_FUNC inline void tcg_patch16(tcg_insn_unit *p,
                                                 uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *p = v;
        *p = (tcg_insn_unit)v;
    } else {
        memcpy(p, &v, sizeof(v));
    }

@@ -151,7 +153,7 @@ static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 4
static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
static QEMU_UNUSED_FUNC inline void tcg_out32(TCGContext *s, uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *s->code_ptr++ = v;

@@ -162,7 +164,7 @@ static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
    }
}

static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
static QEMU_UNUSED_FUNC inline void tcg_patch32(tcg_insn_unit *p,
                                                 uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {

@@ -174,10 +176,10 @@ static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 8
static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
static QEMU_UNUSED_FUNC inline void tcg_out64(TCGContext *s, uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *s->code_ptr++ = v;
        *s->code_ptr++ = (tcg_insn_unit)v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));

@@ -185,11 +187,11 @@ static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
    }
}

static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
static QEMU_UNUSED_FUNC inline void tcg_patch64(tcg_insn_unit *p,
                                                 uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *p = v;
        *p = (tcg_insn_unit)v;
    } else {
        memcpy(p, &v, sizeof(v));
    }

@@ -486,7 +488,7 @@ static inline int tcg_global_mem_new_internal(TCGContext *s, TCGType type, int r
#endif
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_0");
        ts->name = strdup(buf);
        ts->name = g_strdup(buf);
        ts++;

        ts->base_type = type;

@@ -501,7 +503,7 @@ static inline int tcg_global_mem_new_internal(TCGContext *s, TCGType type, int r
#endif
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_1");
        ts->name = strdup(buf);
        ts->name = g_strdup(buf);

        s->nb_globals += 2;
    } else
@@ -1097,6 +1099,24 @@ static inline const char *tcg_find_helper(TCGContext *s, uintptr_t val)

static const char * const cond_name[] =
{
#ifdef _MSC_VER
    "never",   // TCG_COND_NEVER
    "always",  // TCG_COND_ALWAYS
    "lt",      // TCG_COND_LT
    "ge",      // TCG_COND_GE
    "ltu",     // TCG_COND_LTU
    "geu",     // TCG_COND_GEU
    NULL,      // n/a
    NULL,      // n/a
    "eq",      // TCG_COND_EQ
    "ne",      // TCG_COND_NE
    "le",      // TCG_COND_LE
    "gt",      // TCG_COND_GT
    "leu",     // TCG_COND_LEU
    "gtu",     // TCG_COND_GTU
    NULL,      // n/a
    NULL,      // n/a
#else
    [TCG_COND_NEVER] = "never",
    [TCG_COND_ALWAYS] = "always",
    [TCG_COND_EQ] = "eq",

@@ -1109,11 +1129,49 @@ static const char * const cond_name[] =
    [TCG_COND_GEU] = "geu",
    [TCG_COND_LEU] = "leu",
    [TCG_COND_GTU] = "gtu"
#endif
};

static const char * const ldst_name[] =
{
    [MO_UB] = "ub",
#ifdef _MSC_VER
    "ub",    // MO_UB
# ifdef HOST_WORDS_BIGENDIAN
    "beuw",  // MO_BEUW
    "beul",  // MO_BEUL
    "beq",   // MO_BEQ
    "sb",    // MO_SB
    "besw",  // MO_BESW
    "besl",  // MO_BESL
    NULL,    // n/a
    NULL,    // n/a
    "leuw",  // MO_LEUW
    "leul",  // MO_LEUL
    "leq",   // MO_LEQ
    NULL,    // n/a
    "lesw",  // MO_LESW
    "lesl",  // MO_LESL
    NULL,    // n/a
# else // !HOST_WORDS_BIGENDIAN
    "leuw",  // MO_LEUW
    "leul",  // MO_LEUL
    "leq",   // MO_LEQ
    "sb",    // MO_SB
    "lesw",  // MO_LESW
    "lesl",  // MO_LESL
    NULL,    // n/a
    NULL,    // n/a
    "beuw",  // MO_BEUW
    "beul",  // MO_BEUL
    "beq",   // MO_BEQ
    NULL,    // n/a
    "besw",  // MO_BESW
    "besl",  // MO_BESL
    NULL,    // n/a
# endif // HOST_WORDS_BIGENDIAN

#else //_MSC_VER
    [MO_UB] = "ub",
    [MO_SB] = "sb",
    [MO_LEUW] = "leuw",
    [MO_LESW] = "lesw",

@@ -1125,6 +1183,7 @@ static const char * const ldst_name[] =
    [MO_BEUL] = "beul",
    [MO_BESL] = "besl",
    [MO_BEQ] = "beq",
#endif // _MSC_VER
};

void tcg_dump_ops(TCGContext *s)
@@ -1921,7 +1980,7 @@ static inline void temp_sync(TCGContext *s, int temp, TCGRegSet allocated_regs)
    if (!ts->fixed_reg) {
        switch(ts->val_type) {
        case TEMP_VAL_CONST:
            ts->reg = tcg_reg_alloc(s, s->tcg_target_available_regs[ts->type],
            ts->reg = tcg_reg_alloc(s, (TCGRegSet)s->tcg_target_available_regs[ts->type],
                                    allocated_regs);
            ts->val_type = TEMP_VAL_REG;
            s->reg_to_temp[ts->reg] = temp;

@@ -2061,7 +2120,7 @@ static void tcg_reg_alloc_mov(TCGContext *s, const TCGOpDef *def,
       we don't have to reload SOURCE the next time it is used. */
    if (((NEED_SYNC_ARG(0) || ots->fixed_reg) && ts->val_type != TEMP_VAL_REG)
        || ts->val_type == TEMP_VAL_MEM) {
        ts->reg = tcg_reg_alloc(s, s->tcg_target_available_regs[itype],
        ts->reg = tcg_reg_alloc(s, (TCGRegSet)s->tcg_target_available_regs[itype],
                                allocated_regs);
        if (ts->val_type == TEMP_VAL_MEM) {
            tcg_out_ld(s, itype, ts->reg, ts->mem_reg, ts->mem_offset);

@@ -2111,7 +2170,7 @@ static void tcg_reg_alloc_mov(TCGContext *s, const TCGOpDef *def,
            /* When allocating a new register, make sure to not spill the
               input one. */
            tcg_regset_set_reg(allocated_regs, ts->reg);
            ots->reg = tcg_reg_alloc(s, s->tcg_target_available_regs[otype],
            ots->reg = tcg_reg_alloc(s, (TCGRegSet)s->tcg_target_available_regs[otype],
                                     allocated_regs);
        }
        tcg_out_mov(s, otype, ots->reg, ts->reg);

@@ -2342,13 +2401,13 @@ static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def,
            if (ts->val_type == TEMP_VAL_REG) {
                tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
            } else if (ts->val_type == TEMP_VAL_MEM) {
                reg = tcg_reg_alloc(s, s->tcg_target_available_regs[ts->type],
                reg = tcg_reg_alloc(s, (TCGRegSet)s->tcg_target_available_regs[ts->type],
                                    s->reserved_regs);
                /* XXX: not correct if reading values from the stack */
                tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
                tcg_out_st(s, ts->type, reg, TCG_REG_CALL_STACK, stack_offset);
            } else if (ts->val_type == TEMP_VAL_CONST) {
                reg = tcg_reg_alloc(s, s->tcg_target_available_regs[ts->type],
                reg = tcg_reg_alloc(s, (TCGRegSet)s->tcg_target_available_regs[ts->type],
                                    s->reserved_regs);
                /* XXX: sign extend may be needed on some targets */
                tcg_out_movi(s, ts->type, reg, ts->val);

@@ -2572,7 +2631,7 @@ static inline int tcg_gen_code_common(TCGContext *s,
        }
        args += def->nb_args;
    next:
        if (search_pc >= 0 && search_pc < tcg_current_code_size(s)) {
        if (search_pc >= 0 && (size_t)search_pc < tcg_current_code_size(s)) {
            return op_index;
        }
        op_index++;
@@ -28,6 +28,7 @@
#include "qemu-common.h"
#include "qemu/bitops.h"
#include "tcg-target.h"
#include "exec/exec-all.h"

#include "uc_priv.h"

@@ -180,7 +181,7 @@ typedef struct TCGLabel {
typedef struct TCGPool {
    struct TCGPool *next;
    int size;
    uint8_t data[0] __attribute__ ((aligned));
    uint8_t QEMU_ALIGN(8, data[0]);
} TCGPool;

#define TCG_POOL_CHUNK_SIZE 32768

@@ -853,7 +854,7 @@ TCGv_i64 tcg_const_local_i64(TCGContext *s, int64_t val);

static inline ptrdiff_t tcg_ptr_byte_diff(void *a, void *b)
{
    return a - b;
    return (char*)a - (char*)b;
}
|
||||
|