Expose more TB related stuff
@@ -121,8 +121,7 @@ typedef void (*uc_invalidate_tb_t)(struct uc_struct *uc, uint64_t start,
                                    size_t len);
 
 // Request generating TB at given address
-typedef struct TranslationBlock *(*uc_gen_tb_t)(struct uc_struct *uc,
-                                                uint64_t pc);
+typedef uc_err (*uc_gen_tb_t)(struct uc_struct *uc, uint64_t pc, uc_tb *out_tb);
 
 struct hook {
     int type; // UC_HOOK_*
@@ -391,6 +391,13 @@ typedef enum uc_query_type {
     // result = True)
 } uc_query_type;
 
+// Represent a TranslationBlock.
+typedef struct uc_tb {
+    uint64_t pc;
+    uint16_t icount;
+    uint16_t size;
+} uc_tb;
+
 // The implementation of uc_ctl is like what Linux ioctl does but slightly
 // different.
 //
@@ -461,7 +468,7 @@ typedef enum uc_control_type {
     // Read: @args = (int)
     UC_CTL_CPU_MODEL,
     // Request a tb cache at a specific address
-    // Read: @args = (uint64_t)
+    // Read: @args = (uint64_t, uc_tb*)
    UC_CTL_TB_REQUEST_CACHE,
     // Invalidate a tb cache at a specific address
     // Read: @args = (uint64_t)
@@ -493,8 +500,8 @@ typedef enum uc_control_type {
     uc_ctl(uc, UC_CTL_WRITE(UC_CTL_CPU_MODEL, 1), (model))
 #define uc_ctl_remove_cache(uc, address) \
     uc_ctl(uc, UC_CTL_READ(UC_CTL_TB_REMOVE_CACHE, 1), (address))
-#define uc_ctl_request_cache(uc, address) \
-    uc_ctl(uc, UC_CTL_READ(UC_CTL_TB_REQUEST_CACHE, 1), (address))
+#define uc_ctl_request_cache(uc, address, tb) \
+    uc_ctl(uc, UC_CTL_READ_WRITE(UC_CTL_TB_REQUEST_CACHE, 2), (address), (tb))
 
 // Opaque storage for CPU context, used with uc_context_*()
 struct uc_context;
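Taken together, the three unicorn.h hunks above define the new caller-facing API: uc_tb describes a TranslationBlock, UC_CTL_TB_REQUEST_CACHE now takes a uint64_t address plus a uc_tb* out parameter, and uc_ctl_request_cache() wraps that control. A minimal usage sketch follows; it is not part of the commit, and the x86-32 setup, the address value and the NOP-filled buffer are illustrative assumptions only.

    #include <unicorn/unicorn.h>
    #include <inttypes.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        uc_engine *uc;
        uc_tb tb;
        uint64_t address = 0x1000;          // illustrative mapping address
        char code[0x1000];

        memset(code, 0x90, sizeof(code));   // fill the page with NOPs

        if (uc_open(UC_ARCH_X86, UC_MODE_32, &uc) != UC_ERR_OK) {
            return 1;
        }
        uc_mem_map(uc, address, sizeof(code), UC_PROT_ALL);
        uc_mem_write(uc, address, code, sizeof(code));

        // New in this commit: the third argument receives the generated TB.
        if (uc_ctl_request_cache(uc, address, &tb) == UC_ERR_OK) {
            printf("TB at 0x%" PRIx64 ": %" PRIu16 " insns, %" PRIu16 " bytes\n",
                   tb.pc, tb.icount, tb.size);
        }

        uc_close(uc);
        return 0;
    }

The sample and unit-test hunks further down exercise the same call, once with a real uc_tb and once with NULL.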
@@ -999,7 +999,7 @@ static void uc_invalidate_tb(struct uc_struct *uc, uint64_t start_addr, size_t l
     tb_invalidate_phys_range(uc, start, end);
 }
 
-static TranslationBlock* uc_gen_tb(struct uc_struct *uc, uint64_t addr)
+static uc_err uc_gen_tb(struct uc_struct *uc, uint64_t addr, uc_tb *out_tb)
 {
     TranslationBlock *tb;
     target_ulong cs_base, pc;
@@ -1024,22 +1024,16 @@ static TranslationBlock* uc_gen_tb(struct uc_struct *uc, uint64_t addr)
     cflags &= ~CF_CLUSTER_MASK;
     cflags |= cpu->cluster_index << CF_CLUSTER_SHIFT;
 
-    if (likely(tb &&
+    if (unlikely(!(tb &&
                tb->pc == pc &&
                tb->cs_base == cs_base &&
                tb->flags == flags &&
                tb->trace_vcpu_dstate == *cpu->trace_dstate &&
-               (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == cflags)) {
-        return tb;
-    }
+               (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == cflags))) {
 
     tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
     cpu->tb_jmp_cache[hash] = tb;
 
-    if (tb != NULL) {
-        return tb;
-    }
-
     if (tb == NULL) {
         mmap_lock();
         tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
@@ -1047,8 +1041,20 @@ static TranslationBlock* uc_gen_tb(struct uc_struct *uc, uint64_t addr)
         /* We add the TB in the virtual pc hash table for the fast lookup */
         cpu->tb_jmp_cache[hash] = tb;
     }
+    }
 
-    return tb;
+    // If we still couldn't generate a TB, it must be out of memory.
+    if (tb == NULL) {
+        return UC_ERR_NOMEM;
+    }
+
+    if (out_tb != NULL) {
+        out_tb->pc = tb->pc;
+        out_tb->size = tb->size;
+        out_tb->icount = tb->icount;
+    }
+
+    return UC_ERR_OK;
 }
 
 /* Must be called before using the QEMU cpus. 'tb_size' is the size
@@ -182,9 +182,12 @@ static void test_uc_ctl_tb_cache()
 {
     uc_engine *uc;
     uc_err err;
+    uc_tb tb;
     char code[CODE_LEN];
     double standard, cached, evicted;
 
+    printf("Controling the TB cache in a finer granularity by uc_ctl.\n");
+
     // Fill the code buffer with NOP.
     memset(code, 0x90, CODE_LEN);
 
@@ -213,7 +216,10 @@ static void test_uc_ctl_tb_cache()
 
     // Now we request cache for all TBs.
     for (int i = 0; i < TB_COUNT; i++) {
-        err = uc_ctl_request_cache(uc, ADDRESS + i * TCG_MAX_INSNS);
+        err = uc_ctl_request_cache(uc, ADDRESS + i * TCG_MAX_INSNS, &tb);
+        printf(">>> TB is cached at 0x%" PRIx64 " which has %" PRIu16
+               " instructions with %" PRIu16 " bytes.\n",
+               tb.pc, tb.icount, tb.size);
         if (err) {
            printf("Failed on uc_ctl() with error returned: %u\n", err);
            return;
@@ -138,7 +138,7 @@ static void test_uc_ctl_tb_cache()
     standard = time_emulation(uc, code_start, code_start + sizeof(code) - 1);
 
     for (int i = 0; i < TB_COUNT; i++) {
-        OK(uc_ctl_request_cache(uc, code_start + i * TCG_MAX_INSNS));
+        OK(uc_ctl_request_cache(uc, code_start + i * TCG_MAX_INSNS, NULL));
     }
 
     cached = time_emulation(uc, code_start, code_start + sizeof(code) - 1);
uc.c
@@ -1906,9 +1906,10 @@ uc_err uc_ctl(uc_engine *uc, uc_control_type control, ...)
         break;
 
     case UC_CTL_TB_REQUEST_CACHE: {
-        if (rw == UC_CTL_IO_READ) {
+        if (rw == UC_CTL_IO_READ_WRITE) {
             uint64_t addr = va_arg(args, uint64_t);
-            uc->uc_gen_tb(uc, addr);
+            uc_tb *tb = va_arg(args, uc_tb *);
+            err = uc->uc_gen_tb(uc, addr, tb);
         } else {
             err = UC_ERR_ARG;
         }
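The dispatch above reads the address first and the uc_tb pointer second, so a caller that invokes uc_ctl() directly rather than through the uc_ctl_request_cache() macro must pass its arguments in that order. A hedged sketch of the equivalent raw call, wrapped in a hypothetical helper (request_tb is not part of the commit):

    #include <unicorn/unicorn.h>

    // Equivalent to uc_ctl_request_cache(uc, address, tb): UC_CTL_READ_WRITE
    // with two arguments, the address before the out pointer, matching the
    // va_arg order in the UC_CTL_TB_REQUEST_CACHE handler above.
    static uc_err request_tb(uc_engine *uc, uint64_t address, uc_tb *tb)
    {
        return uc_ctl(uc, UC_CTL_READ_WRITE(UC_CTL_TB_REQUEST_CACHE, 2),
                      address, tb);
    }

Passing NULL for the uc_tb pointer is accepted: uc_gen_tb() only fills the out parameter when it is non-NULL, which is what the unit-test hunk above relies on.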