Merge remote-tracking branch 'upstream/master' into change-handle-based-api

Jonathon Reinhart
2015-09-03 22:01:52 -04:00
33 changed files with 1369 additions and 99 deletions

uc.c (277 changes)

@@ -31,6 +31,9 @@
#include "qemu/include/hw/boards.h"
static uint8_t *copy_region(struct uc_struct *uc, MemoryRegion *mr);
static bool split_region(struct uc_struct *uc, MemoryRegion *mr, uint64_t address, size_t size, bool do_delete);
UNICORN_EXPORT
unsigned int uc_version(unsigned int *major, unsigned int *minor)
{
@@ -58,8 +61,8 @@ const char *uc_strerror(uc_err code)
return "Unknown error code";
case UC_ERR_OK:
return "OK (UC_ERR_OK)";
case UC_ERR_OOM:
return "Out of memory (UC_ERR_OOM)";
case UC_ERR_NOMEM:
return "No memory available or memory not present (UC_ERR_NOMEM)";
case UC_ERR_ARCH:
return "Invalid/unsupported architecture(UC_ERR_ARCH)";
case UC_ERR_HANDLE:
@@ -80,10 +83,14 @@ const char *uc_strerror(uc_err code)
return "Invalid hook type (UC_ERR_HOOK)";
case UC_ERR_MAP:
return "Invalid memory mapping (UC_ERR_MAP)";
case UC_ERR_MEM_WRITE_NW:
return "Write to non-writable (UC_ERR_MEM_WRITE_NW)";
case UC_ERR_MEM_READ_NR:
return "Read from non-readable (UC_ERR_MEM_READ_NR)";
case UC_ERR_WRITE_PROT:
return "Write to write-protected memory (UC_ERR_WRITE_PROT)";
case UC_ERR_READ_PROT:
return "Read from non-readable memory (UC_ERR_READ_PROT)";
case UC_ERR_EXEC_PROT:
return "Fetch from non-executable memory (UC_ERR_EXEC_PROT)";
case UC_ERR_INVAL:
return "Invalid argumet (UC_ERR_INVAL)";
}
}
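As context for the UC_ERR_OOM to UC_ERR_NOMEM rename running through this diff: a minimal sketch of how a caller surfaces these strings. It assumes the ucengine-pointer flavor of uc_open() shown in the next hunk, plus a matching uc_close(); both are assumptions, since this merge is mid-transition between the handle-based and pointer-based APIs.

#include <stdio.h>
#include <unicorn/unicorn.h>

int main(void)
{
    ucengine *uc;
    uc_err err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc);
    if (err != UC_ERR_OK) {
        // uc_strerror() maps the uc_err code to the messages above
        fprintf(stderr, "uc_open failed: %s\n", uc_strerror(err));
        return 1;
    }
    uc_close(uc);  // assumed teardown counterpart to uc_open()
    return 0;
}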
@@ -129,7 +136,7 @@ uc_err uc_open(uc_arch arch, uc_mode mode, ucengine **result)
uc = calloc(1, sizeof(*uc));
if (!uc) {
// memory insufficient
return UC_ERR_OOM;
return UC_ERR_NOMEM;
}
uc->errnum = UC_ERR_OK;
@@ -522,7 +529,7 @@ static int _hook_code(ucengine *uc, int type, uint64_t begin, uint64_t end,
i = hook_add(uc, type, begin, end, callback, user_data);
if (i == 0)
return UC_ERR_OOM; // FIXME
return UC_ERR_NOMEM; // FIXME
*hh = i;
@@ -538,7 +545,7 @@ static uc_err _hook_mem_access(ucengine *uc, uc_hook_t type,
i = hook_add(uc, type, begin, end, callback, user_data);
if (i == 0)
return UC_ERR_OOM; // FIXME
return UC_ERR_NOMEM; // FIXME
*hh = i;
@@ -552,24 +559,25 @@ uc_err uc_mem_map(ucengine *uc, uint64_t address, size_t size, uint32_t perms)
if (size == 0)
// invalid memory mapping
return UC_ERR_MAP;
return UC_ERR_INVAL;
// address must be aligned to 4KB
if ((address & (4*1024 - 1)) != 0)
return UC_ERR_MAP;
// address must be aligned to uc->target_page_size
if ((address & uc->target_page_align) != 0)
return UC_ERR_INVAL;
// size must be multiple of 4KB
if ((size & (4*1024 - 1)) != 0)
return UC_ERR_MAP;
// size must be multiple of uc->target_page_size
if ((size & uc->target_page_align) != 0)
return UC_ERR_INVAL;
// check for only valid permissions
if ((perms & ~(UC_PROT_READ | UC_PROT_WRITE)) != 0)
return UC_ERR_MAP;
if ((perms & ~UC_PROT_ALL) != 0)
return UC_ERR_INVAL;
if ((uc->mapped_block_count & (MEM_BLOCK_INCR - 1)) == 0) { //time to grow
regions = (MemoryRegion**)realloc(uc->mapped_blocks, sizeof(MemoryRegion*) * (uc->mapped_block_count + MEM_BLOCK_INCR));
regions = (MemoryRegion**)realloc(uc->mapped_blocks,
sizeof(MemoryRegion*) * (uc->mapped_block_count + MEM_BLOCK_INCR));
if (regions == NULL) {
return UC_ERR_OOM;
return UC_ERR_NOMEM;
}
uc->mapped_blocks = regions;
}
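To make the relaxed validation concrete: the old code hard-coded 4KB alignment and allowed only R/W permissions, while the new code checks uc->target_page_align and accepts any bits within UC_PROT_ALL. A hedged usage sketch, assuming a 4KB target page size and an already-opened ucengine *uc:

// OK: page-aligned address, page-multiple size, valid permission bits
uc_err err = uc_mem_map(uc, 0x100000, 0x2000, UC_PROT_READ | UC_PROT_WRITE);

// Each of these now fails with UC_ERR_INVAL rather than UC_ERR_MAP:
err = uc_mem_map(uc, 0x100123, 0x1000, UC_PROT_ALL);  // unaligned address
err = uc_mem_map(uc, 0x200000, 0x800, UC_PROT_ALL);   // size not a page multiple
err = uc_mem_map(uc, 0x300000, 0x1000, 0xff);         // bits outside UC_PROT_ALL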
@@ -579,7 +587,224 @@ uc_err uc_mem_map(ucengine *uc, uint64_t address, size_t size, uint32_t perms)
return UC_ERR_OK;
}
MemoryRegion *memory_mapping(ucengine* uc, uint64_t address)
// Create a backup copy of the indicated MemoryRegion.
// Generally used in preparation for splitting a MemoryRegion.
static uint8_t *copy_region(uch handle, MemoryRegion *mr)
{
uint8_t *block = (uint8_t *)malloc(int128_get64(mr->size));
if (block != NULL) {
uc_err err = uc_mem_read(handle, mr->addr, block, int128_get64(mr->size));
if (err != UC_ERR_OK) {
free(block);
block = NULL;
}
}
return block;
}
/*
Split the given MemoryRegion at the indicated address for the indicated size;
this may result in the creation of up to 3 spanning sections. If the do_delete
parameter is true, then no new section will be created to replace the indicated
range. This function exists to support uc_mem_protect and uc_mem_unmap.
This is a static function and callers have already done some preliminary
parameter validation.
The do_delete argument indicates that we are being called to support
uc_mem_unmap. In this case we save some time by choosing NOT to remap
the areas that are intended to get unmapped.
*/
// TODO: investigate whether qemu region manipulation functions already offered
// this capability
static bool split_region(uch handle, MemoryRegion *mr, uint64_t address,
size_t size, bool do_delete)
{
uint8_t *backup;
uint32_t perms;
uint64_t begin, end, chunk_end;
size_t l_size, m_size, r_size;
chunk_end = address + size;
// if this region is fully contained within [address, address+size),
// there is no splitting to do; the whole region is affected.
if (address <= mr->addr && chunk_end >= mr->end)
return true;
if (size == 0)
// trivial case
return true;
if (address >= mr->end || chunk_end <= mr->addr)
// impossible case
return false;
backup = copy_region(handle, mr);
if (backup == NULL)
return false;
// save the essential information required for the split before mr gets deleted
perms = mr->perms;
begin = mr->addr;
end = mr->end;
// unmap this region first, then do the split afterward
if (uc_mem_unmap(handle, mr->addr, int128_get64(mr->size)) != UC_ERR_OK)
goto error;
/* overlapping cases
* |------mr------|
* case 1 |---size--|
* case 2 |--size--|
* case 3 |---size--|
*/
// clamp the requested range to this region's bounds
if (address < begin)
address = begin;
if (chunk_end > end)
chunk_end = end;
// compute sub region sizes
l_size = (size_t)(address - begin);
r_size = (size_t)(end - chunk_end);
m_size = (size_t)(chunk_end - address);
// If any of the operations below fails, things are too far gone at that
// point to recover. We could try to remap the original region, but these
// smaller allocations just failed, so there is no guarantee that we could
// recover the original allocation at this point.
if (l_size > 0) {
if (uc_mem_map(handle, begin, l_size, perms) != UC_ERR_OK)
goto error;
if (uc_mem_write(handle, begin, backup, l_size) != UC_ERR_OK)
goto error;
}
if (m_size > 0 && !do_delete) {
if (uc_mem_map(handle, address, m_size, perms) != UC_ERR_OK)
goto error;
if (uc_mem_write(handle, address, backup + l_size, m_size) != UC_ERR_OK)
goto error;
}
if (r_size > 0) {
if (uc_mem_map(handle, chunk_end, r_size, perms) != UC_ERR_OK)
goto error;
if (uc_mem_write(handle, chunk_end, backup + l_size + m_size, r_size) != UC_ERR_OK)
goto error;
}
return true;
error:
free(backup);
return false;
}
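A worked example of the size arithmetic above: suppose mr spans [0x1000, 0x5000) and split_region() is asked to carve out address = 0x2000, size = 0x1000 (case 2 in the diagram). Then:

l_size = 0x2000 - 0x1000 = 0x1000  // [0x1000, 0x2000), remapped with old perms
m_size = 0x3000 - 0x2000 = 0x1000  // [0x2000, 0x3000), skipped when do_delete
r_size = 0x5000 - 0x3000 = 0x2000  // [0x3000, 0x5000), remapped with old perms

One region becomes up to three, and the backup taken by copy_region() preserves the contents of the surviving pieces.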
UNICORN_EXPORT
uc_err uc_mem_protect(uch handle, uint64_t address, size_t size, uint32_t perms)
{
struct uc_struct* uc = (struct uc_struct *)handle;
MemoryRegion *mr;
uint64_t addr = address;
size_t count, len;
if (handle == 0)
// invalid handle
return UC_ERR_UCH;
if (size == 0)
// trivial case, no change
return UC_ERR_OK;
// address must be aligned to uc->target_page_size
if ((address & uc->target_page_align) != 0)
return UC_ERR_INVAL;
// size must be multiple of uc->target_page_size
if ((size & uc->target_page_align) != 0)
return UC_ERR_INVAL;
// check for only valid permissions
if ((perms & ~UC_PROT_ALL) != 0)
return UC_ERR_INVAL;
// check that user's entire requested block is mapped
if (!check_mem_area(uc, address, size))
return UC_ERR_NOMEM;
// Now we know entire region is mapped, so change permissions
// We may need to split regions if this area spans adjacent regions
addr = address;
count = 0;
while(count < size) {
mr = memory_mapping(uc, addr);
len = MIN(size - count, mr->end - addr);
if (!split_region(handle, mr, addr, len, false))
return UC_ERR_NOMEM;
mr = memory_mapping(uc, addr);
mr->perms = perms;
uc->readonly_mem(mr, (perms & UC_PROT_WRITE) == 0);
count += len;
addr += len;
}
return UC_ERR_OK;
}
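A hedged sketch of the splitting behavior uc_mem_protect now enables, assuming an open engine; the uch handle vs. ucengine* mismatch is inherent to this in-progress merge, so take the types loosely:

// map one 3-page RW block, then write-protect only the middle page;
// split_region() carves it into [0x4000,0x5000), [0x5000,0x6000) and
// [0x6000,0x7000) so that just the middle page changes permissions
uc_mem_map(handle, 0x4000, 0x3000, UC_PROT_READ | UC_PROT_WRITE);
uc_err err = uc_mem_protect(handle, 0x5000, 0x1000, UC_PROT_READ);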
UNICORN_EXPORT
uc_err uc_mem_unmap(uch handle, uint64_t address, size_t size)
{
MemoryRegion *mr;
struct uc_struct* uc = (struct uc_struct *)handle;
uint64_t addr;
size_t count, len;
if (handle == 0)
// invalid handle
return UC_ERR_UCH;
if (size == 0)
// nothing to unmap
return UC_ERR_OK;
// address must be aligned to uc->target_page_size
if ((address & uc->target_page_align) != 0)
return UC_ERR_INVAL;
// size must be multiple of uc->target_page_size
if ((size & uc->target_page_align) != 0)
return UC_ERR_INVAL;
// check that user's entire requested block is mapped
if (!check_mem_area(uc, address, size))
return UC_ERR_NOMEM;
// Now we know entire region is mapped, so do the unmap
// We may need to split regions if this area spans adjacent regions
addr = address;
count = 0;
while(count < size) {
mr = memory_mapping(uc, addr);
len = MIN(size - count, mr->end - addr);
if (!split_region(handle, mr, addr, len, true))
return UC_ERR_NOMEM;
// if the mapping is still retrievable, no splitting took place,
// so unmap the whole region here
mr = memory_mapping(uc, addr);
if (mr != NULL)
uc->memory_unmap(uc, mr);
count += len;
addr += len;
}
return UC_ERR_OK;
}
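And the unmap counterpart, punching a hole in an existing mapping (same type caveat as the sketch above):

// assume [0x10000, 0x13000) is mapped as a single region; the two outer
// pages are remapped by split_region(), the middle page is skipped
// (do_delete == true) and then removed via uc->memory_unmap()
uc_err err = uc_mem_unmap(handle, 0x11000, 0x1000);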
MemoryRegion *memory_mapping(struct uc_struct* uc, uint64_t address)
{
unsigned int i;
@@ -607,7 +832,7 @@ static uc_err _hook_mem_invalid(struct uc_struct* uc, uc_cb_eventmem_t callback,
uc->hook_mem_idx = i;
return UC_ERR_OK;
} else
return UC_ERR_OOM;
return UC_ERR_NOMEM;
}
@@ -626,7 +851,7 @@ static uc_err _hook_intr(struct uc_struct* uc, void *callback,
uc->hook_intr_idx = i;
return UC_ERR_OK;
} else
return UC_ERR_OOM;
return UC_ERR_NOMEM;
}
@@ -650,7 +875,7 @@ static uc_err _hook_insn(struct uc_struct *uc, unsigned int insn_id, void *callb
uc->hook_out_idx = i;
return UC_ERR_OK;
} else
return UC_ERR_OOM;
return UC_ERR_NOMEM;
case UC_X86_INS_IN:
// FIXME: only one event handler at the same time
i = hook_find_new(uc);
@@ -661,7 +886,7 @@ static uc_err _hook_insn(struct uc_struct *uc, unsigned int insn_id, void *callb
uc->hook_in_idx = i;
return UC_ERR_OK;
} else
return UC_ERR_OOM;
return UC_ERR_NOMEM;
case UC_X86_INS_SYSCALL:
case UC_X86_INS_SYSENTER:
// FIXME: only one event handler at the same time
@@ -673,7 +898,7 @@ static uc_err _hook_insn(struct uc_struct *uc, unsigned int insn_id, void *callb
uc->hook_syscall_idx = i;
return UC_ERR_OK;
} else
return UC_ERR_OOM;
return UC_ERR_NOMEM;
}
break;
}