solve merging conflict

uc.c | 277

@@ -31,6 +31,9 @@
 
 #include "qemu/include/hw/boards.h"
 
+static uint8_t *copy_region(uch uc, MemoryRegion *mr);
+static bool split_region(uch handle, MemoryRegion *mr, uint64_t address, size_t size, bool do_delete);
+
 UNICORN_EXPORT
 unsigned int uc_version(unsigned int *major, unsigned int *minor)
 {
@@ -65,8 +68,8 @@ const char *uc_strerror(uc_err code)
         return "Unknown error code";
     case UC_ERR_OK:
         return "OK (UC_ERR_OK)";
-    case UC_ERR_OOM:
-        return "Out of memory (UC_ERR_OOM)";
+    case UC_ERR_NOMEM:
+        return "No memory available or memory not present (UC_ERR_NOMEM)";
     case UC_ERR_ARCH:
         return "Invalid/unsupported architecture(UC_ERR_ARCH)";
     case UC_ERR_HANDLE:
@@ -89,10 +92,14 @@ const char *uc_strerror(uc_err code)
         return "Invalid hook type (UC_ERR_HOOK)";
     case UC_ERR_MAP:
         return "Invalid memory mapping (UC_ERR_MAP)";
-    case UC_ERR_MEM_WRITE_NW:
-        return "Write to non-writable (UC_ERR_MEM_WRITE_NW)";
-    case UC_ERR_MEM_READ_NR:
-        return "Read from non-readable (UC_ERR_MEM_READ_NR)";
+    case UC_ERR_WRITE_PROT:
+        return "Write to write-protected memory (UC_ERR_WRITE_PROT)";
+    case UC_ERR_READ_PROT:
+        return "Read from non-readable memory (UC_ERR_READ_PROT)";
+    case UC_ERR_EXEC_PROT:
+        return "Fetch from non-executable memory (UC_ERR_EXEC_PROT)";
+    case UC_ERR_INVAL:
+        return "Invalid argument (UC_ERR_INVAL)";
     }
 }
 
@@ -138,7 +145,7 @@ uc_err uc_open(uc_arch arch, uc_mode mode, uch *handle)
     uc = calloc(1, sizeof(*uc));
     if (!uc) {
         // memory insufficient
-        return UC_ERR_OOM;
+        return UC_ERR_NOMEM;
     }
 
     uc->errnum = UC_ERR_OK;
@@ -585,7 +592,7 @@ static int _hook_code(uch handle, int type, uint64_t begin, uint64_t end,
 
     i = hook_add(handle, type, begin, end, callback, user_data);
     if (i == 0)
-        return UC_ERR_OOM; // FIXME
+        return UC_ERR_NOMEM; // FIXME
 
     *h2 = i;
 
@@ -601,7 +608,7 @@ static uc_err _hook_mem_access(uch handle, uc_hook_t type,
 
     i = hook_add(handle, type, begin, end, callback, user_data);
     if (i == 0)
-        return UC_ERR_OOM; // FIXME
+        return UC_ERR_NOMEM; // FIXME
 
     *h2 = i;
 
@@ -620,24 +627,24 @@ uc_err uc_mem_map(uch handle, uint64_t address, size_t size, uint32_t perms)
 
     if (size == 0)
         // invalid memory mapping
-        return UC_ERR_MAP;
+        return UC_ERR_INVAL;
 
-    // address must be aligned to 4KB
-    if ((address & (4*1024 - 1)) != 0)
-        return UC_ERR_MAP;
+    // address must be aligned to uc->target_page_size
+    if ((address & uc->target_page_align) != 0)
+        return UC_ERR_INVAL;
 
-    // size must be multiple of 4KB
-    if ((size & (4*1024 - 1)) != 0)
-        return UC_ERR_MAP;
+    // size must be multiple of uc->target_page_size
+    if ((size & uc->target_page_align) != 0)
+        return UC_ERR_INVAL;
 
     // check for only valid permissions
-    if ((perms & ~(UC_PROT_READ | UC_PROT_WRITE)) != 0)
-        return UC_ERR_MAP;
+    if ((perms & ~UC_PROT_ALL) != 0)
+        return UC_ERR_INVAL;
 
     if ((uc->mapped_block_count & (MEM_BLOCK_INCR - 1)) == 0) { //time to grow
         regions = (MemoryRegion**)realloc(uc->mapped_blocks, sizeof(MemoryRegion*) * (uc->mapped_block_count + MEM_BLOCK_INCR));
         if (regions == NULL) {
-            return UC_ERR_OOM;
+            return UC_ERR_NOMEM;
         }
         uc->mapped_blocks = regions;
     }
@@ -647,6 +654,228 @@
     return UC_ERR_OK;
 }
 
+//create a backup copy of the indicated MemoryRegion
+//generally used in preparation for splitting a MemoryRegion
+static uint8_t *copy_region(uch handle, MemoryRegion *mr)
+{
+    uint8_t *block = (uint8_t *)malloc(int128_get64(mr->size));
+    if (block != NULL) {
+        uc_err err = uc_mem_read(handle, mr->addr, block, int128_get64(mr->size));
+        if (err != UC_ERR_OK) {
+            free(block);
+            block = NULL;
+        }
+    }
+    return block;
+}
+
+/*
+   Split the given MemoryRegion at the indicated address for the indicated size;
+   this may result in the creation of up to 3 spanning sections. If the delete
+   parameter is true, then no new section will be created to replace the indicated
+   range. This function exists to support uc_mem_protect and uc_mem_unmap.
+
+   This is a static function and callers have already done some preliminary
+   parameter validation.
+*/
+//TODO: investigate whether qemu region manipulation functions already offer this capability
+static bool split_region(uch handle, MemoryRegion *mr, uint64_t address, size_t size, bool do_delete)
+{
+    uint8_t *backup;
+    uint32_t perms;
+    uint64_t begin, end, chunk_end;
+    size_t l_size, m_size, r_size;
+    chunk_end = address + size;
+    if (address <= mr->addr && chunk_end >= mr->end) {
+        //trivial case, if we are deleting, just unmap
+        if (do_delete)
+            return uc_mem_unmap(handle, mr->addr, int128_get64(mr->size)) == UC_ERR_OK;
+        return true;
+    }
+
+    if (size == 0)
+        //trivial case
+        return true;
+
+    if (address >= mr->end || chunk_end <= mr->addr)
+        //impossible case
+        return false;
+
+    backup = copy_region(handle, mr);
+    if (backup == NULL)
+        return false;
+
+    //save the essential information required for the split before mr gets deleted
+    perms = mr->perms;
+    begin = mr->addr;
+    end = mr->end;
+
+    if (uc_mem_unmap(handle, mr->addr, int128_get64(mr->size)) != UC_ERR_OK)
+        goto error;
+
+    /* overlapping cases
+     *              |------mr------|
+     * case 1   |---size--|
+     * case 2       |--size--|
+     * case 3           |---size--|
+     */
+
+    //adjust some things
+    if (address < begin)
+        address = begin;
+    if (chunk_end > end)
+        chunk_end = end;
+
+    //compute sub region sizes
+    l_size = (size_t)(address - begin);
+    r_size = (size_t)(end - chunk_end);
+    m_size = (size_t)(chunk_end - address);
+
+    //If there are errors in any of the below operations, things are too far gone
+    //at that point to recover. Could try to remap the original region, but these smaller
+    //allocations just failed so there is no guarantee that we can recover the original
+    //allocation at this point
+    if (l_size > 0) {
+        if (uc_mem_map(handle, begin, l_size, perms) != UC_ERR_OK)
+            goto error;
+        if (uc_mem_write(handle, begin, backup, l_size) != UC_ERR_OK)
+            goto error;
+    }
+    if (m_size > 0 && !do_delete) {
+        if (uc_mem_map(handle, address, m_size, perms) != UC_ERR_OK)
+            goto error;
+        if (uc_mem_write(handle, address, backup + l_size, m_size) != UC_ERR_OK)
+            goto error;
+    }
+    if (r_size > 0) {
+        if (uc_mem_map(handle, chunk_end, r_size, perms) != UC_ERR_OK)
+            goto error;
+        if (uc_mem_write(handle, chunk_end, backup + l_size + m_size, r_size) != UC_ERR_OK)
+            goto error;
+    }
+    return true;
+error:
+    free(backup);
+    return false;
+}
+
+UNICORN_EXPORT
+uc_err uc_mem_protect(uch handle, uint64_t address, size_t size, uint32_t perms)
+{
+    struct uc_struct* uc = (struct uc_struct *)handle;
+    MemoryRegion *mr;
+
+    if (handle == 0)
+        // invalid handle
+        return UC_ERR_UCH;
+
+    if (size == 0)
+        // trivial case, no change
+        return UC_ERR_OK;
+
+    // address must be aligned to uc->target_page_size
+    if ((address & uc->target_page_align) != 0)
+        return UC_ERR_INVAL;
+
+    // size must be multiple of uc->target_page_size
+    if ((size & uc->target_page_align) != 0)
+        return UC_ERR_INVAL;
+
+    // check for only valid permissions
+    if ((perms & ~UC_PROT_ALL) != 0)
+        return UC_ERR_INVAL;
+
+    //check that user's entire requested block is mapped
+    if (!check_mem_area(uc, address, size))
+        return UC_ERR_NOMEM;
+
+    //Now we know the entire region is mapped, so change permissions
+    //If the request exactly matches a region we don't need to split
+    mr = memory_mapping(uc, address);
+    if (address != mr->addr || size != int128_get64(mr->size)) {
+        //ouch, we are going to need to subdivide blocks
+        uint64_t addr = address;
+        size_t count = 0, len;
+        while(count < size) {
+            MemoryRegion *mr = memory_mapping(uc, addr);
+            len = MIN(size - count, mr->end - addr);
+            if (!split_region(handle, mr, addr, len, false))
+                return UC_ERR_NOMEM;
+            count += len;
+            addr += len;
+        }
+        //Grab a pointer to the newly split MemoryRegion
+        mr = memory_mapping(uc, address);
+        if (mr == NULL) {
+            //this should never happen if splitting succeeded
+            return UC_ERR_NOMEM;
+        }
+    }
+    //the region now exactly matches an existing region, so just change perms
+    mr->perms = perms;
+    uc->readonly_mem(mr, (perms & UC_PROT_WRITE) == 0);
+
+    return UC_ERR_OK;
+}
+
+UNICORN_EXPORT
+uc_err uc_mem_unmap(uch handle, uint64_t address, size_t size)
+{
+    MemoryRegion *mr;
+    unsigned int i;
+    struct uc_struct* uc = (struct uc_struct *)handle;
+
+    if (handle == 0)
+        // invalid handle
+        return UC_ERR_UCH;
+
+    if (size == 0)
+        // nothing to unmap
+        return UC_ERR_OK;
+
+    // address must be aligned to uc->target_page_size
+    if ((address & uc->target_page_align) != 0)
+        return UC_ERR_INVAL;
+
+    // size must be multiple of uc->target_page_size
+    if ((size & uc->target_page_align) != 0)
+        return UC_ERR_MAP;
+
+    //check that user's entire requested block is mapped
+    if (!check_mem_area(uc, address, size))
+        return UC_ERR_NOMEM;
+
+    //Now we know the entire region is mapped, so begin the delete
+    //check the trivial case first
+    mr = memory_mapping(uc, address);
+    if (address == mr->addr && size == int128_get64(mr->size)) {
+        //the region exactly matches an existing region, just unmap it
+        //this terminates a possible recursion between this function and split_region
+        uc->memory_unmap(uc, mr);
+        for (i = 0; i < uc->mapped_block_count; i++) {
+            if (uc->mapped_blocks[i] == mr) {
+                uc->mapped_block_count--;
+                //shift remainder of array down over deleted pointer
+                memcpy(&uc->mapped_blocks[i], &uc->mapped_blocks[i + 1], sizeof(MemoryRegion*) * (uc->mapped_block_count - i));
+                break;
+            }
+        }
+    }
+    else {
+        //ouch, we are going to need to subdivide blocks
+        size_t count = 0, len;
+        while(count < size) {
+            MemoryRegion *mr = memory_mapping(uc, address);
+            len = MIN(size - count, mr->end - address);
+            if (!split_region(handle, mr, address, len, true))
+                return UC_ERR_NOMEM;
+            count += len;
+            address += len;
+        }
+    }
+    return UC_ERR_OK;
+}
+
 MemoryRegion *memory_mapping(struct uc_struct* uc, uint64_t address)
 {
     unsigned int i;
@@ -675,7 +904,7 @@ static uc_err _hook_mem_invalid(struct uc_struct* uc, uc_cb_eventmem_t callback,
         uc->hook_mem_idx = i;
         return UC_ERR_OK;
     } else
-        return UC_ERR_OOM;
+        return UC_ERR_NOMEM;
 }
 
 
@@ -694,7 +923,7 @@ static uc_err _hook_intr(struct uc_struct* uc, void *callback,
         uc->hook_intr_idx = i;
         return UC_ERR_OK;
     } else
-        return UC_ERR_OOM;
+        return UC_ERR_NOMEM;
 }
 
 
@@ -718,7 +947,7 @@ static uc_err _hook_insn(struct uc_struct *uc, unsigned int insn_id, void *callb
         uc->hook_out_idx = i;
         return UC_ERR_OK;
     } else
-        return UC_ERR_OOM;
+        return UC_ERR_NOMEM;
     case UC_X86_INS_IN:
         // FIXME: only one event handler at the same time
         i = hook_find_new(uc);
@@ -729,7 +958,7 @@ static uc_err _hook_insn(struct uc_struct *uc, unsigned int insn_id, void *callb
         uc->hook_in_idx = i;
         return UC_ERR_OK;
     } else
-        return UC_ERR_OOM;
+        return UC_ERR_NOMEM;
     case UC_X86_INS_SYSCALL:
     case UC_X86_INS_SYSENTER:
         // FIXME: only one event handler at the same time
@@ -741,7 +970,7 @@ static uc_err _hook_insn(struct uc_struct *uc, unsigned int insn_id, void *callb
         uc->hook_syscall_idx = i;
         return UC_ERR_OK;
     } else
-        return UC_ERR_OOM;
+        return UC_ERR_NOMEM;
     }
     break;
 }
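For orientation only (not part of the commit): a minimal sketch of how the page-granular uc_mem_map / uc_mem_protect / uc_mem_unmap calls touched by this diff might be exercised through the uch-handle API of this period. The uc_open, uc_mem_* and uc_strerror signatures are taken from the hunks above; the header name, the UC_ARCH_X86 / UC_MODE_32 constants and the uc_close signature are assumptions.

    #include <stdio.h>
    #include <unicorn/unicorn.h>   // assumed public header for this era of the API

    int main(void)
    {
        uch handle;
        uc_err err;

        // uc_open/uc_mem_*/uc_strerror signatures are as shown in the hunks above
        err = uc_open(UC_ARCH_X86, UC_MODE_32, &handle);
        if (err != UC_ERR_OK) {
            printf("uc_open failed: %s\n", uc_strerror(err));
            return 1;
        }

        // map two pages RW at 0x100000; address and size must be aligned to
        // uc->target_page_size or the new checks return UC_ERR_INVAL
        err = uc_mem_map(handle, 0x100000, 0x2000, UC_PROT_READ | UC_PROT_WRITE);

        // drop write permission on the second page only; because the request does
        // not cover a whole mapped region, this goes through split_region()
        if (err == UC_ERR_OK)
            err = uc_mem_protect(handle, 0x101000, 0x1000, UC_PROT_READ);

        // unmap just the first page; again a partial region handled via split_region()
        if (err == UC_ERR_OK)
            err = uc_mem_unmap(handle, 0x100000, 0x1000);

        printf("result: %s\n", uc_strerror(err));
        uc_close(&handle);   // assumed uc_close(uch *) signature for this API version
        return 0;
    }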