@@ -177,27 +177,35 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
     uintptr_t haddr;
     DATA_TYPE res;
+    int mem_access, error_code;
 
     struct uc_struct *uc = env->uc;
     MemoryRegion *mr = memory_mapping(uc, addr);
 
+    // memory can be unmapped while reading or fetching
+    if (mr == NULL) {
 #if defined(SOFTMMU_CODE_ACCESS)
-    // Unicorn: callback on fetch from unmapped memory
-    if (mr == NULL) { // memory is not mapped
+        mem_access = UC_MEM_FETCH;
+        error_code = UC_ERR_MEM_FETCH;
+#else
+        mem_access = UC_MEM_READ;
+        error_code = UC_ERR_MEM_READ;
+#endif
         if (uc->hook_mem_idx != 0 && ((uc_cb_eventmem_t)uc->hook_callbacks[uc->hook_mem_idx].callback)(
-                    uc, UC_MEM_FETCH, addr, DATA_SIZE, 0,
+                    uc, mem_access, addr, DATA_SIZE, 0,
                     uc->hook_callbacks[uc->hook_mem_idx].user_data)) {
             env->invalid_error = UC_ERR_OK;
             mr = memory_mapping(uc, addr); // FIXME: what if mr is still NULL at this time?
         } else {
             env->invalid_addr = addr;
-            env->invalid_error = UC_ERR_MEM_FETCH;
+            env->invalid_error = error_code;
             // printf("***** Invalid fetch (unmapped memory) at " TARGET_FMT_lx "\n", addr);
             cpu_exit(uc->current_cpu);
             return 0;
         }
     }
 
+#if defined(SOFTMMU_CODE_ACCESS)
     // Unicorn: callback on fetch from NX
     if (mr != NULL && !(mr->perms & UC_PROT_EXEC)) { // non-executable
         if (uc->hook_mem_idx != 0 && ((uc_cb_eventmem_t)uc->hook_callbacks[uc->hook_mem_idx].callback)(
@@ -223,22 +231,6 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
         }
     }
 
-    // Unicorn: callback on invalid memory
-    if (READ_ACCESS_TYPE == MMU_DATA_LOAD && env->uc->hook_mem_idx && mr == NULL) {
-        if (!((uc_cb_eventmem_t)env->uc->hook_callbacks[env->uc->hook_mem_idx].callback)(
-                    env->uc, UC_MEM_READ, addr, DATA_SIZE, 0,
-                    env->uc->hook_callbacks[env->uc->hook_mem_idx].user_data)) {
-            // save error & quit
-            env->invalid_addr = addr;
-            env->invalid_error = UC_ERR_MEM_READ;
-            // printf("***** Invalid memory read at " TARGET_FMT_lx "\n", addr);
-            cpu_exit(env->uc->current_cpu);
-            return 0;
-        } else {
-            env->invalid_error = UC_ERR_OK;
-        }
-    }
-
     // Unicorn: callback on non-readable memory
     if (READ_ACCESS_TYPE == MMU_DATA_LOAD && mr != NULL && !(mr->perms & UC_PROT_READ)) { //non-readable
         if (uc->hook_mem_idx != 0 && ((uc_cb_eventmem_t)uc->hook_callbacks[uc->hook_mem_idx].callback)(
@@ -263,8 +255,16 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
 #ifdef ALIGNED_ONLY
         if ((addr & (DATA_SIZE - 1)) != 0) {
-            cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-                                 mmu_idx, retaddr);
+            //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+            //                     mmu_idx, retaddr);
+            env->invalid_addr = addr;
+#if defined(SOFTMMU_CODE_ACCESS)
+            env->invalid_error = UC_ERR_FETCH_UNALIGNED;
+#else
+            env->invalid_error = UC_ERR_READ_UNALIGNED;
+#endif
+            cpu_exit(uc->current_cpu);
+            return 0;
         }
 #endif
         if (!VICTIM_TLB_HIT(ADDR_READ)) {
@@ -307,8 +307,16 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
         unsigned shift;
     do_unaligned_access:
 #ifdef ALIGNED_ONLY
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-                             mmu_idx, retaddr);
+        //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+        //                     mmu_idx, retaddr);
+        env->invalid_addr = addr;
+#if defined(SOFTMMU_CODE_ACCESS)
+        env->invalid_error = UC_ERR_FETCH_UNALIGNED;
+#else
+        env->invalid_error = UC_ERR_READ_UNALIGNED;
+#endif
+        cpu_exit(uc->current_cpu);
+        return 0;
 #endif
         addr1 = addr & ~(DATA_SIZE - 1);
         addr2 = addr1 + DATA_SIZE;
@@ -326,8 +334,16 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
     /* Handle aligned access or unaligned access in the same page. */
 #ifdef ALIGNED_ONLY
     if ((addr & (DATA_SIZE - 1)) != 0) {
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-                             mmu_idx, retaddr);
+        //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+        //                     mmu_idx, retaddr);
+        env->invalid_addr = addr;
+#if defined(SOFTMMU_CODE_ACCESS)
+        env->invalid_error = UC_ERR_FETCH_UNALIGNED;
+#else
+        env->invalid_error = UC_ERR_READ_UNALIGNED;
+#endif
+        cpu_exit(uc->current_cpu);
+        return 0;
     }
 #endif
 
@@ -351,27 +367,35 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
     uintptr_t haddr;
    DATA_TYPE res;
+    int mem_access, error_code;
 
     struct uc_struct *uc = env->uc;
     MemoryRegion *mr = memory_mapping(uc, addr);
 
+    // memory can be unmapped while reading or fetching
+    if (mr == NULL) {
 #if defined(SOFTMMU_CODE_ACCESS)
-    // Unicorn: callback on fetch from unmapped memory
-    if (mr == NULL) { // memory is not mapped
+        mem_access = UC_MEM_FETCH;
+        error_code = UC_ERR_MEM_FETCH;
+#else
+        mem_access = UC_MEM_READ;
+        error_code = UC_ERR_MEM_READ;
+#endif
         if (uc->hook_mem_idx != 0 && ((uc_cb_eventmem_t)uc->hook_callbacks[uc->hook_mem_idx].callback)(
-                    uc, UC_MEM_FETCH, addr, DATA_SIZE, 0,
+                    uc, mem_access, addr, DATA_SIZE, 0,
                     uc->hook_callbacks[uc->hook_mem_idx].user_data)) {
             env->invalid_error = UC_ERR_OK;
             mr = memory_mapping(uc, addr); // FIXME: what if mr is still NULL at this time?
         } else {
             env->invalid_addr = addr;
-            env->invalid_error = UC_ERR_MEM_FETCH;
+            env->invalid_error = error_code;
             // printf("***** Invalid fetch (unmapped memory) at " TARGET_FMT_lx "\n", addr);
             cpu_exit(uc->current_cpu);
             return 0;
         }
     }
 
+#if defined(SOFTMMU_CODE_ACCESS)
     // Unicorn: callback on fetch from NX
     if (mr != NULL && !(mr->perms & UC_PROT_EXEC)) { // non-executable
         if (uc->hook_mem_idx != 0 && ((uc_cb_eventmem_t)uc->hook_callbacks[uc->hook_mem_idx].callback)(
@@ -397,22 +421,6 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
         }
     }
 
-    // Unicorn: callback on invalid memory
-    if (READ_ACCESS_TYPE == MMU_DATA_LOAD && env->uc->hook_mem_idx && mr == NULL) {
-        if (!((uc_cb_eventmem_t)env->uc->hook_callbacks[env->uc->hook_mem_idx].callback)(
-                    env->uc, UC_MEM_READ, addr, DATA_SIZE, 0,
-                    env->uc->hook_callbacks[env->uc->hook_mem_idx].user_data)) {
-            // save error & quit
-            env->invalid_addr = addr;
-            env->invalid_error = UC_ERR_MEM_READ;
-            // printf("***** Invalid memory read at " TARGET_FMT_lx "\n", addr);
-            cpu_exit(env->uc->current_cpu);
-            return 0;
-        } else {
-            env->invalid_error = UC_ERR_OK;
-        }
-    }
-
     // Unicorn: callback on non-readable memory
     if (READ_ACCESS_TYPE == MMU_DATA_LOAD && mr != NULL && !(mr->perms & UC_PROT_READ)) { //non-readable
         if (uc->hook_mem_idx != 0 && ((uc_cb_eventmem_t)uc->hook_callbacks[uc->hook_mem_idx].callback)(
@@ -436,8 +444,16 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
 #ifdef ALIGNED_ONLY
         if ((addr & (DATA_SIZE - 1)) != 0) {
-            cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-                                 mmu_idx, retaddr);
+            //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+            //                     mmu_idx, retaddr);
+            env->invalid_addr = addr;
+#if defined(SOFTMMU_CODE_ACCESS)
+            env->invalid_error = UC_ERR_FETCH_UNALIGNED;
+#else
+            env->invalid_error = UC_ERR_READ_UNALIGNED;
+#endif
+            cpu_exit(uc->current_cpu);
+            return 0;
         }
 #endif
         if (!VICTIM_TLB_HIT(ADDR_READ)) {
@@ -479,8 +495,16 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
         unsigned shift;
     do_unaligned_access:
 #ifdef ALIGNED_ONLY
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-                             mmu_idx, retaddr);
+        //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+        //                     mmu_idx, retaddr);
+        env->invalid_addr = addr;
+#if defined(SOFTMMU_CODE_ACCESS)
+        env->invalid_error = UC_ERR_FETCH_UNALIGNED;
+#else
+        env->invalid_error = UC_ERR_READ_UNALIGNED;
+#endif
+        cpu_exit(uc->current_cpu);
+        return 0;
 #endif
         addr1 = addr & ~(DATA_SIZE - 1);
         addr2 = addr1 + DATA_SIZE;
@@ -498,8 +522,16 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
     /* Handle aligned access or unaligned access in the same page. */
 #ifdef ALIGNED_ONLY
     if ((addr & (DATA_SIZE - 1)) != 0) {
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-                             mmu_idx, retaddr);
+        //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+        //                     mmu_idx, retaddr);
+        env->invalid_addr = addr;
+#if defined(SOFTMMU_CODE_ACCESS)
+        env->invalid_error = UC_ERR_FETCH_UNALIGNED;
+#else
+        env->invalid_error = UC_ERR_READ_UNALIGNED;
+#endif
+        cpu_exit(uc->current_cpu);
+        return 0;
     }
 #endif
 
@@ -615,8 +647,12 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
 #ifdef ALIGNED_ONLY
         if ((addr & (DATA_SIZE - 1)) != 0) {
-            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                                 mmu_idx, retaddr);
+            //cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+            //                     mmu_idx, retaddr);
+            env->invalid_addr = addr;
+            env->invalid_error = UC_ERR_WRITE_UNALIGNED;
+            cpu_exit(uc->current_cpu);
+            return;
         }
 #endif
         if (!VICTIM_TLB_HIT(addr_write)) {
@@ -656,6 +692,10 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
 #ifdef ALIGNED_ONLY
         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                              mmu_idx, retaddr);
+        env->invalid_addr = addr;
+        env->invalid_error = UC_ERR_WRITE_UNALIGNED;
+        cpu_exit(uc->current_cpu);
+        return;
 #endif
         /* XXX: not efficient, but simple */
         /* Note: relies on the fact that tlb_fill() does not remove the
@@ -678,6 +718,10 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     if ((addr & (DATA_SIZE - 1)) != 0) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                              mmu_idx, retaddr);
+        env->invalid_addr = addr;
+        env->invalid_error = UC_ERR_WRITE_UNALIGNED;
+        cpu_exit(uc->current_cpu);
+        return;
     }
 #endif
 
@@ -751,6 +795,10 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         if ((addr & (DATA_SIZE - 1)) != 0) {
             cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                                  mmu_idx, retaddr);
+            env->invalid_addr = addr;
+            env->invalid_error = UC_ERR_WRITE_UNALIGNED;
+            cpu_exit(uc->current_cpu);
+            return;
         }
 #endif
         if (!VICTIM_TLB_HIT(addr_write)) {
@@ -790,6 +838,10 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
 #ifdef ALIGNED_ONLY
         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                              mmu_idx, retaddr);
+        env->invalid_addr = addr;
+        env->invalid_error = UC_ERR_WRITE_UNALIGNED;
+        cpu_exit(uc->current_cpu);
+        return;
 #endif
         /* XXX: not efficient, but simple */
         /* Note: relies on the fact that tlb_fill() does not remove the
@@ -812,6 +864,10 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     if ((addr & (DATA_SIZE - 1)) != 0) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                              mmu_idx, retaddr);
+        env->invalid_addr = addr;
+        env->invalid_error = UC_ERR_WRITE_UNALIGNED;
+        cpu_exit(uc->current_cpu);
+        return;
     }
 #endif
 
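
For context: the `uc->hook_callbacks[uc->hook_mem_idx]` callback that these helpers invoke is the memory-event hook a Unicorn user registers through the public API; a nonzero return resumes emulation (the helper re-runs memory_mapping()), a zero return aborts it via cpu_exit(). Below is a minimal sketch of the user-side counterpart, written against the present-day public API rather than this revision's internals — the modern hook and event types (UC_HOOK_MEM_*_UNMAPPED, UC_MEM_FETCH_UNMAPPED) are finer-grained than the UC_MEM_READ/UC_MEM_FETCH constants used in the patch, and the page-granular on-demand mapping is purely illustrative.

#include <inttypes.h>
#include <stdio.h>
#include <unicorn/unicorn.h>

/* Returning true tells the softmmu helper the fault was fixed up, so it
 * retries the mapping lookup and continues; returning false makes it record
 * invalid_addr/invalid_error and stop emulation with an unmapped-memory
 * error from uc_emu_start(). */
static bool hook_unmapped(uc_engine *uc, uc_mem_type type, uint64_t address,
                          int size, int64_t value, void *user_data)
{
    (void)value; (void)user_data;
    printf("unmapped %s at 0x%" PRIx64 " (size %d)\n",
           type == UC_MEM_FETCH_UNMAPPED ? "fetch" : "read", address, size);
    /* Map a page on demand so the faulting read/fetch can be retried. */
    return uc_mem_map(uc, address & ~0xfffULL, 0x1000, UC_PROT_ALL) == UC_ERR_OK;
}

/* Registration; begin > end (here 1, 0) makes the hook cover all addresses:
 *
 *     uc_hook hh;
 *     uc_hook_add(uc, &hh,
 *                 UC_HOOK_MEM_READ_UNMAPPED | UC_HOOK_MEM_FETCH_UNMAPPED,
 *                 hook_unmapped, NULL, 1, 0);
 */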