import Unicorn2

This commit is contained in:
Nguyen Anh Quynh
2021-10-03 22:14:44 +08:00
parent 772558119a
commit aaaea14214
837 changed files with 368717 additions and 200912 deletions

View File

@@ -1,35 +0,0 @@
/*
* Internal memory management interfaces
*
* Copyright 2011 Red Hat, Inc. and/or its affiliates
*
* Authors:
* Avi Kivity <avi@redhat.com>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*
*/
#ifndef EXEC_MEMORY_H
#define EXEC_MEMORY_H
/*
* Internal interfaces between memory.c/exec.c/vl.c. Do not #include unless
* you're one of them.
*/
#include "exec/memory.h"
#ifndef CONFIG_USER_ONLY
/* Get the root memory region. This interface should only be used temporarily
* until a proper bus interface is available.
*/
MemoryRegion *get_system_memory(struct uc_struct *uc);
extern AddressSpace address_space_memory;
#endif
#endif

View File

@@ -19,22 +19,29 @@
#ifndef CPU_ALL_H
#define CPU_ALL_H
#include "qemu-common.h"
#include "exec/cpu-common.h"
#include "exec/memory.h"
#include "qemu/thread.h"
#include "qom/cpu.h"
#include "hw/core/cpu.h"
#include <uc_priv.h>
#if 0
#include "qemu/rcu.h"
#endif
#define EXCP_INTERRUPT 0x10000 /* async interruption */
#define EXCP_HLT 0x10001 /* hlt instruction reached */
#define EXCP_DEBUG 0x10002 /* cpu stopped after a breakpoint or singlestep */
#define EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */
#define EXCP_YIELD 0x10004 /* cpu wants to yield timeslice to another */
#define EXCP_ATOMIC 0x10005 /* stop-the-world and emulate atomic */
/* some important defines:
*
* WORDS_ALIGNED : if defined, the host cpu can only make word aligned
* memory accesses.
*
* HOST_WORDS_BIGENDIAN : if defined, the host cpu is big endian and
* otherwise little endian.
*
* (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
*
* TARGET_WORDS_BIGENDIAN : same for target cpu
*/
@@ -115,43 +122,9 @@ static inline void tswap64s(uint64_t *s)
#define bswaptls(s) bswap64s(s)
#endif
/* CPU memory access without any memory or io remapping */
/*
* the generic syntax for the memory accesses is:
*
* load: ld{type}{sign}{size}{endian}_{access_type}(ptr)
*
* store: st{type}{size}{endian}_{access_type}(ptr, val)
*
* type is:
* (empty): integer access
* f : float access
*
* sign is:
* (empty): for floats or 32 bit size
* u : unsigned
* s : signed
*
* size is:
* b: 8 bits
* w: 16 bits
* l: 32 bits
* q: 64 bits
*
* endian is:
* (empty): target cpu endianness or 8 bit access
* r : reversed target cpu endianness (not implemented yet)
* be : big endian (not implemented yet)
* le : little endian (not implemented yet)
*
* access_type is:
* raw : host memory access
* user : user mode access using soft MMU
* kernel : kernel mode access using soft MMU
/* Target-endianness CPU memory access functions. These fit into the
* {ld,st}{type}{sign}{size}{endian}_p naming scheme described in bswap.h.
*/
/* target-endianness CPU memory access functions */
#if defined(TARGET_WORDS_BIGENDIAN)
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
@@ -164,6 +137,8 @@ static inline void tswap64s(uint64_t *s)
#define stq_p(p, v) stq_be_p(p, v)
#define stfl_p(p, v) stfl_be_p(p, v)
#define stfq_p(p, v) stfq_be_p(p, v)
#define ldn_p(p, sz) ldn_be_p(p, sz)
#define stn_p(p, sz, v) stn_be_p(p, sz, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
@@ -176,39 +151,102 @@ static inline void tswap64s(uint64_t *s)
#define stq_p(p, v) stq_le_p(p, v)
#define stfl_p(p, v) stfl_le_p(p, v)
#define stfq_p(p, v) stfq_le_p(p, v)
#define ldn_p(p, sz) ldn_le_p(p, sz)
#define stn_p(p, sz, v) stn_le_p(p, sz, v)
#endif
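/* Illustrative sketch (not part of the original header): how the
 * {ld,st}{size}{endian}_p accessors from bswap.h compose; buf is assumed
 * to be a valid host pointer.
 */
#if 0
uint8_t buf[8];
stw_be_p(buf, 0x1234);          /* store 16 bits, big-endian */
uint32_t v = ldl_le_p(buf);     /* load 32 bits, little-endian */
uint32_t w = ldl_p(buf);        /* load 32 bits, target endianness */
#endif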
/* MMU memory access macros */
#include "exec/hwaddr.h"
#if defined(CONFIG_USER_ONLY)
#include <assert.h>
#include "exec/user/abitypes.h"
/* On some host systems the guest address space is reserved on the host.
* This allows the guest address space to be offset to a convenient location.
*/
#if defined(CONFIG_USE_GUEST_BASE)
extern unsigned long guest_base;
extern int have_guest_base;
extern unsigned long reserved_va;
#define GUEST_BASE guest_base
#define RESERVED_VA reserved_va
#ifdef UNICORN_ARCH_POSTFIX
#define SUFFIX UNICORN_ARCH_POSTFIX
#else
#define GUEST_BASE 0ul
#define RESERVED_VA 0ul
#define SUFFIX
#endif
#define ARG1 as
#define ARG1_DECL AddressSpace *as
#define TARGET_ENDIANNESS
#include "exec/memory_ldst.inc.h"
#define GUEST_ADDR_MAX (RESERVED_VA ? RESERVED_VA : \
(1ul << TARGET_VIRT_ADDR_SPACE_BITS) - 1)
#ifdef UNICORN_ARCH_POSTFIX
#define SUFFIX glue(_cached_slow, UNICORN_ARCH_POSTFIX)
#else
#define SUFFIX _cached_slow
#endif
#define ARG1 cache
#define ARG1_DECL MemoryRegionCache *cache
#define TARGET_ENDIANNESS
#include "exec/memory_ldst.inc.h"
static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
#ifdef UNICORN_ARCH_POSTFIX
glue(address_space_stl_notdirty, UNICORN_ARCH_POSTFIX)
(as->uc, as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
#else
address_space_stl_notdirty(as->uc, as, addr, val,
MEMTXATTRS_UNSPECIFIED, NULL);
#endif
}
#ifdef UNICORN_ARCH_POSTFIX
#define SUFFIX UNICORN_ARCH_POSTFIX
#else
#define SUFFIX
#endif
#define ARG1 as
#define ARG1_DECL AddressSpace *as
#define TARGET_ENDIANNESS
#include "exec/memory_ldst_phys.inc.h"
/* Inline fast path for direct RAM access. */
#define ENDIANNESS
#include "exec/memory_ldst_cached.inc.h"
#ifdef UNICORN_ARCH_POSTFIX
#define SUFFIX glue(_cached, UNICORN_ARCH_POSTFIX)
#else
#define SUFFIX _cached
#endif
#define ARG1 cache
#define ARG1_DECL MemoryRegionCache *cache
#define TARGET_ENDIANNESS
#include "exec/memory_ldst_phys.inc.h"
/* page related stuff */
#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)
#ifdef TARGET_PAGE_BITS_VARY
typedef struct TargetPageBits {
bool decided;
int bits;
target_long mask;
} TargetPageBits;
#if defined(CONFIG_ATTRIBUTE_ALIAS) || !defined(IN_EXEC_VARY)
extern const TargetPageBits target_page;
#else
extern TargetPageBits target_page;
#endif
#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)
#ifdef CONFIG_DEBUG_TCG
#define TARGET_PAGE_BITS ({ assert(target_page.decided); target_page.bits; })
#define TARGET_PAGE_MASK ({ assert(target_page.decided); target_page.mask; })
#else
#define TARGET_PAGE_BITS uc->init_target_page->bits
#define TARGET_PAGE_MASK uc->init_target_page->mask
#endif
#define TARGET_PAGE_SIZE (-(int)TARGET_PAGE_MASK) // qq
#else
#define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS
#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ((target_ulong)-1 << TARGET_PAGE_BITS)
#endif
#define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE)
#define HOST_PAGE_ALIGN(uc, addr) ROUND_UP((addr), uc->qemu_host_page_size)
#if 0
#define REAL_HOST_PAGE_ALIGN(addr) ROUND_UP((addr), uc->qemu_real_host_page_size)
#endif
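/* Worked example (illustrative, not part of the original header): with
 * 4 KiB pages (TARGET_PAGE_BITS == 12), TARGET_PAGE_SIZE is 0x1000, so
 * TARGET_PAGE_ALIGN(0x1234) == ROUND_UP(0x1234, 0x1000) == 0x2000, and
 * an already-aligned 0x2000 stays 0x2000.
 */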
/* same as PROT_xxx */
#define PAGE_READ 0x0001
@@ -219,16 +257,9 @@ extern unsigned long reserved_va;
/* original state of the write flag (used when tracking self-modifying
   code) */
#define PAGE_WRITE_ORG 0x0010
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
/* FIXME: Code that sets/uses this is broken and needs to go away. */
#define PAGE_RESERVED 0x0020
#endif
#if defined(CONFIG_USER_ONLY)
//void page_dump(FILE *f);
int page_get_flags(target_ulong address);
#endif
/* Invalidate the TLB entry immediately, helpful for s390x
* Low-Address-Protection. Used with PAGE_WRITE in tlb_set_page_with_attrs() */
#define PAGE_WRITE_INV 0x0040
CPUArchState *cpu_copy(CPUArchState *env);
@@ -284,26 +315,131 @@ CPUArchState *cpu_copy(CPUArchState *env);
| CPU_INTERRUPT_TGT_EXT_3 \
| CPU_INTERRUPT_TGT_EXT_4)
#if !defined(CONFIG_USER_ONLY)
/* memory API */
/* Flags stored in the low bits of the TLB virtual address. These are
defined so that fast path ram access is all zeros. */
/*
* Flags stored in the low bits of the TLB virtual address.
* These are defined so that fast path ram access is all zeros.
* The flags all must be between TARGET_PAGE_BITS and
* maximum address alignment bit.
*
* Use TARGET_PAGE_BITS_MIN so that these bits are constant
* when TARGET_PAGE_BITS_VARY is in effect.
*/
/* Zero if TLB entry is valid. */
#define TLB_INVALID_MASK (1 << 3)
#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS_MIN - 1))
/* Set if TLB entry references a clean RAM page. The iotlb entry will
contain the page physical address. */
#define TLB_NOTDIRTY (1 << 4)
#define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS_MIN - 2))
/* Set if TLB entry is an IO callback. */
#define TLB_MMIO (1 << 5)
#define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 3))
/* Set if TLB entry contains a watchpoint. */
#define TLB_WATCHPOINT (1 << (TARGET_PAGE_BITS_MIN - 4))
/* Set if TLB entry requires byte swap. */
#define TLB_BSWAP (1 << (TARGET_PAGE_BITS_MIN - 5))
/* Set if TLB entry writes are ignored. */
#define TLB_DISCARD_WRITE (1 << (TARGET_PAGE_BITS_MIN - 6))
ram_addr_t last_ram_offset(struct uc_struct *uc);
void qemu_mutex_lock_ramlist(struct uc_struct *uc);
void qemu_mutex_unlock_ramlist(struct uc_struct *uc);
#endif /* !CONFIG_USER_ONLY */
/* Use this mask to check interception with an alignment mask
* in a TCG backend.
*/
#define TLB_FLAGS_MASK \
(TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
| TLB_WATCHPOINT | TLB_BSWAP | TLB_DISCARD_WRITE)
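/* Worked example (illustrative): with TARGET_PAGE_BITS_MIN == 12 the
 * flags occupy bits 11..6: TLB_INVALID_MASK == 0x800, TLB_NOTDIRTY ==
 * 0x400, TLB_MMIO == 0x200, TLB_WATCHPOINT == 0x100, TLB_BSWAP == 0x80,
 * TLB_DISCARD_WRITE == 0x40, all inside the page-offset bits, so a
 * fast-path RAM hit still compares as all zeros.
 */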
/**
* tlb_hit_page: return true if page aligned @addr is a hit against the
* TLB entry @tlb_addr
*
* @addr: virtual address to test (must be page aligned)
* @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
*/
static inline bool tlb_hit_page(struct uc_struct *uc, target_ulong tlb_addr, target_ulong addr)
{
return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
}
/**
* tlb_hit: return true if @addr is a hit against the TLB entry @tlb_addr
*
* @addr: virtual address to test (need not be page aligned)
* @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
*/
static inline bool tlb_hit(struct uc_struct *uc, target_ulong tlb_addr, target_ulong addr)
{
return tlb_hit_page(uc, tlb_addr, addr & TARGET_PAGE_MASK);
}
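/* Illustrative usage sketch (not part of the original header): how the
 * softmmu fast path uses tlb_hit(); assumes a valid env/mmu_idx and the
 * tlb_entry() helper declared in cpu_ldst.h.
 */
#if 0
CPUTLBEntry *e = tlb_entry(env, mmu_idx, addr);
if (tlb_hit(env->uc, e->addr_read, addr)) {
    void *host = (void *)(uintptr_t)(addr + e->addend);
    /* direct RAM load from host */
}
#endif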
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
uint8_t *buf, int len, int is_write);
void *ptr, target_ulong len, bool is_write);
int cpu_exec(struct uc_struct *uc, CPUState *cpu);
/**
* cpu_set_cpustate_pointers(cpu)
* @cpu: The cpu object
*
* Set the generic pointers in CPUState into the outer object.
*/
static inline void cpu_set_cpustate_pointers(ArchCPU *cpu)
{
cpu->parent_obj.env_ptr = &cpu->env;
cpu->parent_obj.icount_decr_ptr = &cpu->neg.icount_decr;
}
/**
* env_archcpu(env)
* @env: The architecture environment
*
* Return the ArchCPU associated with the environment.
*/
static inline ArchCPU *env_archcpu(CPUArchState *env)
{
return container_of(env, ArchCPU, env);
}
/**
* env_cpu(env)
* @env: The architecture environment
*
* Return the CPUState associated with the environment.
*/
static inline CPUState *env_cpu(CPUArchState *env)
{
return &env_archcpu(env)->parent_obj;
}
/**
* env_neg(env)
* @env: The architecture environment
*
* Return the CPUNegativeOffsetState associated with the environment.
*/
static inline CPUNegativeOffsetState *env_neg(CPUArchState *env)
{
ArchCPU *arch_cpu = container_of(env, ArchCPU, env);
return &arch_cpu->neg;
}
/**
* cpu_neg(cpu)
* @cpu: The generic CPUState
*
* Return the CPUNegativeOffsetState associated with the cpu.
*/
static inline CPUNegativeOffsetState *cpu_neg(CPUState *cpu)
{
ArchCPU *arch_cpu = container_of(cpu, ArchCPU, parent_obj);
return &arch_cpu->neg;
}
/**
* env_tlb(env)
* @env: The architecture environment
*
* Return the CPUTLB state associated with the environment.
*/
static inline CPUTLB *env_tlb(CPUArchState *env)
{
return &env_neg(env)->tlb;
}
#endif /* CPU_ALL_H */

View File

@@ -1,24 +1,16 @@
#ifndef CPU_COMMON_H
#define CPU_COMMON_H 1
#define CPU_COMMON_H
/* CPU interfaces that are target independent. */
struct uc_struct;
#ifndef CONFIG_USER_ONLY
#include "exec/hwaddr.h"
#endif
#include "qemu/bswap.h"
#include "qemu/queue.h"
/* The CPU list lock nests outside page_(un)lock or mmap_(un)lock */
void qemu_init_cpu_list(void);
void cpu_list_lock(void);
void cpu_list_unlock(void);
typedef enum MMUAccessType {
MMU_DATA_LOAD = 0,
MMU_DATA_STORE = 1,
MMU_INST_FETCH = 2
} MMUAccessType;
#if !defined(CONFIG_USER_ONLY)
void tcg_flush_softmmu_tlb(struct uc_struct *uc);
enum device_endian {
DEVICE_NATIVE_ENDIAN,
@@ -26,95 +18,57 @@ enum device_endian {
DEVICE_LITTLE_ENDIAN,
};
/* address in the RAM (different from a physical address) */
#if defined(CONFIG_XEN_BACKEND)
typedef uint64_t ram_addr_t;
# define RAM_ADDR_MAX UINT64_MAX
# define RAM_ADDR_FMT "%" PRIx64
#if defined(HOST_WORDS_BIGENDIAN)
#define DEVICE_HOST_ENDIAN DEVICE_BIG_ENDIAN
#else
#define DEVICE_HOST_ENDIAN DEVICE_LITTLE_ENDIAN
#endif
/* address in the RAM (different from a physical address) */
typedef uintptr_t ram_addr_t;
# define RAM_ADDR_MAX UINTPTR_MAX
# define RAM_ADDR_FMT "%" PRIxPTR
#endif
extern ram_addr_t ram_size;
/* memory API */
typedef void CPUWriteMemoryFunc(void *opaque, hwaddr addr, uint32_t value);
typedef uint32_t CPUReadMemoryFunc(void *opaque, hwaddr addr);
void qemu_ram_remap(struct uc_struct *uc, ram_addr_t addr, ram_addr_t length);
/* This should not be used by devices. */
MemoryRegion *qemu_ram_addr_from_host(struct uc_struct* uc, void *ptr, ram_addr_t *ram_addr);
void qemu_ram_set_idstr(struct uc_struct *uc, ram_addr_t addr, const char *name, DeviceState *dev);
void qemu_ram_unset_idstr(struct uc_struct *uc, ram_addr_t addr);
ram_addr_t qemu_ram_addr_from_host(struct uc_struct *uc, void *ptr);
RAMBlock *qemu_ram_block_from_host(struct uc_struct *uc, void *ptr,
bool round_offset, ram_addr_t *offset);
ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host);
void *qemu_ram_get_host_addr(RAMBlock *rb);
ram_addr_t qemu_ram_get_offset(RAMBlock *rb);
ram_addr_t qemu_ram_get_used_length(RAMBlock *rb);
bool qemu_ram_is_shared(RAMBlock *rb);
bool cpu_physical_memory_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
int len, int is_write);
size_t qemu_ram_pagesize(RAMBlock *block);
size_t qemu_ram_pagesize_largest(void);
bool cpu_physical_memory_rw(AddressSpace *as, hwaddr addr, void *buf,
hwaddr len, bool is_write);
static inline void cpu_physical_memory_read(AddressSpace *as, hwaddr addr,
void *buf, int len)
void *buf, hwaddr len)
{
cpu_physical_memory_rw(as, addr, buf, len, 0);
cpu_physical_memory_rw(as, addr, buf, len, false);
}
static inline void cpu_physical_memory_write(AddressSpace *as, hwaddr addr,
const void *buf, int len)
const void *buf, hwaddr len)
{
cpu_physical_memory_rw(as, addr, (void *)buf, len, 1);
cpu_physical_memory_rw(as, addr, (void *)buf, len, true);
}
void *cpu_physical_memory_map(AddressSpace *as, hwaddr addr,
hwaddr *plen,
int is_write);
bool is_write);
void cpu_physical_memory_unmap(AddressSpace *as, void *buffer, hwaddr len,
int is_write, hwaddr access_len);
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
bool is_write, hwaddr access_len);
bool cpu_physical_memory_is_io(AddressSpace *as, hwaddr phys_addr);
/* Coalesced MMIO regions are areas where write operations can be reordered.
* This usually implies that write operations are side-effect free. This allows
* batching which can make a major impact on performance when using
* virtualization.
*/
void qemu_flush_coalesced_mmio_buffer(void);
void cpu_flush_icache_range(AddressSpace *as, hwaddr start, hwaddr len);
uint32_t ldub_phys(AddressSpace *as, hwaddr addr);
uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr);
uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr);
uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr);
uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr);
uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr);
uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr);
void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val);
void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val);
int ram_block_discard_range(struct uc_struct *uc, RAMBlock *rb, uint64_t start, size_t length);
#ifdef NEED_CPU_H
uint32_t lduw_phys(AddressSpace *as, hwaddr addr);
uint32_t ldl_phys(AddressSpace *as, hwaddr addr);
uint64_t ldq_phys(AddressSpace *as, hwaddr addr);
void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val);
void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val);
#endif
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
const uint8_t *buf, int len);
void cpu_flush_icache_range(AddressSpace *as, hwaddr start, int len);
extern struct MemoryRegion io_mem_rom;
extern struct MemoryRegion io_mem_notdirty;
typedef void (RAMBlockIterFunc)(void *host_addr,
ram_addr_t offset, ram_addr_t length, void *opaque);
void qemu_ram_foreach_block(struct uc_struct *uc, RAMBlockIterFunc func, void *opaque);
#endif
#endif /* !CPU_COMMON_H */
#endif /* CPU_COMMON_H */

View File

@@ -23,16 +23,35 @@
#error cpu.h included from common code
#endif
#include "config.h"
#include "unicorn/platform.h"
#include "qemu/osdep.h"
#include "qemu/queue.h"
#ifndef CONFIG_USER_ONLY
#include "qemu/host-utils.h"
#include "qemu/thread.h"
#include "tcg-target.h"
#include "exec/hwaddr.h"
#endif
#include "exec/memattrs.h"
#include "hw/core/cpu.h"
#include "cpu-param.h"
#ifndef TARGET_LONG_BITS
#error TARGET_LONG_BITS must be defined before including this header
# error TARGET_LONG_BITS must be defined in cpu-param.h
#endif
#ifndef NB_MMU_MODES
# error NB_MMU_MODES must be defined in cpu-param.h
#endif
#ifndef TARGET_PHYS_ADDR_SPACE_BITS
# error TARGET_PHYS_ADDR_SPACE_BITS must be defined in cpu-param.h
#endif
#ifndef TARGET_VIRT_ADDR_SPACE_BITS
# error TARGET_VIRT_ADDR_SPACE_BITS must be defined in cpu-param.h
#endif
#ifndef TARGET_PAGE_BITS
# ifdef TARGET_PAGE_BITS_VARY
# ifndef TARGET_PAGE_BITS_MIN
# error TARGET_PAGE_BITS_MIN must be defined in cpu-param.h
# endif
# else
# error TARGET_PAGE_BITS must be defined in cpu-param.h
# endif
#endif
#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8)
@@ -54,23 +73,6 @@ typedef uint64_t target_ulong;
#error TARGET_LONG_SIZE undefined
#endif
#define EXCP_INTERRUPT 0x10000 /* async interruption */
#define EXCP_HLT 0x10001 /* hlt instruction reached */
#define EXCP_DEBUG 0x10002 /* cpu stopped after a breakpoint or singlestep */
#define EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */
#define EXCP_YIELD 0x10004 /* cpu wants to yield timeslice to another */
/* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for
addresses on the same page. The top bits are the same. This allows
TLB invalidation to quickly clear a subset of the hash table. */
#define TB_JMP_PAGE_BITS (TB_JMP_CACHE_BITS / 2)
#define TB_JMP_PAGE_SIZE (1 << TB_JMP_PAGE_BITS)
#define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
#define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)
#if !defined(CONFIG_USER_ONLY)
#define CPU_TLB_BITS 8
#define CPU_TLB_SIZE (1 << CPU_TLB_BITS)
/* use a fully associative victim tlb of 8 entries */
#define CPU_VTLB_SIZE 8
@@ -80,6 +82,24 @@ typedef uint64_t target_ulong;
#define CPU_TLB_ENTRY_BITS 5
#endif
#define CPU_TLB_DYN_MIN_BITS 6
#define CPU_TLB_DYN_DEFAULT_BITS 8
# if HOST_LONG_BITS == 32
/* Make sure we do not require a double-word shift for the TLB load */
# define CPU_TLB_DYN_MAX_BITS (32 - TARGET_PAGE_BITS)
# else /* HOST_LONG_BITS == 64 */
/*
* Assuming TARGET_PAGE_BITS==12, with 2**22 entries we can cover 2**(22+12) ==
* 2**34 == 16G of address space. This is roughly what one would expect a
* TLB to cover in a modern (as of 2018) x86_64 CPU. For instance, Intel
* Skylake's Level-2 STLB has 16 1G entries.
* Also, make sure we do not size the TLB past the guest's address space.
*/
# define CPU_TLB_DYN_MAX_BITS \
MIN(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS)
# endif
typedef struct CPUTLBEntry {
/* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address
bit TARGET_PAGE_BITS-1..4 : Nonzero for accesses that should not
@@ -87,65 +107,123 @@ typedef struct CPUTLBEntry {
bit 3 : indicates that the entry is invalid
bit 2..0 : zero
*/
target_ulong addr_read;
target_ulong addr_write;
target_ulong addr_code;
/* Addend to virtual address to get host address. IO accesses
use the corresponding iotlb value. */
uintptr_t addend;
/* padding to get a power of two size */
#ifdef _MSC_VER
# define TARGET_ULONG_SIZE (TARGET_LONG_BITS/8)
# ifdef _WIN64
# define UINTPTR_SIZE 8
# else
# define UINTPTR_SIZE 4
# endif
#define DUMMY_SIZE (1 << CPU_TLB_ENTRY_BITS) - \
(TARGET_ULONG_SIZE * 3 + \
((-TARGET_ULONG_SIZE * 3) & (UINTPTR_SIZE - 1)) + \
UINTPTR_SIZE)
#if DUMMY_SIZE > 0
uint8_t dummy[DUMMY_SIZE];
#endif
#else // _MSC_VER
uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) -
(sizeof(target_ulong) * 3 +
((-sizeof(target_ulong) * 3) & (sizeof(uintptr_t) - 1)) +
sizeof(uintptr_t))];
#endif // _MSC_VER
union {
struct {
target_ulong addr_read;
target_ulong addr_write;
target_ulong addr_code;
/* Addend to virtual address to get host address. IO accesses
use the corresponding iotlb value. */
uintptr_t addend;
};
/* padding to get a power of two size */
uint8_t dummy[1 << CPU_TLB_ENTRY_BITS];
};
} CPUTLBEntry;
QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));
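/* Worked example (illustrative): on a 64-bit host with a 64-bit
 * target_ulong and CPU_TLB_ENTRY_BITS == 5, the anonymous struct is
 * 3 * 8 bytes of addresses plus an 8-byte addend == 32 bytes, exactly
 * the 1 << 5 == 32-byte dummy, so the union adds no padding and the
 * build assertion above holds.
 */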
#define CPU_COMMON_TLB \
/* The meaning of the MMU modes is defined in the target code. */ \
CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \
CPUTLBEntry tlb_v_table[NB_MMU_MODES][CPU_VTLB_SIZE]; \
hwaddr iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \
hwaddr iotlb_v[NB_MMU_MODES][CPU_VTLB_SIZE]; \
target_ulong tlb_flush_addr; \
target_ulong tlb_flush_mask; \
target_ulong vtlb_index; \
/* The IOTLB is not accessed directly inline by generated TCG code,
* so the CPUIOTLBEntry layout is not as critical as that of the
* CPUTLBEntry. (This is also why we don't want to combine the two
* structs into one.)
*/
typedef struct CPUIOTLBEntry {
/*
* @addr contains:
* - in the lower TARGET_PAGE_BITS, a physical section number
* - with the lower TARGET_PAGE_BITS masked off, an offset which
* must be added to the virtual address to obtain:
* + the ram_addr_t of the target RAM (if the physical section
* number is PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM)
* + the offset within the target MemoryRegion (otherwise)
*/
hwaddr addr;
MemTxAttrs attrs;
} CPUIOTLBEntry;
#else
/*
* Data elements that are per MMU mode, minus the bits accessed by
* the TCG fast path.
*/
typedef struct CPUTLBDesc {
/*
* Describe a region covering all of the large pages allocated
* into the tlb. When any page within this region is flushed,
* we must flush the entire tlb. The region is matched if
* (addr & large_page_mask) == large_page_addr.
*/
target_ulong large_page_addr;
target_ulong large_page_mask;
/* host time (in ns) at the beginning of the time window */
int64_t window_begin_ns;
/* maximum number of entries observed in the window */
size_t window_max_entries;
size_t n_used_entries;
/* The next index to use in the tlb victim table. */
size_t vindex;
/* The tlb victim table, in two parts. */
CPUTLBEntry vtable[CPU_VTLB_SIZE];
CPUIOTLBEntry viotlb[CPU_VTLB_SIZE];
/* The iotlb. */
CPUIOTLBEntry *iotlb;
} CPUTLBDesc;
#define CPU_COMMON_TLB
/*
* Data elements that are per MMU mode, accessed by the fast path.
* The structure is aligned to aid loading the pair with one insn.
*/
typedef struct CPUTLBDescFast {
/* Contains (n_entries - 1) << CPU_TLB_ENTRY_BITS */
uintptr_t mask;
/* The array of tlb entries itself. */
CPUTLBEntry *table;
} CPUTLBDescFast QEMU_ALIGNED(2 * sizeof(void *));
/*
* Data elements that are shared between all MMU modes.
*/
typedef struct CPUTLBCommon {
/*
* Within dirty, for each bit N, modifications have been made to
* mmu_idx N since the last time that mmu_idx was flushed.
* Protected by tlb_c.lock.
*/
uint16_t dirty;
/*
* Statistics. These are not lock protected, but are read and
* written atomically. This allows the monitor to print a snapshot
* of the stats without interfering with the cpu.
*/
size_t full_flush_count;
size_t part_flush_count;
size_t elide_flush_count;
} CPUTLBCommon;
/*
* The entire softmmu tlb, for all MMU modes.
* The meaning of each of the MMU modes is defined in the target code.
* Since this is placed within CPUNegativeOffsetState, the smallest
* negative offsets are at the end of the struct.
*/
typedef struct CPUTLB {
CPUTLBCommon c;
CPUTLBDesc d[NB_MMU_MODES];
CPUTLBDescFast f[NB_MMU_MODES];
} CPUTLB;
/* This will be used by TCG backends to compute offsets. */
#define TLB_MASK_TABLE_OFS(IDX) \
((int)offsetof(ArchCPU, neg.tlb.f[IDX]) - (int)offsetof(ArchCPU, env))
/*
* This structure must be placed in ArchCPU immediately
* before CPUArchState, as a field named "neg".
*/
typedef struct CPUNegativeOffsetState {
CPUTLB tlb;
IcountDecr icount_decr;
} CPUNegativeOffsetState;
#endif
#define CPU_TEMP_BUF_NLONGS 128
// Unicorn engine
// @invalid_addr: invalid memory access address
// @invalid_error: error code for memory access (1 = READ, 2 = WRITE)
#define CPU_COMMON \
/* soft mmu support */ \
CPU_COMMON_TLB \
uint64_t invalid_addr; \
int invalid_error;
#endif

View File

@@ -23,325 +23,134 @@
*
* Used by target op helpers.
*
* MMU mode suffixes are defined in target cpu.h.
* The syntax for the accessors is:
*
* load: cpu_ld{sign}{size}_{mmusuffix}(env, ptr)
* cpu_ld{sign}{size}_{mmusuffix}_ra(env, ptr, retaddr)
* cpu_ld{sign}{size}_mmuidx_ra(env, ptr, mmu_idx, retaddr)
*
* store: cpu_st{size}_{mmusuffix}(env, ptr, val)
* cpu_st{size}_{mmusuffix}_ra(env, ptr, val, retaddr)
* cpu_st{size}_mmuidx_ra(env, ptr, val, mmu_idx, retaddr)
*
* sign is:
* (empty): for 32 and 64 bit sizes
* u : unsigned
* s : signed
*
* size is:
* b: 8 bits
* w: 16 bits
* l: 32 bits
* q: 64 bits
*
* mmusuffix is one of the generic suffixes "data" or "code", or "mmuidx".
* The "mmuidx" suffix carries an extra mmu_idx argument that specifies
* the index to use; the "data" and "code" suffixes take the index from
* cpu_mmu_index().
*/
#ifndef CPU_LDST_H
#define CPU_LDST_H
#if defined(CONFIG_USER_ONLY)
/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
#define g2h(x) ((void *)((unsigned long)(target_ulong)(x) + GUEST_BASE))
#include "cpu-defs.h"
#include "cpu.h"
#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
#define h2g_valid(x) 1
#else
#define h2g_valid(x) ({ \
unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
(__guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS)) && \
(!RESERVED_VA || (__guest < RESERVED_VA)); \
})
typedef target_ulong abi_ptr;
#define TARGET_ABI_FMT_ptr TARGET_ABI_FMT_lx
uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_lduw_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_ldl_data(CPUArchState *env, abi_ptr ptr);
uint64_t cpu_ldq_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsw_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr);
uint32_t cpu_lduw_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr);
uint32_t cpu_ldl_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr);
uint64_t cpu_ldq_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr);
int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr);
int cpu_ldsw_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr);
void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stw_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stl_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stq_data(CPUArchState *env, abi_ptr ptr, uint64_t val);
void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
uint32_t val, uintptr_t retaddr);
void cpu_stw_data_ra(CPUArchState *env, abi_ptr ptr,
uint32_t val, uintptr_t retaddr);
void cpu_stl_data_ra(CPUArchState *env, abi_ptr ptr,
uint32_t val, uintptr_t retaddr);
void cpu_stq_data_ra(CPUArchState *env, abi_ptr ptr,
uint64_t val, uintptr_t retaddr);
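/* Illustrative usage sketch (not part of the original header): the
 * accessor naming scheme above in action; env, ptr and retaddr are
 * assumed valid.
 */
#if 0
uint32_t v = cpu_ldl_data(env, ptr);        /* 32-bit load, "data" index */
int s = cpu_ldsb_data(env, ptr);            /* signed 8-bit load */
cpu_stw_data_ra(env, ptr, 0x1234, retaddr); /* 16-bit store with retaddr */
#endif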
/* Needed for TCG_OVERSIZED_GUEST */
#include "tcg/tcg.h"
static inline target_ulong tlb_addr_write(const CPUTLBEntry *entry)
{
return entry->addr_write;
}
/* Find the TLB index corresponding to the mmu_idx + address pair. */
static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx,
target_ulong addr)
{
#ifdef TARGET_ARM
struct uc_struct *uc = env->uc;
#endif
uintptr_t size_mask = env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS;
#define h2g_nocheck(x) ({ \
unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
(abi_ulong)__ret; \
})
return (addr >> TARGET_PAGE_BITS) & size_mask;
}
#define h2g(x) ({ \
/* Check if given address fits target address space */ \
assert(h2g_valid(x)); \
h2g_nocheck(x); \
})
/* Find the TLB entry corresponding to the mmu_idx + address pair. */
static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
target_ulong addr)
{
return &env_tlb(env)->f[mmu_idx].table[tlb_index(env, mmu_idx, addr)];
}
#define saddr(x) g2h(x)
#define laddr(x) g2h(x)
uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra);
uint32_t cpu_lduw_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra);
uint32_t cpu_ldl_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra);
uint64_t cpu_ldq_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra);
#else /* !CONFIG_USER_ONLY */
/* NOTE: we use double casts if pointers and target_ulong have
different sizes */
#define saddr(x) (uint8_t *)(intptr_t)(x)
#define laddr(x) (uint8_t *)(intptr_t)(x)
#endif
int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra);
int cpu_ldsw_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra);
#define ldub_raw(p) ldub_p(laddr((p)))
#define ldsb_raw(p) ldsb_p(laddr((p)))
#define lduw_raw(p) lduw_p(laddr((p)))
#define ldsw_raw(p) ldsw_p(laddr((p)))
#define ldl_raw(p) ldl_p(laddr((p)))
#define ldq_raw(p) ldq_p(laddr((p)))
#define ldfl_raw(p) ldfl_p(laddr((p)))
#define ldfq_raw(p) ldfq_p(laddr((p)))
#define stb_raw(p, v) stb_p(saddr((p)), v)
#define stw_raw(p, v) stw_p(saddr((p)), v)
#define stl_raw(p, v) stl_p(saddr((p)), v)
#define stq_raw(p, v) stq_p(saddr((p)), v)
#define stfl_raw(p, v) stfl_p(saddr((p)), v)
#define stfq_raw(p, v) stfq_p(saddr((p)), v)
void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t retaddr);
void cpu_stw_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t retaddr);
void cpu_stl_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t retaddr);
void cpu_stq_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
int mmu_idx, uintptr_t retaddr);
#if defined(CONFIG_USER_ONLY)
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr);
uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr);
uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr);
uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr);
/* if user mode, no other memory access functions */
#define ldub(p) ldub_raw(p)
#define ldsb(p) ldsb_raw(p)
#define lduw(p) lduw_raw(p)
#define ldsw(p) ldsw_raw(p)
#define ldl(p) ldl_raw(p)
#define ldq(p) ldq_raw(p)
#define ldfl(p) ldfl_raw(p)
#define ldfq(p) ldfq_raw(p)
#define stb(p, v) stb_raw(p, v)
#define stw(p, v) stw_raw(p, v)
#define stl(p, v) stl_raw(p, v)
#define stq(p, v) stq_raw(p, v)
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)
static inline int cpu_ldsb_code(CPUArchState *env, abi_ptr addr)
{
return (int8_t)cpu_ldub_code(env, addr);
}
#define cpu_ldub_code(env1, p) ldub_raw(p)
#define cpu_ldsb_code(env1, p) ldsb_raw(p)
#define cpu_lduw_code(env1, p) lduw_raw(p)
#define cpu_ldsw_code(env1, p) ldsw_raw(p)
#define cpu_ldl_code(env1, p) ldl_raw(p)
#define cpu_ldq_code(env1, p) ldq_raw(p)
#define cpu_ldub_data(env, addr) ldub_raw(addr)
#define cpu_lduw_data(env, addr) lduw_raw(addr)
#define cpu_ldsw_data(env, addr) ldsw_raw(addr)
#define cpu_ldl_data(env, addr) ldl_raw(addr)
#define cpu_ldq_data(env, addr) ldq_raw(addr)
#define cpu_stb_data(env, addr, data) stb_raw(addr, data)
#define cpu_stw_data(env, addr, data) stw_raw(addr, data)
#define cpu_stl_data(env, addr, data) stl_raw(addr, data)
#define cpu_stq_data(env, addr, data) stq_raw(addr, data)
#define cpu_ldub_kernel(env, addr) ldub_raw(addr)
#define cpu_lduw_kernel(env, addr) lduw_raw(addr)
#define cpu_ldsw_kernel(env, addr) ldsw_raw(addr)
#define cpu_ldl_kernel(env, addr) ldl_raw(addr)
#define cpu_ldq_kernel(env, addr) ldq_raw(addr)
#define cpu_stb_kernel(env, addr, data) stb_raw(addr, data)
#define cpu_stw_kernel(env, addr, data) stw_raw(addr, data)
#define cpu_stl_kernel(env, addr, data) stl_raw(addr, data)
#define cpu_stq_kernel(env, addr, data) stq_raw(addr, data)
#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
#define ldsw_kernel(p) ldsw_raw(p)
#define ldl_kernel(p) ldl_raw(p)
#define ldq_kernel(p) ldq_raw(p)
#define ldfl_kernel(p) ldfl_raw(p)
#define ldfq_kernel(p) ldfq_raw(p)
#define stb_kernel(p, v) stb_raw(p, v)
#define stw_kernel(p, v) stw_raw(p, v)
#define stl_kernel(p, v) stl_raw(p, v)
#define stq_kernel(p, v) stq_raw(p, v)
#define stfl_kernel(p, v) stfl_raw(p, v)
#define stfq_kernel(p, v) stfq_raw(p, v)
#define cpu_ldub_data(env, addr) ldub_raw(addr)
#define cpu_lduw_data(env, addr) lduw_raw(addr)
#define cpu_ldl_data(env, addr) ldl_raw(addr)
#define cpu_stb_data(env, addr, data) stb_raw(addr, data)
#define cpu_stw_data(env, addr, data) stw_raw(addr, data)
#define cpu_stl_data(env, addr, data) stl_raw(addr, data)
#else
/* XXX: find something cleaner.
 * Furthermore, this is false for 64-bit targets.
 */
#define ldul_user ldl_user
#define ldul_kernel ldl_kernel
#define ldul_hypv ldl_hypv
#define ldul_executive ldl_executive
#define ldul_supervisor ldl_supervisor
/* The memory helpers for tcg-generated code need tcg_target_long etc. */
#include "tcg.h"
uint8_t helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint16_t helper_ldw_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint32_t helper_ldl_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
void helper_stb_mmu(CPUArchState *env, target_ulong addr,
uint8_t val, int mmu_idx);
void helper_stw_mmu(CPUArchState *env, target_ulong addr,
uint16_t val, int mmu_idx);
void helper_stl_mmu(CPUArchState *env, target_ulong addr,
uint32_t val, int mmu_idx);
void helper_stq_mmu(CPUArchState *env, target_ulong addr,
uint64_t val, int mmu_idx);
uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
#define CPU_MMU_INDEX 0
#define MEMSUFFIX MMU_MODE0_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#define CPU_MMU_INDEX 1
#define MEMSUFFIX MMU_MODE1_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#if (NB_MMU_MODES >= 3)
#define CPU_MMU_INDEX 2
#define MEMSUFFIX MMU_MODE2_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 3) */
#if (NB_MMU_MODES >= 4)
#define CPU_MMU_INDEX 3
#define MEMSUFFIX MMU_MODE3_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 4) */
#if (NB_MMU_MODES >= 5)
#define CPU_MMU_INDEX 4
#define MEMSUFFIX MMU_MODE4_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 5) */
#if (NB_MMU_MODES >= 6)
#define CPU_MMU_INDEX 5
#define MEMSUFFIX MMU_MODE5_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 6) */
#if (NB_MMU_MODES > 6)
#error "NB_MMU_MODES > 6 is not supported for now"
#endif /* (NB_MMU_MODES > 6) */
/* these accesses are slower; they must be as rare as possible */
#define CPU_MMU_INDEX (cpu_mmu_index(env))
#define MEMSUFFIX _data
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#define ldub(p) ldub_data(p)
#define ldsb(p) ldsb_data(p)
#define lduw(p) lduw_data(p)
#define ldsw(p) ldsw_data(p)
#define ldl(p) ldl_data(p)
#define ldq(p) ldq_data(p)
#define stb(p, v) stb_data(p, v)
#define stw(p, v) stw_data(p, v)
#define stl(p, v) stl_data(p, v)
#define stq(p, v) stq_data(p, v)
#define CPU_MMU_INDEX (cpu_mmu_index(env))
#define MEMSUFFIX _code
#define SOFTMMU_CODE_ACCESS
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#undef SOFTMMU_CODE_ACCESS
static inline int cpu_ldsw_code(CPUArchState *env, abi_ptr addr)
{
return (int16_t)cpu_lduw_code(env, addr);
}
/**
* tlb_vaddr_to_host:
@@ -351,50 +160,12 @@ uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
* @mmu_idx: MMU index to use for lookup
*
* Look up the specified guest virtual index in the TCG softmmu TLB.
* If the TLB contains a host virtual address suitable for direct RAM
* access, then return it. Otherwise (TLB miss, TLB entry is for an
* I/O access, etc) return NULL.
*
* This is the equivalent of the initial fast-path code used by
* TCG backends for guest load and store accesses.
* If we can translate a host virtual address suitable for direct RAM
* access, without causing a guest exception, then return it.
* Otherwise (TLB entry is for an I/O access, guest software
* TLB fill required, etc) return NULL.
*/
static inline void *tlb_vaddr_to_host(CPUArchState *env, target_ulong addr,
int access_type, int mmu_idx)
{
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
CPUTLBEntry *tlbentry = &env->tlb_table[mmu_idx][index];
target_ulong tlb_addr;
uintptr_t haddr;
switch (access_type) {
case 0:
tlb_addr = tlbentry->addr_read;
break;
case 1:
tlb_addr = tlbentry->addr_write;
break;
case 2:
tlb_addr = tlbentry->addr_code;
break;
default:
g_assert_not_reached();
}
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
/* TLB entry is for a different page */
return NULL;
}
if (tlb_addr & ~TARGET_PAGE_MASK) {
/* IO access */
return NULL;
}
haddr = (uintptr_t)(addr + env->tlb_table[mmu_idx][index].addend);
return (void *)haddr;
}
#endif /* defined(CONFIG_USER_ONLY) */
void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
MMUAccessType access_type, int mmu_idx);
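/* Illustrative usage sketch (not part of the original header): try the
 * direct-RAM fast path, fall back to the full mmuidx access on NULL;
 * env, addr, mmu_idx and retaddr are assumed valid.
 */
#if 0
uint32_t val;
void *host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_idx);
if (host) {
    val = ldl_p(host);                                    /* direct RAM access */
} else {
    val = cpu_ldl_mmuidx_ra(env, addr, mmu_idx, retaddr); /* slow path */
}
#endif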
#endif /* CPU_LDST_H */

View File

@@ -1,193 +0,0 @@
/*
* Software MMU support
*
* Generate inline load/store functions for one MMU mode and data
* size.
*
* Generate a store function as well as signed and unsigned loads. For
* 32 and 64 bit cases, also generate floating point functions with
* the same size.
*
* Not used directly but included from cpu_ldst.h.
*
* Copyright (c) 2003 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#define DATA_STYPE int16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#define DATA_STYPE int8_t
#else
#error unsupported data size
#endif
#if DATA_SIZE == 8
#define RES_TYPE uint64_t
#else
#define RES_TYPE uint32_t
#endif
#ifdef SOFTMMU_CODE_ACCESS
#define ADDR_READ addr_code
#define MMUSUFFIX _cmmu
#else
#define ADDR_READ addr_read
#define MMUSUFFIX _mmu
#endif
/* generic load/store macros */
static inline RES_TYPE
glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
{
int page_index;
RES_TYPE res;
target_ulong addr;
int mmu_idx;
addr = ptr;
page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
mmu_idx = CPU_MMU_INDEX;
if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
(addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
res = glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(env, addr, mmu_idx);
} else {
uintptr_t hostaddr = (uintptr_t)(addr + env->tlb_table[mmu_idx][page_index].addend);
res = glue(glue(ld, USUFFIX), _raw)(hostaddr);
}
return res;
}
#if DATA_SIZE <= 2
static inline int
glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
{
int res, page_index;
target_ulong addr;
int mmu_idx;
addr = ptr;
page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
mmu_idx = CPU_MMU_INDEX;
if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
(addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
res = (DATA_STYPE)glue(glue(helper_ld, SUFFIX),
MMUSUFFIX)(env, addr, mmu_idx);
} else {
uintptr_t hostaddr = (uintptr_t)(addr + env->tlb_table[mmu_idx][page_index].addend);
res = glue(glue(lds, SUFFIX), _raw)(hostaddr);
}
return res;
}
#endif
#ifndef SOFTMMU_CODE_ACCESS
/* generic store macro */
static inline void
glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
RES_TYPE v)
{
int page_index;
target_ulong addr;
int mmu_idx;
addr = ptr;
page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
mmu_idx = CPU_MMU_INDEX;
if (unlikely(env->tlb_table[mmu_idx][page_index].addr_write !=
(addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
glue(glue(helper_st, SUFFIX), MMUSUFFIX)(env, addr, v, mmu_idx);
} else {
uintptr_t hostaddr = (uintptr_t)(addr + env->tlb_table[mmu_idx][page_index].addend);
glue(glue(st, SUFFIX), _raw)(hostaddr, v);
}
}
#if DATA_SIZE == 8
static inline float64 glue(cpu_ldfq, MEMSUFFIX)(CPUArchState *env,
target_ulong ptr)
{
union {
float64 d;
uint64_t i;
} u;
u.i = glue(cpu_ldq, MEMSUFFIX)(env, ptr);
return u.d;
}
static inline void glue(cpu_stfq, MEMSUFFIX)(CPUArchState *env,
target_ulong ptr, float64 v)
{
union {
float64 d;
uint64_t i;
} u;
u.d = v;
glue(cpu_stq, MEMSUFFIX)(env, ptr, u.i);
}
#endif /* DATA_SIZE == 8 */
#if DATA_SIZE == 4
static inline float32 glue(cpu_ldfl, MEMSUFFIX)(CPUArchState *env,
target_ulong ptr)
{
union {
float32 f;
uint32_t i;
} u;
u.i = glue(cpu_ldl, MEMSUFFIX)(env, ptr);
return u.f;
}
static inline void glue(cpu_stfl, MEMSUFFIX)(CPUArchState *env,
target_ulong ptr, float32 v)
{
union {
float32 f;
uint32_t i;
} u;
u.f = v;
glue(cpu_stl, MEMSUFFIX)(env, ptr, u.i);
}
#endif /* DATA_SIZE == 4 */
#endif /* !SOFTMMU_CODE_ACCESS */
#undef RES_TYPE
#undef DATA_TYPE
#undef DATA_STYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef MMUSUFFIX
#undef ADDR_READ

View File

@@ -16,33 +16,13 @@
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef CPUTLB_H
#define CPUTLB_H
#if !defined(CONFIG_USER_ONLY)
#include "exec/cpu-common.h"
/* cputlb.c */
void tlb_protect_code(struct uc_struct *uc, ram_addr_t ram_addr);
void tlb_unprotect_code_phys(CPUState *cpu, ram_addr_t ram_addr,
target_ulong vaddr);
void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
uintptr_t start, uintptr_t length);
void cpu_tlb_reset_dirty_all(struct uc_struct *uc, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUArchState *env, target_ulong vaddr);
//extern int tlb_flush_count;
/* exec.c */
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);
MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
hwaddr *plen);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
MemoryRegionSection *section,
target_ulong vaddr,
hwaddr paddr, hwaddr xlat,
int prot,
target_ulong *address);
bool memory_region_is_unassigned(struct uc_struct* uc, MemoryRegion *mr);
#endif
void tlb_unprotect_code(struct uc_struct *uc, ram_addr_t ram_addr);
#endif

View File

@@ -17,10 +17,13 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _EXEC_ALL_H_
#define _EXEC_ALL_H_
#ifndef EXEC_ALL_H
#define EXEC_ALL_H
#include "qemu-common.h"
#include "hw/core/cpu.h"
#include "exec/tb-context.h"
#include "exec/cpu_ldst.h"
#include "sysemu/cpus.h"
/* allow seeing translation results - the slowdown should be negligible, so we leave it */
#define DEBUG_DISAS
@@ -28,289 +31,372 @@
/* Page tracking code uses ram addresses in system mode, and virtual
addresses in userspace mode. Define tb_page_addr_t to be an appropriate
type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#else
typedef ram_addr_t tb_page_addr_t;
#endif
/* is_jmp field values */
#define DISAS_NEXT 0 /* next instruction can be analyzed */
#define DISAS_JUMP 1 /* only pc was modified dynamically */
#define DISAS_UPDATE 2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */
struct TranslationBlock;
typedef struct TranslationBlock TranslationBlock;
/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 266
#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
#define MAX_OPC_PARAM_IARGS 5
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)
/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
* and up to 4 + N parameters on 64-bit archs
* (N = number of input arguments + output arguments). */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)
/* Maximum size a TCG op can expand to. This is complicated because a
single op may require several host instructions and register reloads.
For now take a wild guess at 192 bytes, which should allow at least
a couple of fixup instructions per argument. */
#define TCG_MAX_OP_SIZE 192
#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)
#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
#include "qemu/log.h"
void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
void gen_intermediate_code_pc(CPUArchState *env, struct TranslationBlock *tb);
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
int pc_pos);
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc);
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns);
void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb,
target_ulong *data);
void QEMU_NORETURN cpu_resume_from_signal(CPUState *cpu, void *puc);
/**
* cpu_restore_state:
* @cpu: the vCPU whose state is to be restored
* @searched_pc: the host PC the fault occurred at
* @will_exit: true if the TB executed will be interrupted after some
*             cpu adjustments. Required for maintaining the correct
*             icount values
* @return: true if state was restored, false otherwise
*
* Attempt to restore the state for a fault occurring in translated
* code. If the searched_pc is not in translated code no state is
* restored and the function returns false.
*/
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit);
void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu,
target_ulong pc, target_ulong cs_base, int flags,
target_ulong pc, target_ulong cs_base,
uint32_t flags,
int cflags);
void cpu_exec_init(CPUArchState *env, void *opaque);
void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);
/**
* cpu_loop_exit_requested:
* @cpu: The CPU state to be tested
*
* Indicate if somebody asked for a return of the CPU to the main loop
* (e.g., via cpu_exit() or cpu_interrupt()).
*
* This is helpful for architectures that support interruptible
* instructions. After writing back all state to registers/memory, this
* call can be used to check if it makes sense to return to the main loop
* or to continue executing the interruptible instruction.
*/
static inline bool cpu_loop_exit_requested(CPUState *cpu)
{
return (int32_t)cpu_neg(cpu)->icount_decr.u32 < 0;
}
void cpu_reloading_memory_map(void);
/**
* cpu_address_space_init:
* @cpu: CPU to add this address space to
* @asidx: integer index of this address space
* @mr: the root memory region of address space
*
* Add the specified address space to the CPU's cpu_ases list.
* The address space added with @asidx 0 is the one used for the
* convenience pointer cpu->as.
* The target-specific code which registers ASes is responsible
* for defining what semantics address space 0, 1, 2, etc have.
*
* Before the first call to this function, the caller must set
* cpu->num_ases to the total number of address spaces it needs
* to support.
*/
void cpu_address_space_init(CPUState *cpu, int asidx, MemoryRegion *mr);
void tb_invalidate_phys_page_range(struct uc_struct *uc, tb_page_addr_t start, tb_page_addr_t end,
int is_cpu_write_access);
void tb_invalidate_phys_range(struct uc_struct *uc, tb_page_addr_t start, tb_page_addr_t end,
int is_cpu_write_access);
#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as);
/* cputlb.c */
/**
* tlb_init - initialize a CPU's TLB
* @cpu: CPU whose TLB should be initialized
*/
void tlb_init(CPUState *cpu);
/**
* tlb_flush_page:
* @cpu: CPU whose TLB should be flushed
* @addr: virtual address of page to be flushed
*
* Flush one page from the TLB of the specified CPU, for all
* MMU indexes.
*/
void tlb_flush_page(CPUState *cpu, target_ulong addr);
void tlb_flush(CPUState *cpu, int flush_global);
/**
* tlb_flush_page_all_cpus:
* @cpu: src CPU of the flush
* @addr: virtual address of page to be flushed
*
* Flush one page from the TLB of all CPUs, for all
* MMU indexes.
*/
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
/**
* tlb_flush_page_all_cpus_synced:
* @cpu: src CPU of the flush
* @addr: virtual address of page to be flushed
*
* Flush one page from the TLB of all CPUs, for all MMU
* indexes like tlb_flush_page_all_cpus except the source vCPU's work
* is scheduled as safe work, meaning all flushes will be complete once
* the source vCPU's safe work is complete. This will depend on when
* the guest's translation ends the TB.
*/
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
/**
* tlb_flush:
* @cpu: CPU whose TLB should be flushed
*
* Flush the entire TLB for the specified CPU. Most CPU architectures
* allow the implementation to drop entries from the TLB at any time
* so this is generally safe. If more selective flushing is required
* use one of the other functions for efficiency.
*/
void tlb_flush(CPUState *cpu);
/**
* tlb_flush_all_cpus:
* @cpu: src CPU of the flush
*/
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
* tlb_flush_all_cpus_synced:
* @cpu: src CPU of the flush
*
* Like tlb_flush_all_cpus, except the source vCPU's work is
* scheduled as safe work, meaning all flushes will be complete once
* the source vCPU's safe work is complete. This will depend on when
* the guest's translation ends the TB.
*/
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
* tlb_flush_page_by_mmuidx:
* @cpu: CPU whose TLB should be flushed
* @addr: virtual address of page to be flushed
* @idxmap: bitmap of MMU indexes to flush
*
* Flush one page from the TLB of the specified CPU, for the specified
* MMU indexes.
*/
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
uint16_t idxmap);
/**
* tlb_flush_page_by_mmuidx_all_cpus:
* @cpu: Originating CPU of the flush
* @addr: virtual address of page to be flushed
* @idxmap: bitmap of MMU indexes to flush
*
* Flush one page from the TLB of all CPUs, for the specified
* MMU indexes.
*/
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
uint16_t idxmap);
/**
* tlb_flush_page_by_mmuidx_all_cpus_synced:
* @cpu: Originating CPU of the flush
* @addr: virtual address of page to be flushed
* @idxmap: bitmap of MMU indexes to flush
*
* Flush one page from the TLB of all CPUs, for the specified MMU
* indexes like tlb_flush_page_by_mmuidx_all_cpus except the source
* vCPU's work is scheduled as safe work, meaning all flushes will be
* complete once the source vCPU's safe work is complete. This will
* depend on when the guest's translation ends the TB.
*/
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
uint16_t idxmap);
/**
* tlb_flush_by_mmuidx:
* @cpu: CPU whose TLB should be flushed
* @idxmap: bitmap of MMU indexes to flush
*
* Flush all entries from the TLB of the specified CPU, for the specified
* MMU indexes.
*/
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
* tlb_flush_by_mmuidx_all_cpus:
* @cpu: Originating CPU of the flush
* @idxmap: bitmap of MMU indexes to flush
*
* Flush all entries from all TLBs of all CPUs, for the specified
* MMU indexes.
*/
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
* tlb_flush_by_mmuidx_all_cpus_synced:
* @cpu: Originating CPU of the flush
* @idxmap: bitmap of MMU indexes to flush
*
* Flush all entries from all TLBs of all CPUs, for the specified
* MMU indexes like tlb_flush_by_mmuidx_all_cpus, except the source
* vCPU's work is scheduled as safe work, meaning all flushes will be
* complete once the source vCPU's safe work is complete. This will
* depend on when the guest's translation ends the TB.
*/
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
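/* Illustrative usage sketch (not part of the original header): idxmap
 * is a bitmap of MMU indexes, so flushing for two hypothetical indexes
 * 0 and 2 looks like this.
 */
#if 0
tlb_flush_page_by_mmuidx(cpu, addr, (1 << 0) | (1 << 2));
tlb_flush_by_mmuidx(cpu, 1 << 0);   /* drop every entry for index 0 */
#endif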
/**
* tlb_set_page_with_attrs:
* @cpu: CPU to add this TLB entry for
* @vaddr: virtual address of page to add entry for
* @paddr: physical address of the page
* @attrs: memory transaction attributes
* @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
* @mmu_idx: MMU index to insert TLB entry for
* @size: size of the page in bytes
*
* Add an entry to this CPU's TLB (a mapping from virtual address
* @vaddr to physical address @paddr) with the specified memory
* transaction attributes. This is generally called by the target CPU
* specific code after it has been called through the tlb_fill()
* entry point and performed a successful page table walk to find
* the physical address and attributes for the virtual address
* which provoked the TLB miss.
*
* At most one entry for a given virtual address is permitted. Only a
* single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
* used by tlb_flush_page.
*/
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
hwaddr paddr, MemTxAttrs attrs,
int prot, int mmu_idx, target_ulong size);
/* tlb_set_page:
*
* This function is equivalent to calling tlb_set_page_with_attrs()
* with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
* as a convenience for CPUs which don't use memory transaction attributes.
*/
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
hwaddr paddr, int prot,
int mmu_idx, target_ulong size);
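/* A minimal usage sketch (hypothetical function): a target's tlb_fill
 * path would mask both addresses to the page boundary and insert a
 * read/write mapping once the page-table walk has succeeded. */
static inline void example_tlb_fill_insert(CPUState *cpu, target_ulong vaddr,
                                           hwaddr paddr, int mmu_idx)
{
    tlb_set_page(cpu, vaddr & TARGET_PAGE_MASK, paddr & TARGET_PAGE_MASK,
                 PAGE_READ | PAGE_WRITE, mmu_idx, TARGET_PAGE_SIZE);
}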
void *probe_access(CPUArchState *env, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);
static inline void *probe_write(CPUArchState *env, target_ulong addr, int size,
                                int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
}
static inline void *probe_read(CPUArchState *env, target_ulong addr, int size,
                               int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
}
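/* A minimal usage sketch (hypothetical function): probing a 16-byte
 * destination up front makes any fault visible before a helper starts
 * mutating guest state; a non-NULL result is the host address. */
static inline void *example_probe_dest(CPUArchState *env, target_ulong dst,
                                       int mmu_idx, uintptr_t retaddr)
{
    return probe_write(env, dst, 16, mmu_idx, retaddr);
}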
#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */
/* Estimated block size for TB allocation. */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced. */
#define CODE_GEN_AVG_BLOCK_SIZE 400
/*
* Translation Cache-related fields of a TB.
* This struct exists just for convenience; we keep track of TB's in a binary
* search tree, and the only fields needed to compare TB's in the tree are
* @ptr and @size.
* Note: the address of search data can be obtained by adding @size to @ptr.
*/
struct tb_tc {
void *ptr; /* pointer to the translated code */
size_t size;
};
struct TranslationBlock {
target_ulong pc; /* simulated PC corresponding to this block (EIP + CS base) */
target_ulong cs_base; /* CS base for this block */
    uint32_t flags; /* flags defining in which context the code was generated */
    uint16_t size; /* size of target code for this block (1 <=
                      size <= TARGET_PAGE_SIZE) */
    uint32_t cflags; /* compile flags */
#define CF_COUNT_MASK    0x00007fff
#define CF_LAST_IO       0x00008000 /* Last insn may be an IO access. */
#define CF_NOCACHE       0x00010000 /* To be freed after execution */
#define CF_USE_ICOUNT    0x00020000
#define CF_INVALID       0x00040000 /* TB is stale. Set with @jmp_lock held */
#define CF_PARALLEL      0x00080000 /* Generate code for a parallel context */
#define CF_CLUSTER_MASK  0xff000000 /* Top 8 bits are cluster ID */
#define CF_CLUSTER_SHIFT 24
/* cflags' mask for hashing/comparison */
#define CF_HASH_MASK \
    (CF_COUNT_MASK | CF_LAST_IO | CF_USE_ICOUNT | CF_PARALLEL | CF_CLUSTER_MASK)
    /* Per-vCPU dynamic tracing state used to generate this TB */
    uint32_t trace_vcpu_dstate;
    struct tb_tc tc;
    /* original tb when cflags has CF_NOCACHE */
    struct TranslationBlock *orig_tb;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[].
       The list is protected by the TB's page('s) lock(s) */
    uintptr_t page_next[2];
    tb_page_addr_t page_addr[2];
uint32_t icount;
/* The following data are used to directly call another TB from
* the code of this one. This can be done either by emitting direct or
* indirect native jump instructions. These jumps are reset so that the TB
* just continues its execution. The TB can be linked to another one by
* setting one of the jump targets (or patching the jump instruction). Only
* two of such jumps are supported.
*/
uint16_t jmp_reset_offset[2]; /* offset of original jump target */
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
uintptr_t jmp_target_arg[2]; /* target address or offset */
/*
* Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
* Each TB can have two outgoing jumps, and therefore can participate
* in two lists. The list entries are kept in jmp_list_next[2]. The least
* significant bit (LSB) of the pointers in these lists is used to encode
* which of the two list entries is to be used in the pointed TB.
*
* List traversals are protected by jmp_lock. The destination TB of each
* outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
* can be acquired from any origin TB.
*
* jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
* being invalidated, so that no further outgoing jumps from it can be set.
*
* jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
* to a destination TB that has CF_INVALID set.
*/
uintptr_t jmp_list_head;
uintptr_t jmp_list_next[2];
uintptr_t jmp_dest[2];
uint32_t hash; // unicorn needs this hash to remove this TB from QHT cache
};
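/* A minimal sketch of the tagged-pointer encoding described above; the
 * helper name is hypothetical. The LSB stored alongside the TB pointer
 * records which jmp_list_next[] slot of the pointed-to TB continues the
 * list. */
static inline uintptr_t example_tb_jmp_tag(TranslationBlock *tb, int slot)
{
    return (uintptr_t)tb | (uintptr_t)slot; /* slot is 0 or 1 */
}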
typedef struct TBContext TBContext;
// extern bool parallel_cpus;
/* Hide the atomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
    return tb->cflags;
}
/* current cflags for hashing/comparison */
static inline uint32_t curr_cflags(void)
{
    return 0;
}
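/* A minimal sketch (hypothetical helper): only the CF_HASH_MASK bits of
 * cflags participate in TB hashing/comparison, so a lookup key must be
 * masked the same way before comparing against tb_cflags(tb). */
static inline uint32_t example_tb_hash_cflags(uint32_t cflags)
{
    return cflags & CF_HASH_MASK;
}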
/* TranslationBlock invalidate API */
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TCGContext *tcg_ctx, TranslationBlock *tb, tb_page_addr_t page_addr);
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
void tb_exec_lock(TCGContext*);
void tb_exec_unlock(TCGContext*);
/* GETPC is the true target of the return instruction that we'll execute. */
#ifdef _MSC_VER
#include <intrin.h>
# define GETPC() (uintptr_t)_ReturnAddress()
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif
/* The true return address will often point to a host insn that is part of
   the next translated guest insn. Adjust the address backward to point to
   the middle of the call insn. Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that. It
   is also the case that there are no host isas that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this. */
#define GETPC_ADJ 2
#if defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void)
{
}
#endif
/**
* iotlb_to_section:
* @cpu: CPU performing the access
* @index: TCG CPU IOTLB entry
*
* Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
* it refers to. @index will have been initially created and returned
* by memory_region_section_get_iotlb().
*/
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                             hwaddr index, MemTxAttrs attrs);
void phys_mem_clean(struct uc_struct* uc);
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}
/**
* get_page_addr_code() - full-system version
* @env: CPUArchState
* @addr: guest virtual address of guest code
*
* If we cannot translate and execute from the entire RAM page, or if
* the region is not backed by RAM, returns -1. Otherwise, returns the
* ram_addr_t corresponding to the guest code at @addr.
*
* Note: this function can trigger an exception.
*/
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr);
/**
* get_page_addr_code_hostp() - full-system version
* @env: CPUArchState
* @addr: guest virtual address of guest code
*
* See get_page_addr_code() (full-system version) for documentation on the
* return value.
*
* Sets *@hostp (when @hostp is non-NULL) as follows.
* If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
* to the host address where @addr's content is kept.
*
* Note: this function can trigger an exception.
*/
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
void **hostp);
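/* A minimal usage sketch (hypothetical function): tolerate the -1
 * "not directly executable" result documented above and hand back the
 * host pointer otherwise. */
static inline void *example_code_host_ptr(CPUArchState *env, target_ulong pc)
{
    void *host = NULL;
    if (get_page_addr_code_hostp(env, pc, &host) == (tb_page_addr_t)-1) {
        return NULL;
    }
    return host;
}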
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);
/* exec.c */
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
hwaddr *xlat, hwaddr *plen,
MemTxAttrs attrs, int *prot);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
MemoryRegionSection *section);
#endif

#ifndef GEN_ICOUNT_H
#define GEN_ICOUNT_H
#include "qemu/timer.h"
/* Helpers for instruction counting code generation. */
static inline void gen_io_start(TCGContext *tcg_ctx)
{
    TCGv_i32 tmp = tcg_const_i32(tcg_ctx, 1);
    tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->cpu_env,
                   offsetof(ArchCPU, parent_obj.can_do_io) -
                   offsetof(ArchCPU, env));
    tcg_temp_free_i32(tcg_ctx, tmp);
}
/*
 * cpu->can_do_io is cleared automatically at the beginning of
 * each translation block. The cost is minimal and only paid
 * for -icount, plus it would be very easy to forget doing it
 * in the translator. Therefore, backends only need to call
 * gen_io_start.
 */
static inline void gen_io_end(TCGContext *tcg_ctx)
{
    TCGv_i32 tmp = tcg_const_i32(tcg_ctx, 0);
    tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->cpu_env,
                   offsetof(ArchCPU, parent_obj.can_do_io) -
                   offsetof(ArchCPU, env));
    tcg_temp_free_i32(tcg_ctx, tmp);
}
static inline void gen_tb_start(TCGContext *tcg_ctx, TranslationBlock *tb)
{
    TCGv_i32 count;
    tcg_ctx->exitreq_label = gen_new_label(tcg_ctx);
    // first TB ever does not need to check exit request
    if (tcg_ctx->uc->first_tb) {
        // next TB is not the first anymore
        tcg_ctx->uc->first_tb = false;
        return;
    }
    count = tcg_temp_new_i32(tcg_ctx);
    tcg_gen_ld_i32(tcg_ctx, count, tcg_ctx->cpu_env,
                   offsetof(ArchCPU, neg.icount_decr.u32) -
                   offsetof(ArchCPU, env));
    tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_LT, count, 0, tcg_ctx->exitreq_label);
    tcg_temp_free_i32(tcg_ctx, count);
}
static inline void gen_tb_end(TCGContext *tcg_ctx, TranslationBlock *tb, int num_insns)
{
    if (tb_cflags(tb) & CF_USE_ICOUNT) {
        /* Update the num_insn immediate parameter now that we know
         * the actual insn count. */
        tcg_set_insn_param(tcg_ctx->icount_start_insn, 1, num_insns);
    }
    gen_set_label(tcg_ctx, tcg_ctx->exitreq_label);
    tcg_gen_exit_tb(tcg_ctx, tb, TB_EXIT_REQUESTED);
}
#endif
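/* A minimal translator-loop sketch (hypothetical function) showing how the
 * helpers above pair up: gen_tb_start() emits the exit-request check, the
 * target translator then emits guest instructions, and gen_tb_end() fixes
 * up the instruction count and places the TB_EXIT_REQUESTED path. */
static inline void example_translate_one_tb(TCGContext *tcg_ctx,
                                            TranslationBlock *tb)
{
    int num_insns = 0;
    gen_tb_start(tcg_ctx, tb);
    /* ... emit TCG ops for each guest instruction here ... */
    num_insns++;
    gen_tb_end(tcg_ctx, tb, num_insns);
}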


/* Helper file for declaring TCG helper functions.
   This one expands generation functions for tcg opcodes. */
#ifndef HELPER_GEN_H
#define HELPER_GEN_H
#include "exec/helper-head.h"
#define DEF_HELPER_FLAGS_0(name, flags, ret) \
static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, dh_retvar_decl0(ret)) \
{ \
    tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 0, NULL); \
}
#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, dh_retvar_decl(ret) \
    dh_arg_decl(t1, 1)) \
{ \
    TCGTemp *args[1] = { dh_arg(t1, 1) }; \
    tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 1, args); \
}
#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, dh_retvar_decl(ret) \
    dh_arg_decl(t1, 1), dh_arg_decl(t2, 2)) \
{ \
    TCGTemp *args[2] = { dh_arg(t1, 1), dh_arg(t2, 2) }; \
    tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 2, args); \
}
#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, dh_retvar_decl(ret) \
    dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3)) \
{ \
    TCGTemp *args[3] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3) }; \
    tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 3, args); \
}
#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, dh_retvar_decl(ret) \
    dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), \
    dh_arg_decl(t3, 3), dh_arg_decl(t4, 4)) \
{ \
    TCGTemp *args[4] = { dh_arg(t1, 1), dh_arg(t2, 2), \
                         dh_arg(t3, 3), dh_arg(t4, 4) }; \
    tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 4, args); \
}
#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, dh_retvar_decl(ret) \
    dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
    dh_arg_decl(t4, 4), dh_arg_decl(t5, 5)) \
{ \
    TCGTemp *args[5] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
                         dh_arg(t4, 4), dh_arg(t5, 5) }; \
    tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 5, args); \
}
#define DEF_HELPER_FLAGS_6(name, flags, ret, t1, t2, t3, t4, t5, t6) \
static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, dh_retvar_decl(ret) \
    dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
    dh_arg_decl(t4, 4), dh_arg_decl(t5, 5), dh_arg_decl(t6, 6)) \
{ \
    TCGTemp *args[6] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
                         dh_arg(t4, 4), dh_arg(t5, 5), dh_arg(t6, 6) }; \
    tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 6, args); \
}
#define DEF_HELPER_FLAGS_7(name, flags, ret, t1, t2, t3, t4, t5, t6, t7)\
static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, dh_retvar_decl(ret) \
    dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
    dh_arg_decl(t4, 4), dh_arg_decl(t5, 5), dh_arg_decl(t6, 6), \
    dh_arg_decl(t7, 7)) \
{ \
    TCGTemp *args[7] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
                         dh_arg(t4, 4), dh_arg(t5, 5), dh_arg(t6, 6), \
                         dh_arg(t7, 7) }; \
    tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 7, args); \
}
#include "helper.h"
#include "accel/tcg/tcg-runtime.h"
#undef DEF_HELPER_FLAGS_0
#undef DEF_HELPER_FLAGS_1
#undef DEF_HELPER_FLAGS_2
#undef DEF_HELPER_FLAGS_3
#undef DEF_HELPER_FLAGS_4
#undef DEF_HELPER_FLAGS_5
#undef DEF_HELPER_FLAGS_6
#undef DEF_HELPER_FLAGS_7
#undef GEN_HELPER
#endif /* HELPER_GEN_H */
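/* For illustration, with a hypothetical declaration
 *     DEF_HELPER_2(myadd, i32, i32, i32)
 * the macros above expand to roughly the following generator (assuming
 * helper_myadd itself is defined by the target): */
uint32_t helper_myadd(uint32_t a, uint32_t b); /* hypothetical helper */
static inline void gen_helper_myadd_expanded(TCGContext *tcg_ctx, TCGv_i32 retval,
                                             TCGv_i32 arg1, TCGv_i32 arg2)
{
    TCGTemp *args[2] = { tcgv_i32_temp(tcg_ctx, arg1),
                         tcgv_i32_temp(tcg_ctx, arg2) };
    tcg_gen_callN(tcg_ctx, helper_myadd, tcgv_i32_temp(tcg_ctx, retval), 2, args);
}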


GEN_HELPER 2 to do runtime registration helper functions.
*/
#ifndef EXEC_HELPER_HEAD_H
#define EXEC_HELPER_HEAD_H
#define HELPER(name) glue(helper_, name)
/* Some types that make sense in C, but not for TCG. */
#define dh_alias_i32 i32
#define dh_alias_s32 i32
#define dh_alias_int i32
#define dh_alias_i64 i64
#define dh_alias_s64 i64
#define dh_alias_f16 i32
#define dh_alias_f32 i32
#define dh_alias_f64 i64
#define dh_alias_ptr ptr
#define dh_alias_cptr ptr
#define dh_alias_void void
#define dh_alias_noreturn noreturn
#define dh_alias(t) glue(dh_alias_, t)
#define dh_ctype_i32 uint32_t
#define dh_ctype_s32 int32_t
#define dh_ctype_int int
#define dh_ctype_i64 uint64_t
#define dh_ctype_s64 int64_t
#define dh_ctype_f16 uint32_t
#define dh_ctype_f32 float32
#define dh_ctype_f64 float64
#define dh_ctype_ptr void *
#define dh_ctype_cptr const void *
#define dh_ctype_void void
#define dh_ctype_noreturn void QEMU_NORETURN
#define dh_ctype(t) dh_ctype_##t
#ifdef NEED_CPU_H
# ifdef TARGET_LONG_BITS
# if TARGET_LONG_BITS == 32
# define dh_alias_tl i32
# else
# define dh_alias_tl i64
# endif
# endif
# define dh_alias_env ptr
# define dh_ctype_tl target_ulong
# define dh_ctype_env CPUArchState *
#endif
/* We can't use glue() here because it falls foul of C preprocessor
recursive expansion rules. */
#define dh_retvar_decl0_void void
#define dh_retvar_decl_ptr TCGv_ptr retval,
#define dh_retvar_decl(t) glue(dh_retvar_decl_, dh_alias(t))
#define dh_retvar_void NULL
#define dh_retvar_noreturn NULL
#define dh_retvar_i32 tcgv_i32_temp(tcg_ctx, retval)
#define dh_retvar_i64 tcgv_i64_temp(tcg_ctx, retval)
#define dh_retvar_ptr tcgv_ptr_temp(tcg_ctx, retval)
#define dh_retvar(t) glue(dh_retvar_, dh_alias(t))
#define dh_is_64bit_void 0
#define dh_is_64bit_noreturn 0
#define dh_is_64bit_i32 0
#define dh_is_64bit_i64 1
#define dh_is_64bit_ptr (sizeof(void *) == 8)
#define dh_is_64bit_cptr dh_is_64bit_ptr
#define dh_is_64bit(t) glue(dh_is_64bit_, dh_alias(t))
#define dh_is_signed_void 0
#define dh_is_signed_noreturn 0
#define dh_is_signed_i32 0
#define dh_is_signed_s32 1
#define dh_is_signed_i64 0
#define dh_is_signed_s64 1
#define dh_is_signed_f16 0
#define dh_is_signed_f32 0
#define dh_is_signed_f64 0
#define dh_is_signed_tl 0
/* ??? This is highly specific to the host cpu. There are even special
extension instructions that may be required, e.g. ia64's addp4. But
for now we don't support any 64-bit targets with 32-bit pointers. */
#define dh_is_signed_ptr 0
#define dh_is_signed_cptr dh_is_signed_ptr
#define dh_is_signed_env dh_is_signed_ptr
#define dh_is_signed(t) dh_is_signed_##t
#define dh_callflag_i32 0
#define dh_callflag_s32 0
#define dh_callflag_int 0
#define dh_callflag_i64 0
#define dh_callflag_s64 0
#define dh_callflag_f16 0
#define dh_callflag_f32 0
#define dh_callflag_f64 0
#define dh_callflag_ptr 0
#define dh_callflag_cptr dh_callflag_ptr
#define dh_callflag_void 0
#define dh_callflag_noreturn TCG_CALL_NO_RETURN
#define dh_callflag(t) glue(dh_callflag_, dh_alias(t))
#define dh_sizemask(t, n) \
((dh_is_64bit(t) << (n*2)) | (dh_is_signed(t) << (n*2+1)))
#define dh_arg(t, n) \
glue(glue(tcgv_, dh_alias(t)), _temp)(tcg_ctx, glue(arg, n))
#define dh_arg_decl(t, n) glue(TCGv_, dh_alias(t)) glue(arg, n)
#define DEF_HELPER_0(name, ret) DEF_HELPER_FLAGS_0(name, 0, ret)
#define DEF_HELPER_1(name, ret, t1) DEF_HELPER_FLAGS_1(name, 0, ret, t1)
#define DEF_HELPER_2(name, ret, t1, t2) \
    DEF_HELPER_FLAGS_2(name, 0, ret, t1, t2)
#define DEF_HELPER_3(name, ret, t1, t2, t3) \
    DEF_HELPER_FLAGS_3(name, 0, ret, t1, t2, t3)
#define DEF_HELPER_4(name, ret, t1, t2, t3, t4) \
DEF_HELPER_FLAGS_4(name, 0, ret, t1, t2, t3, t4)
#define DEF_HELPER_5(name, ret, t1, t2, t3, t4, t5) \
DEF_HELPER_FLAGS_5(name, 0, ret, t1, t2, t3, t4, t5)
#define DEF_HELPER_6(name, ret, t1, t2, t3, t4, t5, t6) \
DEF_HELPER_FLAGS_6(name, 0, ret, t1, t2, t3, t4, t5, t6)
#define DEF_HELPER_7(name, ret, t1, t2, t3, t4, t5, t6, t7) \
DEF_HELPER_FLAGS_7(name, 0, ret, t1, t2, t3, t4, t5, t6, t7)
/* MAX_OPC_PARAM_IARGS must be set to n if last entry is DEF_HELPER_FLAGS_n. */
#endif /* EXEC_HELPER_HEAD_H */


/* Helper file for declaring TCG helper functions.
   This one expands prototypes for the helper functions. */
#ifndef HELPER_PROTO_H
#define HELPER_PROTO_H
#include "exec/helper-head.h"
#define DEF_HELPER_FLAGS_0(name, flags, ret) \
dh_ctype(ret) HELPER(name) (void);
#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
dh_ctype(ret) HELPER(name) (dh_ctype(t1));
#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2));
#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3));
#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
                            dh_ctype(t4));
#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
dh_ctype(t4), dh_ctype(t5));
#define DEF_HELPER_FLAGS_6(name, flags, ret, t1, t2, t3, t4, t5, t6) \
dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
dh_ctype(t4), dh_ctype(t5), dh_ctype(t6));
#define DEF_HELPER_FLAGS_7(name, flags, ret, t1, t2, t3, t4, t5, t6, t7) \
dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
dh_ctype(t4), dh_ctype(t5), dh_ctype(t6), \
dh_ctype(t7));
#include "helper.h"
#include "accel/tcg/tcg-runtime.h"
#undef DEF_HELPER_FLAGS_0
#undef DEF_HELPER_FLAGS_1
#undef DEF_HELPER_FLAGS_2
#undef DEF_HELPER_FLAGS_3
#undef DEF_HELPER_FLAGS_4
#undef DEF_HELPER_FLAGS_5
#undef DEF_HELPER_FLAGS_6
#undef DEF_HELPER_FLAGS_7
#endif /* HELPER_PROTO_H */

View File

/* Helper file for declaring TCG helper functions.
   This one defines data structures private to tcg.c. */
#ifndef HELPER_TCG_H
#define HELPER_TCG_H
#include "exec/helper-head.h"
/* Need one more level of indirection before stringification
   to get all the macros expanded first. */
#define str(s) #s
#define DEF_HELPER_FLAGS_0(NAME, FLAGS, ret) \
    { .func = HELPER(NAME), .name = str(NAME), \
      .flags = FLAGS | dh_callflag(ret), \
      .sizemask = dh_sizemask(ret, 0) },
#define DEF_HELPER_FLAGS_1(NAME, FLAGS, ret, t1) \
    { .func = HELPER(NAME), .name = str(NAME), \
      .flags = FLAGS | dh_callflag(ret), \
      .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) },
#define DEF_HELPER_FLAGS_2(NAME, FLAGS, ret, t1, t2) \
    { .func = HELPER(NAME), .name = str(NAME), \
      .flags = FLAGS | dh_callflag(ret), \
      .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \
                  | dh_sizemask(t2, 2) },
#define DEF_HELPER_FLAGS_3(NAME, FLAGS, ret, t1, t2, t3) \
    { .func = HELPER(NAME), .name = str(NAME), \
      .flags = FLAGS | dh_callflag(ret), \
      .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \
                  | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) },
#define DEF_HELPER_FLAGS_4(NAME, FLAGS, ret, t1, t2, t3, t4) \
    { .func = HELPER(NAME), .name = str(NAME), \
      .flags = FLAGS | dh_callflag(ret), \
      .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \
                  | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) | dh_sizemask(t4, 4) },
#define DEF_HELPER_FLAGS_5(NAME, FLAGS, ret, t1, t2, t3, t4, t5) \
    { .func = HELPER(NAME), .name = str(NAME), \
      .flags = FLAGS | dh_callflag(ret), \
      .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \
                  | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) | dh_sizemask(t4, 4) \
                  | dh_sizemask(t5, 5) },
#define DEF_HELPER_FLAGS_6(NAME, FLAGS, ret, t1, t2, t3, t4, t5, t6) \
    { .func = HELPER(NAME), .name = str(NAME), \
      .flags = FLAGS | dh_callflag(ret), \
      .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \
                  | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) | dh_sizemask(t4, 4) \
                  | dh_sizemask(t5, 5) | dh_sizemask(t6, 6) },
#define DEF_HELPER_FLAGS_7(NAME, FLAGS, ret, t1, t2, t3, t4, t5, t6, t7) \
    { .func = HELPER(NAME), .name = str(NAME), .flags = FLAGS, \
      .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \
                  | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) | dh_sizemask(t4, 4) \
                  | dh_sizemask(t5, 5) | dh_sizemask(t6, 6) | dh_sizemask(t7, 7) },
#include "helper.h"
#include "accel/tcg/tcg-runtime.h"
#undef str
#undef DEF_HELPER_FLAGS_0
#undef DEF_HELPER_FLAGS_1
#undef DEF_HELPER_FLAGS_2
#undef DEF_HELPER_FLAGS_3
#undef DEF_HELPER_FLAGS_4
#undef DEF_HELPER_FLAGS_5
#undef DEF_HELPER_FLAGS_6
#undef DEF_HELPER_FLAGS_7
#endif /* HELPER_TCG_H */


#ifndef HWADDR_H
#define HWADDR_H
#define HWADDR_BITS 64
/* hwaddr is the type of a physical address (its size can
be different from 'target_ulong'). */
#include "unicorn/platform.h"
typedef uint64_t hwaddr;
#define HWADDR_MAX UINT64_MAX
#define TARGET_FMT_plx "%016" PRIx64


#ifndef IOPORT_H
#define IOPORT_H
#include "exec/memory.h"
#define MAX_IOPORTS (64 * 1024)
#define IOPORTS_MASK (MAX_IOPORTS - 1)
typedef struct MemoryRegionPortio {
    uint32_t offset;
    uint32_t len;
    unsigned size;
    uint32_t (*read)(void *opaque, uint32_t address);
    void (*write)(void *opaque, uint32_t address, uint32_t data);
    uint32_t base; /* private field */
} MemoryRegionPortio;
#define PORTIO_END_OF_LIST() { }
#ifndef CONFIG_USER_ONLY
extern const MemoryRegionOps unassigned_io_ops;
#endif
void cpu_outb(struct uc_struct *uc, uint32_t addr, uint8_t val);
void cpu_outw(struct uc_struct *uc, uint32_t addr, uint16_t val);
void cpu_outl(struct uc_struct *uc, uint32_t addr, uint32_t val);
uint8_t cpu_inb(struct uc_struct *uc, uint32_t addr);
uint16_t cpu_inw(struct uc_struct *uc, uint32_t addr);
uint32_t cpu_inl(struct uc_struct *uc, uint32_t addr);
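/* A minimal usage sketch (hypothetical function, arbitrary port number):
 * the out/in pairs above are matched by operand width, here 16 bits via
 * cpu_outw() and cpu_inw(). */
static inline uint16_t example_port_echo(struct uc_struct *uc, uint16_t val)
{
    cpu_outw(uc, 0x60, val);
    return cpu_inw(uc, 0x60);
}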
typedef struct PortioList {
const struct MemoryRegionPortio *ports;
struct MemoryRegion *address_space;
unsigned nr;
struct MemoryRegion **regions;
void *opaque;
const char *name;
} PortioList;
void portio_list_init(PortioList *piolist,
const struct MemoryRegionPortio *callbacks,
void *opaque, const char *name);
void portio_list_destroy(PortioList *piolist);
void portio_list_add(PortioList *piolist,
struct MemoryRegion *address_space,
uint32_t addr);
void portio_list_del(PortioList *piolist);
#endif /* IOPORT_H */


/*
* Memory transaction attributes
*
* Copyright (c) 2015 Linaro Limited.
*
* Authors:
* Peter Maydell <peter.maydell@linaro.org>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#ifndef MEMATTRS_H
#define MEMATTRS_H
/* Every memory transaction has associated with it a set of
* attributes. Some of these are generic (such as the ID of
* the bus master); some are specific to a particular kind of
* bus (such as the ARM Secure/NonSecure bit). We define them
* all as non-overlapping bitfields in a single struct to avoid
* confusion if different parts of QEMU used the same bit for
* different semantics.
*/
typedef struct MemTxAttrs {
/* Bus masters which don't specify any attributes will get this
* (via the MEMTXATTRS_UNSPECIFIED constant), so that we can
* distinguish "all attributes deliberately clear" from
* "didn't specify" if necessary.
*/
unsigned int unspecified:1;
/* ARM/AMBA: TrustZone Secure access
* x86: System Management Mode access
*/
unsigned int secure:1;
/* Memory access is usermode (unprivileged) */
unsigned int user:1;
/* Requester ID (for MSI for example) */
unsigned int requester_id:16;
/* Invert endianness for this page */
unsigned int byte_swap:1;
/*
* The following are target-specific page-table bits. These are not
* related to actual memory transactions at all. However, this structure
* is part of the tlb_fill interface, cached in the cputlb structure,
* and has unused bits. These fields will be read by target-specific
* helpers using env->iotlb[mmu_idx][tlb_index()].attrs.target_tlb_bitN.
*/
unsigned int target_tlb_bit0 : 1;
unsigned int target_tlb_bit1 : 1;
unsigned int target_tlb_bit2 : 1;
} MemTxAttrs;
/* Bus masters which don't specify any attributes will get this,
* which has all attribute bits clear except the topmost one
* (so that we can distinguish "all attributes deliberately clear"
* from "didn't specify" if necessary).
*/
#define MEMTXATTRS_UNSPECIFIED ((MemTxAttrs) { .unspecified = 1 })
/* New-style MMIO accessors can indicate that the transaction failed.
* A zero (MEMTX_OK) response means success; anything else is a failure
* of some kind. The memory subsystem will bitwise-OR together results
* if it is synthesizing an operation from multiple smaller accesses.
*/
#define MEMTX_OK 0
#define MEMTX_ERROR (1U << 0) /* device returned an error */
#define MEMTX_DECODE_ERROR (1U << 1) /* nothing at that address */
typedef uint32_t MemTxResult;
#endif
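/* A minimal sketch (hypothetical helper): build attributes for a secure
 * access with an explicit requester ID, as opposed to the "don't care"
 * MEMTXATTRS_UNSPECIFIED value above. */
static inline MemTxAttrs example_secure_attrs(uint16_t requester)
{
    MemTxAttrs attrs = { .secure = 1, .requester_id = requester };
    return attrs;
}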

/*
* Constants for memory operations
*
* Authors:
* Richard Henderson <rth@twiddle.net>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#ifndef MEMOP_H
#define MEMOP_H
#include "qemu/host-utils.h"
typedef enum MemOp {
MO_8 = 0,
MO_16 = 1,
MO_32 = 2,
MO_64 = 3,
MO_SIZE = 3, /* Mask for the above. */
MO_SIGN = 4, /* Sign-extended, otherwise zero-extended. */
MO_BSWAP = 8, /* Host reverse endian. */
#ifdef HOST_WORDS_BIGENDIAN
MO_LE = MO_BSWAP,
MO_BE = 0,
#else
MO_LE = 0,
MO_BE = MO_BSWAP,
#endif
#ifdef NEED_CPU_H
#ifdef TARGET_WORDS_BIGENDIAN
MO_TE = MO_BE,
#else
MO_TE = MO_LE,
#endif
#endif
/*
* MO_UNALN accesses are never checked for alignment.
* MO_ALIGN accesses will result in a call to the CPU's
* do_unaligned_access hook if the guest address is not aligned.
* The default depends on whether the target CPU defines
* TARGET_ALIGNED_ONLY.
*
* Some architectures (e.g. ARMv8) need the address which is aligned
* to a size more than the size of the memory access.
* Some architectures (e.g. SPARCv9) need an address which is aligned,
* but less strictly than the natural alignment.
*
* MO_ALIGN supposes the alignment size is the size of a memory access.
*
* There are three options:
* - unaligned access permitted (MO_UNALN).
* - an alignment to the size of an access (MO_ALIGN);
* - an alignment to a specified size, which may be more or less than
* the access size (MO_ALIGN_x where 'x' is a size in bytes);
*/
MO_ASHIFT = 4,
MO_AMASK = 7 << MO_ASHIFT,
#ifdef NEED_CPU_H
#ifdef TARGET_ALIGNED_ONLY
MO_ALIGN = 0,
MO_UNALN = MO_AMASK,
#else
MO_ALIGN = MO_AMASK,
MO_UNALN = 0,
#endif
#endif
MO_ALIGN_2 = 1 << MO_ASHIFT,
MO_ALIGN_4 = 2 << MO_ASHIFT,
MO_ALIGN_8 = 3 << MO_ASHIFT,
MO_ALIGN_16 = 4 << MO_ASHIFT,
MO_ALIGN_32 = 5 << MO_ASHIFT,
MO_ALIGN_64 = 6 << MO_ASHIFT,
/* Combinations of the above, for ease of use. */
MO_UB = MO_8,
MO_UW = MO_16,
MO_UL = MO_32,
MO_SB = MO_SIGN | MO_8,
MO_SW = MO_SIGN | MO_16,
MO_SL = MO_SIGN | MO_32,
MO_Q = MO_64,
MO_LEUW = MO_LE | MO_UW,
MO_LEUL = MO_LE | MO_UL,
MO_LESW = MO_LE | MO_SW,
MO_LESL = MO_LE | MO_SL,
MO_LEQ = MO_LE | MO_Q,
MO_BEUW = MO_BE | MO_UW,
MO_BEUL = MO_BE | MO_UL,
MO_BESW = MO_BE | MO_SW,
MO_BESL = MO_BE | MO_SL,
MO_BEQ = MO_BE | MO_Q,
#ifdef NEED_CPU_H
MO_TEUW = MO_TE | MO_UW,
MO_TEUL = MO_TE | MO_UL,
MO_TESW = MO_TE | MO_SW,
MO_TESL = MO_TE | MO_SL,
MO_TEQ = MO_TE | MO_Q,
#endif
MO_SSIZE = MO_SIZE | MO_SIGN,
} MemOp;
/* MemOp to size in bytes. */
static inline unsigned memop_size(MemOp op)
{
return 1 << (op & MO_SIZE);
}
/* Size in bytes to MemOp. */
static inline MemOp size_memop(unsigned size)
{
#ifdef CONFIG_DEBUG_TCG
/* Power of 2 up to 8. */
assert((size & (size - 1)) == 0 && size >= 1 && size <= 8);
#endif
return ctz32(size);
}
/* Big endianness from MemOp. */
static inline bool memop_big_endian(MemOp op)
{
return (op & MO_BSWAP) == MO_BE;
}
#endif
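/* A minimal sketch (hypothetical function): memop_size() and size_memop()
 * are inverses for power-of-two sizes, and the MO_SIGN bit does not
 * affect the size. */
static inline bool example_memop_roundtrip(void)
{
    return memop_size(MO_32) == 4 && size_memop(4) == MO_32
        && memop_size(MO_SL) == 4; /* sign bit leaves the size alone */
}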


/*
* Declarations for functions which are internal to the memory subsystem.
*
* Copyright 2011 Red Hat, Inc. and/or its affiliates
*
*/
/*
* This header is for use by exec.c, memory.c and accel/tcg/cputlb.c ONLY,
* for declarations which are shared between the memory subsystem's
* internals and the TCG TLB code. Do not include it from elsewhere.
*/
#ifndef MEMORY_INTERNAL_H
#define MEMORY_INTERNAL_H
#ifndef CONFIG_USER_ONLY
#include "cpu.h"
static inline AddressSpaceDispatch *flatview_to_dispatch(FlatView *fv)
{
return fv->dispatch;
}
static inline AddressSpaceDispatch *address_space_to_dispatch(AddressSpace *as)
{
return flatview_to_dispatch(address_space_to_flatview(as));
}
FlatView *address_space_get_flatview(AddressSpace *as);
void flatview_unref(FlatView *view);
extern const MemoryRegionOps unassigned_mem_ops;
bool memory_region_access_valid(struct uc_struct *uc, MemoryRegion *mr, hwaddr addr,
unsigned size, bool is_write,
MemTxAttrs attrs);
void address_space_unregister(AddressSpace *as);
void flatview_add_to_dispatch(struct uc_struct *uc, FlatView *fv, MemoryRegionSection *section);
AddressSpaceDispatch *address_space_dispatch_new(struct uc_struct *uc, FlatView *fv);
void address_space_dispatch_compact(AddressSpaceDispatch *d);
void address_space_dispatch_free(AddressSpaceDispatch *d);
#endif
void mtree_print_dispatch(struct AddressSpaceDispatch *d,
MemoryRegion *root);
#endif


/*
* Physical memory access templates
*
* Copyright (c) 2003 Fabrice Bellard
* Copyright (c) 2015 Linaro, Inc.
* Copyright (c) 2016 Red Hat, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#ifdef TARGET_ENDIANNESS
extern uint32_t glue(address_space_lduw, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
extern uint32_t glue(address_space_ldl, SUFFIX)(struct uc_struct *, ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
extern uint64_t glue(address_space_ldq, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
extern void glue(address_space_stl_notdirty, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result);
extern void glue(address_space_stw, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result);
extern void glue(address_space_stl, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result);
extern void glue(address_space_stq, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result);
#else
extern uint32_t glue(address_space_ldub, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
extern uint32_t glue(address_space_lduw_le, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
extern uint32_t glue(address_space_lduw_be, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
extern uint32_t glue(address_space_ldl_le, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
extern uint32_t glue(address_space_ldl_be, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
extern uint64_t glue(address_space_ldq_le, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
extern uint64_t glue(address_space_ldq_be, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result);
extern void glue(address_space_stb, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result);
extern void glue(address_space_stw_le, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result);
extern void glue(address_space_stw_be, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result);
extern void glue(address_space_stl_le, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result);
extern void glue(address_space_stl_be, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result);
extern void glue(address_space_stq_le, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result);
extern void glue(address_space_stq_be, SUFFIX)(struct uc_struct *uc, ARG1_DECL,
hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result);
#endif
#undef ARG1_DECL
#undef ARG1
#undef SUFFIX
#undef TARGET_ENDIANNESS

/*
* Memory access templates for MemoryRegionCache
*
* Copyright (c) 2018 Red Hat, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#ifdef UNICORN_ARCH_POSTFIX
#define ADDRESS_SPACE_LD_CACHED(size) \
glue(glue(glue(address_space_ld, size), glue(ENDIANNESS, _cached)), UNICORN_ARCH_POSTFIX)
#define ADDRESS_SPACE_LD_CACHED_SLOW(size) \
glue(glue(glue(address_space_ld, size), glue(ENDIANNESS, _cached_slow)), UNICORN_ARCH_POSTFIX)
#define LD_P(size) \
glue(glue(ld, size), glue(ENDIANNESS, _p))
#else
#define ADDRESS_SPACE_LD_CACHED(size) \
glue(glue(address_space_ld, size), glue(ENDIANNESS, _cached))
#define ADDRESS_SPACE_LD_CACHED_SLOW(size) \
glue(glue(address_space_ld, size), glue(ENDIANNESS, _cached_slow))
#define LD_P(size) \
glue(glue(ld, size), glue(ENDIANNESS, _p))
#endif
static inline uint32_t ADDRESS_SPACE_LD_CACHED(l)(struct uc_struct *uc, MemoryRegionCache *cache,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
assert(addr < cache->len && 4 <= cache->len - addr);
if (likely(cache->ptr)) {
return LD_P(l)((char *)cache->ptr + addr);
} else {
return ADDRESS_SPACE_LD_CACHED_SLOW(l)(uc, cache, addr, attrs, result);
}
}
static inline uint64_t ADDRESS_SPACE_LD_CACHED(q)(struct uc_struct *uc, MemoryRegionCache *cache,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
assert(addr < cache->len && 8 <= cache->len - addr);
if (likely(cache->ptr)) {
return LD_P(q)((char *)cache->ptr + addr);
} else {
return ADDRESS_SPACE_LD_CACHED_SLOW(q)(uc, cache, addr, attrs, result);
}
}
static inline uint32_t ADDRESS_SPACE_LD_CACHED(uw)(struct uc_struct *uc, MemoryRegionCache *cache,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
assert(addr < cache->len && 2 <= cache->len - addr);
if (likely(cache->ptr)) {
return LD_P(uw)((char *)cache->ptr + addr);
} else {
return ADDRESS_SPACE_LD_CACHED_SLOW(uw)(uc, cache, addr, attrs, result);
}
}
#undef ADDRESS_SPACE_LD_CACHED
#undef ADDRESS_SPACE_LD_CACHED_SLOW
#undef LD_P
#ifdef UNICORN_ARCH_POSTFIX
#define ADDRESS_SPACE_ST_CACHED(size) \
glue(glue(glue(address_space_st, size), glue(ENDIANNESS, _cached)), UNICORN_ARCH_POSTFIX)
#define ADDRESS_SPACE_ST_CACHED_SLOW(size) \
glue(glue(glue(address_space_st, size), glue(ENDIANNESS, _cached_slow)), UNICORN_ARCH_POSTFIX)
#define ST_P(size) \
glue(glue(st, size), glue(ENDIANNESS, _p))
#else
#define ADDRESS_SPACE_ST_CACHED(size) \
glue(glue(address_space_st, size), glue(ENDIANNESS, _cached))
#define ADDRESS_SPACE_ST_CACHED_SLOW(size) \
glue(glue(address_space_st, size), glue(ENDIANNESS, _cached_slow))
#define ST_P(size) \
glue(glue(st, size), glue(ENDIANNESS, _p))
#endif
static inline void ADDRESS_SPACE_ST_CACHED(l)(struct uc_struct *uc, MemoryRegionCache *cache,
hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
assert(addr < cache->len && 4 <= cache->len - addr);
if (likely(cache->ptr)) {
ST_P(l)((char *)cache->ptr + addr, val);
} else {
ADDRESS_SPACE_ST_CACHED_SLOW(l)(uc, cache, addr, val, attrs, result);
}
}
static inline void ADDRESS_SPACE_ST_CACHED(w)(struct uc_struct *uc, MemoryRegionCache *cache,
hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
assert(addr < cache->len && 2 <= cache->len - addr);
if (likely(cache->ptr)) {
ST_P(w)((char *)cache->ptr + addr, val);
} else {
ADDRESS_SPACE_ST_CACHED_SLOW(w)(uc, cache, addr, val, attrs, result);
}
}
static inline void ADDRESS_SPACE_ST_CACHED(q)(struct uc_struct *uc, MemoryRegionCache *cache,
hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
{
assert(addr < cache->len && 8 <= cache->len - addr);
if (likely(cache->ptr)) {
ST_P(q)((char *)cache->ptr + addr, val);
} else {
ADDRESS_SPACE_ST_CACHED_SLOW(q)(uc, cache, addr, val, attrs, result);
}
}
#undef ADDRESS_SPACE_ST_CACHED
#undef ADDRESS_SPACE_ST_CACHED_SLOW
#undef ST_P
#undef ENDIANNESS

/*
* Physical memory access templates
*
* Copyright (c) 2003 Fabrice Bellard
* Copyright (c) 2015 Linaro, Inc.
* Copyright (c) 2016 Red Hat, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#ifdef TARGET_ENDIANNESS
static inline uint32_t glue(ldl_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr)
{
return glue(address_space_ldl, SUFFIX)(uc, ARG1, addr,
MEMTXATTRS_UNSPECIFIED, NULL);
}
static inline uint64_t glue(ldq_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr)
{
return glue(address_space_ldq, SUFFIX)(uc, ARG1, addr,
MEMTXATTRS_UNSPECIFIED, NULL);
}
static inline uint32_t glue(lduw_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr)
{
return glue(address_space_lduw, SUFFIX)(uc, ARG1, addr,
MEMTXATTRS_UNSPECIFIED, NULL);
}
static inline void glue(stl_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint32_t val)
{
glue(address_space_stl, SUFFIX)(uc, ARG1, addr, val,
MEMTXATTRS_UNSPECIFIED, NULL);
}
static inline void glue(stw_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint32_t val)
{
glue(address_space_stw, SUFFIX)(uc, ARG1, addr, val,
MEMTXATTRS_UNSPECIFIED, NULL);
}
static inline void glue(stq_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint64_t val)
{
glue(address_space_stq, SUFFIX)(uc, ARG1, addr, val,
MEMTXATTRS_UNSPECIFIED, NULL);
}
#else
static inline uint32_t glue(ldl_le_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr)
{
return glue(address_space_ldl_le, SUFFIX)(uc, ARG1, addr,
MEMTXATTRS_UNSPECIFIED, NULL);
}
static inline uint32_t glue(ldl_be_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr)
{
return glue(address_space_ldl_be, SUFFIX)(uc, ARG1, addr,
MEMTXATTRS_UNSPECIFIED, NULL);
}
static inline uint64_t glue(ldq_le_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr)
{
return glue(address_space_ldq_le, SUFFIX)(uc, ARG1, addr,
MEMTXATTRS_UNSPECIFIED, NULL);
}
static inline uint64_t glue(ldq_be_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr)
{
return glue(address_space_ldq_be, SUFFIX)(uc, ARG1, addr,
MEMTXATTRS_UNSPECIFIED, NULL);
}
static inline uint32_t glue(ldub_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr)
{
return glue(address_space_ldub, SUFFIX)(uc, ARG1, addr,
MEMTXATTRS_UNSPECIFIED, NULL);
}
static inline uint32_t glue(lduw_le_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr)
{
return glue(address_space_lduw_le, SUFFIX)(uc, ARG1, addr,
MEMTXATTRS_UNSPECIFIED, NULL);
}
static inline uint32_t glue(lduw_be_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr)
{
return glue(address_space_lduw_be, SUFFIX)(uc, ARG1, addr,
MEMTXATTRS_UNSPECIFIED, NULL);
}
static inline void glue(stl_le_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint32_t val)
{
glue(address_space_stl_le, SUFFIX)(uc, ARG1, addr, val,
MEMTXATTRS_UNSPECIFIED, NULL);
}
static inline void glue(stl_be_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint32_t val)
{
glue(address_space_stl_be, SUFFIX)(uc, ARG1, addr, val,
MEMTXATTRS_UNSPECIFIED, NULL);
}
static inline void glue(stb_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint32_t val)
{
glue(address_space_stb, SUFFIX)(uc, ARG1, addr, val,
MEMTXATTRS_UNSPECIFIED, NULL);
}
static inline void glue(stw_le_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint32_t val)
{
glue(address_space_stw_le, SUFFIX)(uc, ARG1, addr, val,
MEMTXATTRS_UNSPECIFIED, NULL);
}
static inline void glue(stw_be_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint32_t val)
{
glue(address_space_stw_be, SUFFIX)(uc, ARG1, addr, val,
MEMTXATTRS_UNSPECIFIED, NULL);
}
static inline void glue(stq_le_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint64_t val)
{
glue(address_space_stq_le, SUFFIX)(uc, ARG1, addr, val,
MEMTXATTRS_UNSPECIFIED, NULL);
}
static inline void glue(stq_be_phys, SUFFIX)(struct uc_struct *uc, ARG1_DECL, hwaddr addr, uint64_t val)
{
glue(address_space_stq_be, SUFFIX)(uc, ARG1, addr, val,
MEMTXATTRS_UNSPECIFIED, NULL);
}
#endif
#undef ARG1_DECL
#undef ARG1
#undef SUFFIX
#undef TARGET_ENDIANNESS

/* Poison identifiers that should not be used when building
target independent device code. */
#ifndef HW_POISON_H
#define HW_POISON_H
#ifdef __GNUC__
#pragma GCC poison TARGET_I386
#pragma GCC poison TARGET_X86_64
#pragma GCC poison TARGET_AARCH64
#pragma GCC poison TARGET_ALPHA
#pragma GCC poison TARGET_ARM
#pragma GCC poison TARGET_CRIS
#pragma GCC poison TARGET_HPPA
#pragma GCC poison TARGET_LM32
#pragma GCC poison TARGET_M68K
#pragma GCC poison TARGET_MICROBLAZE
#pragma GCC poison TARGET_MIPS
#pragma GCC poison TARGET_ABI_MIPSN32
#pragma GCC poison TARGET_ABI_MIPSO32
#pragma GCC poison TARGET_MIPS64
#pragma GCC poison TARGET_ABI_MIPSN64
#pragma GCC poison TARGET_MOXIE
#pragma GCC poison TARGET_NIOS2
#pragma GCC poison TARGET_OPENRISC
#pragma GCC poison TARGET_PPC
#pragma GCC poison TARGET_PPC64
#pragma GCC poison TARGET_ABI32
#pragma GCC poison TARGET_RX
#pragma GCC poison TARGET_S390X
#pragma GCC poison TARGET_SH4
#pragma GCC poison TARGET_SPARC
#pragma GCC poison TARGET_SPARC64
#pragma GCC poison TARGET_TILEGX
#pragma GCC poison TARGET_TRICORE
#pragma GCC poison TARGET_UNICORE32
#pragma GCC poison TARGET_XTENSA
#pragma GCC poison TARGET_ALIGNED_ONLY
#pragma GCC poison TARGET_HAS_BFLT
#pragma GCC poison TARGET_NAME
#pragma GCC poison TARGET_SUPPORTS_MTTCG
#pragma GCC poison TARGET_WORDS_BIGENDIAN
#pragma GCC poison BSWAP_NEEDED
#pragma GCC poison TARGET_LONG_BITS
#pragma GCC poison TARGET_FMT_lx
#pragma GCC poison TARGET_FMT_ld
#pragma GCC poison TARGET_FMT_lu
#pragma GCC poison TARGET_PAGE_SIZE
#pragma GCC poison TARGET_PAGE_MASK
#pragma GCC poison TARGET_PAGE_BITS
#pragma GCC poison TARGET_PAGE_ALIGN
#pragma GCC poison CPUArchState
#pragma GCC poison CPU_INTERRUPT_HARD
#pragma GCC poison CPU_INTERRUPT_EXITTB
#pragma GCC poison CPU_INTERRUPT_HALT
#pragma GCC poison CPU_INTERRUPT_DEBUG
#pragma GCC poison CPU_INTERRUPT_TGT_EXT_0
#pragma GCC poison CPU_INTERRUPT_TGT_EXT_1
#pragma GCC poison CPU_INTERRUPT_TGT_EXT_2
#pragma GCC poison CPU_INTERRUPT_TGT_EXT_3
#pragma GCC poison CPU_INTERRUPT_TGT_EXT_4
#pragma GCC poison CPU_INTERRUPT_TGT_INT_0
#pragma GCC poison CPU_INTERRUPT_TGT_INT_1
#pragma GCC poison CPU_INTERRUPT_TGT_INT_2
#pragma GCC poison CONFIG_ALPHA_DIS
#pragma GCC poison CONFIG_ARM_A64_DIS
#pragma GCC poison CONFIG_ARM_DIS
#pragma GCC poison CONFIG_CRIS_DIS
#pragma GCC poison CONFIG_HPPA_DIS
#pragma GCC poison CONFIG_I386_DIS
#pragma GCC poison CONFIG_LM32_DIS
#pragma GCC poison CONFIG_M68K_DIS
#pragma GCC poison CONFIG_MICROBLAZE_DIS
#pragma GCC poison CONFIG_MIPS_DIS
#pragma GCC poison CONFIG_NANOMIPS_DIS
#pragma GCC poison CONFIG_MOXIE_DIS
#pragma GCC poison CONFIG_NIOS2_DIS
#pragma GCC poison CONFIG_PPC_DIS
#pragma GCC poison CONFIG_RISCV_DIS
#pragma GCC poison CONFIG_S390_DIS
#pragma GCC poison CONFIG_SH4_DIS
#pragma GCC poison CONFIG_SPARC_DIS
#pragma GCC poison CONFIG_XTENSA_DIS
#pragma GCC poison CONFIG_LINUX_USER
#pragma GCC poison CONFIG_KVM
#pragma GCC poison CONFIG_SOFTMMU
#endif
#endif

#ifndef RAM_ADDR_H
#define RAM_ADDR_H
#include "uc_priv.h"
#include "cpu.h"
#include "sysemu/tcg.h"
#include "exec/ramlist.h"
#include "exec/ramblock.h"
#ifndef CONFIG_USER_ONLY
static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
{
return (b && b->host && offset < b->used_length) ? true : false;
}
static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
{
    assert(offset_in_ramblock(block, offset));
    return (char *)block->host + offset;
}
static inline unsigned long int ramblock_recv_bitmap_offset(struct uc_struct *uc, void *host_addr,
                                                            RAMBlock *rb)
{
    uint64_t host_addr_offset =
        (uint64_t)(uintptr_t)((char *)host_addr - (char *)rb->host);
    return host_addr_offset >> TARGET_PAGE_BITS;
}
RAMBlock *qemu_ram_alloc_from_ptr(struct uc_struct *uc, ram_addr_t size, void *host,
                                  MemoryRegion *mr);
RAMBlock *qemu_ram_alloc(struct uc_struct *uc, ram_addr_t size, MemoryRegion *mr);
void qemu_ram_free(struct uc_struct *uc, RAMBlock *block);
#define DIRTY_CLIENTS_ALL ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))
void tb_invalidate_phys_range(struct uc_struct *uc, ram_addr_t start, ram_addr_t end);
/* Unicorn does not track dirty memory, so the dirty-bitmap helpers below
 * are stubs: nothing is ever dirty, and setting/clearing is a no-op. */
static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    return false;
}
static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    return false;
}
static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    return cpu_physical_memory_get_dirty(addr, 1, client);
}
static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
{
    return true;
}
static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
}
static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length,
                                                       uint8_t mask)
{
}
#if !defined(_WIN32)
static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                          ram_addr_t start,
                                                          ram_addr_t pages)
{
}
#endif /* not _WIN32 */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client);
static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
                                                         ram_addr_t length)
{
}
/* Called with RCU critical section */
static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
                                               ram_addr_t start,
                                               ram_addr_t length,
                                               uint64_t *real_dirty_pages)
{
    return 0;
}
#endif
#endif
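A hedged usage sketch of the pointer helpers above: a RAMBlock maps a range of guest RAM onto a host buffer, offset_in_ramblock validates an offset against used_length, and ramblock_ptr turns a validated offset into a host pointer. The caller shown is illustrative; field names follow exec/ramblock.h.

/* Illustrative only: write one byte of guest RAM through its host mapping.
 * Assumes the block's host buffer was set up by qemu_ram_alloc{,_from_ptr}. */
static void write_guest_byte(RAMBlock *block, ram_addr_t offset, uint8_t value)
{
    if (!offset_in_ramblock(block, offset)) {
        return;                          /* reject offsets >= block->used_length */
    }
    uint8_t *host = ramblock_ptr(block, offset);   /* block->host + offset */
    *host = value;
}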

View File

@ -0,0 +1,25 @@
/*
* Declarations for cpu physical memory functions
*
* Copyright 2011 Red Hat, Inc. and/or its affiliates
*
* Authors:
* Avi Kivity <avi@redhat.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or
* later. See the COPYING file in the top-level directory.
*
*/
/*
* This header is for use by exec.c and memory.c ONLY. Do not include it.
* The functions declared here will be removed soon.
*/
#ifndef QEMU_EXEC_RAMBLOCK_H
#define QEMU_EXEC_RAMBLOCK_H
#include "cpu-common.h"
#include "qemu.h"
#endif

View File

@ -0,0 +1,19 @@
#ifndef RAMLIST_H
#define RAMLIST_H
#include "qemu/queue.h"
#include "qemu/thread.h"
//#include "qemu/rcu.h"
//#include "qemu/rcu_queue.h"
#define DIRTY_MEMORY_VGA 0
#define DIRTY_MEMORY_CODE 1
#define DIRTY_MEMORY_MIGRATION 2
#define DIRTY_MEMORY_NUM 3 /* num of dirty bits */
#define INTERNAL_RAMBLOCK_FOREACH(block) \
QLIST_FOREACH(block, &uc->ram_list.blocks, next)
/* Never use the INTERNAL_ version except for defining other macros */
#define RAMBLOCK_FOREACH(block) INTERNAL_RAMBLOCK_FOREACH(block)
#endif /* RAMLIST_H */
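A usage sketch for RAMBLOCK_FOREACH: the INTERNAL_ macro expands to a QLIST_FOREACH over uc->ram_list.blocks, so a variable named uc must be in scope at the call site. The offset and used_length fields are assumed from exec/ramblock.h.

/* Illustrative only: find the RAMBlock covering a guest RAM address. */
static RAMBlock *find_ram_block(struct uc_struct *uc, ram_addr_t addr)
{
    RAMBlock *block;
    RAMBLOCK_FOREACH(block) {            /* iterates uc->ram_list.blocks */
        if (addr - block->offset < block->used_length) {
            return block;
        }
    }
    return NULL;
}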

View File

@ -0,0 +1,101 @@
/*
* Helper routines to provide target memory access for semihosting
* syscalls in system emulation mode.
*
* Copyright (c) 2007 CodeSourcery.
*
* This code is licensed under the GPL
*/
#ifndef SOFTMMU_SEMI_H
#define SOFTMMU_SEMI_H
#include "cpu.h"
static inline uint64_t softmmu_tget64(CPUArchState *env, target_ulong addr)
{
uint64_t val;
cpu_memory_rw_debug(env_cpu(env), addr, (uint8_t *)&val, 8, 0);
return tswap64(val);
}
static inline uint32_t softmmu_tget32(CPUArchState *env, target_ulong addr)
{
uint32_t val;
cpu_memory_rw_debug(env_cpu(env), addr, (uint8_t *)&val, 4, 0);
return tswap32(val);
}
static inline uint32_t softmmu_tget8(CPUArchState *env, target_ulong addr)
{
uint8_t val;
cpu_memory_rw_debug(env_cpu(env), addr, &val, 1, 0);
return val;
}
#define get_user_u64(arg, p) ({ arg = softmmu_tget64(env, p); 0; })
#define get_user_u32(arg, p) ({ arg = softmmu_tget32(env, p); 0; })
#define get_user_u8(arg, p) ({ arg = softmmu_tget8(env, p); 0; })
#define get_user_ual(arg, p) get_user_u32(arg, p)
static inline void softmmu_tput64(CPUArchState *env,
target_ulong addr, uint64_t val)
{
val = tswap64(val);
cpu_memory_rw_debug(env_cpu(env), addr, (uint8_t *)&val, 8, 1);
}
static inline void softmmu_tput32(CPUArchState *env,
target_ulong addr, uint32_t val)
{
val = tswap32(val);
cpu_memory_rw_debug(env_cpu(env), addr, (uint8_t *)&val, 4, 1);
}
#define put_user_u64(arg, p) ({ softmmu_tput64(env, p, arg); 0; })
#define put_user_u32(arg, p) ({ softmmu_tput32(env, p, arg); 0; })
#define put_user_ual(arg, p) put_user_u32(arg, p)
static void *softmmu_lock_user(CPUArchState *env,
target_ulong addr, target_ulong len, int copy)
{
uint8_t *p;
/* TODO: Make this something that isn't fixed size. */
p = malloc(len);
if (p && copy) {
cpu_memory_rw_debug(env_cpu(env), addr, p, len, 0);
}
return p;
}
#define lock_user(type, p, len, copy) softmmu_lock_user(env, p, len, copy)
static char *softmmu_lock_user_string(CPUArchState *env, target_ulong addr)
{
char *p;
char *s;
uint8_t c;
/* TODO: Make this something that isn't fixed size. */
s = p = malloc(1024);
if (!s) {
return NULL;
}
do {
cpu_memory_rw_debug(env_cpu(env), addr, &c, 1, 0);
addr++;
*(p++) = c;
} while (c);
return s;
}
#define lock_user_string(p) softmmu_lock_user_string(env, p)
static void softmmu_unlock_user(CPUArchState *env, void *p, target_ulong addr,
target_ulong len)
{
if (len) {
cpu_memory_rw_debug(env_cpu(env), addr, p, len, 1);
}
free(p);
}
#define unlock_user(s, args, len) softmmu_unlock_user(env, s, args, len)
#endif
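A sketch of the intended call pattern, loosely modeled on the semihosting handlers that consume this header. The argument layout and handler name are illustrative; note the macros capture a variable named env from the caller's scope.

/* Illustrative only: a SYS_WRITE0-style handler that reads a guest string. */
static void semihost_write0(CPUArchState *env, target_ulong args)
{
    uint32_t addr;
    char *s;

    get_user_u32(addr, args);        /* first target word: string address   */
    s = lock_user_string(addr);      /* copies in a NUL-terminated string   */
    if (s) {
        printf("%s", s);             /* host-side consumption of the data   */
        unlock_user(s, addr, 0);     /* len == 0: nothing copied back, free */
    }
}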

View File

@ -0,0 +1,23 @@
/*
* Target page sizes and friends for non target files
*
* Copyright (c) 2017 Red Hat Inc
*
* Authors:
* David Alan Gilbert <dgilbert@redhat.com>
* Juan Quintela <quintela@redhat.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#ifndef EXEC_TARGET_PAGE_H
#define EXEC_TARGET_PAGE_H
struct uc_struct;
size_t qemu_target_page_size(struct uc_struct *uc);
int qemu_target_page_bits(struct uc_struct *uc);
int qemu_target_page_bits_min(void);
#endif
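These accessors let target-independent code, which exec/poison.h bars from using TARGET_PAGE_*, reason about the guest page size at run time. A small sketch, assuming the page size is a power of two:

/* Illustrative only: round a length up to a whole number of target pages. */
static size_t target_page_align_up(struct uc_struct *uc, size_t len)
{
    size_t page = qemu_target_page_size(uc);   /* e.g. 4096 on most targets */
    return (len + page - 1) & ~(page - 1);     /* valid since page is 2^n   */
}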

View File

@ -0,0 +1,39 @@
/*
* Internal structs that QEMU exports to TCG
*
* Copyright (c) 2003 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef QEMU_TB_CONTEXT_H
#define QEMU_TB_CONTEXT_H
#include "qemu/thread.h"
#include "qemu/qht.h"
#define CODE_GEN_HTABLE_BITS 15
#define CODE_GEN_HTABLE_SIZE (1 << CODE_GEN_HTABLE_BITS)
typedef struct TranslationBlock TranslationBlock;
typedef struct TBContext TBContext;
struct TBContext {
struct qht htable;
/* statistics */
unsigned tb_flush_count;
};
#endif
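TBContext bundles the qht that maps translated-code keys to TranslationBlocks; CODE_GEN_HTABLE_BITS of 15 sizes it at 1 << 15 = 32768 buckets. A hedged sketch of typical initialization, assuming the qht_init signature from qemu/qht.h and an illustrative comparator named tb_cmp:

/* Illustrative only: sizing and initializing the TB hash table. */
static bool tb_cmp(const void *a, const void *b);   /* assumed comparator */

static void tb_context_init(TBContext *tb_ctx)
{
    qht_init(&tb_ctx->htable, tb_cmp, CODE_GEN_HTABLE_SIZE,
             QHT_MODE_AUTO_RESIZE);                 /* 32768 buckets */
    tb_ctx->tb_flush_count = 0;
}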

View File

@ -0,0 +1,57 @@
/*
* internal execution defines for qemu
*
* Copyright (c) 2003 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef EXEC_TB_HASH_H
#define EXEC_TB_HASH_H
#include "exec/cpu-defs.h"
#include "exec/exec-all.h"
#include "qemu/xxhash.h"
/* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for
addresses on the same page. The top bits are the same. This allows
TLB invalidation to quickly clear a subset of the hash table. */
#define TB_JMP_PAGE_BITS (TB_JMP_CACHE_BITS / 2)
#define TB_JMP_PAGE_SIZE (1 << TB_JMP_PAGE_BITS)
#define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
#define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)
static inline unsigned int tb_jmp_cache_hash_page(struct uc_struct *uc, target_ulong pc)
{
target_ulong tmp;
tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
}
static inline unsigned int tb_jmp_cache_hash_func(struct uc_struct *uc, target_ulong pc)
{
target_ulong tmp;
tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
| (tmp & TB_JMP_ADDR_MASK));
}
static inline
uint32_t tb_hash_func(tb_page_addr_t phys_pc, target_ulong pc, uint32_t flags,
uint32_t cf_mask, uint32_t trace_vcpu_dstate)
{
return qemu_xxhash7(phys_pc, pc, flags, cf_mask, trace_vcpu_dstate);
}
#endif
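To make the division of labor concrete: tb_jmp_cache_hash_func indexes the small per-vCPU direct-mapped jump cache by virtual PC, while tb_hash_func keys the global hash table on physical PC plus CPU state. A sketch of computing both levels; the surrounding structures are assumed from exec/exec-all.h and hw/core/cpu.h.

/* Illustrative only: the two hash levels used for a TB lookup. */
static void tb_hash_sketch(CPUState *cpu, struct uc_struct *uc,
                           target_ulong pc, tb_page_addr_t phys_pc,
                           uint32_t flags, uint32_t cf_mask)
{
    /* Level 1: per-vCPU cache, cheap, keyed on virtual PC only. */
    unsigned int idx = tb_jmp_cache_hash_func(uc, pc);
    TranslationBlock *tb = cpu->tb_jmp_cache[idx];

    /* Level 2: global hash table, keyed on physical PC and CPU state. */
    uint32_t h = tb_hash_func(phys_pc, pc, flags, cf_mask, 0);

    (void)tb;
    (void)h;   /* real callers compare tb fields / probe the qht with h */
}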

View File

@ -0,0 +1,51 @@
/*
* Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
*
* License: GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#ifndef EXEC_TB_LOOKUP_H
#define EXEC_TB_LOOKUP_H
#ifdef NEED_CPU_H
#include "cpu.h"
#else
#include "exec/poison.h"
#endif
#include "exec/exec-all.h"
#include "exec/tb-hash.h"
/* Might cause an exception, so have a longjmp destination ready */
static inline TranslationBlock *
tb_lookup__cpu_state(CPUState *cpu, target_ulong *pc, target_ulong *cs_base,
uint32_t *flags, uint32_t cf_mask)
{
CPUArchState *env = (CPUArchState *)cpu->env_ptr;
TranslationBlock *tb;
uint32_t hash;
cpu_get_tb_cpu_state(env, pc, cs_base, flags);
hash = tb_jmp_cache_hash_func(env->uc, *pc);
tb = cpu->tb_jmp_cache[hash];
cf_mask &= ~CF_CLUSTER_MASK;
cf_mask |= cpu->cluster_index << CF_CLUSTER_SHIFT;
if (likely(tb &&
tb->pc == *pc &&
tb->cs_base == *cs_base &&
tb->flags == *flags &&
tb->trace_vcpu_dstate == *cpu->trace_dstate &&
(tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == cf_mask)) {
return tb;
}
tb = tb_htable_lookup(cpu, *pc, *cs_base, *flags, cf_mask);
if (tb == NULL) {
return NULL;
}
cpu->tb_jmp_cache[hash] = tb;
return tb;
}
#endif /* EXEC_TB_LOOKUP_H */
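The helper above tries the per-vCPU jump cache first, falls back to the global hash table, and refills the cache on a hit. The typical driving pattern looks roughly like this (a sketch; in QEMU proper it sits in the cpu-exec main loop, with tb_gen_code handling a full miss):

/* Illustrative only: consuming tb_lookup__cpu_state from an execution loop. */
static TranslationBlock *fetch_next_tb(CPUState *cpu, uint32_t cflags)
{
    target_ulong pc, cs_base;
    uint32_t flags;
    TranslationBlock *tb;

    tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cflags);
    if (tb == NULL) {
        /* miss in both caches: the caller translates a fresh TB
         * (tb_gen_code) and retries the lookup */
    }
    return tb;
}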

View File

@ -0,0 +1,177 @@
/*
* Generic intermediate code generation.
*
* Copyright (C) 2016-2017 Lluís Vilanova <vilanova@ac.upc.edu>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#ifndef EXEC__TRANSLATOR_H
#define EXEC__TRANSLATOR_H
/*
* Include this header from a target-specific file, and add a
*
* DisasContextBase base;
*
* member in your target-specific DisasContext.
*/
#include "qemu/bswap.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "tcg/tcg.h"
/**
* DisasJumpType:
* @DISAS_NEXT: Next instruction in program order.
* @DISAS_TOO_MANY: Too many instructions translated.
* @DISAS_NORETURN: Following code is dead.
* @DISAS_TARGET_*: Start of target-specific conditions.
*
* What instruction to disassemble next.
*/
typedef enum DisasJumpType {
DISAS_NEXT,
DISAS_TOO_MANY,
DISAS_NORETURN,
DISAS_TARGET_0,
DISAS_TARGET_1,
DISAS_TARGET_2,
DISAS_TARGET_3,
DISAS_TARGET_4,
DISAS_TARGET_5,
DISAS_TARGET_6,
DISAS_TARGET_7,
DISAS_TARGET_8,
DISAS_TARGET_9,
DISAS_TARGET_10,
DISAS_TARGET_11,
} DisasJumpType;
/**
* DisasContextBase:
* @tb: Translation block for this disassembly.
* @pc_first: Address of first guest instruction in this TB.
* @pc_next: Address of next guest instruction in this TB (current during
* disassembly).
* @is_jmp: What instruction to disassemble next.
* @num_insns: Number of translated instructions (including current).
* @max_insns: Maximum number of instructions to be translated in this TB.
* @singlestep_enabled: "Hardware" single stepping enabled.
*
* Architecture-agnostic disassembly context.
*/
typedef struct DisasContextBase {
TranslationBlock *tb;
target_ulong pc_first;
target_ulong pc_next;
DisasJumpType is_jmp;
int num_insns;
int max_insns;
bool singlestep_enabled;
} DisasContextBase;
/**
* TranslatorOps:
* @init_disas_context:
* Initialize the target-specific portions of DisasContext struct.
* The generic DisasContextBase has already been initialized.
*
* @tb_start:
* Emit any code required before the start of the main loop,
* after the generic gen_tb_start().
*
* @insn_start:
* Emit the tcg_gen_insn_start opcode.
*
* @breakpoint_check:
* When called, the breakpoint has already been checked to match the PC,
* but the target may decide the breakpoint missed the address
* (e.g., due to conditions encoded in their flags). Return true to
* indicate that the breakpoint did hit, in which case no more breakpoints
* are checked. If the breakpoint did hit, emit any code required to
* signal the exception, and set db->is_jmp as necessary to terminate
* the main loop.
*
* @translate_insn:
* Disassemble one instruction and set db->pc_next for the start
* of the following instruction. Set db->is_jmp as necessary to
* terminate the main loop.
*
* @tb_stop:
* Emit any opcodes required to exit the TB, based on db->is_jmp.
*/
typedef struct TranslatorOps {
void (*init_disas_context)(DisasContextBase *db, CPUState *cpu);
void (*tb_start)(DisasContextBase *db, CPUState *cpu);
void (*insn_start)(DisasContextBase *db, CPUState *cpu);
bool (*breakpoint_check)(DisasContextBase *db, CPUState *cpu,
const CPUBreakpoint *bp);
void (*translate_insn)(DisasContextBase *db, CPUState *cpu);
void (*tb_stop)(DisasContextBase *db, CPUState *cpu);
} TranslatorOps;
/**
* translator_loop:
* @ops: Target-specific operations.
* @db: Disassembly context.
* @cpu: Target vCPU.
* @tb: Translation block.
* @max_insns: Maximum number of insns to translate.
*
* Generic translator loop.
*
* Translation will stop in the following cases (in order):
* - When is_jmp set by #TranslatorOps::breakpoint_check.
* - set to DISAS_TOO_MANY exits after translating one more insn
* - set to any other value than DISAS_NEXT exits immediately.
* - When is_jmp set by #TranslatorOps::translate_insn.
* - set to any value other than DISAS_NEXT exits immediately.
* - When the TCG operation buffer is full.
* - When single-stepping is enabled (system-wide or on the current vCPU).
* - When too many instructions have been translated.
*/
void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
CPUState *cpu, TranslationBlock *tb, int max_insns);
void translator_loop_temp_check(DisasContextBase *db);
/*
* Translator Load Functions
*
* These are intended to replace the direct usage of the cpu_ld*_code
* functions and are mandatory for front-ends that have been migrated
* to the common translator_loop. These functions are only intended
* to be called from the translation stage and should not be called
* from helper functions. Those functions should be converted to encode
* the relevant information at translation time.
*/
#define GEN_TRANSLATOR_LD(fullname, type, load_fn, swap_fn) \
static inline type \
fullname ## _swap(TCGContext *tcg_ctx, CPUArchState *env, abi_ptr pc, bool do_swap) \
{ \
type ret = load_fn(env, pc); \
if (do_swap) { \
ret = swap_fn(ret); \
} \
return ret; \
} \
\
static inline type fullname(TCGContext *tcg_ctx, CPUArchState *env, abi_ptr pc) \
{ \
return fullname ## _swap(tcg_ctx, env, pc, false); \
}
GEN_TRANSLATOR_LD(translator_ldub, uint8_t, cpu_ldub_code, /* no swap */)
GEN_TRANSLATOR_LD(translator_ldsw, int16_t, cpu_ldsw_code, bswap16)
GEN_TRANSLATOR_LD(translator_lduw, uint16_t, cpu_lduw_code, bswap16)
GEN_TRANSLATOR_LD(translator_ldl, uint32_t, cpu_ldl_code, bswap32)
GEN_TRANSLATOR_LD(translator_ldq, uint64_t, cpu_ldq_code, bswap64)
#undef GEN_TRANSLATOR_LD
#endif /* EXEC__TRANSLATOR_H */
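To make the generator macro concrete, GEN_TRANSLATOR_LD(translator_lduw, uint16_t, cpu_lduw_code, bswap16) expands to approximately the pair below, giving front-ends a code fetch in target endianness plus an optional byte-swapped variant:

/* Approximate expansion, shown for illustration only. */
static inline uint16_t
translator_lduw_swap(TCGContext *tcg_ctx, CPUArchState *env, abi_ptr pc,
                     bool do_swap)
{
    uint16_t ret = cpu_lduw_code(env, pc);   /* fetch via the code TLB     */
    if (do_swap) {
        ret = bswap16(ret);                  /* cross-endian front-end use */
    }
    return ret;
}

static inline uint16_t
translator_lduw(TCGContext *tcg_ctx, CPUArchState *env, abi_ptr pc)
{
    return translator_lduw_swap(tcg_ctx, env, pc, false);
}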