import Unicorn2
@@ -1,68 +0,0 @@
#ifndef QEMU_AES_H
#define QEMU_AES_H

#define AES_MAXNR 14
#define AES_BLOCK_SIZE 16

struct aes_key_st {
    uint32_t rd_key[4 * (AES_MAXNR + 1)];
    int rounds;
};
typedef struct aes_key_st AES_KEY;

/* FreeBSD has its own AES_set_decrypt_key in -lcrypto, avoid conflicts */
#ifdef __FreeBSD__
#define AES_set_encrypt_key QEMU_AES_set_encrypt_key
#define AES_set_decrypt_key QEMU_AES_set_decrypt_key
#define AES_encrypt QEMU_AES_encrypt
#define AES_decrypt QEMU_AES_decrypt
#define AES_cbc_encrypt QEMU_AES_cbc_encrypt
#endif

int AES_set_encrypt_key(const unsigned char *userKey, const int bits,
                        AES_KEY *key);
int AES_set_decrypt_key(const unsigned char *userKey, const int bits,
                        AES_KEY *key);

void AES_encrypt(const unsigned char *in, unsigned char *out,
                 const AES_KEY *key);
void AES_decrypt(const unsigned char *in, unsigned char *out,
                 const AES_KEY *key);
void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
                     const unsigned long length, const AES_KEY *key,
                     unsigned char *ivec, const int enc);

extern const uint8_t AES_sbox[256];
extern const uint8_t AES_isbox[256];

/* AES ShiftRows and InvShiftRows */
extern const uint8_t AES_shifts[16];
extern const uint8_t AES_ishifts[16];

/* AES InvMixColumns */
/* AES_imc[x][0] = [x].[0e, 09, 0d, 0b]; */
/* AES_imc[x][1] = [x].[0b, 0e, 09, 0d]; */
/* AES_imc[x][2] = [x].[0d, 0b, 0e, 09]; */
/* AES_imc[x][3] = [x].[09, 0d, 0b, 0e]; */
extern const uint32_t AES_imc[256][4];

/*
AES_Te0[x] = S [x].[02, 01, 01, 03];
AES_Te1[x] = S [x].[03, 02, 01, 01];
AES_Te2[x] = S [x].[01, 03, 02, 01];
AES_Te3[x] = S [x].[01, 01, 03, 02];
AES_Te4[x] = S [x].[01, 01, 01, 01];

AES_Td0[x] = Si[x].[0e, 09, 0d, 0b];
AES_Td1[x] = Si[x].[0b, 0e, 09, 0d];
AES_Td2[x] = Si[x].[0d, 0b, 0e, 09];
AES_Td3[x] = Si[x].[09, 0d, 0b, 0e];
AES_Td4[x] = Si[x].[01, 01, 01, 01];
*/

extern const uint32_t AES_Te0[256], AES_Te1[256], AES_Te2[256],
    AES_Te3[256], AES_Te4[256];
extern const uint32_t AES_Td0[256], AES_Td1[256], AES_Td2[256],
    AES_Td3[256], AES_Td4[256];

#endif
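/* Usage sketch for the API this (now removed) header declared: a one-shot
 * AES-128 ECB encryption of a single block.  Illustrative only, not part
 * of the commit.
 */
#if 0   /* example */
static void aes_demo(const unsigned char key[16],
                     const unsigned char in[16], unsigned char out[16])
{
    AES_KEY k;
    AES_set_encrypt_key(key, 128, &k);  /* expand the 128-bit key */
    AES_encrypt(in, out, &k);           /* encrypt one 16-byte block */
}
#endif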
@@ -8,227 +8,251 @@
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 * See docs/atomics.txt for discussion about the guarantees each
 * atomic primitive is meant to provide.
 */

#ifndef __QEMU_ATOMIC_H
#define __QEMU_ATOMIC_H 1
#ifndef QEMU_ATOMIC_H
#define QEMU_ATOMIC_H

#include "qemu/compiler.h"

// we do not really support multiple CPUs, so we don't care
#define smp_mb()
#define smp_wmb()
#define smp_rmb()
#define barrier()

/* The variable that receives the old value of an atomically-accessed
 * variable must be non-qualified, because atomic builtins return values
 * through a pointer-type argument as in __atomic_load(&var, &old, MODEL).
 *
 * This macro has to handle types smaller than int manually, because of
 * implicit promotion.  int and larger types, as well as pointers, can be
 * converted to a non-qualified type just by applying a binary operator.
 */
#define typeof_strip_qual(expr)                                                   \
  typeof(                                                                         \
    __builtin_choose_expr(                                                        \
      __builtin_types_compatible_p(typeof(expr), bool) ||                         \
        __builtin_types_compatible_p(typeof(expr), const bool) ||                 \
        __builtin_types_compatible_p(typeof(expr), volatile bool) ||              \
        __builtin_types_compatible_p(typeof(expr), const volatile bool),          \
        (bool)1,                                                                  \
    __builtin_choose_expr(                                                        \
      __builtin_types_compatible_p(typeof(expr), signed char) ||                  \
        __builtin_types_compatible_p(typeof(expr), const signed char) ||          \
        __builtin_types_compatible_p(typeof(expr), volatile signed char) ||       \
        __builtin_types_compatible_p(typeof(expr), const volatile signed char),   \
        (signed char)1,                                                           \
    __builtin_choose_expr(                                                        \
      __builtin_types_compatible_p(typeof(expr), unsigned char) ||                \
        __builtin_types_compatible_p(typeof(expr), const unsigned char) ||        \
        __builtin_types_compatible_p(typeof(expr), volatile unsigned char) ||     \
        __builtin_types_compatible_p(typeof(expr), const volatile unsigned char), \
        (unsigned char)1,                                                         \
    __builtin_choose_expr(                                                        \
      __builtin_types_compatible_p(typeof(expr), signed short) ||                 \
        __builtin_types_compatible_p(typeof(expr), const signed short) ||         \
        __builtin_types_compatible_p(typeof(expr), volatile signed short) ||      \
        __builtin_types_compatible_p(typeof(expr), const volatile signed short),  \
        (signed short)1,                                                          \
    __builtin_choose_expr(                                                        \
      __builtin_types_compatible_p(typeof(expr), unsigned short) ||               \
        __builtin_types_compatible_p(typeof(expr), const unsigned short) ||       \
        __builtin_types_compatible_p(typeof(expr), volatile unsigned short) ||    \
        __builtin_types_compatible_p(typeof(expr), const volatile unsigned short), \
        (unsigned short)1,                                                        \
      (expr)+0))))))

#ifdef __ATOMIC_RELAXED
/* For C11 atomic ops */

/* Compiler barrier */
#ifdef _MSC_VER
void _ReadWriteBarrier(void);
#pragma intrinsic(_ReadWriteBarrier)
#define barrier() do { _ReadWriteBarrier(); } while (0)
#else
#define barrier() ({ asm volatile("" ::: "memory"); (void)0; })
#endif

#ifndef __ATOMIC_RELAXED

/*
 * We use GCC builtin if it's available, as that can use mfence on
 * 32-bit as well, e.g. if built with -march=pentium-m.  However, on
 * i386 the spec is buggy, and the implementation followed it until
 * 4.3 (http://gcc.gnu.org/bugzilla/show_bug.cgi?id=36793).
/* Sanity check that the size of an atomic operation isn't "overly large".
 * Despite the fact that e.g. i686 has 64-bit atomic operations, we do not
 * want to use them because we ought not need them, and this lets us do a
 * bit of sanity checking that other 32-bit hosts might build.
 *
 * That said, we have a problem on 64-bit ILP32 hosts in that in order to
 * sync with TCG_OVERSIZED_GUEST, this must match TCG_TARGET_REG_BITS.
 * We'd prefer not to pull in everything else TCG related, so handle
 * those few cases by hand.
 *
 * Note that x32 is fully detected with __x86_64__ + _ILP32, and that for
 * Sparc we always force the use of sparcv9 in configure. MIPS n32 (ILP32) &
 * n64 (LP64) ABIs are both detected using __mips64.
 */
#if defined(__i386__) || defined(__x86_64__)
#if !QEMU_GNUC_PREREQ(4, 4)
#if defined __x86_64__
# ifdef _MSC_VER
// TODO: fix me!!!
# define smp_mb()    //{ __asm volatile("mfence" ::: "memory"); (void)0; }
# else
# define smp_mb()    ({ asm volatile("mfence" ::: "memory"); (void)0; })
# endif
#if defined(__x86_64__) || defined(__sparc__) || defined(__mips64)
# define ATOMIC_REG_SIZE  8
#else
# ifdef _MSC_VER
// TODO: fix me!!!
# define smp_mb()    //{ __asm volatile("lock; addl $0,0(%esp) " ::: "memory"); (void)0; }
# else
# define smp_mb()    ({ asm volatile("lock; addl $0,0(%%esp) " ::: "memory"); (void)0; })
# endif
#endif
#endif
# define ATOMIC_REG_SIZE  sizeof(void *)
#endif

/* Weak atomic operations prevent the compiler moving other
 * loads/stores past the atomic operation load/store. However there is
 * no explicit memory barrier for the processor.
 *
 * The C11 memory model says that variables that are accessed from
 * different threads should at least be done with __ATOMIC_RELAXED
 * primitives or the result is undefined. Generally this has little to
 * no effect on the generated code but not using the atomic primitives
 * will get flagged by sanitizers as a violation.
 */
#define atomic_read__nocheck(ptr) \
    __atomic_load_n(ptr, __ATOMIC_RELAXED)

#ifdef __alpha__
#define smp_read_barrier_depends()   asm volatile("mb":::"memory")
#endif
#define atomic_read(ptr)                                   \
    ({                                                     \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);     \
    atomic_read__nocheck(ptr);                             \
    })

#define atomic_set__nocheck(ptr, i) \
    __atomic_store_n(ptr, i, __ATOMIC_RELAXED)

#define atomic_set(ptr, i)  do {                           \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);     \
    atomic_set__nocheck(ptr, i);                           \
} while(0)

/* All the remaining operations are fully sequentially consistent */

#define atomic_xchg__nocheck(ptr, i)    ({                 \
    __atomic_exchange_n(ptr, (i), __ATOMIC_SEQ_CST);       \
})

#define atomic_xchg(ptr, i)    ({                          \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);     \
    atomic_xchg__nocheck(ptr, i);                          \
})

/* Returns the eventual value, failed or not */
#define atomic_cmpxchg__nocheck(ptr, old, new)    ({       \
    typeof_strip_qual(*ptr) _old = (old);                  \
    (void)__atomic_compare_exchange_n(ptr, &_old, new, false, \
                              __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \
    _old;                                                  \
})

#define atomic_cmpxchg(ptr, old, new)    ({                \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);     \
    atomic_cmpxchg__nocheck(ptr, old, new);                \
})

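/* A minimal usage sketch (illustrative, not part of this header): the
 * canonical compare-and-swap retry loop built on atomic_cmpxchg() above.
 * The function name and the clamping policy are hypothetical.
 */
#if 0   /* example */
static inline int counter_add_clamped(int *ctr, int delta, int limit)
{
    int old, new;
    do {
        old = atomic_read(ctr);
        new = old + delta > limit ? limit : old + delta;
        /* atomic_cmpxchg() returns the value it found; retry on a race */
    } while (atomic_cmpxchg(ctr, old, new) != old);
    return new;
}
#endif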
/* Provide shorter names for GCC atomic builtins, return old value */
#define atomic_fetch_inc(ptr)  __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_fetch_dec(ptr)  __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_fetch_add(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_or(ptr, n)  __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_xor(ptr, n) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST)

#define atomic_inc_fetch(ptr)    __atomic_add_fetch(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_dec_fetch(ptr)    __atomic_sub_fetch(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_add_fetch(ptr, n) __atomic_add_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_sub_fetch(ptr, n) __atomic_sub_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_and_fetch(ptr, n) __atomic_and_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_or_fetch(ptr, n)  __atomic_or_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_xor_fetch(ptr, n) __atomic_xor_fetch(ptr, n, __ATOMIC_SEQ_CST)

/* And even shorter names that return void.  */
#define atomic_inc(ptr)    ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST))
#define atomic_dec(ptr)    ((void) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST))
#define atomic_add(ptr, n) ((void) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_sub(ptr, n) ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_and(ptr, n) ((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_or(ptr, n)  ((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_xor(ptr, n) ((void) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST))

#else /* __ATOMIC_RELAXED */

#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)

/*
 * Because of the strongly ordered storage model, wmb() and rmb() are nops
 * here (a compiler barrier only).  QEMU doesn't do accesses to write-combining
 * qemu memory or non-temporal load/stores from C code.
 */
#define smp_wmb()   barrier()
#define smp_rmb()   barrier()

/*
 * __sync_lock_test_and_set() is documented to be an acquire barrier only,
 * but it is a full barrier at the hardware level.  Add a compiler barrier
 * to make it a full barrier also at the compiler level.
 */
#define atomic_xchg(ptr, i)    (barrier(), __sync_lock_test_and_set(ptr, i))

/*
 * Load/store with Java volatile semantics.
 */
#define atomic_mb_set(ptr, i)  ((void)atomic_xchg(ptr, i))

#elif defined(_ARCH_PPC)

/*
 * We use an eieio() for wmb() on powerpc.  This assumes we don't
 * need to order cacheable and non-cacheable stores with respect to
 * each other.
 *
 * smp_mb has the same problem as on x86 for not-very-new GCC
 * (http://patchwork.ozlabs.org/patch/126184/, Nov 2011).
 */
#define smp_wmb()   ({ asm volatile("eieio" ::: "memory"); (void)0; })
#if defined(__powerpc64__)
#define smp_rmb()   ({ asm volatile("lwsync" ::: "memory"); (void)0; })
#else
#define smp_rmb()   ({ asm volatile("sync" ::: "memory"); (void)0; })
#endif
#define smp_mb()    ({ asm volatile("sync" ::: "memory"); (void)0; })

#endif /* _ARCH_PPC */

#endif /* C11 atomics */

/*
 * For (host) platforms we don't have explicit barrier definitions
 * for, we use the gcc __sync_synchronize() primitive to generate a
 * full barrier.  This should be safe on all platforms, though it may
 * be overkill for smp_wmb() and smp_rmb().
 */
#ifndef smp_mb
#define smp_mb()    __sync_synchronize()
#endif

#ifndef smp_wmb
#ifdef __ATOMIC_RELEASE
#define smp_wmb()   __atomic_thread_fence(__ATOMIC_RELEASE)
#else
#define smp_wmb()   __sync_synchronize()
#endif
#endif

#ifndef smp_rmb
#ifdef __ATOMIC_ACQUIRE
#define smp_rmb()   __atomic_thread_fence(__ATOMIC_ACQUIRE)
#else
#define smp_rmb()   __sync_synchronize()
#endif
#endif

#ifndef smp_read_barrier_depends
#ifdef __ATOMIC_CONSUME
#define smp_read_barrier_depends()   __atomic_thread_fence(__ATOMIC_CONSUME)
#else
#define smp_read_barrier_depends()   barrier()
#endif
#endif

#ifndef atomic_read
#define atomic_read(ptr)       (*(__typeof__(*ptr) *volatile) (ptr))
#endif

#ifndef atomic_set
#define atomic_set(ptr, i)     ((*(__typeof__(*ptr) *volatile) (ptr)) = (i))
#endif

/* These have the same semantics as Java volatile variables.
 * See http://gee.cs.oswego.edu/dl/jmm/cookbook.html:
 * "1. Issue a StoreStore barrier (wmb) before each volatile store."
 *  2. Issue a StoreLoad barrier after each volatile store.
 *     Note that you could instead issue one before each volatile load, but
 *     this would be slower for typical programs using volatiles in which
 *     reads greatly outnumber writes. Alternatively, if available, you
 *     can implement volatile store as an atomic instruction (for example
 *     XCHG on x86) and omit the barrier. This may be more efficient if
 *     atomic instructions are cheaper than StoreLoad barriers.
 *  3. Issue LoadLoad and LoadStore barriers after each volatile load."
 *
 * If you prefer to think in terms of "pairing" of memory barriers,
 * an atomic_mb_read pairs with an atomic_mb_set.
 *
 * And for the few ia64 lovers that exist, an atomic_mb_read is a ld.acq,
 * while an atomic_mb_set is a st.rel followed by a memory barrier.
 *
 * These are a bit weaker than __atomic_load/store with __ATOMIC_SEQ_CST
 * (see docs/atomics.txt), and I'm not sure that __ATOMIC_ACQ_REL is enough.
 * Just always use the barriers manually by the rules above.
 */
#ifndef atomic_mb_read
#define atomic_mb_read(ptr)    ({           \
    typeof(*ptr) _val = atomic_read(ptr);   \
    smp_rmb();                              \
    _val;                                   \
})
#endif

#ifndef atomic_mb_set
#define atomic_mb_set(ptr, i)  do {         \
    smp_wmb();                              \
    atomic_set(ptr, i);                     \
    smp_mb();                               \
} while (0)
#endif

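/* Sketch of the intended pairing (illustrative, not part of this header):
 * a producer publishes data and then a flag with atomic_mb_set(); the
 * consumer reads the flag with atomic_mb_read() before touching the data.
 */
#if 0   /* example */
static int data;
static int ready;

static void producer(void)
{
    data = 42;                 /* plain store */
    atomic_mb_set(&ready, 1);  /* wmb before the store, mb after it */
}

static int consumer(void)
{
    if (atomic_mb_read(&ready)) {  /* rmb after the load */
        return data;               /* guaranteed to observe 42 */
    }
    return -1;
}
#endif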
#ifndef atomic_xchg
#ifndef _MSC_VER
#if defined(__clang__)
#define atomic_xchg(ptr, i)    __sync_swap(ptr, i)
#elif defined(__ATOMIC_SEQ_CST)
#define atomic_xchg(ptr, i)    ({                           \
    typeof(*ptr) _new = (i), _old;                          \
    __atomic_exchange(ptr, &_new, &_old, __ATOMIC_SEQ_CST); \
    _old;                                                   \
})
#else
/* __sync_lock_test_and_set() is documented to be an acquire barrier only.  */
#define atomic_xchg(ptr, i)    (smp_mb(), __sync_lock_test_and_set(ptr, i))
#define atomic_xchg(ptr, i)    (__sync_lock_test_and_set(ptr, i))
#endif
#endif

#endif

/* These will only be atomic if the processor does the fetch or store
 * in a single issue memory operation
 */
#define atomic_read__nocheck(p)   (*(__typeof__(*(p)) volatile*) (p))
#define atomic_set__nocheck(p, i) ((*(__typeof__(*(p)) volatile*) (p)) = (i))

#define atomic_read(ptr)       atomic_read__nocheck(ptr)
#define atomic_set(ptr, i)     atomic_set__nocheck(ptr, i)

#define atomic_xchg__nocheck  atomic_xchg

/* Provide shorter names for GCC atomic builtins.  */
#ifdef _MSC_VER
// these return the new value (so we make it return the previous value)
#define atomic_fetch_inc(ptr)    ((InterlockedIncrement(ptr)) - 1)
#define atomic_fetch_dec(ptr)    ((InterlockedDecrement(ptr)) + 1)
#define atomic_fetch_add(ptr, n) ((InterlockedAdd(ptr, n)) - n)
#define atomic_fetch_sub(ptr, n) ((InterlockedAdd(ptr, -n)) + n)
#else
// these return the previous value
#define atomic_fetch_inc(ptr)  __sync_fetch_and_add(ptr, 1)
#define atomic_fetch_dec(ptr)  __sync_fetch_and_add(ptr, -1)
#define atomic_fetch_add  __sync_fetch_and_add
#define atomic_fetch_sub  __sync_fetch_and_sub
#define atomic_fetch_and  __sync_fetch_and_and
#define atomic_fetch_or   __sync_fetch_and_or
#define atomic_cmpxchg    __sync_val_compare_and_swap
#endif
#define atomic_fetch_inc(ptr)    ((InterlockedIncrement(ptr)) - 1)
#define atomic_fetch_dec(ptr)    ((InterlockedDecrement(ptr)) + 1)
#define atomic_fetch_add(ptr, n) ((InterlockedAdd(ptr, n)) - n)
#define atomic_fetch_sub(ptr, n) ((InterlockedAdd(ptr, -n)) + n)
#define atomic_fetch_and(ptr, n) ((InterlockedAnd(ptr, n)))
#define atomic_fetch_or(ptr, n)  ((InterlockedOr(ptr, n)))
#define atomic_fetch_xor(ptr, n) ((InterlockedXor(ptr, n)))

#define atomic_inc_fetch(ptr)    (InterlockedIncrement((long*)(ptr)))
#define atomic_dec_fetch(ptr)    (InterlockedDecrement((long*)(ptr)))
#define atomic_add_fetch(ptr, n) (InterlockedExchangeAdd((long*)ptr, n) + n)
#define atomic_sub_fetch(ptr, n) (InterlockedExchangeAdd((long*)ptr, n) - n)
#define atomic_and_fetch(ptr, n) (InterlockedAnd((long*)ptr, n) & n)
#define atomic_or_fetch(ptr, n)  (InterlockedOr((long*)ptr, n) | n)
#define atomic_xor_fetch(ptr, n) (InterlockedXor((long*)ptr, n) ^ n)

#define atomic_cmpxchg(ptr, old, new) ((InterlockedCompareExchange(ptr, old, new)))
#define atomic_cmpxchg__nocheck(ptr, old, new)  atomic_cmpxchg(ptr, old, new)

/* And even shorter names that return void.  */
#ifdef _MSC_VER
#define atomic_inc(ptr)    ((void) InterlockedIncrement(ptr))
#define atomic_dec(ptr)    ((void) InterlockedDecrement(ptr))
#define atomic_add(ptr, n) ((void) InterlockedAdd(ptr, n))
#define atomic_sub(ptr, n) ((void) InterlockedAdd(ptr, -n))
#else
#define atomic_and(ptr, n) ((void) InterlockedAnd(ptr, n))
#define atomic_or(ptr, n)  ((void) InterlockedOr(ptr, n))
#define atomic_xor(ptr, n) ((void) InterlockedXor(ptr, n))
#else // GCC/clang
// these return the previous value
#define atomic_fetch_inc(ptr)    __sync_fetch_and_add(ptr, 1)
#define atomic_fetch_dec(ptr)    __sync_fetch_and_add(ptr, -1)
#define atomic_fetch_add(ptr, n) __sync_fetch_and_add(ptr, n)
#define atomic_fetch_sub(ptr, n) __sync_fetch_and_sub(ptr, n)
#define atomic_fetch_and(ptr, n) __sync_fetch_and_and(ptr, n)
#define atomic_fetch_or(ptr, n)  __sync_fetch_and_or(ptr, n)
#define atomic_fetch_xor(ptr, n) __sync_fetch_and_xor(ptr, n)

#define atomic_inc_fetch(ptr)    __sync_add_and_fetch(ptr, 1)
#define atomic_dec_fetch(ptr)    __sync_add_and_fetch(ptr, -1)
#define atomic_add_fetch(ptr, n) __sync_add_and_fetch(ptr, n)
#define atomic_sub_fetch(ptr, n) __sync_sub_and_fetch(ptr, n)
#define atomic_and_fetch(ptr, n) __sync_and_and_fetch(ptr, n)
#define atomic_or_fetch(ptr, n)  __sync_or_and_fetch(ptr, n)
#define atomic_xor_fetch(ptr, n) __sync_xor_and_fetch(ptr, n)

#define atomic_cmpxchg(ptr, old, new) __sync_val_compare_and_swap(ptr, old, new)
#define atomic_cmpxchg__nocheck(ptr, old, new)  atomic_cmpxchg(ptr, old, new)

#define atomic_inc(ptr)        ((void) __sync_fetch_and_add(ptr, 1))
#define atomic_dec(ptr)        ((void) __sync_fetch_and_add(ptr, -1))
#define atomic_add(ptr, n)     ((void) __sync_fetch_and_add(ptr, n))
#define atomic_sub(ptr, n)     ((void) __sync_fetch_and_sub(ptr, n))
#define atomic_and(ptr, n)     ((void) __sync_fetch_and_and(ptr, n))
#define atomic_or(ptr, n)      ((void) __sync_fetch_and_or(ptr, n))
#define atomic_xor(ptr, n)     ((void) __sync_fetch_and_xor(ptr, n))
#endif

#endif
#endif /* __ATOMIC_RELAXED */

#endif /* QEMU_ATOMIC_H */
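/* The difference between the fetch_* and *_fetch spellings above, shown
 * concretely (illustrative sketch only):
 */
#if 0   /* example */
static void fetch_semantics(void)
{
    int v = 10;
    int before = atomic_fetch_add(&v, 5);  /* returns 10, v becomes 15 */
    int after  = atomic_add_fetch(&v, 5);  /* v becomes 20, returns 20 */
    (void)before;
    (void)after;
}
#endif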
qemu/include/qemu/atomic128.h (Normal file, 175 lines)
@@ -0,0 +1,175 @@
/*
 * Simple interface for 128-bit atomic operations.
 *
 * Copyright (C) 2018 Linaro, Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 * See docs/devel/atomics.txt for discussion about the guarantees each
 * atomic primitive is meant to provide.
 */

#ifndef QEMU_ATOMIC128_H
#define QEMU_ATOMIC128_H

#include "int128.h"

/*
 * GCC is a house divided about supporting large atomic operations.
 *
 * For hosts that only have large compare-and-swap, a legalistic reading
 * of the C++ standard means that one cannot implement __atomic_read on
 * read-only memory, and thus all atomic operations must synchronize
 * through libatomic.
 *
 * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=80878
 *
 * This interpretation is not especially helpful for QEMU.
 * For softmmu, all RAM is always read/write from the hypervisor.
 * For user-only, if the guest doesn't implement such an __atomic_read
 * then the host need not worry about it either.
 *
 * Moreover, using libatomic is not an option, because its interface is
 * built for std::atomic<T>, and requires that *all* accesses to such an
 * object go through the library.  In our case we do not have an object
 * in the C/C++ sense, but a view of memory as seen by the guest.
 * The guest may issue a large atomic operation and then access those
 * pieces using word-sized accesses.  From the hypervisor, we have no
 * way to connect those two actions.
 *
 * Therefore, special case each platform.
 */

#if defined(CONFIG_ATOMIC128)
static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
{
    return atomic_cmpxchg__nocheck(ptr, cmp, new);
}
# define HAVE_CMPXCHG128 1
#elif defined(CONFIG_CMPXCHG128)
static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
{
#ifdef _MSC_VER
    /* Compare and swap, the same as __sync_val_compare_and_swap():
       if the current value of *ptr is cmp, then write new into *ptr,
       and return the old value of *ptr. */
    Int128 save = *ptr;
    if (!memcmp(ptr, &cmp, sizeof(cmp))) {
        *ptr = new;
    }
    return save;
#else
    return __sync_val_compare_and_swap_16(ptr, cmp, new);
#endif
}
# define HAVE_CMPXCHG128 1
#elif defined(__aarch64__)
/* Through gcc 8, aarch64 has no support for 128-bit at all.  */
static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
{
    uint64_t cmpl = int128_getlo(cmp), cmph = int128_gethi(cmp);
    uint64_t newl = int128_getlo(new), newh = int128_gethi(new);
    uint64_t oldl, oldh;
    uint32_t tmp;

    asm("0: ldaxp %[oldl], %[oldh], %[mem]\n\t"
        "cmp %[oldl], %[cmpl]\n\t"
        "ccmp %[oldh], %[cmph], #0, eq\n\t"
        "b.ne 1f\n\t"
        "stlxp %w[tmp], %[newl], %[newh], %[mem]\n\t"
        "cbnz %w[tmp], 0b\n"
        "1:"
        : [mem] "+m"(*ptr), [tmp] "=&r"(tmp),
          [oldl] "=&r"(oldl), [oldh] "=&r"(oldh)
        : [cmpl] "r"(cmpl), [cmph] "r"(cmph),
          [newl] "r"(newl), [newh] "r"(newh)
        : "memory", "cc");

    return int128_make128(oldl, oldh);
}
# define HAVE_CMPXCHG128 1
#else
/* Fallback definition that must be optimized away, or error.  */
Int128 QEMU_ERROR("unsupported atomic")
    atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new);
# define HAVE_CMPXCHG128 0
#endif /* Some definition for HAVE_CMPXCHG128 */

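/* Illustrative caller-side pattern (not part of this header): code that
 * wants a 16-byte CAS tests HAVE_CMPXCHG128 at compile time and falls
 * back to another strategy when it is 0, so the QEMU_ERROR stub above is
 * never reached on unsupported hosts.  Names here are hypothetical.
 */
#if 0   /* example */
static bool try_cas16(Int128 *p, Int128 expect, Int128 desired)
{
#if HAVE_CMPXCHG128
    Int128 old = atomic16_cmpxchg(p, expect, desired);
    return int128_eq(old, expect);
#else
    return false;   /* caller must serialize with a lock instead */
#endif
}
#endif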
#if defined(CONFIG_ATOMIC128)
static inline Int128 atomic16_read(Int128 *ptr)
{
    return atomic_read__nocheck(ptr);
}

static inline void atomic16_set(Int128 *ptr, Int128 val)
{
    atomic_set__nocheck(ptr, val);
}

# define HAVE_ATOMIC128 1
#elif defined(__aarch64__)
/* We can do better than cmpxchg for AArch64.  */
static inline Int128 atomic16_read(Int128 *ptr)
{
    uint64_t l, h;
    uint32_t tmp;

    /* The load must be paired with the store to guarantee not tearing.  */
    asm("0: ldxp %[l], %[h], %[mem]\n\t"
        "stxp %w[tmp], %[l], %[h], %[mem]\n\t"
        "cbnz %w[tmp], 0b"
        : [mem] "+m"(*ptr), [tmp] "=r"(tmp), [l] "=r"(l), [h] "=r"(h));

    return int128_make128(l, h);
}

static inline void atomic16_set(Int128 *ptr, Int128 val)
{
    uint64_t l = int128_getlo(val), h = int128_gethi(val);
    uint64_t t1, t2;

    /* Load into temporaries to acquire the exclusive access lock.  */
    asm("0: ldxp %[t1], %[t2], %[mem]\n\t"
        "stxp %w[t1], %[l], %[h], %[mem]\n\t"
        "cbnz %w[t1], 0b"
        : [mem] "+m"(*ptr), [t1] "=&r"(t1), [t2] "=&r"(t2)
        : [l] "r"(l), [h] "r"(h));
}

# define HAVE_ATOMIC128 1
#elif HAVE_CMPXCHG128
static inline Int128 atomic16_read(Int128 *ptr)
{
    /* Maybe replace 0 with 0, returning the old value.  */
#ifdef _MSC_VER
    Int128 x = int128_make64(0);
    Int128 y = int128_make64(0);
    return atomic16_cmpxchg(ptr, x, y);
#else
    return atomic16_cmpxchg(ptr, 0, 0);
#endif
}

static inline void atomic16_set(Int128 *ptr, Int128 val)
{
    Int128 old = *ptr, cmp;
    do {
        cmp = old;
        old = atomic16_cmpxchg(ptr, cmp, val);
#ifdef _MSC_VER
    } while (memcmp(&old, &cmp, sizeof(old)));
#else
    } while (old != cmp);
#endif
}

# define HAVE_ATOMIC128 1
#else
/* Fallback definitions that must be optimized away, or error.  */
Int128 QEMU_ERROR("unsupported atomic") atomic16_read(Int128 *ptr);
void QEMU_ERROR("unsupported atomic") atomic16_set(Int128 *ptr, Int128 val);
# define HAVE_ATOMIC128 0
#endif /* Some definition for HAVE_ATOMIC128 */

#endif /* QEMU_ATOMIC128_H */
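/* Illustrative caller-side pattern (not part of this header): a 16-byte
 * load guarded by HAVE_ATOMIC128, with the locked fallback left to the
 * caller.  The function name is hypothetical.
 */
#if 0   /* example */
static Int128 load_pair(Int128 *p, bool *used_atomic)
{
#if HAVE_ATOMIC128
    *used_atomic = true;
    return atomic16_read(p);
#else
    *used_atomic = false;   /* caller must hold a lock around this read */
    return *p;
#endif
}
#endif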
@@ -12,11 +12,7 @@
#ifndef BITMAP_H
#define BITMAP_H

#include "glib_compat.h"
#include <string.h>
#include <stdlib.h>

#include "qemu/osdep.h"
#include "qemu/bitops.h"

/*
@@ -26,8 +22,29 @@
 * Note that nbits should be always a compile time evaluable constant.
 * Otherwise many inlines will generate horrible code.
 *
 * bitmap_zero(dst, nbits)                      *dst = 0UL
 * bitmap_fill(dst, nbits)                      *dst = ~0UL
 * bitmap_copy(dst, src, nbits)                 *dst = *src
 * bitmap_and(dst, src1, src2, nbits)           *dst = *src1 & *src2
 * bitmap_or(dst, src1, src2, nbits)            *dst = *src1 | *src2
 * bitmap_xor(dst, src1, src2, nbits)           *dst = *src1 ^ *src2
 * bitmap_andnot(dst, src1, src2, nbits)        *dst = *src1 & ~(*src2)
 * bitmap_complement(dst, src, nbits)           *dst = ~(*src)
 * bitmap_equal(src1, src2, nbits)              Are *src1 and *src2 equal?
 * bitmap_intersects(src1, src2, nbits)         Do *src1 and *src2 overlap?
 * bitmap_empty(src, nbits)                     Are all bits zero in *src?
 * bitmap_full(src, nbits)                      Are all bits set in *src?
 * qemu_bitmap_set(dst, pos, nbits)             Set specified bit area
 * bitmap_set_atomic(dst, pos, nbits)           Set specified bit area with atomic ops
 * qemu_bitmap_clear(dst, pos, nbits)           Clear specified bit area
 * bitmap_test_and_clear_atomic(dst, pos, nbits)    Test and clear area
 * bitmap_find_next_zero_area(buf, len, pos, n, mask)   Find bit free area
 * bitmap_to_le(dst, src, nbits)                Convert bitmap to little endian
 * bitmap_from_le(dst, src, nbits)              Convert bitmap from little endian
 * bitmap_copy_with_src_offset(dst, src, offset, nbits)
 *                                              *dst = *src (with an offset into src)
 * bitmap_copy_with_dst_offset(dst, src, offset, nbits)
 *                                              *dst = *src (with an offset into dst)
 * (a usage sketch follows below)
 */

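/* Usage sketch for the API listed above (illustrative, not part of this
 * header):
 */
#if 0   /* example */
static void bitmap_demo(void)
{
    DECLARE_BITMAP(pages, 1024);        /* 1024 bits on the stack */

    bitmap_zero(pages, 1024);
    qemu_bitmap_set(pages, 10, 4);      /* set bits 10..13 */
    assert(test_bit(11, pages));
    assert(bitmap_count_one(pages, 1024) == 4);
    qemu_bitmap_clear(pages, 0, 1024);
    assert(bitmap_empty(pages, 1024));
}
#endif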
/*
@@ -35,19 +52,214 @@
 *
 * set_bit(bit, addr)                  *addr |= bit
 * clear_bit(bit, addr)                *addr &= ~bit
 * change_bit(bit, addr)               *addr ^= bit
 * test_bit(bit, addr)                 Is bit set in *addr?
 * test_and_set_bit(bit, addr)         Set bit and return old value
 * test_and_clear_bit(bit, addr)       Clear bit and return old value
 * test_and_change_bit(bit, addr)      Change bit and return old value
 * find_first_zero_bit(addr, nbits)    Position first zero bit in *addr
 * find_first_bit(addr, nbits)         Position first set bit in *addr
 * find_next_zero_bit(addr, nbits, bit)    Position next zero bit in *addr >= bit
 * find_next_bit(addr, nbits, bit)     Position next set bit in *addr >= bit
 */

#define BITMAP_LAST_WORD_MASK(nbits)                        \
    (                                                       \
        ((nbits) % BITS_PER_LONG) ?                         \
            (1UL << ((nbits) % BITS_PER_LONG)) - 1 : ~0UL   \
    )
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))

#define DECLARE_BITMAP(name,bits) \
    unsigned long name[BITS_TO_LONGS(bits)]

#define small_nbits(nbits) \
    ((nbits) <= BITS_PER_LONG)

int slow_bitmap_empty(const unsigned long *bitmap, long bits);
int slow_bitmap_full(const unsigned long *bitmap, long bits);
int slow_bitmap_equal(const unsigned long *bitmap1,
                      const unsigned long *bitmap2, long bits);
void slow_bitmap_complement(unsigned long *dst, const unsigned long *src,
                            long bits);
int slow_bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
                    const unsigned long *bitmap2, long bits);
void slow_bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
                    const unsigned long *bitmap2, long bits);
void slow_bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
                     const unsigned long *bitmap2, long bits);
int slow_bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
                       const unsigned long *bitmap2, long bits);
int slow_bitmap_intersects(const unsigned long *bitmap1,
                           const unsigned long *bitmap2, long bits);
long slow_bitmap_count_one(const unsigned long *bitmap, long nbits);

static inline unsigned long *bitmap_try_new(long nbits)
{
    long len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
    return g_try_malloc0(len);
}

static inline unsigned long *bitmap_new(long nbits)
{
    unsigned long *ptr = bitmap_try_new(nbits);
    if (ptr == NULL) {
        abort();
    }
    return ptr;
}

static inline void bitmap_zero(unsigned long *dst, long nbits)
{
    if (small_nbits(nbits)) {
        *dst = 0UL;
    } else {
        long len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
        memset(dst, 0, len);
    }
}

static inline void bitmap_fill(unsigned long *dst, long nbits)
{
    size_t nlongs = BITS_TO_LONGS(nbits);
    if (!small_nbits(nbits)) {
        long len = (nlongs - 1) * sizeof(unsigned long);
        memset(dst, 0xff, len);
    }
    dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);
}

static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
                               long nbits)
{
    if (small_nbits(nbits)) {
        *dst = *src;
    } else {
        long len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
        memcpy(dst, src, len);
    }
}

static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
                             const unsigned long *src2, long nbits)
{
    if (small_nbits(nbits)) {
        return (*dst = *src1 & *src2) != 0;
    }
    return slow_bitmap_and(dst, src1, src2, nbits);
}

static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
                             const unsigned long *src2, long nbits)
{
    if (small_nbits(nbits)) {
        *dst = *src1 | *src2;
    } else {
        slow_bitmap_or(dst, src1, src2, nbits);
    }
}

static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
                              const unsigned long *src2, long nbits)
{
    if (small_nbits(nbits)) {
        *dst = *src1 ^ *src2;
    } else {
        slow_bitmap_xor(dst, src1, src2, nbits);
    }
}

static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1,
                                const unsigned long *src2, long nbits)
{
    if (small_nbits(nbits)) {
        return (*dst = *src1 & ~(*src2)) != 0;
    }
    return slow_bitmap_andnot(dst, src1, src2, nbits);
}

static inline void bitmap_complement(unsigned long *dst,
                                     const unsigned long *src,
                                     long nbits)
{
    if (small_nbits(nbits)) {
        *dst = ~(*src) & BITMAP_LAST_WORD_MASK(nbits);
    } else {
        slow_bitmap_complement(dst, src, nbits);
    }
}

static inline int bitmap_equal(const unsigned long *src1,
                               const unsigned long *src2, long nbits)
{
    if (small_nbits(nbits)) {
        return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
    } else {
        return slow_bitmap_equal(src1, src2, nbits);
    }
}

static inline int bitmap_empty(const unsigned long *src, long nbits)
{
    if (small_nbits(nbits)) {
        return !(*src & BITMAP_LAST_WORD_MASK(nbits));
    } else {
        return slow_bitmap_empty(src, nbits);
    }
}

static inline int bitmap_full(const unsigned long *src, long nbits)
{
    if (small_nbits(nbits)) {
        return !(~(*src) & BITMAP_LAST_WORD_MASK(nbits));
    } else {
        return slow_bitmap_full(src, nbits);
    }
}

static inline int bitmap_intersects(const unsigned long *src1,
                                    const unsigned long *src2, long nbits)
{
    if (small_nbits(nbits)) {
        return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
    } else {
        return slow_bitmap_intersects(src1, src2, nbits);
    }
}

static inline long bitmap_count_one(const unsigned long *bitmap, long nbits)
{
    if (!nbits) {
        return 0;
    }

    if (small_nbits(nbits)) {
        return ctpopl(*bitmap & BITMAP_LAST_WORD_MASK(nbits));
    } else {
        return slow_bitmap_count_one(bitmap, nbits);
    }
}

static inline long bitmap_count_one_with_offset(const unsigned long *bitmap,
                                                long offset, long nbits)
{
    long aligned_offset = QEMU_ALIGN_DOWN(offset, BITS_PER_LONG);
    long redundant_bits = offset - aligned_offset;
    long bits_to_count = nbits + redundant_bits;
    const unsigned long *bitmap_start = bitmap +
                                        aligned_offset / BITS_PER_LONG;

    return bitmap_count_one(bitmap_start, bits_to_count) -
           bitmap_count_one(bitmap_start, redundant_bits);
}

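/* Concrete reading of bitmap_count_one_with_offset() above (illustrative):
 * counting 20 bits from bit 70 on a 64-bit host rounds the start down to
 * bit 64, counts 26 bits from there, and subtracts the 6 "redundant" bits
 * 64..69 that precede the requested offset.
 */
#if 0   /* example */
static long demo_count(const unsigned long *map)
{
    /* equivalent to counting set bits in [70, 90) */
    return bitmap_count_one_with_offset(map, 70, 20);
}
#endif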
void qemu_bitmap_set(unsigned long *map, long i, long len);
void bitmap_set_atomic(unsigned long *map, long i, long len);
void qemu_bitmap_clear(unsigned long *map, long start, long nr);
bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr);
void bitmap_copy_and_clear_atomic(unsigned long *dst, unsigned long *src,
                                  long nr);
unsigned long bitmap_find_next_zero_area(unsigned long *map,
                                         unsigned long size,
                                         unsigned long start,
                                         unsigned long nr,
                                         unsigned long align_mask);

static inline unsigned long *bitmap_zero_extend(unsigned long *old,
                                                long old_nbits, long new_nbits)
@@ -58,4 +270,14 @@ static inline unsigned long *bitmap_zero_extend(unsigned long *old,
    return new;
}

void bitmap_to_le(unsigned long *dst, const unsigned long *src,
                  long nbits);
void bitmap_from_le(unsigned long *dst, const unsigned long *src,
                    long nbits);

void bitmap_copy_with_src_offset(unsigned long *dst, const unsigned long *src,
                                 unsigned long offset, unsigned long nbits);
void bitmap_copy_with_dst_offset(unsigned long *dst, const unsigned long *src,
                                 unsigned long shift, unsigned long nbits);

#endif /* BITMAP_H */
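/* Usage sketch for bitmap_find_next_zero_area() (illustrative; assumes the
 * usual Linux-style semantics where align_mask is alignment minus one and a
 * return value past the end means no area was found):
 */
#if 0   /* example */
static long alloc_bits(unsigned long *map, unsigned long size)
{
    /* find 8 consecutive clear bits, 4-bit aligned */
    unsigned long pos = bitmap_find_next_zero_area(map, size, 0, 8, 3);
    if (pos >= size) {
        return -1;              /* no free area */
    }
    qemu_bitmap_set(map, pos, 8);
    return pos;
}
#endif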
@@ -12,18 +12,21 @@
#ifndef BITOPS_H
#define BITOPS_H

#include "unicorn/platform.h"
#include <assert.h>

#include "host-utils.h"
#include "atomic.h"

#define BITS_PER_BYTE           CHAR_BIT
#define BITS_PER_LONG           (sizeof (unsigned long) * BITS_PER_BYTE)

#define BIT(nr)                 (1UL << (nr))
#define BIT_ULL(nr)             (1ULL << (nr))
#define BIT_MASK(nr)            (1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)            ((nr) / BITS_PER_LONG)
#define BITS_TO_LONGS(nr)       DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))

#define MAKE_64BIT_MASK(shift, length) \
    (((~0ULL) >> (64 - (length))) << (shift))

/**
 * set_bit - Set a bit in memory
@@ -33,7 +36,7 @@
static inline void set_bit(long nr, unsigned long *addr)
{
    unsigned long mask = BIT_MASK(nr);
    unsigned long *p = addr + BIT_WORD(nr);

    *p |= mask;
}
@@ -46,7 +49,7 @@ static inline void set_bit(long nr, unsigned long *addr)
static inline void clear_bit(long nr, unsigned long *addr)
{
    unsigned long mask = BIT_MASK(nr);
    unsigned long *p = addr + BIT_WORD(nr);

    *p &= ~mask;
}
@@ -59,7 +62,7 @@ static inline void clear_bit(long nr, unsigned long *addr)
static inline void change_bit(long nr, unsigned long *addr)
{
    unsigned long mask = BIT_MASK(nr);
    unsigned long *p = addr + BIT_WORD(nr);

    *p ^= mask;
}
@@ -72,7 +75,7 @@ static inline void change_bit(long nr, unsigned long *addr)
static inline int test_and_set_bit(long nr, unsigned long *addr)
{
    unsigned long mask = BIT_MASK(nr);
    unsigned long *p = addr + BIT_WORD(nr);
    unsigned long old = *p;

    *p = old | mask;
@@ -87,7 +90,7 @@ static inline int test_and_set_bit(long nr, unsigned long *addr)
static inline int test_and_clear_bit(long nr, unsigned long *addr)
{
    unsigned long mask = BIT_MASK(nr);
    unsigned long *p = addr + BIT_WORD(nr);
    unsigned long old = *p;

    *p = old & ~mask;
@@ -102,7 +105,7 @@ static inline int test_and_clear_bit(long nr, unsigned long *addr)
static inline int test_and_change_bit(long nr, unsigned long *addr)
{
    unsigned long mask = BIT_MASK(nr);
    unsigned long *p = addr + BIT_WORD(nr);
    unsigned long old = *p;

    *p = old ^ mask;
@@ -119,6 +122,16 @@ static inline int test_bit(long nr, const unsigned long *addr)
    return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
}

/**
 * find_last_bit - find the last set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the last set bit, or size.
 */
unsigned long find_last_bit(const unsigned long *addr,
                            unsigned long size);

/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
@@ -126,7 +139,8 @@ static inline int test_bit(long nr, const unsigned long *addr)
 * @size: The bitmap size in bits
 */
unsigned long find_next_bit(const unsigned long *addr,
                            unsigned long size,
                            unsigned long offset);

/**
 * find_next_zero_bit - find the next cleared bit in a memory region
@@ -175,16 +189,6 @@ static inline unsigned long find_first_zero_bit(const unsigned long *addr,
    return find_next_zero_bit(addr, size, 0);
}

static inline unsigned long hweight_long(unsigned long w)
{
    unsigned long count;

    for (count = 0; w; w >>= 1) {
        count += w & 1;
    }
    return count;
}

/**
 * rol8 - rotate an 8-bit value left
 * @word: value to rotate
@@ -192,7 +196,7 @@ static inline unsigned long hweight_long(unsigned long w)
 */
static inline uint8_t rol8(uint8_t word, unsigned int shift)
{
    return (word << shift) | (word >> (8 - shift));
    return (word << shift) | (word >> ((8 - shift) & 7));
}

/**
@@ -202,7 +206,7 @@ static inline uint8_t rol8(uint8_t word, unsigned int shift)
 */
static inline uint8_t ror8(uint8_t word, unsigned int shift)
{
    return (word >> shift) | (word << (8 - shift));
    return (word >> shift) | (word << ((8 - shift) & 7));
}

/**
@@ -212,7 +216,7 @@ static inline uint8_t ror8(uint8_t word, unsigned int shift)
 */
static inline uint16_t rol16(uint16_t word, unsigned int shift)
{
    return (word << shift) | (word >> (16 - shift));
    return (word << shift) | (word >> ((16 - shift) & 15));
}

/**
@@ -222,7 +226,7 @@ static inline uint16_t rol16(uint16_t word, unsigned int shift)
 */
static inline uint16_t ror16(uint16_t word, unsigned int shift)
{
    return (word >> shift) | (word << (16 - shift));
    return (word >> shift) | (word << ((16 - shift) & 15));
}

/**
@@ -232,7 +236,7 @@ static inline uint16_t ror16(uint16_t word, unsigned int shift)
 */
static inline uint32_t rol32(uint32_t word, unsigned int shift)
{
    return (word << shift) | (word >> (32 - shift));
    return (word << shift) | (word >> ((32 - shift) & 31));
}

/**
@@ -242,7 +246,7 @@ static inline uint32_t rol32(uint32_t word, unsigned int shift)
 */
static inline uint32_t ror32(uint32_t word, unsigned int shift)
{
    return (word >> shift) | (word << ((32 - shift) & 0x1f));
    return (word >> shift) | (word << ((32 - shift) & 31));
}

/**
@@ -252,7 +256,7 @@ static inline uint32_t ror32(uint32_t word, unsigned int shift)
 */
static inline uint64_t rol64(uint64_t word, unsigned int shift)
{
    return (word << shift) | (word >> (64 - shift));
    return (word << shift) | (word >> ((64 - shift) & 63));
}

/**
@@ -262,7 +266,7 @@ static inline uint64_t rol64(uint64_t word, unsigned int shift)
 */
static inline uint64_t ror64(uint64_t word, unsigned int shift)
{
    return (word >> shift) | (word << (64 - shift));
    return (word >> shift) | (word << ((64 - shift) & 63));
}

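/* The "& 7" / "& 15" / "& 31" / "& 63" masks added above matter when
 * shift == 0: without them the complementary shift would be by the full
 * word width, which is undefined behaviour in C.  Illustrative check:
 */
#if 0   /* example */
static void rot_demo(void)
{
    assert(rol32(0x80000001u, 1) == 0x00000003u);
    assert(ror32(0x00000003u, 1) == 0x80000001u);
    assert(rol32(0x12345678u, 0) == 0x12345678u);  /* UB before this change */
}
#endif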
/**
@@ -284,6 +288,44 @@ static inline uint32_t extract32(uint32_t value, int start, int length)
    return (value >> start) & (~0U >> (32 - length));
}

/**
 * extract8:
 * @value: the value to extract the bit field from
 * @start: the lowest bit in the bit field (numbered from 0)
 * @length: the length of the bit field
 *
 * Extract from the 8 bit input @value the bit field specified by the
 * @start and @length parameters, and return it. The bit field must
 * lie entirely within the 8 bit word. It is valid to request that
 * all 8 bits are returned (ie @length 8 and @start 0).
 *
 * Returns: the value of the bit field extracted from the input value.
 */
static inline uint8_t extract8(uint8_t value, int start, int length)
{
    assert(start >= 0 && length > 0 && length <= 8 - start);
    return extract32(value, start, length);
}

/**
 * extract16:
 * @value: the value to extract the bit field from
 * @start: the lowest bit in the bit field (numbered from 0)
 * @length: the length of the bit field
 *
 * Extract from the 16 bit input @value the bit field specified by the
 * @start and @length parameters, and return it. The bit field must
 * lie entirely within the 16 bit word. It is valid to request that
 * all 16 bits are returned (ie @length 16 and @start 0).
 *
 * Returns: the value of the bit field extracted from the input value.
 */
static inline uint16_t extract16(uint16_t value, int start, int length)
{
    assert(start >= 0 && length > 0 && length <= 16 - start);
    return extract32(value, start, length);
}

/**
 * extract64:
 * @value: the value to extract the bit field from
@@ -344,7 +386,7 @@ static inline int32_t sextract32(uint32_t value, int start, int length)
 * Returns: the sign extended value of the bit field extracted from the
 * input value.
 */
static inline uint64_t sextract64(uint64_t value, int start, int length)
static inline int64_t sextract64(uint64_t value, int start, int length)
{
    assert(start >= 0 && length > 0 && length <= 64 - start);
    /* Note that this implementation relies on right shift of signed
@@ -405,4 +447,124 @@ static inline uint64_t deposit64(uint64_t value, int start, int length,
    return (value & ~mask) | ((fieldval << start) & mask);
}

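/* Usage sketch for the extract/deposit family (illustrative): reading a
 * 4-bit field out of bits [8..11] and writing it back incremented.
 */
#if 0   /* example */
static uint32_t bump_field(uint32_t reg)
{
    uint32_t f = extract32(reg, 8, 4);      /* read bits 8..11 */
    return deposit32(reg, 8, 4, f + 1);     /* write them back, plus one */
}
#endif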
/**
 * half_shuffle32:
 * @x: 32-bit value (of which only the bottom 16 bits are of interest)
 *
 * Given an input value::
 *
 *   xxxx xxxx xxxx xxxx ABCD EFGH IJKL MNOP
 *
 * return the value where the bottom 16 bits are spread out into
 * the odd bits in the word, and the even bits are zeroed::
 *
 *   0A0B 0C0D 0E0F 0G0H 0I0J 0K0L 0M0N 0O0P
 *
 * Any bits set in the top half of the input are ignored.
 *
 * Returns: the shuffled bits.
 */
static inline uint32_t half_shuffle32(uint32_t x)
{
    /* This algorithm is from _Hacker's Delight_ section 7-2 "Shuffling Bits".
     * It ignores any bits set in the top half of the input.
     */
    x = ((x & 0xFF00) << 8) | (x & 0x00FF);
    x = ((x << 4) | x) & 0x0F0F0F0F;
    x = ((x << 2) | x) & 0x33333333;
    x = ((x << 1) | x) & 0x55555555;
    return x;
}

/**
 * half_shuffle64:
 * @x: 64-bit value (of which only the bottom 32 bits are of interest)
 *
 * Given an input value::
 *
 *   xxxx xxxx xxxx .... xxxx xxxx ABCD EFGH IJKL MNOP QRST UVWX YZab cdef
 *
 * return the value where the bottom 32 bits are spread out into
 * the odd bits in the word, and the even bits are zeroed::
 *
 *   0A0B 0C0D 0E0F 0G0H 0I0J 0K0L 0M0N .... 0U0V 0W0X 0Y0Z 0a0b 0c0d 0e0f
 *
 * Any bits set in the top half of the input are ignored.
 *
 * Returns: the shuffled bits.
 */
static inline uint64_t half_shuffle64(uint64_t x)
{
    /* This algorithm is from _Hacker's Delight_ section 7-2 "Shuffling Bits".
     * It ignores any bits set in the top half of the input.
     */
    x = ((x & 0xFFFF0000ULL) << 16) | (x & 0xFFFF);
    x = ((x << 8) | x) & 0x00FF00FF00FF00FFULL;
    x = ((x << 4) | x) & 0x0F0F0F0F0F0F0F0FULL;
    x = ((x << 2) | x) & 0x3333333333333333ULL;
    x = ((x << 1) | x) & 0x5555555555555555ULL;
    return x;
}

/**
 * half_unshuffle32:
 * @x: 32-bit value (of which only the odd bits are of interest)
 *
 * Given an input value::
 *
 *   xAxB xCxD xExF xGxH xIxJ xKxL xMxN xOxP
 *
 * return the value where all the odd bits are compressed down
 * into the low half of the word, and the high half is zeroed::
 *
 *   0000 0000 0000 0000 ABCD EFGH IJKL MNOP
 *
 * Any even bits set in the input are ignored.
 *
 * Returns: the unshuffled bits.
 */
static inline uint32_t half_unshuffle32(uint32_t x)
{
    /* This algorithm is from _Hacker's Delight_ section 7-2 "Shuffling Bits",
     * where it is called an inverse half shuffle.
     */
    x &= 0x55555555;
    x = ((x >> 1) | x) & 0x33333333;
    x = ((x >> 2) | x) & 0x0F0F0F0F;
    x = ((x >> 4) | x) & 0x00FF00FF;
    x = ((x >> 8) | x) & 0x0000FFFF;
    return x;
}

/**
 * half_unshuffle64:
 * @x: 64-bit value (of which only the odd bits are of interest)
 *
 * Given an input value::
 *
 *   xAxB xCxD xExF xGxH xIxJ xKxL xMxN .... xUxV xWxX xYxZ xaxb xcxd xexf
 *
 * return the value where all the odd bits are compressed down
 * into the low half of the word, and the high half is zeroed::
 *
 *   0000 0000 0000 .... 0000 0000 ABCD EFGH IJKL MNOP QRST UVWX YZab cdef
 *
 * Any even bits set in the input are ignored.
 *
 * Returns: the unshuffled bits.
 */
static inline uint64_t half_unshuffle64(uint64_t x)
{
    /* This algorithm is from _Hacker's Delight_ section 7-2 "Shuffling Bits",
     * where it is called an inverse half shuffle.
     */
    x &= 0x5555555555555555ULL;
    x = ((x >> 1) | x) & 0x3333333333333333ULL;
    x = ((x >> 2) | x) & 0x0F0F0F0F0F0F0F0FULL;
    x = ((x >> 4) | x) & 0x00FF00FF00FF00FFULL;
    x = ((x >> 8) | x) & 0x0000FFFF0000FFFFULL;
    x = ((x >> 16) | x) & 0x00000000FFFFFFFFULL;
    return x;
}

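/* Usage sketch (illustrative): OR-ing half_shuffle32() of one 16-bit value
 * with the other shifted left by one yields the classic bit interleave
 * (Morton / Z-order code); half_unshuffle32() inverts it.
 */
#if 0   /* example */
static uint32_t morton_encode(uint16_t x, uint16_t y)
{
    return half_shuffle32(x) | (half_shuffle32(y) << 1);
}

static uint16_t morton_decode_x(uint32_t code)
{
    return (uint16_t)half_unshuffle32(code);       /* x lives in even bits */
}
#endif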
#endif
@@ -1,15 +1,11 @@
#ifndef BSWAP_H
#define BSWAP_H

#include "config-host.h"
#include "unicorn/platform.h"
#include <limits.h>
#include <string.h>
#include "fpu/softfloat.h"
#include "osdep.h"
#include "fpu/softfloat-types.h"

#ifdef CONFIG_MACHINE_BSWAP_H
# include <sys/endian.h>
# include <sys/types.h>
# include <machine/bswap.h>
#elif defined(__FreeBSD__)
# include <sys/endian.h>
@@ -85,6 +81,64 @@ static inline void bswap64s(uint64_t *s)
#define be_bswaps(p, size) do { *p = glue(bswap, size)(*p); } while(0)
#endif

/**
 * Endianness conversion functions between host cpu and specified endianness.
 * (We list the complete set of prototypes produced by the macros below
 * to assist people who search the headers to find their definitions.)
 *
 * uint16_t le16_to_cpu(uint16_t v);
 * uint32_t le32_to_cpu(uint32_t v);
 * uint64_t le64_to_cpu(uint64_t v);
 * uint16_t be16_to_cpu(uint16_t v);
 * uint32_t be32_to_cpu(uint32_t v);
 * uint64_t be64_to_cpu(uint64_t v);
 *
 * Convert the value @v from the specified format to the native
 * endianness of the host CPU by byteswapping if necessary, and
 * return the converted value.
 *
 * uint16_t cpu_to_le16(uint16_t v);
 * uint32_t cpu_to_le32(uint32_t v);
 * uint64_t cpu_to_le64(uint64_t v);
 * uint16_t cpu_to_be16(uint16_t v);
 * uint32_t cpu_to_be32(uint32_t v);
 * uint64_t cpu_to_be64(uint64_t v);
 *
 * Convert the value @v from the native endianness of the host CPU to
 * the specified format by byteswapping if necessary, and return
 * the converted value.
 *
 * void le16_to_cpus(uint16_t *v);
 * void le32_to_cpus(uint32_t *v);
 * void le64_to_cpus(uint64_t *v);
 * void be16_to_cpus(uint16_t *v);
 * void be32_to_cpus(uint32_t *v);
 * void be64_to_cpus(uint64_t *v);
 *
 * Do an in-place conversion of the value pointed to by @v from the
 * specified format to the native endianness of the host CPU.
 *
 * void cpu_to_le16s(uint16_t *v);
 * void cpu_to_le32s(uint32_t *v);
 * void cpu_to_le64s(uint64_t *v);
 * void cpu_to_be16s(uint16_t *v);
 * void cpu_to_be32s(uint32_t *v);
 * void cpu_to_be64s(uint64_t *v);
 *
 * Do an in-place conversion of the value pointed to by @v from the
 * native endianness of the host CPU to the specified format.
 *
 * Both X_to_cpu() and cpu_to_X() perform the same operation; you
 * should use whichever one is better documenting of the function your
 * code is performing.
 *
 * Do not use these functions for conversion of values which are in guest
 * memory, since the data may not be sufficiently aligned for the host CPU's
 * load and store instructions. Instead you should use the ld*_p() and
 * st*_p() functions, which perform loads and stores of data of any
 * required size and endianness and handle possible misalignment.
 */

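/* Usage sketch for the conversion functions documented above (illustrative,
 * not part of this header):
 */
#if 0   /* example */
static void endian_demo(void)
{
    uint32_t wire = cpu_to_be32(0x11223344);   /* network byte order */
    assert(be32_to_cpu(wire) == 0x11223344);

    uint16_t v = 0x1234;
    cpu_to_le16s(&v);                          /* in-place conversion */
    le16_to_cpus(&v);
    assert(v == 0x1234);
}
#endif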
#define CPU_CONVERT(endian, size, type)\
|
||||
static inline type endian ## size ## _to_cpu(type v)\
|
||||
{\
|
||||
@ -104,16 +158,6 @@ static inline void endian ## size ## _to_cpus(type *p)\
|
||||
static inline void cpu_to_ ## endian ## size ## s(type *p)\
|
||||
{\
|
||||
glue(endian, _bswaps)(p, size);\
|
||||
}\
|
||||
\
|
||||
static inline type endian ## size ## _to_cpup(const type *p)\
|
||||
{\
|
||||
return glue(glue(endian, size), _to_cpu)(*p);\
|
||||
}\
|
||||
\
|
||||
static inline void cpu_to_ ## endian ## size ## w(type *p, type v)\
|
||||
{\
|
||||
*p = glue(glue(cpu_to_, endian), size)(v);\
|
||||
}
|
||||
|
||||
CPU_CONVERT(be, 16, uint16_t)
|
||||
@ -130,6 +174,25 @@ static inline uint32_t qemu_bswap_len(uint32_t value, int len)
|
||||
return bswap32(value) >> (32 - 8 * len);
|
||||
}
|
||||
|
||||
/*
|
||||
* Same as cpu_to_le{16,32}, except that gcc will figure the result is
|
||||
* a compile-time constant if you pass in a constant. So this can be
|
||||
* used to initialize static variables.
|
||||
*/
|
||||
#if defined(HOST_WORDS_BIGENDIAN)
|
||||
# define const_le32(_x) \
|
||||
((((_x) & 0x000000ffU) << 24) | \
|
||||
(((_x) & 0x0000ff00U) << 8) | \
|
||||
(((_x) & 0x00ff0000U) >> 8) | \
|
||||
(((_x) & 0xff000000U) >> 24))
|
||||
# define const_le16(_x) \
|
||||
((((_x) & 0x00ff) << 8) | \
|
||||
(((_x) & 0xff00) >> 8))
|
||||
#else
|
||||
# define const_le32(_x) (_x)
|
||||
# define const_le16(_x) (_x)
|
||||
#endif
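/*
 * Illustrative sketch, not part of the original header: because
 * const_le32() folds to a compile-time constant, it can appear in a
 * static initializer, where the inline cpu_to_le32() could not.
 * The name and value below are made-up examples.
 */
static const uint32_t demo_magic_le = const_le32(0xfeedface);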

/* Unions for reinterpreting between floats and integers. */

typedef union {
@@ -193,9 +256,9 @@ typedef union {
/*
 * the generic syntax is:
 *
 * load: ld{type}{sign}{size}{endian}_p(ptr)
 * load: ld{type}{sign}{size}_{endian}_p(ptr)
 *
 * store: st{type}{size}{endian}_p(ptr, val)
 * store: st{type}{size}_{endian}_p(ptr, val)
 *
 * Note there are small differences with the softmmu access API!
 *
@@ -204,7 +267,7 @@ typedef union {
 * f : float access
 *
 * sign is:
 * (empty): for floats or 32 bit size
 * (empty): for 32 or 64 bit sizes (including floats and doubles)
 * u : unsigned
 * s : signed
 *
@@ -218,7 +281,25 @@ typedef union {
 * he : host endian
 * be : big endian
 * le : little endian
 * te : target endian
 * (except for byte accesses, which have no endian infix).
 *
 * The target endian accessors are obviously only available to source
 * files which are built per-target; they are defined in cpu-all.h.
 *
 * In all cases these functions take a host pointer.
 * For accessors that take a guest address rather than a
 * host address, see the cpu_{ld,st}_* accessors defined in
 * cpu_ldst.h.
 *
 * For cases where the size to be used is not fixed at compile time,
 * there are
 * stn_{endian}_p(ptr, sz, val)
 * which stores @val to @ptr as an @endian-order number @sz bytes in size
 * and
 * ldn_{endian}_p(ptr, sz)
 * which loads @sz bytes from @ptr as an unsigned @endian-order number
 * and returns it in a uint64_t.
 */
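/*
 * Decoding the naming scheme above (illustrative examples, not part of
 * the original header):
 *   ldsw_le_p(p)    - load a signed 16-bit little-endian value
 *   stq_be_p(p, v)  - store an unsigned 64-bit big-endian value
 *   ldn_he_p(p, sz) - load an sz-byte host-endian value, sz chosen at run time
 */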

static inline int ldub_p(const void *ptr)
@@ -236,51 +317,85 @@ static inline void stb_p(void *ptr, uint8_t v)
    *(uint8_t *)ptr = v;
}

/* Any compiler worth its salt will turn these memcpy into native unaligned
   operations. Thus we don't need to play games with packed attributes, or
   inline byte-by-byte stores. */
/*
 * Any compiler worth its salt will turn these memcpy into native unaligned
 * operations. Thus we don't need to play games with packed attributes, or
 * inline byte-by-byte stores.
 * Some compilation environments (eg some fortify-source implementations)
 * may intercept memcpy() in a way that defeats the compiler optimization,
 * though, so we use __builtin_memcpy() to give ourselves the best chance
 * of good performance.
 */

static inline int lduw_he_p(const void *ptr)
{
    uint16_t r;
#ifdef _MSC_VER
    memcpy(&r, ptr, sizeof(r));
#else
    __builtin_memcpy(&r, ptr, sizeof(r));
#endif
    return r;
}

static inline int ldsw_he_p(const void *ptr)
{
    int16_t r;
#ifdef _MSC_VER
    memcpy(&r, ptr, sizeof(r));
#else
    __builtin_memcpy(&r, ptr, sizeof(r));
#endif
    return r;
}

static inline void stw_he_p(void *ptr, uint16_t v)
{
#ifdef _MSC_VER
    memcpy(ptr, &v, sizeof(v));
#else
    __builtin_memcpy(ptr, &v, sizeof(v));
#endif
}

static inline int ldl_he_p(const void *ptr)
{
    int32_t r;
#ifdef _MSC_VER
    memcpy(&r, ptr, sizeof(r));
#else
    __builtin_memcpy(&r, ptr, sizeof(r));
#endif
    return r;
}

static inline void stl_he_p(void *ptr, uint32_t v)
{
#ifdef _MSC_VER
    memcpy(ptr, &v, sizeof(v));
#else
    __builtin_memcpy(ptr, &v, sizeof(v));
#endif
}

static inline uint64_t ldq_he_p(const void *ptr)
{
    uint64_t r;
#ifdef _MSC_VER
    memcpy(&r, ptr, sizeof(r));
#else
    __builtin_memcpy(&r, ptr, sizeof(r));
#endif
    return r;
}

static inline void stq_he_p(void *ptr, uint64_t v)
{
#ifdef _MSC_VER
    memcpy(ptr, &v, sizeof(v));
#else
    __builtin_memcpy(ptr, &v, sizeof(v));
#endif
}

static inline int lduw_le_p(const void *ptr)
@@ -415,17 +530,58 @@ static inline void stfq_be_p(void *ptr, float64 v)

static inline unsigned long leul_to_cpu(unsigned long v)
{
    /* In order to break an include loop between here and
       qemu-common.h, don't rely on HOST_LONG_BITS. */
#if ULONG_MAX == UINT32_MAX
#if HOST_LONG_BITS == 32
    return le_bswap(v, 32);
#elif ULONG_MAX == UINT64_MAX
#elif HOST_LONG_BITS == 64
    return le_bswap(v, 64);
#else
# error Unknown sizeof long
#endif
}

/* Store @v to @ptr as a @sz byte value in the given endianness */
#define DO_STN_LDN_P(END) \
static inline void stn_## END ## _p(void *ptr, int sz, uint64_t v)  \
{                                                                   \
    switch (sz) {                                                   \
    case 1:                                                         \
        stb_p(ptr, v);                                              \
        break;                                                      \
    case 2:                                                         \
        stw_ ## END ## _p(ptr, v);                                  \
        break;                                                      \
    case 4:                                                         \
        stl_ ## END ## _p(ptr, v);                                  \
        break;                                                      \
    case 8:                                                         \
        stq_ ## END ## _p(ptr, v);                                  \
        break;                                                      \
    default:                                                        \
        break; /* g_assert_not_reached(); */                        \
    }                                                               \
}                                                                   \
static inline uint64_t ldn_## END ## _p(const void *ptr, int sz)    \
{                                                                   \
    switch (sz) {                                                   \
    case 1:                                                         \
        return ldub_p(ptr);                                         \
    case 2:                                                         \
        return lduw_ ## END ## _p(ptr);                             \
    case 4:                                                         \
        return (uint32_t)ldl_ ## END ## _p(ptr);                    \
    case 8:                                                         \
        return ldq_ ## END ## _p(ptr);                              \
    default:                                                        \
        return 0; /* g_assert_not_reached(); */                     \
    }                                                               \
}

DO_STN_LDN_P(he)
DO_STN_LDN_P(le)
DO_STN_LDN_P(be)

#undef DO_STN_LDN_P
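/*
 * Illustrative round trip through the generated helpers above (a sketch,
 * not part of the original header): a 4-byte little-endian store followed
 * by the matching run-time-sized load.
 */
static inline int demo_stn_ldn_roundtrip(void)
{
    uint8_t buf[8];
    stn_le_p(buf, 4, 0x11223344);        /* buf[0..3] = 44 33 22 11 */
    return ldn_le_p(buf, 4) == 0x11223344;
}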

#undef le_bswap
#undef be_bswap
#undef le_bswaps

@@ -1,11 +1,21 @@
/* public domain */
/* compiler.h: macros to abstract away compiler specifics
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef COMPILER_H
#define COMPILER_H

#include "config-host.h"
#include "unicorn/platform.h"

#ifndef glue
#define xglue(x, y) x ## y
#define glue(x, y) xglue(x, y)
#define stringify(s) tostring(s)
#define tostring(s) #s
#endif

#ifdef _MSC_VER
// MSVC support

@@ -58,22 +68,36 @@ static union MSVC_FLOAT_HACK __NAN = {{0x00, 0x00, 0xC0, 0x7F}};
#define QEMU_WARN_UNUSED_RESULT
#define QEMU_ARTIFICIAL
#define QEMU_PACK( __Declaration__ ) __pragma( pack(push, 1) ) __Declaration__ __pragma( pack(pop) )
#define QEMU_NOINLINE __declspec(noinline)

#define QEMU_ALIGN(A, B) __declspec(align(A)) B
#define QEMU_ALIGNED(X)

#define cat(x,y) x ## y
#define cat2(x,y) cat(x,y)
#define QEMU_BUILD_BUG_ON(x) \
    typedef char cat2(qemu_build_bug_on__,__LINE__)[(x)?-1:1] QEMU_UNUSED_VAR;
#define QEMU_BUILD_BUG_ON(x)
#define QEMU_BUILD_BUG_ON_ZERO(x)

#define GCC_FMT_ATTR(n, m)

#else
#define likely(x)   (x)
#define unlikely(x)   (x)

#define container_of(ptr, type, member) ((type *)((char *)(ptr) - offsetof(type, member)))

#define QEMU_FLATTEN
#define QEMU_ALWAYS_INLINE __declspec(inline)

#else // Unix compilers

#ifndef NAN
#define NAN (0.0 / 0.0)
#endif

#if defined __clang_analyzer__ || defined __COVERITY__
#define QEMU_STATIC_ANALYSIS 1
#endif

/*----------------------------------------------------------------------------
| The macro QEMU_GNUC_PREREQ tests for minimum version of the GNU C compiler.
| The code is a copy of SOFTFLOAT_GNUC_PREREQ, see softfloat-macros.h.
@@ -90,30 +114,83 @@ static union MSVC_FLOAT_HACK __NAN = {{0x00, 0x00, 0xC0, 0x7F}};
#define QEMU_UNUSED_VAR __attribute__((unused))
#define QEMU_UNUSED_FUNC __attribute__((unused))

#if QEMU_GNUC_PREREQ(3, 4)
#define QEMU_WARN_UNUSED_RESULT __attribute__((warn_unused_result))
#else
#define QEMU_WARN_UNUSED_RESULT
#endif

#if QEMU_GNUC_PREREQ(4, 3)
#define QEMU_ARTIFICIAL __attribute__((always_inline, artificial))
#else
#define QEMU_ARTIFICIAL
#endif
#define QEMU_SENTINEL __attribute__((sentinel))

#if defined(_WIN32)
#if defined(_WIN32) && (defined(__x86_64__) || defined(__i386__))
# define QEMU_PACKED __attribute__((gcc_struct, packed))
# define QEMU_PACK( __Declaration__ ) __Declaration__ __attribute__((gcc_struct, packed))
#else
# define QEMU_PACKED __attribute__((packed))
# define QEMU_PACK( __Declaration__ ) __Declaration__ __attribute__((packed))
#endif

#define QEMU_ALIGN(A, B) B __attribute__((aligned(A)))

#define cat(x,y) x ## y
#define cat2(x,y) cat(x,y)
#define QEMU_BUILD_BUG_ON(x) \
    typedef char cat2(qemu_build_bug_on__,__LINE__)[(x)?-1:1] __attribute__((unused));
#define QEMU_ALIGNED(X) __attribute__((aligned(X)))

#define QEMU_NOINLINE __attribute__((noinline))

#ifndef likely
#if __GNUC__ < 3
#define __builtin_expect(x, n) (x)
#endif

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x)   __builtin_expect(!!(x), 0)
#endif

#ifndef container_of
#define container_of(ptr, type, member) ({                      \
    const typeof(((type *) 0)->member) *__mptr = (ptr);         \
    (type *) ((char *) __mptr - offsetof(type, member));})
#endif

#define sizeof_field(type, field) sizeof(((type *)0)->field)

/*
 * Calculate the number of bytes up to and including the given 'field' of
 * 'container'.
 */
#define endof(container, field) \
    (offsetof(container, field) + sizeof_field(container, field))

/* Convert from a base type to a parent type, with compile time checking. */
#ifdef __GNUC__
#define DO_UPCAST(type, field, dev) ( __extension__ ( { \
    char __attribute__((unused)) offset_must_be_zero[ \
        -offsetof(type, field)]; \
    container_of(dev, type, field);}))
#else
#define DO_UPCAST(type, field, dev) container_of(dev, type, field)
#endif

#define typeof_field(type, field) typeof(((type *)0)->field)
#define type_check(t1,t2) ((t1*)0 - (t2*)0)

#define QEMU_BUILD_BUG_ON_STRUCT(x) \
    struct { \
        int:(x) ? -1 : 1; \
    }

/* QEMU_BUILD_BUG_MSG() emits the message given if _Static_assert is
 * supported; otherwise, it will be omitted from the compiler error
 * message (but as it remains present in the source code, it can still
 * be useful when debugging). */
#if defined(CONFIG_STATIC_ASSERT)
#define QEMU_BUILD_BUG_MSG(x, msg) _Static_assert(!(x), msg)
#elif defined(__COUNTER__)
#define QEMU_BUILD_BUG_MSG(x, msg) typedef QEMU_BUILD_BUG_ON_STRUCT(x) \
    glue(qemu_build_bug_on__, __COUNTER__) __attribute__((unused))
#else
#define QEMU_BUILD_BUG_MSG(x, msg)
#endif

#define QEMU_BUILD_BUG_ON(x) QEMU_BUILD_BUG_MSG(x, "not expecting: " #x)

#define QEMU_BUILD_BUG_ON_ZERO(x) (sizeof(QEMU_BUILD_BUG_ON_STRUCT(x)) - \
                                   sizeof(QEMU_BUILD_BUG_ON_STRUCT(x)))
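/*
 * Illustrative use of the build-time assertions above (a sketch, not part
 * of the original header): reject any build in which a two-word struct is
 * not exactly 8 bytes.  The struct name is a made-up example.
 */
typedef struct { uint32_t lo, hi; } demo_pair32;
QEMU_BUILD_BUG_ON(sizeof(demo_pair32) != 8);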

#if defined __GNUC__
# if !QEMU_GNUC_PREREQ(4, 4)
@@ -132,6 +209,128 @@ static union MSVC_FLOAT_HACK __NAN = {{0x00, 0x00, 0xC0, 0x7F}};
#define GCC_FMT_ATTR(n, m)
#endif

#endif // _MSC_VER
#ifndef __has_warning
#define __has_warning(x) 0 /* compatibility with non-clang compilers */
#endif

#ifndef __has_feature
#define __has_feature(x) 0 /* compatibility with non-clang compilers */
#endif

#ifndef __has_builtin
#define __has_builtin(x) 0 /* compatibility with non-clang compilers */
#endif

#if __has_builtin(__builtin_assume_aligned) || !defined(__clang__)
#define HAS_ASSUME_ALIGNED
#endif

#ifndef __has_attribute
#define __has_attribute(x) 0 /* compatibility with older GCC */
#endif

/*
 * GCC doesn't provide __has_attribute() until GCC 5, but we know all the GCC
 * versions we support have the "flatten" attribute. Clang may not have the
 * "flatten" attribute but always has __has_attribute() to check for it.
 */
#if __has_attribute(flatten) || !defined(__clang__)
# define QEMU_FLATTEN __attribute__((flatten))
#else
# define QEMU_FLATTEN
#endif

/*
 * If __attribute__((error)) is present, use it to produce an error at
 * compile time. Otherwise, one must wait for the linker to diagnose
 * the missing symbol.
 */
#if __has_attribute(error)
# define QEMU_ERROR(X) __attribute__((error(X)))
#else
# define QEMU_ERROR(X)
#endif

/*
 * The nonstring variable attribute specifies that an object or member
 * declaration with type array of char or pointer to char is intended
 * to store character arrays that do not necessarily contain a terminating
 * NUL character. This is useful in detecting uses of such arrays or pointers
 * with functions that expect NUL-terminated strings, and to avoid warnings
 * when such an array or pointer is used as an argument to a bounded string
 * manipulation function such as strncpy.
 */
#if __has_attribute(nonstring)
# define QEMU_NONSTRING __attribute__((nonstring))
#else
# define QEMU_NONSTRING
#endif

/*
 * Forced inlining may be desired to encourage constant propagation
 * of function parameters. However, it can also make debugging harder,
 * so disable it for a non-optimizing build.
 */
#if defined(__OPTIMIZE__)
#define QEMU_ALWAYS_INLINE __attribute__((always_inline))
#else
#define QEMU_ALWAYS_INLINE
#endif

/* Implement C11 _Generic via GCC builtins. Example:
 *
 *    QEMU_GENERIC(x, (float, sinf), (long double, sinl), sin) (x)
 *
 * The first argument is the discriminator. The last is the default value.
 * The middle ones are tuples in "(type, expansion)" format.
 */

/* First, find out the number of generic cases. */
#define QEMU_GENERIC(x, ...) \
    QEMU_GENERIC_(typeof(x), __VA_ARGS__, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)

/* There will be extra arguments, but they are not used. */
#define QEMU_GENERIC_(x, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, count, ...) \
    QEMU_GENERIC##count(x, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9)

/* Two more helper macros, this time to extract items from a parenthesized
 * list.
 */
#define QEMU_FIRST_(a, b) a
#define QEMU_SECOND_(a, b) b

/* ... and a final one for the common part of the "recursion". */
#define QEMU_GENERIC_IF(x, type_then, else_)                                   \
    __builtin_choose_expr(__builtin_types_compatible_p(x,                      \
                                                       QEMU_FIRST_ type_then), \
                          QEMU_SECOND_ type_then, else_)

/* CPP poor man's "recursion". */
#define QEMU_GENERIC1(x, a0, ...) (a0)
#define QEMU_GENERIC2(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC1(x, __VA_ARGS__))
#define QEMU_GENERIC3(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC2(x, __VA_ARGS__))
#define QEMU_GENERIC4(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC3(x, __VA_ARGS__))
#define QEMU_GENERIC5(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC4(x, __VA_ARGS__))
#define QEMU_GENERIC6(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC5(x, __VA_ARGS__))
#define QEMU_GENERIC7(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC6(x, __VA_ARGS__))
#define QEMU_GENERIC8(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC7(x, __VA_ARGS__))
#define QEMU_GENERIC9(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC8(x, __VA_ARGS__))
#define QEMU_GENERIC10(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC9(x, __VA_ARGS__))
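/*
 * Illustrative expansion of QEMU_GENERIC (a sketch, not part of the
 * original header): dispatch on the static type of @x.  The helper
 * names demo_put_* are hypothetical.
 */
#define DEMO_PUT(x)                             \
    QEMU_GENERIC(x,                             \
                 (float, demo_put_float),       \
                 (double, demo_put_double),     \
                 demo_put_int)(x)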

/**
 * qemu_build_not_reached()
 *
 * The compiler, during optimization, is expected to prove that a call
 * to this function cannot be reached and remove it.  If the compiler
 * supports QEMU_ERROR, this will be reported at compile time; otherwise
 * this will be reported at link time due to the missing symbol.
 */
#if defined(__OPTIMIZE__) && !defined(__NO_INLINE__)
extern void QEMU_NORETURN QEMU_ERROR("code path is reachable")
    qemu_build_not_reached(void);
#else
#define qemu_build_not_reached()  g_assert_not_reached()
#endif

#endif // _MSC_VER
#endif /* COMPILER_H */

qemu/include/qemu/cpuid.h (new file, 67 lines)
@@ -0,0 +1,67 @@
/* cpuid.h: Macros to identify the properties of an x86 host.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef QEMU_CPUID_H
#define QEMU_CPUID_H

#ifndef CONFIG_CPUID_H
# error "<cpuid.h> is unusable with this compiler"
#endif

#ifdef _MSC_VER
#include <intrin.h>
#else
#include <cpuid.h>
#endif

/* Cover the uses that we have within qemu. */
/* ??? Irritating that we have the same information in target/i386/. */

/* Leaf 1, %edx */
#ifndef bit_CMOV
#define bit_CMOV        (1 << 15)
#endif
#ifndef bit_SSE2
#define bit_SSE2        (1 << 26)
#endif
#ifndef bit_POPCNT
#define bit_POPCNT      (1 << 23)
#endif

/* Leaf 1, %ecx */
#ifndef bit_SSE4_1
#define bit_SSE4_1      (1 << 19)
#endif
#ifndef bit_MOVBE
#define bit_MOVBE       (1 << 22)
#endif
#ifndef bit_OSXSAVE
#define bit_OSXSAVE     (1 << 27)
#endif
#ifndef bit_AVX
#define bit_AVX         (1 << 28)
#endif

/* Leaf 7, %ebx */
#ifndef bit_BMI
#define bit_BMI         (1 << 3)
#endif
#ifndef bit_AVX2
#define bit_AVX2        (1 << 5)
#endif
#ifndef bit_AVX512F
#define bit_AVX512F     (1 << 16)
#endif
#ifndef bit_BMI2
#define bit_BMI2        (1 << 8)
#endif

/* Leaf 0x80000001, %ecx */
#ifndef bit_LZCNT
#define bit_LZCNT       (1 << 5)
#endif

#endif /* QEMU_CPUID_H */
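/*
 * Illustrative sketch (not part of the original header), assuming a
 * GCC/Clang host where <cpuid.h> provides __get_cpuid_count(): test a
 * leaf-7 %ebx feature bit defined above.
 */
static inline int demo_host_has_avx2(void)
{
    unsigned int a, b, c, d;
    if (!__get_cpuid_count(7, 0, &a, &b, &c, &d)) {
        return 0;                /* CPUID leaf 7 not supported */
    }
    return (b & bit_AVX2) != 0;
}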

@@ -28,8 +28,9 @@
#ifndef QEMU_CRC32C_H
#define QEMU_CRC32C_H

#include "qemu-common.h"

uint32_t crc32c(uint32_t crc, const uint8_t *data, unsigned int length);

uint32_t crc32(uint32_t crc, const uint8_t *data, unsigned int length);

#endif

qemu/include/qemu/ctype.h (new file, 27 lines)
@@ -0,0 +1,27 @@
/*
 * QEMU TCG support
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef QEMU_CTYPE_H
#define QEMU_CTYPE_H

#define qemu_isalnum(c)   isalnum((unsigned char)(c))
#define qemu_isalpha(c)   isalpha((unsigned char)(c))
#define qemu_iscntrl(c)   iscntrl((unsigned char)(c))
#define qemu_isdigit(c)   isdigit((unsigned char)(c))
#define qemu_isgraph(c)   isgraph((unsigned char)(c))
#define qemu_islower(c)   islower((unsigned char)(c))
#define qemu_isprint(c)   isprint((unsigned char)(c))
#define qemu_ispunct(c)   ispunct((unsigned char)(c))
#define qemu_isspace(c)   isspace((unsigned char)(c))
#define qemu_isupper(c)   isupper((unsigned char)(c))
#define qemu_isxdigit(c)  isxdigit((unsigned char)(c))
#define qemu_tolower(c)   tolower((unsigned char)(c))
#define qemu_toupper(c)   toupper((unsigned char)(c))
#define qemu_isascii(c)   isascii((unsigned char)(c))
#define qemu_toascii(c)   toascii((unsigned char)(c))

#endif

qemu/include/qemu/cutils.h (new file, 41 lines)
@@ -0,0 +1,41 @@
#ifndef QEMU_CUTILS_H
#define QEMU_CUTILS_H

/**
 * pstrcpy:
 * @buf: buffer to copy string into
 * @buf_size: size of @buf in bytes
 * @str: string to copy
 *
 * Copy @str into @buf, including the trailing NUL, but do not
 * write more than @buf_size bytes. The resulting buffer is
 * always NUL terminated (even if the source string was too long).
 * If @buf_size is zero or negative then no bytes are copied.
 *
 * This function is similar to strncpy(), but avoids two of that
 * function's problems:
 *  * if @str fits in the buffer, pstrcpy() does not zero-fill the
 *    remaining space at the end of @buf
 *  * if @str is too long, pstrcpy() will copy the first @buf_size-1
 *    bytes and then add a NUL
 */
void pstrcpy(char *buf, int buf_size, const char *str);
/**
 * pstrcat:
 * @buf: buffer containing existing string
 * @buf_size: size of @buf in bytes
 * @s: string to concatenate to @buf
 *
 * Append a copy of @s to the string already in @buf, but do not
 * allow the buffer to overflow. If the existing contents of @buf
 * plus @str would total more than @buf_size bytes, then write
 * as much of @str as will fit followed by a NUL terminator.
 *
 * @buf must already contain a NUL-terminated string, or the
 * behaviour is undefined.
 *
 * Returns: @buf.
 */
char *pstrcat(char *buf, int buf_size, const char *s);

#endif
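/*
 * Illustrative use of the two prototypes above (a sketch, not part of the
 * original header): build a bounded string without risking overflow.
 */
static inline void demo_pstr(char *out, int out_size)
{
    pstrcpy(out, out_size, "hello");
    pstrcat(out, out_size, " world");   /* silently truncated if too small */
}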

qemu/include/qemu/guest-random.h (new file, 56 lines)
@@ -0,0 +1,56 @@
/*
 * QEMU guest-visible random functions
 *
 * Copyright 2019 Linaro, Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#ifndef QEMU_GUEST_RANDOM_H
#define QEMU_GUEST_RANDOM_H

/**
 * qemu_guest_random_seed_thread_part1(void)
 *
 * If qemu_getrandom is in deterministic mode, returns an
 * independent seed for the new thread.  Otherwise returns 0.
 */
uint64_t qemu_guest_random_seed_thread_part1(void);

/**
 * qemu_guest_random_seed_thread_part2(uint64_t seed)
 * @seed: a value for the new thread.
 *
 * If qemu_guest_getrandom is in deterministic mode, this stores an
 * independent seed for the new thread.  Otherwise a no-op.
 */
void qemu_guest_random_seed_thread_part2(uint64_t seed);

/**
 * qemu_guest_getrandom(void *buf, size_t len, Error **errp)
 * @buf: a buffer of bytes to be written
 * @len: the number of bytes in @buf
 * @errp: an error indicator
 *
 * Fills len bytes in buf with random data.  This should only be used
 * for data presented to the guest.  Host-side crypto services should
 * use qcrypto_random_bytes.
 *
 * Returns 0 on success, < 0 on failure while setting *errp.
 */
int qemu_guest_getrandom(void *buf, size_t len);

/**
 * qemu_guest_getrandom_nofail(void *buf, size_t len)
 * @buf: a buffer of bytes to be written
 * @len: the number of bytes in @buf
 *
 * Like qemu_guest_getrandom, but will assert for failure.
 * Use this when there is no reasonable recovery.
 */
void qemu_guest_getrandom_nofail(void *buf, size_t len);

#endif /* QEMU_GUEST_RANDOM_H */

@@ -22,11 +22,12 @@
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#ifndef HOST_UTILS_H
#define HOST_UTILS_H 1

#include "qemu/compiler.h"   /* QEMU_GNUC_PREREQ */
#include <limits.h>
#ifndef HOST_UTILS_H
#define HOST_UTILS_H

#include "qemu/bswap.h"
#include "qemu/int128.h"

#ifdef CONFIG_INT128
static inline void mulu64(uint64_t *plow, uint64_t *phigh,
@@ -45,6 +46,12 @@ static inline void muls64(uint64_t *plow, uint64_t *phigh,
    *phigh = r >> 64;
}

/* compute with 96 bit intermediate result: (a*b)/c */
static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
{
    return (__int128_t)a * b / c;
}

static inline int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
{
    if (divisor == 0) {
@@ -75,6 +82,29 @@ void muls64(uint64_t *phigh, uint64_t *plow, int64_t a, int64_t b);
void mulu64(uint64_t *phigh, uint64_t *plow, uint64_t a, uint64_t b);
int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
int divs128(int64_t *plow, int64_t *phigh, int64_t divisor);

static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
{
    union {
        uint64_t ll;
        struct {
#ifdef HOST_WORDS_BIGENDIAN
            uint32_t high, low;
#else
            uint32_t low, high;
#endif
        } l;
    } u, res;
    uint64_t rl, rh;

    u.ll = a;
    rl = (uint64_t)u.l.low * (uint64_t)b;
    rh = (uint64_t)u.l.high * (uint64_t)b;
    rh += (rl >> 32);
    res.l.high = rh / c;
    res.l.low = (((rh % c) << 32) + (rl & 0xffffffff)) / c;
    return res.ll;
}
#endif
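/*
 * Typical use of muldiv64() (illustrative, not part of the original
 * header): scale a tick count by a rational clock ratio without letting
 * the intermediate product overflow 64 bits.  The 19.2 MHz clock rate
 * here is a made-up example.
 */
static inline uint64_t demo_ticks_to_ns(uint64_t ticks)
{
    return muldiv64(ticks, 1000000000u, 19200000u);
}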

/**
@@ -86,7 +116,7 @@ int divs128(int64_t *plow, int64_t *phigh, int64_t divisor);
 */
static inline int clz32(uint32_t val)
{
#if QEMU_GNUC_PREREQ(3, 4)
#ifndef _MSC_VER
    return val ? __builtin_clz(val) : 32;
#else
    /* Binary search for the leading one bit.  */
@@ -139,7 +169,7 @@ static inline int clo32(uint32_t val)
 */
static inline int clz64(uint64_t val)
{
#if QEMU_GNUC_PREREQ(3, 4)
#ifndef _MSC_VER
    return val ? __builtin_clzll(val) : 64;
#else
    int cnt = 0;
@@ -174,7 +204,7 @@ static inline int clo64(uint64_t val)
 */
static inline int ctz32(uint32_t val)
{
#if QEMU_GNUC_PREREQ(3, 4)
#ifndef _MSC_VER
    return val ? __builtin_ctz(val) : 32;
#else
    /* Binary search for the trailing one bit.  */
@@ -229,7 +259,7 @@ static inline int cto32(uint32_t val)
 */
static inline int ctz64(uint64_t val)
{
#if QEMU_GNUC_PREREQ(3, 4)
#ifndef _MSC_VER
    return val ? __builtin_ctzll(val) : 64;
#else
    int cnt;
@@ -264,7 +294,7 @@ static inline int cto64(uint64_t val)
 */
static inline int clrsb32(uint32_t val)
{
#if QEMU_GNUC_PREREQ(4, 7)
#if !defined(_MSC_VER) && !defined(__clang__)
    return __builtin_clrsb(val);
#else
    return clz32(val ^ ((int32_t)val >> 1)) - 1;
@@ -280,7 +310,7 @@ static inline int clrsb32(uint32_t val)
 */
static inline int clrsb64(uint64_t val)
{
#if QEMU_GNUC_PREREQ(4, 7)
#if !defined(_MSC_VER) && !defined(__clang__)
    return __builtin_clrsbll(val);
#else
    return clz64(val ^ ((int64_t)val >> 1)) - 1;
@@ -293,7 +323,7 @@ static inline int clrsb64(uint64_t val)
 */
static inline int ctpop8(uint8_t val)
{
#if QEMU_GNUC_PREREQ(3, 4)
#ifndef _MSC_VER
    return __builtin_popcount(val);
#else
    val = (val & 0x55) + ((val >> 1) & 0x55);
@@ -310,7 +340,7 @@ static inline int ctpop8(uint8_t val)
 */
static inline int ctpop16(uint16_t val)
{
#if QEMU_GNUC_PREREQ(3, 4)
#ifndef _MSC_VER
    return __builtin_popcount(val);
#else
    val = (val & 0x5555) + ((val >> 1) & 0x5555);
@@ -328,7 +358,7 @@ static inline int ctpop16(uint16_t val)
 */
static inline int ctpop32(uint32_t val)
{
#if QEMU_GNUC_PREREQ(3, 4)
#ifndef _MSC_VER
    return __builtin_popcount(val);
#else
    val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
@@ -347,7 +377,7 @@ static inline int ctpop32(uint32_t val)
 */
static inline int ctpop64(uint64_t val)
{
#if QEMU_GNUC_PREREQ(3, 4)
#ifndef _MSC_VER
    return __builtin_popcountll(val);
#else
    val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL);
@@ -361,6 +391,80 @@ static inline int ctpop64(uint64_t val)
#endif
}

/**
 * revbit8 - reverse the bits in an 8-bit value.
 * @x: The value to modify.
 */
static inline uint8_t revbit8(uint8_t x)
{
    /* Assign the correct nibble position.  */
    x = ((x & 0xf0) >> 4)
      | ((x & 0x0f) << 4);
    /* Assign the correct bit position.  */
    x = ((x & 0x88) >> 3)
      | ((x & 0x44) >> 1)
      | ((x & 0x22) << 1)
      | ((x & 0x11) << 3);
    return x;
}

/**
 * revbit16 - reverse the bits in a 16-bit value.
 * @x: The value to modify.
 */
static inline uint16_t revbit16(uint16_t x)
{
    /* Assign the correct byte position.  */
    x = bswap16(x);
    /* Assign the correct nibble position.  */
    x = ((x & 0xf0f0) >> 4)
      | ((x & 0x0f0f) << 4);
    /* Assign the correct bit position.  */
    x = ((x & 0x8888) >> 3)
      | ((x & 0x4444) >> 1)
      | ((x & 0x2222) << 1)
      | ((x & 0x1111) << 3);
    return x;
}

/**
 * revbit32 - reverse the bits in a 32-bit value.
 * @x: The value to modify.
 */
static inline uint32_t revbit32(uint32_t x)
{
    /* Assign the correct byte position.  */
    x = bswap32(x);
    /* Assign the correct nibble position.  */
    x = ((x & 0xf0f0f0f0u) >> 4)
      | ((x & 0x0f0f0f0fu) << 4);
    /* Assign the correct bit position.  */
    x = ((x & 0x88888888u) >> 3)
      | ((x & 0x44444444u) >> 1)
      | ((x & 0x22222222u) << 1)
      | ((x & 0x11111111u) << 3);
    return x;
}

/**
 * revbit64 - reverse the bits in a 64-bit value.
 * @x: The value to modify.
 */
static inline uint64_t revbit64(uint64_t x)
{
    /* Assign the correct byte position.  */
    x = bswap64(x);
    /* Assign the correct nibble position.  */
    x = ((x & 0xf0f0f0f0f0f0f0f0ull) >> 4)
      | ((x & 0x0f0f0f0f0f0f0f0full) << 4);
    /* Assign the correct bit position.  */
    x = ((x & 0x8888888888888888ull) >> 3)
      | ((x & 0x4444444444444444ull) >> 1)
      | ((x & 0x2222222222222222ull) << 1)
      | ((x & 0x1111111111111111ull) << 3);
    return x;
}

/* Host type specific sizes of these routines.  */

#if ULONG_MAX == UINT32_MAX
@@ -369,14 +473,93 @@ static inline int ctpop64(uint64_t val)
# define clol   clo32
# define ctol   cto32
# define ctpopl ctpop32
# define revbitl revbit32
#elif ULONG_MAX == UINT64_MAX
# define clzl   clz64
# define ctzl   ctz64
# define clol   clo64
# define ctol   cto64
# define ctpopl ctpop64
# define revbitl revbit64
#else
# error Unknown sizeof long
#endif

static inline bool is_power_of_2(uint64_t value)
{
    if (!value) {
        return false;
    }

    return !(value & (value - 1));
}

/**
 * Return @value rounded down to the nearest power of two or zero.
 */
static inline uint64_t pow2floor(uint64_t value)
{
    if (!value) {
        /* Avoid undefined shift by 64 */
        return 0;
    }
    return 0x8000000000000000ull >> clz64(value);
}

/*
 * Return @value rounded up to the nearest power of two modulo 2^64.
 * This is *zero* for @value > 2^63, so be careful.
 */
static inline uint64_t pow2ceil(uint64_t value)
{
    int n = clz64(value - 1);

    if (!n) {
        /*
         * @value - 1 has no leading zeroes, thus @value - 1 >= 2^63
         * Therefore, either @value == 0 or @value > 2^63.
         * If it's 0, return 1, else return 0.
         */
        return !value;
    }
    return 0x8000000000000000ull >> (n - 1);
}
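/*
 * Worked values for the helpers above (an illustrative sketch assuming
 * <assert.h>, not part of the original header): pow2floor() rounds down,
 * pow2ceil() rounds up, and pow2ceil(1) stays 1.
 */
static inline void demo_pow2(void)
{
    assert(pow2floor(0x1234) == 0x1000);
    assert(pow2ceil(0x1234) == 0x2000);
    assert(pow2ceil(1) == 1);
}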

static inline uint32_t pow2roundup32(uint32_t x)
{
    x |= (x >> 1);
    x |= (x >> 2);
    x |= (x >> 4);
    x |= (x >> 8);
    x |= (x >> 16);
    return x + 1;
}

/**
 * urshift - 128-bit Unsigned Right Shift.
 * @plow: in/out - lower 64-bit integer.
 * @phigh: in/out - higher 64-bit integer.
 * @shift: in - bits to shift, between 0 and 127.
 *
 * Result is zero-extended and stored in plow/phigh, which are
 * input/output variables.  Shift values outside the range are
 * reduced modulo 128.  In other words, the caller is responsible
 * for verifying/asserting both the shift range and the plow/phigh
 * pointers.
 */
void urshift(uint64_t *plow, uint64_t *phigh, int32_t shift);

/**
 * ulshift - 128-bit Unsigned Left Shift.
 * @plow: in/out - lower 64-bit integer.
 * @phigh: in/out - higher 64-bit integer.
 * @shift: in - bits to shift, between 0 and 127.
 * @overflow: out - true if any 1-bit is shifted out.
 *
 * Result is zero-extended and stored in plow/phigh, which are
 * input/output variables.  Shift values outside the range are
 * reduced modulo 128.  In other words, the caller is responsible
 * for verifying/asserting both the shift range and the plow/phigh
 * pointers.
 */
void ulshift(uint64_t *plow, uint64_t *phigh, int32_t shift, bool *overflow);

#endif
@@ -1,10 +1,152 @@
#ifndef INT128_H
#define INT128_H

//#include <assert.h>
#include "unicorn/platform.h"
#include "qemu/bswap.h"
#ifdef CONFIG_INT128

typedef __int128_t Int128;

static inline Int128 int128_make64(uint64_t a)
{
    return a;
}

static inline Int128 int128_make128(uint64_t lo, uint64_t hi)
{
    return (__uint128_t)hi << 64 | lo;
}

static inline uint64_t int128_get64(Int128 a)
{
    uint64_t r = a;
    assert(r == a);
    return r;
}

static inline uint64_t int128_getlo(Int128 a)
{
    return a;
}

static inline int64_t int128_gethi(Int128 a)
{
    return a >> 64;
}

static inline Int128 int128_zero(void)
{
    return 0;
}

static inline Int128 int128_one(void)
{
    return 1;
}

static inline Int128 int128_2_64(void)
{
    return (Int128)1 << 64;
}

static inline Int128 int128_exts64(int64_t a)
{
    return a;
}

static inline Int128 int128_and(Int128 a, Int128 b)
{
    return a & b;
}

static inline Int128 int128_rshift(Int128 a, int n)
{
    return a >> n;
}

static inline Int128 int128_add(Int128 a, Int128 b)
{
    return a + b;
}

static inline Int128 int128_neg(Int128 a)
{
    return -a;
}

static inline Int128 int128_sub(Int128 a, Int128 b)
{
    return a - b;
}

static inline bool int128_nonneg(Int128 a)
{
    return a >= 0;
}

static inline bool int128_eq(Int128 a, Int128 b)
{
    return a == b;
}

static inline bool int128_ne(Int128 a, Int128 b)
{
    return a != b;
}

static inline bool int128_ge(Int128 a, Int128 b)
{
    return a >= b;
}

static inline bool int128_lt(Int128 a, Int128 b)
{
    return a < b;
}

static inline bool int128_le(Int128 a, Int128 b)
{
    return a <= b;
}

static inline bool int128_gt(Int128 a, Int128 b)
{
    return a > b;
}

static inline bool int128_nz(Int128 a)
{
    return a != 0;
}

static inline Int128 int128_min(Int128 a, Int128 b)
{
    return a < b ? a : b;
}

static inline Int128 int128_max(Int128 a, Int128 b)
{
    return a > b ? a : b;
}

static inline void int128_addto(Int128 *a, Int128 b)
{
    *a += b;
}

static inline void int128_subfrom(Int128 *a, Int128 b)
{
    *a -= b;
}

static inline Int128 bswap128(Int128 a)
{
    return int128_make128(bswap64(int128_gethi(a)), bswap64(int128_getlo(a)));
}

#else /* !CONFIG_INT128 */

typedef struct Int128 Int128;
typedef Int128 __int128_t;

struct Int128 {
    uint64_t lo;
@@ -13,16 +155,30 @@ struct Int128 {

static inline Int128 int128_make64(uint64_t a)
{
    Int128 i128 = { a, 0 };
    return i128;
    return (Int128) { a, 0 };
}

static inline Int128 int128_make128(uint64_t lo, uint64_t hi)
{
    return (Int128) { lo, hi };
}

static inline uint64_t int128_get64(Int128 a)
{
    //assert(!a.hi);
    assert(!a.hi);
    return a.lo;
}

static inline uint64_t int128_getlo(Int128 a)
{
    return a.lo;
}

static inline int64_t int128_gethi(Int128 a)
{
    return a.hi;
}

static inline Int128 int128_zero(void)
{
    return int128_make64(0);
@@ -35,20 +191,17 @@ static inline Int128 int128_one(void)

static inline Int128 int128_2_64(void)
{
    Int128 i128 = { 0, 1 };
    return i128;
    return (Int128) { 0, 1 };
}

static inline Int128 int128_exts64(int64_t a)
{
    Int128 i128 = { a, (a < 0) ? -1 : 0 };
    return i128;
    return (Int128) { .lo = a, .hi = (a < 0) ? -1 : 0 };
}

static inline Int128 int128_and(Int128 a, Int128 b)
{
    Int128 i128 = { a.lo & b.lo, a.hi & b.hi };
    return i128;
    return (Int128) { a.lo & b.lo, a.hi & b.hi };
}

static inline Int128 int128_rshift(Int128 a, int n)
@@ -59,11 +212,9 @@ static inline Int128 int128_rshift(Int128 a, int n)
    }
    h = a.hi >> (n & 63);
    if (n >= 64) {
        Int128 i128 = { h, h >> 63 };
        return i128;
        return int128_make128(h, h >> 63);
    } else {
        Int128 i128 = { (a.lo >> n) | ((uint64_t)a.hi << (64 - n)), h };
        return i128;
        return int128_make128((a.lo >> n) | ((uint64_t)a.hi << (64 - n)), h);
    }
}

@@ -77,21 +228,23 @@ static inline Int128 int128_add(Int128 a, Int128 b)
     *
     * So the carry is lo < a.lo.
     */
    Int128 i128 = { lo, (uint64_t)a.hi + b.hi + (lo < a.lo) };
    return i128;
    return int128_make128(lo, (uint64_t)a.hi + b.hi + (lo < a.lo));
}

static inline Int128 int128_neg(Int128 a)
{
    uint64_t lo = 0-a.lo;
    Int128 i128 = { lo, ~(uint64_t)a.hi + !lo };
    return i128;
#ifdef _MSC_VER
    uint64_t lo = a.lo;
    lo = 0 - lo;
#else
    uint64_t lo = (uint64_t)(-a.lo);
#endif
    return int128_make128(lo, ~(uint64_t)a.hi + !lo);
}

static inline Int128 int128_sub(Int128 a, Int128 b)
{
    Int128 i128 = { a.lo - b.lo, (uint64_t)a.hi - b.hi - (a.lo < b.lo) };
    return i128;
    return int128_make128(a.lo - b.lo, (uint64_t)a.hi - b.hi - (a.lo < b.lo));
}
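/*
 * Sanity-check sketch for the carry/borrow logic above (illustrative,
 * assuming <assert.h>, not part of the original header): adding one to
 * 2^64 - 1 must carry into the high word.
 */
static inline void demo_int128_carry(void)
{
    Int128 a = int128_make128(UINT64_MAX, 0);
    Int128 b = int128_add(a, int128_one());
    assert(int128_getlo(b) == 0 && int128_gethi(b) == 1);
}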

static inline bool int128_nonneg(Int128 a)
@@ -154,4 +307,10 @@ static inline void int128_subfrom(Int128 *a, Int128 b)
    *a = int128_sub(*a, b);
}

#endif
static inline Int128 bswap128(Int128 a)
{
    return int128_make128(bswap64(int128_gethi(a)), bswap64(int128_getlo(a)));
}

#endif /* CONFIG_INT128 */
#endif /* INT128_H */

@@ -1,29 +1,6 @@
#ifndef QEMU_LOG_H
#define QEMU_LOG_H

#include <stdarg.h>
#include "unicorn/platform.h"
#include "qemu/compiler.h"
#include "qom/cpu.h"

/* Private global variables, don't use */
extern FILE *qemu_logfile;
extern int qemu_loglevel;

/*
 * The new API:
 *
 */

/* Log settings checking macros: */

/* Returns true if qemu_log() will really write somewhere
 */
static inline bool qemu_log_enabled(void)
{
    return qemu_logfile != NULL;
}

#define CPU_LOG_TB_OUT_ASM (1 << 0)
#define CPU_LOG_TB_IN_ASM  (1 << 1)
#define CPU_LOG_TB_OP      (1 << 2)
@@ -31,87 +8,42 @@ static inline bool qemu_log_enabled(void)
#define CPU_LOG_INT        (1 << 4)
#define CPU_LOG_EXEC       (1 << 5)
#define CPU_LOG_PCALL      (1 << 6)
#define CPU_LOG_IOPORT     (1 << 7)
#define CPU_LOG_TB_CPU     (1 << 8)
#define CPU_LOG_RESET      (1 << 9)
#define LOG_UNIMP          (1 << 10)
#define LOG_GUEST_ERROR    (1 << 11)
#define CPU_LOG_MMU        (1 << 12)
#define CPU_LOG_TB_NOCHAIN (1 << 13)
#define CPU_LOG_PAGE       (1 << 14)
/* LOG_TRACE (1 << 15) is defined in log-for-trace.h */
#define CPU_LOG_TB_OP_IND  (1 << 16)
#define CPU_LOG_TB_FPU     (1 << 17)
#define CPU_LOG_PLUGIN     (1 << 18)
/* LOG_STRACE is used for user-mode strace logging. */
#define LOG_STRACE         (1 << 19)

/* Returns true if a bit is set in the current loglevel mask
/* Lock output for a series of related logs.  Since this is not needed
 * for a single qemu_log / qemu_log_mask / qemu_log_mask_and_addr, we
 * assume that qemu_loglevel_mask has already been tested, and that
 * qemu_loglevel is never set when qemu_logfile is unset.
 */
static inline bool qemu_loglevel_mask(int mask)
{
    return (qemu_loglevel & mask) != 0;
}

/* Logging functions: */

/* main logging function
/* log only if a bit is set on the current loglevel mask:
 * @mask: bit to check in the mask
 * @fmt: printf-style format string
 * @args: optional arguments for format string
 */
void GCC_FMT_ATTR(1, 2) qemu_log(const char *fmt, ...);

/* vfprintf-like logging function
 */
static inline void GCC_FMT_ATTR(1, 0)
qemu_log_vprintf(const char *fmt, va_list va)
{
    if (qemu_logfile) {
        vfprintf(qemu_logfile, fmt, va);
    }
}
#define qemu_log_mask(MASK, FMT, ...)

/* log only if a bit is set on the current loglevel mask
 * and we are in the address range we care about:
 * @mask: bit to check in the mask
 * @addr: address to check in dfilter
 * @fmt: printf-style format string
 * @args: optional arguments for format string
 */
void GCC_FMT_ATTR(2, 3) qemu_log_mask(int mask, const char *fmt, ...);


/* Special cases: */

/* cpu_dump_state() logging functions: */
/**
 * log_cpu_state:
 * @cpu: The CPU whose state is to be logged.
 * @flags: Flags selecting what to log.
 *
 * Logs the output of cpu_dump_state().
 */
static inline void log_cpu_state(CPUState *cpu, int flags)
{
    if (qemu_log_enabled()) {
        cpu_dump_state(cpu, qemu_logfile, fprintf, flags);
    }
}

/**
 * log_cpu_state_mask:
 * @mask: Mask when to log.
 * @cpu: The CPU whose state is to be logged.
 * @flags: Flags selecting what to log.
 *
 * Logs the output of cpu_dump_state() if loglevel includes @mask.
 */
static inline void log_cpu_state_mask(int mask, CPUState *cpu, int flags)
{
    if (qemu_loglevel & mask) {
        log_cpu_state(cpu, flags);
    }
}

/* fflush() the log file */
static inline void qemu_log_flush(void)
{
    fflush(qemu_logfile);
}

/* Close the log file */
static inline void qemu_log_close(void)
{
    if (qemu_logfile) {
        if (qemu_logfile != stderr) {
            fclose(qemu_logfile);
        }
        qemu_logfile = NULL;
    }
}
#define qemu_log_mask_and_addr(MASK, ADDR, FMT, ...)

#endif
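/*
 * Illustrative call site for the mask test above (a sketch, not part of
 * the original header): only format the message when the level is enabled.
 */
static inline void demo_log_unimp(const char *what)
{
    if (qemu_loglevel_mask(LOG_UNIMP)) {
        qemu_log("unimplemented: %s\n", what);
    }
}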

@@ -1,30 +0,0 @@
/*
 * QEMU Module Infrastructure
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_MODULE_H
#define QEMU_MODULE_H

#include "qemu/osdep.h"

typedef enum {
    MODULE_INIT_MACHINE,
    MODULE_INIT_QOM,
    MODULE_INIT_MAX
} module_init_type;

#define machine_init(function) module_init(function, MODULE_INIT_MACHINE)
#define type_init(function) module_init(function, MODULE_INIT_QOM)

void module_call_init(struct uc_struct *uc, module_init_type type);

#endif

@@ -1,11 +1,101 @@
/*
 * OS includes and handling of OS dependencies
 *
 * This header exists to pull in some common system headers that
 * most code in QEMU will want, and to fix up some possible issues with
 * it (missing defines, Windows weirdness, and so on).
 *
 * To avoid getting into possible circular include dependencies, this
 * file should not include any other QEMU headers, with the exceptions
 * of config-host.h, config-target.h, qemu/compiler.h,
 * sysemu/os-posix.h, sysemu/os-win32.h, glib-compat.h and
 * qemu/typedefs.h, all of which are doing a similar job to this file
 * and are under similar constraints.
 *
 * This header also contains prototypes for functions defined in
 * os-*.c and util/oslib-*.c; those would probably be better split
 * out into separate header files.
 *
 * In an ideal world this header would contain only:
 *  (1) things which everybody needs
 *  (2) things without which code would work on most platforms but
 *      fail to compile or misbehave on a minority of host OSes
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#ifndef QEMU_OSDEP_H
#define QEMU_OSDEP_H

#include "config-host.h"
#ifdef NEED_CPU_H
#include "config-target.h"
#else
#include "exec/poison.h"
#endif

#include "qemu/compiler.h"

struct uc_struct;


/* Older versions of C++ don't get definitions of various macros from
 * stdlib.h unless we define these macros before first inclusion of
 * that system header.
 */
#ifndef __STDC_CONSTANT_MACROS
#define __STDC_CONSTANT_MACROS
#endif
#ifndef __STDC_LIMIT_MACROS
#define __STDC_LIMIT_MACROS
#endif
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif

#ifdef _WIN32
/* as defined in sdkddkver.h */
#ifndef _WIN32_WINNT
#define _WIN32_WINNT 0x0600 /* Vista */
#endif
/* reduces the number of implicitly included headers */
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#endif

/* enable C99/POSIX format strings (needs mingw32-runtime 3.15 or later) */
#ifdef __MINGW32__
#ifndef __USE_MINGW_ANSI_STDIO
#define __USE_MINGW_ANSI_STDIO 1
#endif // __USE_MINGW_ANSI_STDIO
#endif

#include <stdarg.h>
#include <stddef.h>
#include "unicorn/platform.h"
#include <stdbool.h>
#include <stdint.h>
#include <sys/types.h>
#include <stdlib.h>
#include <stdio.h>

#include <string.h>
#include <inttypes.h>
#include <limits.h>

#include <unicorn/platform.h>

#include <time.h>
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <assert.h>
/* setjmp must be declared before sysemu/os-win32.h
 * because it is redefined there. */
#include <setjmp.h>
#include <signal.h>

#ifdef __OpenBSD__
#include <sys/signal.h>
#endif
@@ -17,52 +107,153 @@
#define WEXITSTATUS(x) (x)
#endif

#if defined(CONFIG_SOLARIS) && CONFIG_SOLARIS_VERSION < 10
/* [u]int_fast*_t not in <sys/int_types.h> */
typedef unsigned char           uint_fast8_t;
typedef unsigned int            uint_fast16_t;
typedef signed int              int_fast16_t;
#ifdef _WIN32
#include "sysemu/os-win32.h"
#endif

#ifndef glue
#define xglue(x, y) x ## y
#define glue(x, y) xglue(x, y)
#define stringify(s) tostring(s)
#define tostring(s) #s
#ifdef CONFIG_POSIX
#include "sys/mman.h"
#endif

#ifndef likely
#if __GNUC__ < 3
#define __builtin_expect(x, n) (x)
/*
 * Only allow MAP_JIT for Mojave or later.
 *
 * Source: https://github.com/moby/hyperkit/pull/259/files#diff-e6b5417230ff2daff9155d9b15aefae12e89410ec2dca1f59d04be511f6737fcR41
 */
#if defined(__APPLE__)
#if defined(HAVE_PTHREAD_JIT_PROTECT)
#define USE_MAP_JIT
#else
#include <Availability.h>
#ifdef __MAC_OS_X_VERSION_MIN_REQUIRED
#if __MAC_OS_X_VERSION_MIN_REQUIRED >= 101400 && defined(MAP_JIT)
#define USE_MAP_JIT
#endif
#endif
#endif
#endif

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x)   __builtin_expect(!!(x), 0)
#include <glib_compat.h>
#include "qemu/typedefs.h"


/* Starting on QEMU 2.5, qemu_hw_version() returns "2.5+" by default
 * instead of QEMU_VERSION, so setting hw_version on MachineClass
 * is no longer mandatory.
 *
 * Do NOT change this string, or it will break compatibility on all
 * machine classes that don't set hw_version.
 */
#define QEMU_HW_VERSION "2.5+"


/*
 * For mingw, as of v6.0.0, the function implementing the assert macro is
 * not marked as noreturn, so the compiler cannot delete code following an
 * assert(false) as unused.  We rely on this within the code base to delete
 * code that is unreachable when features are disabled.
 * All supported versions of Glib's g_assert() satisfy this requirement.
 */
#ifdef __MINGW32__
#undef assert
#define assert(x) g_assert(x)
#endif

#ifndef container_of
#ifndef _MSC_VER
#define container_of(ptr, type, member) ({                      \
    const typeof(((type *) 0)->member) *__mptr = (ptr);         \
    (type *) ((char *) __mptr - offsetof(type, member));})
/*
 * According to waitpid man page:
 * WCOREDUMP
 *  This macro is not specified in POSIX.1-2001 and is not
 *  available on some UNIX implementations (e.g., AIX, SunOS).
 *  Therefore, enclose its use inside #ifdef WCOREDUMP ... #endif.
 */
#ifndef WCOREDUMP
#define WCOREDUMP(status) 0
#endif
/*
 * We have a lot of unaudited code that may fail in strange ways, or
 * even be a security risk during migration, if you disable assertions
 * at compile-time.  You may comment out these safety checks if you
 * absolutely want to disable assertion overhead, but it is not
 * supported upstream so the risk is all yours.  Meanwhile, please
 * submit patches to remove any side-effects inside an assertion, or
 * fixing error handling that should use Error instead of assert.
 */
#ifdef G_DISABLE_ASSERT
#error building with G_DISABLE_ASSERT is not supported
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif
#ifndef O_BINARY
#define O_BINARY 0
#endif
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif
#ifndef ENOMEDIUM
#define ENOMEDIUM ENODEV
#endif
#if !defined(ENOTSUP)
#define ENOTSUP 4096
#endif
#if !defined(ECANCELED)
#define ECANCELED 4097
#endif
#if !defined(EMEDIUMTYPE)
#define EMEDIUMTYPE 4098
#endif
#if !defined(ESHUTDOWN)
#define ESHUTDOWN 4099
#endif

/* time_t may be either 32 or 64 bits depending on the host OS, and
 * can be either signed or unsigned, so we can't just hardcode a
 * specific maximum value.  This is not a C preprocessor constant,
 * so you can't use TIME_MAX in an #ifdef, but for our purposes
 * this isn't a problem.
 */

/* The macros TYPE_SIGNED, TYPE_WIDTH, and TYPE_MAXIMUM are from
 * Gnulib, and are under the LGPL v2.1 or (at your option) any
 * later version.
 */

/* True if the real type T is signed.  */
#define TYPE_SIGNED(t) (!((t)0 < (t)-1))

/* The width in bits of the integer type or expression T.
 * Padding bits are not supported.
 */
#define TYPE_WIDTH(t) (sizeof(t) * CHAR_BIT)

/* The maximum and minimum values for the integer type T.  */
#define TYPE_MAXIMUM(t)                                       \
    ((t) (!TYPE_SIGNED(t)                                     \
          ? (t)-1                                             \
          : ((((t)1 << (TYPE_WIDTH(t) - 2)) - 1) * 2 + 1)))

#ifndef TIME_MAX
#define TIME_MAX TYPE_MAXIMUM(time_t)
#endif
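/*
 * Worked expansion of the Gnulib macros above (an illustrative
 * compile-time check, not part of the original header): for a signed
 * 32-bit type, TYPE_MAXIMUM() evaluates to
 * ((1 << 30) - 1) * 2 + 1 == 0x7fffffff.
 */
QEMU_BUILD_BUG_ON(TYPE_MAXIMUM(int32_t) != 0x7fffffff);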

/* HOST_LONG_BITS is the size of a native pointer in bits. */
#if UINTPTR_MAX == UINT32_MAX
# define HOST_LONG_BITS 32
#elif UINTPTR_MAX == UINT64_MAX
# define HOST_LONG_BITS 64
#else
#define container_of(ptr, type, member) ((type *)((char *)(ptr) - offsetof(type, member)))
#endif
# error Unknown pointer size
#endif

/* Convert from a base type to a parent type, with compile time checking.  */
#ifdef __GNUC__
#define DO_UPCAST(type, field, dev) ( __extension__ ( {        \
    char QEMU_UNUSED_VAR offset_must_be_zero[                  \
        -offsetof(type, field)];                               \
    container_of(dev, type, field);}))
#else
#define DO_UPCAST(type, field, dev) container_of(dev, type, field)
#endif
/* Mac OSX has a <stdint.h> bug that incorrectly defines SIZE_MAX with
 * the wrong type.  Our replacement isn't usable in preprocessor
 * expressions, but it is sufficient for our needs. */
#if defined(HAVE_BROKEN_SIZE_MAX) && HAVE_BROKEN_SIZE_MAX
#undef SIZE_MAX
#define SIZE_MAX ((size_t)-1)
#endif

#define typeof_field(type, field) typeof(((type *)0)->field)
#define type_check(t1,t2) ((t1*)0 - (t2*)0)

#ifndef MIN
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#endif
@ -73,40 +264,215 @@ typedef signed int int_fast16_t;
/* Minimum function that returns zero only iff both values are zero.
 * Intended for use with unsigned values only. */
#ifndef MIN_NON_ZERO
#define MIN_NON_ZERO(a, b) (((a) != 0 && (a) < (b)) ? (a) : (b))
#define MIN_NON_ZERO(a, b) ((a) == 0 ? (b) :       \
                            ((b) == 0 ? (a) : (MIN(a, b))))
#endif
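
/*
 * Behaviour sketch (not part of the original source):
 *   MIN_NON_ZERO(4, 8) == 4    MIN_NON_ZERO(0, 8) == 8
 *   MIN_NON_ZERO(4, 0) == 4    MIN_NON_ZERO(0, 0) == 0
 * The rewritten form also fixes the old one-liner, which evaluated
 * MIN_NON_ZERO(4, 0) to 0 because "(a) < (b)" is false when b is zero.
 */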

/* Round number down to multiple */
#define QEMU_ALIGN_DOWN(n, m) ((n) / (m) * (m))

/* Round number up to multiple. Safe when m is not a power of 2 (see
 * ROUND_UP for a faster version when a power of 2 is guaranteed) */
#define QEMU_ALIGN_UP(n, m) QEMU_ALIGN_DOWN((n) + (m) - 1, (m))

/* Check if n is a multiple of m */
#define QEMU_IS_ALIGNED(n, m) (((n) % (m)) == 0)

/* n-byte align pointer down */
#ifdef _MSC_VER
#define QEMU_ALIGN_PTR_DOWN(p, n) (QEMU_ALIGN_DOWN((uintptr_t)(p), (n)))
#else
#define QEMU_ALIGN_PTR_DOWN(p, n) ((typeof(p))QEMU_ALIGN_DOWN((uintptr_t)(p), (n)))
#endif

/* n-byte align pointer up */
#ifndef _MSC_VER
#define QEMU_ALIGN_PTR_UP(p, n) ((typeof(p))QEMU_ALIGN_UP((uintptr_t)(p), (n)))
#else
#define QEMU_ALIGN_PTR_UP(p, n) QEMU_ALIGN_UP((uintptr_t)(p), (n))
#endif

/* Check if pointer p is n-bytes aligned */
#define QEMU_PTR_IS_ALIGNED(p, n) QEMU_IS_ALIGNED((uintptr_t)(p), (n))

/* Round number up to multiple. Requires that d be a power of 2 (see
 * QEMU_ALIGN_UP for a safer but slower version on arbitrary
 * numbers); works even if d is a smaller type than n. */
#ifndef ROUND_UP
#define ROUND_UP(n,d) (((n) + (d) - 1) & -(d))
#ifdef _MSC_VER
#define ROUND_UP(n, d) (((n) + (d) - 1) & (0 - (0 ? (n) : (d))))
#else
#define ROUND_UP(n, d) (((n) + (d) - 1) & -(0 ? (n) : (d)))
#endif
#endif

#ifndef DIV_ROUND_UP
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#endif
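
/*
 * Worked example (not part of the original source), with n = 13, m = d = 8:
 *   QEMU_ALIGN_DOWN(13, 8) == 8     QEMU_ALIGN_UP(13, 8) == 16
 *   ROUND_UP(13, 8)        == 16    DIV_ROUND_UP(13, 8)  == 2
 * The "(0 ? (n) : (d))" trick in ROUND_UP never evaluates n; it only
 * promotes d to the common type of n and d before negation, so the mask
 * is wide enough even when d has a narrower type than n.
 */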

/*
 * &(x)[0] is always a pointer - if it's same type as x then the argument is a
 * pointer, not an array.
 */
#define QEMU_IS_ARRAY(x) (!__builtin_types_compatible_p(typeof(x),  \
                                                        typeof(&(x)[0])))
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif

#ifndef always_inline
#if !((__GNUC__ < 3) || defined(__APPLE__))
#ifdef __OPTIMIZE__
#undef inline
#define inline __attribute__ (( always_inline )) __inline__
#endif
#endif
#ifndef _MSC_VER
#define ARRAY_SIZE(x) ((sizeof(x) / sizeof((x)[0])) +   \
                       QEMU_BUILD_BUG_ON_ZERO(!QEMU_IS_ARRAY(x)))
#else
#undef inline
#define inline always_inline
#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
#endif
#endif

#define qemu_printf printf

void *qemu_try_memalign(size_t alignment, size_t size);
void *qemu_memalign(size_t alignment, size_t size);
void *qemu_anon_ram_alloc(size_t size, uint64_t *align);
void *qemu_anon_ram_alloc(struct uc_struct *uc, size_t size, uint64_t *align);
void qemu_vfree(void *ptr);
void qemu_anon_ram_free(void *ptr, size_t size);
void qemu_anon_ram_free(struct uc_struct *uc, void *ptr, size_t size);

#define QEMU_MADV_INVALID -1

#if defined(CONFIG_MADVISE)

#define QEMU_MADV_WILLNEED MADV_WILLNEED
#define QEMU_MADV_DONTNEED MADV_DONTNEED
#ifdef MADV_DONTFORK
#define QEMU_MADV_DONTFORK MADV_DONTFORK
#else
#define QEMU_MADV_DONTFORK QEMU_MADV_INVALID
#endif
#ifdef MADV_MERGEABLE
#define QEMU_MADV_MERGEABLE MADV_MERGEABLE
#else
#define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID
#endif
#ifdef MADV_UNMERGEABLE
#define QEMU_MADV_UNMERGEABLE MADV_UNMERGEABLE
#else
#define QEMU_MADV_UNMERGEABLE QEMU_MADV_INVALID
#endif
#ifdef MADV_DODUMP
#define QEMU_MADV_DODUMP MADV_DODUMP
#else
#define QEMU_MADV_DODUMP QEMU_MADV_INVALID
#endif
#ifdef MADV_DONTDUMP
#define QEMU_MADV_DONTDUMP MADV_DONTDUMP
#else
#define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID
#endif
#ifdef MADV_HUGEPAGE
#define QEMU_MADV_HUGEPAGE MADV_HUGEPAGE
#else
#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID
#endif
#ifdef MADV_NOHUGEPAGE
#define QEMU_MADV_NOHUGEPAGE MADV_NOHUGEPAGE
#else
#define QEMU_MADV_NOHUGEPAGE QEMU_MADV_INVALID
#endif
#ifdef MADV_REMOVE
#define QEMU_MADV_REMOVE MADV_REMOVE
#else
#define QEMU_MADV_REMOVE QEMU_MADV_INVALID
#endif

#elif defined(CONFIG_POSIX_MADVISE)

#define QEMU_MADV_WILLNEED POSIX_MADV_WILLNEED
#define QEMU_MADV_DONTNEED POSIX_MADV_DONTNEED
#define QEMU_MADV_DONTFORK QEMU_MADV_INVALID
#define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID
#define QEMU_MADV_UNMERGEABLE QEMU_MADV_INVALID
#define QEMU_MADV_DODUMP QEMU_MADV_INVALID
#define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID
#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID
#define QEMU_MADV_NOHUGEPAGE QEMU_MADV_INVALID
#define QEMU_MADV_REMOVE QEMU_MADV_INVALID

#else /* no-op */

#define QEMU_MADV_WILLNEED QEMU_MADV_INVALID
#define QEMU_MADV_DONTNEED QEMU_MADV_INVALID
#define QEMU_MADV_DONTFORK QEMU_MADV_INVALID
#define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID
#define QEMU_MADV_UNMERGEABLE QEMU_MADV_INVALID
#define QEMU_MADV_DODUMP QEMU_MADV_INVALID
#define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID
#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID
#define QEMU_MADV_NOHUGEPAGE QEMU_MADV_INVALID
#define QEMU_MADV_REMOVE QEMU_MADV_INVALID

#endif

#ifdef _WIN32
#define HAVE_CHARDEV_SERIAL 1
#elif defined(__linux__) || defined(__sun__) || defined(__FreeBSD__)    \
    || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) \
    || defined(__GLIBC__)
#define HAVE_CHARDEV_SERIAL 1
#endif

#if defined(__linux__) || defined(__FreeBSD__) ||               \
    defined(__FreeBSD_kernel__) || defined(__DragonFly__)
#define HAVE_CHARDEV_PARPORT 1
#endif

#if defined(CONFIG_LINUX)
#ifndef BUS_MCEERR_AR
#define BUS_MCEERR_AR 4
#endif
#ifndef BUS_MCEERR_AO
#define BUS_MCEERR_AO 5
#endif
#endif

#if defined(__linux__) && \
    (defined(__x86_64__) || defined(__arm__) || defined(__aarch64__) \
     || defined(__powerpc64__))
   /* Use 2 MiB alignment so transparent hugepages can be used by KVM.
      Valgrind does not support alignments larger than 1 MiB,
      therefore we need special code which handles running on Valgrind. */
#  define QEMU_VMALLOC_ALIGN (512 * 4096)
#elif defined(__linux__) && defined(__s390x__)
   /* Use 1 MiB (segment size) alignment so gmap can be used by KVM. */
#  define QEMU_VMALLOC_ALIGN (256 * 4096)
#elif defined(__linux__) && defined(__sparc__)
#include <sys/shm.h>
#  define QEMU_VMALLOC_ALIGN MAX(uc->qemu_real_host_page_size, SHMLBA)
#else
#  define QEMU_VMALLOC_ALIGN uc->qemu_real_host_page_size
#endif

#ifdef CONFIG_POSIX
struct qemu_signalfd_siginfo {
    uint32_t ssi_signo;   /* Signal number */
    int32_t  ssi_errno;   /* Error number (unused) */
    int32_t  ssi_code;    /* Signal code */
    uint32_t ssi_pid;     /* PID of sender */
    uint32_t ssi_uid;     /* Real UID of sender */
    int32_t  ssi_fd;      /* File descriptor (SIGIO) */
    uint32_t ssi_tid;     /* Kernel timer ID (POSIX timers) */
    uint32_t ssi_band;    /* Band event (SIGIO) */
    uint32_t ssi_overrun; /* POSIX timer overrun count */
    uint32_t ssi_trapno;  /* Trap number that caused signal */
    int32_t  ssi_status;  /* Exit status or signal (SIGCHLD) */
    int32_t  ssi_int;     /* Integer sent by sigqueue(2) */
    uint64_t ssi_ptr;     /* Pointer sent by sigqueue(2) */
    uint64_t ssi_utime;   /* User CPU time consumed (SIGCHLD) */
    uint64_t ssi_stime;   /* System CPU time consumed (SIGCHLD) */
    uint64_t ssi_addr;    /* Address that generated signal
                             (for hardware-generated signals) */
    uint8_t  pad[48];     /* Pad size to 128 bytes (allow for
                             additional fields in the future) */
};

#endif

int qemu_madvise(void *addr, size_t len, int advice);
int qemu_mprotect_rwx(void *addr, size_t size);
int qemu_mprotect_none(void *addr, size_t size);

#if defined(__HAIKU__) && defined(__i386__)
#define FMT_pid "%ld"
@ -116,6 +482,25 @@ void qemu_anon_ram_free(void *ptr, size_t size);
#define FMT_pid "%d"
#endif

int qemu_get_thread_id(void);

#ifdef _WIN32
static inline void qemu_timersub(const struct timeval *val1,
                                 const struct timeval *val2,
                                 struct timeval *res)
{
    res->tv_sec = val1->tv_sec - val2->tv_sec;
    if (val1->tv_usec < val2->tv_usec) {
        res->tv_sec--;
        res->tv_usec = val1->tv_usec - val2->tv_usec + 1000 * 1000;
    } else {
        res->tv_usec = val1->tv_usec - val2->tv_usec;
    }
}
#else
#define qemu_timersub timersub
#endif
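
/*
 * Worked example (not part of the original header), showing the borrow
 * from the microsecond field when val2's usec exceeds val1's.
 */
#if 0   /* example only */
static void timersub_example(void)
{
    struct timeval a = { .tv_sec = 7, .tv_usec = 100000 };
    struct timeval b = { .tv_sec = 5, .tv_usec = 900000 };
    struct timeval d;

    qemu_timersub(&a, &b, &d);
    /* d.tv_sec == 1, d.tv_usec == 200000, i.e. 1.2 seconds */
}
#endif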

/**
 * qemu_getauxval:
 * @type: the auxiliary vector key to lookup
27
qemu/include/qemu/processor.h
Normal file
@ -0,0 +1,27 @@
/*
 * Copyright (C) 2016, Emilio G. Cota <cota@braap.org>
 *
 * License: GNU GPL, version 2.
 * See the COPYING file in the top-level directory.
 */
#ifndef QEMU_PROCESSOR_H
#define QEMU_PROCESSOR_H

#include "qemu/atomic.h"

#if defined(__i386__) || defined(__x86_64__)
# define cpu_relax() asm volatile("rep; nop" ::: "memory")

#elif defined(__aarch64__)
# define cpu_relax() asm volatile("yield" ::: "memory")

#elif defined(__powerpc64__)
/* set Hardware Multi-Threading (HMT) priority to low; then back to medium */
# define cpu_relax() asm volatile("or 1, 1, 1;"     \
                                  "or 2, 2, 2;" ::: "memory")

#else
# define cpu_relax() barrier()
#endif

#endif /* QEMU_PROCESSOR_H */
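
/*
 * Typical use (sketch, not part of the original header): a polite
 * spin-wait polling a flag set by another thread; "ready" is hypothetical.
 */
#if 0   /* example only */
static volatile int ready;

static void spin_until_ready(void)
{
    while (!ready) {
        cpu_relax();   /* PAUSE/yield hint to the CPU; also a compiler barrier */
    }
}
#endif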

55
qemu/include/qemu/qdist.h
Normal file
@ -0,0 +1,55 @@
/*
 * Copyright (C) 2016, Emilio G. Cota <cota@braap.org>
 *
 * License: GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#ifndef QEMU_QDIST_H
#define QEMU_QDIST_H

#include "qemu/bitops.h"

/*
 * Samples with the same 'x value' end up in the same qdist_entry,
 * e.g. inc(0.1) and inc(0.1) end up as {x=0.1, count=2}.
 *
 * Binning happens only at print time, so that we retain the flexibility to
 * choose the binning. This might not be ideal for workloads that do not care
 * much about precision and insert many samples all with different x values;
 * in that case, pre-binning (e.g. entering both 0.115 and 0.097 as 0.1)
 * should be considered.
 */
struct qdist_entry {
    double x;
    unsigned long count;
};

struct qdist {
    struct qdist_entry *entries;
    size_t n;
    size_t size;
};

#define QDIST_PR_BORDER     BIT(0)
#define QDIST_PR_LABELS     BIT(1)
/* the remaining options only work if PR_LABELS is set */
#define QDIST_PR_NODECIMAL  BIT(2)
#define QDIST_PR_PERCENT    BIT(3)
#define QDIST_PR_100X       BIT(4)
#define QDIST_PR_NOBINRANGE BIT(5)

void qdist_init(struct qdist *dist);
void qdist_destroy(struct qdist *dist);

void qdist_add(struct qdist *dist, double x, long count);
void qdist_inc(struct qdist *dist, double x);
double qdist_xmin(const struct qdist *dist);
double qdist_xmax(const struct qdist *dist);
double qdist_avg(const struct qdist *dist);
unsigned long qdist_sample_count(const struct qdist *dist);
size_t qdist_unique_entries(const struct qdist *dist);

/* Only qdist code and test code should ever call this function */
void qdist_bin__internal(struct qdist *to, const struct qdist *from, size_t n);

#endif /* QEMU_QDIST_H */
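
/*
 * Usage sketch (not part of the original header); the sample values are
 * made up, and the avg is assumed to be the count-weighted mean.
 */
#if 0   /* example only */
static void qdist_example(void)
{
    struct qdist dist;

    qdist_init(&dist);
    qdist_inc(&dist, 1.0);        /* first sample: {x=1.0, count=1} */
    qdist_inc(&dist, 1.0);        /* same x, so now {x=1.0, count=2} */
    qdist_add(&dist, 3.0, 4);     /* adds {x=3.0, count=4} */

    /* xmin = 1.0, xmax = 3.0, sample count = 6, avg = 14.0 / 6 */
    double avg = qdist_avg(&dist);
    (void)avg;
    qdist_destroy(&dist);
}
#endif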

225
qemu/include/qemu/qht.h
Normal file
@ -0,0 +1,225 @@
/*
 * Copyright (C) 2016, Emilio G. Cota <cota@braap.org>
 *
 * License: GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#ifndef QEMU_QHT_H
#define QEMU_QHT_H

// #include "qemu/seqlock.h"
#include "qemu/thread.h"
#include "qemu/qdist.h"

struct uc_struct;

typedef bool (*qht_cmp_func_t)(struct uc_struct *uc, const void *a, const void *b);

struct qht {
    struct qht_map *map;
    qht_cmp_func_t cmp;
    unsigned int mode;
};

/**
 * struct qht_stats - Statistics of a QHT
 * @head_buckets: number of head buckets
 * @used_head_buckets: number of non-empty head buckets
 * @entries: total number of entries
 * @chain: frequency distribution representing the number of buckets in each
 *         chain, excluding empty chains.
 * @occupancy: frequency distribution representing chain occupancy rate.
 *             Valid range: from 0.0 (empty) to 1.0 (full occupancy).
 *
 * An entry is a pointer-hash pair.
 * Each bucket can host several entries.
 * Chains are chains of buckets, whose first link is always a head bucket.
 */
struct qht_stats {
    size_t head_buckets;
    size_t used_head_buckets;
    size_t entries;
    struct qdist chain;
    struct qdist occupancy;
};

typedef bool (*qht_lookup_func_t)(struct uc_struct *uc, const void *obj, const void *userp);
typedef void (*qht_iter_func_t)(struct uc_struct *uc, void *p, uint32_t h, void *up);
typedef bool (*qht_iter_bool_func_t)(void *p, uint32_t h, void *up);

#define QHT_MODE_AUTO_RESIZE 0x1 /* auto-resize when heavily loaded */
#define QHT_MODE_RAW_MUTEXES 0x2 /* bypass the profiler (QSP) */

/**
 * qht_init - Initialize a QHT
 * @ht: QHT to be initialized
 * @cmp: default comparison function. Cannot be NULL.
 * @n_elems: number of entries the hash table should be optimized for.
 * @mode: bitmask with OR'ed QHT_MODE_*
 */
void qht_init(struct qht *ht, qht_cmp_func_t cmp, size_t n_elems,
              unsigned int mode);

/**
 * qht_destroy - destroy a previously initialized QHT
 * @ht: QHT to be destroyed
 *
 * Call only when there are no readers/writers left.
 */
void qht_destroy(struct qht *ht);

/**
 * qht_insert - Insert a pointer into the hash table
 * @ht: QHT to insert to
 * @p: pointer to be inserted
 * @hash: hash corresponding to @p
 * @existing: address where the pointer to an existing entry can be copied to
 *
 * Attempting to insert a NULL @p is a bug.
 * Inserting the same pointer @p with different @hash values is a bug.
 *
 * In case of successful operation, smp_wmb() is implied before the pointer is
 * inserted into the hash table.
 *
 * Returns true on success.
 * Returns false if there is an existing entry in the table that is equivalent
 * (i.e. ht->cmp matches and the hash is the same) to @p-@hash. If @existing
 * is !NULL, a pointer to this existing entry is copied to it.
 */
bool qht_insert(struct uc_struct *uc, struct qht *ht, void *p, uint32_t hash, void **existing);

/**
 * qht_lookup_custom - Look up a pointer using a custom comparison function.
 * @ht: QHT to be looked up
 * @userp: pointer to pass to @func
 * @hash: hash of the pointer to be looked up
 * @func: function to compare existing pointers against @userp
 *
 * Needs to be called under an RCU read-critical section.
 *
 * smp_read_barrier_depends() is implied before the call to @func.
 *
 * The user-provided @func compares pointers in QHT against @userp.
 * If the function returns true, a match has been found.
 *
 * Returns the corresponding pointer when a match is found.
 * Returns NULL otherwise.
 */
void *qht_lookup_custom(struct uc_struct *uc, const struct qht *ht, const void *userp, uint32_t hash,
                        qht_lookup_func_t func);

/**
 * qht_lookup - Look up a pointer in a QHT
 * @ht: QHT to be looked up
 * @userp: pointer to pass to the comparison function
 * @hash: hash of the pointer to be looked up
 *
 * Calls qht_lookup_custom() using @ht's default comparison function.
 */
void *qht_lookup(struct uc_struct *uc, const struct qht *ht, const void *userp, uint32_t hash);

/**
 * qht_remove - remove a pointer from the hash table
 * @ht: QHT to remove from
 * @p: pointer to be removed
 * @hash: hash corresponding to @p
 *
 * Attempting to remove a NULL @p is a bug.
 *
 * Just-removed @p pointers cannot be immediately freed; they need to remain
 * valid until the end of the RCU grace period in which qht_remove() is called.
 * This guarantees that concurrent lookups will always compare against valid
 * data.
 *
 * Returns true on success.
 * Returns false if the @p-@hash pair was not found.
 */
bool qht_remove(struct qht *ht, const void *p, uint32_t hash);

/**
 * qht_reset - reset a QHT
 * @ht: QHT to be reset
 *
 * All entries in the hash table are reset. No resizing is performed.
 *
 * If concurrent readers may exist, the objects pointed to by the hash table
 * must remain valid for the existing RCU grace period -- see qht_remove().
 * See also: qht_reset_size()
 */
void qht_reset(struct qht *ht);

/**
 * qht_reset_size - reset and resize a QHT
 * @ht: QHT to be reset and resized
 * @n_elems: number of entries the resized hash table should be optimized for.
 *
 * Returns true if the resize was necessary and therefore performed.
 * Returns false otherwise.
 *
 * If concurrent readers may exist, the objects pointed to by the hash table
 * must remain valid for the existing RCU grace period -- see qht_remove().
 * See also: qht_reset(), qht_resize().
 */
bool qht_reset_size(struct uc_struct *uc, struct qht *ht, size_t n_elems);

/**
 * qht_resize - resize a QHT
 * @ht: QHT to be resized
 * @n_elems: number of entries the resized hash table should be optimized for
 *
 * Returns true on success.
 * Returns false if the resize was not necessary and therefore not performed.
 * See also: qht_reset_size().
 */
bool qht_resize(struct uc_struct *uc, struct qht *ht, size_t n_elems);

/**
 * qht_iter - Iterate over a QHT
 * @ht: QHT to be iterated over
 * @func: function to be called for each entry in QHT
 * @userp: additional pointer to be passed to @func
 *
 * Each time it is called, user-provided @func is passed a pointer-hash pair,
 * plus @userp.
 *
 * Note: @ht cannot be accessed from @func
 * See also: qht_iter_remove()
 */
void qht_iter(struct uc_struct *uc, struct qht *ht, qht_iter_func_t func, void *userp);

/**
 * qht_iter_remove - Iterate over a QHT, optionally removing entries
 * @ht: QHT to be iterated over
 * @func: function to be called for each entry in QHT
 * @userp: additional pointer to be passed to @func
 *
 * Each time it is called, user-provided @func is passed a pointer-hash pair,
 * plus @userp. If @func returns true, the pointer-hash pair is removed.
 *
 * Note: @ht cannot be accessed from @func
 * See also: qht_iter()
 */
void qht_iter_remove(struct uc_struct *uc, struct qht *ht, qht_iter_bool_func_t func, void *userp);

/**
 * qht_statistics_init - Gather statistics from a QHT
 * @ht: QHT to gather statistics from
 * @stats: pointer to a &struct qht_stats to be filled in
 *
 * Does NOT need to be called under an RCU read-critical section,
 * since it does not dereference any pointers stored in the hash table.
 *
 * When done with @stats, pass the struct to qht_statistics_destroy().
 * Failing to do this will leak memory.
 */
void qht_statistics_init(const struct qht *ht, struct qht_stats *stats);

/**
 * qht_statistics_destroy - Destroy a &struct qht_stats
 * @stats: &struct qht_stats to be destroyed
 *
 * See also: qht_statistics_init().
 */
void qht_statistics_destroy(struct qht_stats *stats);

#endif /* QEMU_QHT_H */
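
/*
 * Usage sketch (not part of the original header). The comparison callback
 * and the "obj"/"hash" values are hypothetical; signatures follow the
 * declarations above.
 */
#if 0   /* example only */
static bool ptr_is_equal(struct uc_struct *uc, const void *a, const void *b)
{
    return a == b;
}

static void qht_example(struct uc_struct *uc, void *obj, uint32_t hash)
{
    struct qht ht;
    void *existing;

    qht_init(&ht, ptr_is_equal, 1 << 10, QHT_MODE_AUTO_RESIZE);
    if (!qht_insert(uc, &ht, obj, hash, &existing)) {
        /* an equivalent entry was already present; see "existing" */
    }
    void *found = qht_lookup(uc, &ht, obj, hash);   /* == obj on success */
    (void)found;
    qht_remove(&ht, obj, hash);
    qht_destroy(&ht);
}
#endif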

@ -37,8 +37,8 @@
 * @(#)queue.h 8.5 (Berkeley) 8/20/94
 */

#ifndef QEMU_SYS_QUEUE_H_
#define QEMU_SYS_QUEUE_H_
#ifndef QEMU_SYS_QUEUE_H
#define QEMU_SYS_QUEUE_H

/*
 * This file defines four types of data structures: singly-linked lists,
@ -78,8 +78,6 @@
 * For details on the use of these macros, see the queue(3) manual page.
 */

#include "qemu/atomic.h" /* for smp_wmb() */

/*
 * List definitions.
 */
@ -104,6 +102,19 @@ struct { \
        (head)->lh_first = NULL;                                        \
} while (/*CONSTCOND*/0)

#define QLIST_SWAP(dstlist, srclist, field) do {                        \
        void *tmplist;                                                  \
        tmplist = (srclist)->lh_first;                                  \
        (srclist)->lh_first = (dstlist)->lh_first;                      \
        if ((srclist)->lh_first != NULL) {                              \
            (srclist)->lh_first->field.le_prev = &(srclist)->lh_first;  \
        }                                                               \
        (dstlist)->lh_first = tmplist;                                  \
        if ((dstlist)->lh_first != NULL) {                              \
            (dstlist)->lh_first->field.le_prev = &(dstlist)->lh_first;  \
        }                                                               \
} while (/*CONSTCOND*/0)

#define QLIST_INSERT_AFTER(listelm, elm, field) do {                    \
        if (((elm)->field.le_next = (listelm)->field.le_next) != NULL)  \
                (listelm)->field.le_next->field.le_prev =               \
@ -126,24 +137,32 @@ struct { \
        (elm)->field.le_prev = &(head)->lh_first;                       \
} while (/*CONSTCOND*/0)

#define QLIST_INSERT_HEAD_RCU(head, elm, field) do {                    \
        (elm)->field.le_prev = &(head)->lh_first;                       \
        (elm)->field.le_next = (head)->lh_first;                        \
        smp_wmb(); /* fill elm before linking it */                     \
        if ((head)->lh_first != NULL) {                                 \
            (head)->lh_first->field.le_prev = &(elm)->field.le_next;    \
        }                                                               \
        (head)->lh_first = (elm);                                       \
        smp_wmb();                                                      \
} while (/* CONSTCOND*/0)

#define QLIST_REMOVE(elm, field) do {                                   \
        if ((elm)->field.le_next != NULL)                               \
                (elm)->field.le_next->field.le_prev =                   \
                    (elm)->field.le_prev;                               \
        *(elm)->field.le_prev = (elm)->field.le_next;                   \
        (elm)->field.le_next = NULL;                                    \
        (elm)->field.le_prev = NULL;                                    \
} while (/*CONSTCOND*/0)

/*
 * Like QLIST_REMOVE() but safe to call when elm is not in a list
 */
#define QLIST_SAFE_REMOVE(elm, field) do {                              \
        if ((elm)->field.le_prev != NULL) {                             \
            if ((elm)->field.le_next != NULL)                           \
                (elm)->field.le_next->field.le_prev =                   \
                    (elm)->field.le_prev;                               \
            *(elm)->field.le_prev = (elm)->field.le_next;               \
            (elm)->field.le_next = NULL;                                \
            (elm)->field.le_prev = NULL;                                \
        }                                                               \
} while (/*CONSTCOND*/0)

/* Is elm in a list? */
#define QLIST_IS_INSERTED(elm, field) ((elm)->field.le_prev != NULL)
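
/*
 * Usage sketch (not part of the original header): QLIST_SAFE_REMOVE() and
 * QLIST_IS_INSERTED() rely on QLIST_REMOVE() now NULLing le_prev/le_next.
 * "Node" and "all_nodes" are hypothetical; QLIST_HEAD()/QLIST_ENTRY() are
 * the standard BSD-style declarations from this header.
 */
#if 0   /* example only */
typedef struct Node {
    int value;
    QLIST_ENTRY(Node) next;
} Node;

static QLIST_HEAD(, Node) all_nodes;

static void drop_node(Node *n)
{
    QLIST_SAFE_REMOVE(n, next);            /* fine even if n was never inserted */
    assert(!QLIST_IS_INSERTED(n, next));   /* le_prev is NULL after removal */
}
#endif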

#define QLIST_FOREACH(var, head, field)                                 \
        for ((var) = ((head)->lh_first);                                \
             (var);                                                     \
@ -191,17 +210,44 @@ struct { \
} while (/*CONSTCOND*/0)

#define QSLIST_INSERT_HEAD(head, elm, field) do {                       \
        (elm)->field.sle_next = (head)->slh_first;                      \
        (head)->slh_first = (elm);                                      \
        (elm)->field.sle_next = (head)->slh_first;                      \
        (head)->slh_first = (elm);                                      \
} while (/*CONSTCOND*/0)

#define QSLIST_INSERT_HEAD_ATOMIC(head, elm, field) do {                     \
        typeof(elm) save_sle_next;                                           \
        do {                                                                 \
            save_sle_next = (elm)->field.sle_next = (head)->slh_first;       \
        } while (atomic_cmpxchg(&(head)->slh_first, save_sle_next, (elm)) != \
                 save_sle_next);                                             \
} while (/*CONSTCOND*/0)

#define QSLIST_MOVE_ATOMIC(dest, src) do {                              \
        (dest)->slh_first = atomic_xchg(&(src)->slh_first, NULL);       \
} while (/*CONSTCOND*/0)

#define QSLIST_REMOVE_HEAD(head, field) do {                            \
        (head)->slh_first = (head)->slh_first->field.sle_next;          \
        typeof((head)->slh_first) elm = (head)->slh_first;              \
        (head)->slh_first = elm->field.sle_next;                        \
        elm->field.sle_next = NULL;                                     \
} while (/*CONSTCOND*/0)

#define QSLIST_REMOVE_AFTER(slistelm, field) do {                       \
        (slistelm)->field.sle_next =                                    \
            QSLIST_NEXT(QSLIST_NEXT((slistelm), field), field);         \
#define QSLIST_REMOVE_AFTER(slistelm, field) do {                       \
        typeof(slistelm) next = (slistelm)->field.sle_next;             \
        (slistelm)->field.sle_next = next->field.sle_next;              \
        next->field.sle_next = NULL;                                    \
} while (/*CONSTCOND*/0)

#define QSLIST_REMOVE(head, elm, type, field) do {                      \
        if ((head)->slh_first == (elm)) {                               \
            QSLIST_REMOVE_HEAD((head), field);                          \
        } else {                                                        \
            struct type *curelm = (head)->slh_first;                    \
            while (curelm->field.sle_next != (elm))                     \
                curelm = curelm->field.sle_next;                        \
            curelm->field.sle_next = curelm->field.sle_next->field.sle_next; \
            (elm)->field.sle_next = NULL;                               \
        }                                                               \
} while (/*CONSTCOND*/0)

#define QSLIST_FOREACH(var, head, field)                                \
@ -264,8 +310,21 @@ struct { \
} while (/*CONSTCOND*/0)

#define QSIMPLEQ_REMOVE_HEAD(head, field) do {                          \
        if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL)\
        typeof((head)->sqh_first) elm = (head)->sqh_first;              \
        if (((head)->sqh_first = elm->field.sqe_next) == NULL)          \
            (head)->sqh_last = &(head)->sqh_first;                      \
        elm->field.sqe_next = NULL;                                     \
} while (/*CONSTCOND*/0)

#define QSIMPLEQ_SPLIT_AFTER(head, elm, field, removed) do {            \
        QSIMPLEQ_INIT(removed);                                         \
        if (((removed)->sqh_first = (head)->sqh_first) != NULL) {       \
            if (((head)->sqh_first = (elm)->field.sqe_next) == NULL) {  \
                (head)->sqh_last = &(head)->sqh_first;                  \
            }                                                           \
            (removed)->sqh_last = &(elm)->field.sqe_next;               \
            (elm)->field.sqe_next = NULL;                               \
        }                                                               \
} while (/*CONSTCOND*/0)

#define QSIMPLEQ_REMOVE(head, elm, type, field) do {                    \
@ -278,6 +337,7 @@ struct { \
            if ((curelm->field.sqe_next =                               \
                curelm->field.sqe_next->field.sqe_next) == NULL)        \
                    (head)->sqh_last = &(curelm)->field.sqe_next;       \
            (elm)->field.sqe_next = NULL;                               \
        }                                                               \
} while (/*CONSTCOND*/0)

@ -299,6 +359,14 @@ struct { \
    }                                                                   \
} while (/*CONSTCOND*/0)

#define QSIMPLEQ_PREPEND(head1, head2) do {                             \
    if (!QSIMPLEQ_EMPTY((head2))) {                                     \
        *(head2)->sqh_last = (head1)->sqh_first;                        \
        (head1)->sqh_first = (head2)->sqh_first;                        \
        QSIMPLEQ_INIT((head2));                                         \
    }                                                                   \
} while (/*CONSTCOND*/0)

#define QSIMPLEQ_LAST(head, type, field)                                \
    (QSIMPLEQ_EMPTY((head)) ?                                           \
        NULL :                                                          \
@ -308,82 +376,99 @@ struct { \
/*
 * Simple queue access methods.
 */
#define QSIMPLEQ_EMPTY_ATOMIC(head) (atomic_read(&((head)->sqh_first)) == NULL)
#define QSIMPLEQ_EMPTY(head)        ((head)->sqh_first == NULL)
#define QSIMPLEQ_FIRST(head)        ((head)->sqh_first)
#define QSIMPLEQ_NEXT(elm, field)   ((elm)->field.sqe_next)

typedef struct QTailQLink {
    void *tql_next;
    struct QTailQLink *tql_prev;
} QTailQLink;

/*
 * Tail queue definitions.
 * Tail queue definitions.  The union acts as a poor man's template, as if
 * it were QTailQLink<type>.
 */
#define Q_TAILQ_HEAD(name, type, qual)                                  \
struct name {                                                           \
        qual type *tqh_first;           /* first element */             \
        qual type *qual *tqh_last;      /* addr of last next element */ \
#define QTAILQ_HEAD(name, type)                                         \
union name {                                                            \
        struct type *tqh_first;       /* first element */               \
        QTailQLink tqh_circ;          /* link for circular backwards list */ \
}
#define QTAILQ_HEAD(name, type) Q_TAILQ_HEAD(name, struct type,)

#define QTAILQ_HEAD_INITIALIZER(head)                                   \
        { NULL, &(head).tqh_first }
        { .tqh_circ = { NULL, &(head).tqh_circ } }

#define Q_TAILQ_ENTRY(type, qual)                                       \
struct {                                                                \
        qual type *tqe_next;            /* next element */              \
        qual type *qual *tqe_prev;      /* address of previous next element */\
#define QTAILQ_ENTRY(type)                                              \
union {                                                                 \
        struct type *tqe_next;        /* next element */                \
        QTailQLink tqe_circ;          /* link for circular backwards list */ \
}
#define QTAILQ_ENTRY(type) Q_TAILQ_ENTRY(struct type,)

/*
 * Tail queue functions.
 */
#define QTAILQ_INIT(head) do {                                          \
        (head)->tqh_first = NULL;                                       \
        (head)->tqh_last = &(head)->tqh_first;                          \
        (head)->tqh_circ.tql_prev = &(head)->tqh_circ;                  \
} while (/*CONSTCOND*/0)

#define QTAILQ_INSERT_HEAD(head, elm, field) do {                       \
        if (((elm)->field.tqe_next = (head)->tqh_first) != NULL)        \
            (head)->tqh_first->field.tqe_prev =                         \
                &(elm)->field.tqe_next;                                 \
            (head)->tqh_first->field.tqe_circ.tql_prev =                \
                &(elm)->field.tqe_circ;                                 \
        else                                                            \
            (head)->tqh_last = &(elm)->field.tqe_next;                  \
            (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ;         \
        (head)->tqh_first = (elm);                                      \
        (elm)->field.tqe_prev = &(head)->tqh_first;                     \
        (elm)->field.tqe_circ.tql_prev = &(head)->tqh_circ;             \
} while (/*CONSTCOND*/0)

#define QTAILQ_INSERT_TAIL(head, elm, field) do {                       \
        (elm)->field.tqe_next = NULL;                                   \
        (elm)->field.tqe_prev = (head)->tqh_last;                       \
        *(head)->tqh_last = (elm);                                      \
        (head)->tqh_last = &(elm)->field.tqe_next;                      \
        (elm)->field.tqe_circ.tql_prev = (head)->tqh_circ.tql_prev;     \
        (head)->tqh_circ.tql_prev->tql_next = (elm);                    \
        (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ;             \
} while (/*CONSTCOND*/0)

#define QTAILQ_INSERT_AFTER(head, listelm, elm, field) do {             \
        if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
            (elm)->field.tqe_next->field.tqe_prev =                     \
                &(elm)->field.tqe_next;                                 \
            (elm)->field.tqe_next->field.tqe_circ.tql_prev =            \
                &(elm)->field.tqe_circ;                                 \
        else                                                            \
            (head)->tqh_last = &(elm)->field.tqe_next;                  \
            (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ;         \
        (listelm)->field.tqe_next = (elm);                              \
        (elm)->field.tqe_prev = &(listelm)->field.tqe_next;             \
        (elm)->field.tqe_circ.tql_prev = &(listelm)->field.tqe_circ;    \
} while (/*CONSTCOND*/0)

#define QTAILQ_INSERT_BEFORE(listelm, elm, field) do {                  \
        (elm)->field.tqe_prev = (listelm)->field.tqe_prev;              \
        (elm)->field.tqe_next = (listelm);                              \
        *(listelm)->field.tqe_prev = (elm);                             \
        (listelm)->field.tqe_prev = &(elm)->field.tqe_next;             \
#define QTAILQ_INSERT_BEFORE(listelm, elm, field) do {                       \
        (elm)->field.tqe_circ.tql_prev = (listelm)->field.tqe_circ.tql_prev; \
        (elm)->field.tqe_next = (listelm);                                   \
        (listelm)->field.tqe_circ.tql_prev->tql_next = (elm);                \
        (listelm)->field.tqe_circ.tql_prev = &(elm)->field.tqe_circ;         \
} while (/*CONSTCOND*/0)

#define QTAILQ_REMOVE(head, elm, field) do {                            \
        if (((elm)->field.tqe_next) != NULL)                            \
            (elm)->field.tqe_next->field.tqe_prev =                     \
                (elm)->field.tqe_prev;                                  \
            (elm)->field.tqe_next->field.tqe_circ.tql_prev =            \
                (elm)->field.tqe_circ.tql_prev;                         \
        else                                                            \
            (head)->tqh_last = (elm)->field.tqe_prev;                   \
        *(elm)->field.tqe_prev = (elm)->field.tqe_next;                 \
            (head)->tqh_circ.tql_prev = (elm)->field.tqe_circ.tql_prev; \
        (elm)->field.tqe_circ.tql_prev->tql_next = (elm)->field.tqe_next; \
        (elm)->field.tqe_circ.tql_prev = NULL;                          \
        (elm)->field.tqe_circ.tql_next = NULL;                          \
        (elm)->field.tqe_next = NULL;                                   \
} while (/*CONSTCOND*/0)

/* remove @left, @right and all elements in between from @head */
#define QTAILQ_REMOVE_SEVERAL(head, left, right, field) do {            \
        if (((right)->field.tqe_next) != NULL)                          \
            (right)->field.tqe_next->field.tqe_circ.tql_prev =          \
                (left)->field.tqe_circ.tql_prev;                        \
        else                                                            \
            (head)->tqh_circ.tql_prev = (left)->field.tqe_circ.tql_prev; \
        (left)->field.tqe_circ.tql_prev->tql_next = (right)->field.tqe_next; \
} while (/*CONSTCOND*/0)

#define QTAILQ_FOREACH(var, head, field)                                \
        for ((var) = ((head)->tqh_first);                               \
             (var);                                                     \
@ -394,10 +479,15 @@ struct { \
             (var) && ((next_var) = ((var)->field.tqe_next), 1);        \
             (var) = (next_var))

#define QTAILQ_FOREACH_REVERSE(var, head, headname, field)              \
        for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last)); \
#define QTAILQ_FOREACH_REVERSE(var, head, field)                        \
        for ((var) = QTAILQ_LAST(head);                                 \
             (var);                                                     \
             (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))
             (var) = QTAILQ_PREV(var, field))

#define QTAILQ_FOREACH_REVERSE_SAFE(var, head, field, prev_var)         \
        for ((var) = QTAILQ_LAST(head);                                 \
             (var) && ((prev_var) = QTAILQ_PREV(var, field), 1);        \
             (var) = (prev_var))

/*
 * Tail queue access methods.
@ -405,10 +495,88 @@ struct { \
#define QTAILQ_EMPTY(head)        ((head)->tqh_first == NULL)
#define QTAILQ_FIRST(head)        ((head)->tqh_first)
#define QTAILQ_NEXT(elm, field)   ((elm)->field.tqe_next)
#define QTAILQ_IN_USE(elm, field) ((elm)->field.tqe_circ.tql_prev != NULL)

#define QTAILQ_LAST(head, headname)                                     \
        (*(((struct headname *)((head)->tqh_last))->tqh_last))
#define QTAILQ_PREV(elm, headname, field)                               \
        (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
#define QTAILQ_LINK_PREV(link)                                          \
        ((link).tql_prev->tql_prev->tql_next)
#ifndef _MSC_VER
#define QTAILQ_LAST(head)                                               \
        ((typeof((head)->tqh_first)) QTAILQ_LINK_PREV((head)->tqh_circ))
#define QTAILQ_PREV(elm, field)                                         \
        ((typeof((elm)->field.tqe_next)) QTAILQ_LINK_PREV((elm)->field.tqe_circ))
#else
#define QTAILQ_LAST(head)                                               \
        (QTAILQ_LINK_PREV((head)->tqh_circ))
#define QTAILQ_PREV(elm, field)                                         \
        (QTAILQ_LINK_PREV((elm)->field.tqe_circ))
#endif

#endif /* !QEMU_SYS_QUEUE_H_ */
#define field_at_offset(base, offset, type)                             \
        ((type *) (((char *) (base)) + (offset)))

/*
 * Raw access of elements of a tail queue head.  Offsets are all zero
 * because it's a union.
 */
#define QTAILQ_RAW_FIRST(head)                                          \
        field_at_offset(head, 0, void *)
#define QTAILQ_RAW_TQH_CIRC(head)                                       \
        field_at_offset(head, 0, QTailQLink)

/*
 * Raw access of elements of a tail entry
 */
#define QTAILQ_RAW_NEXT(elm, entry)                                     \
        field_at_offset(elm, entry, void *)
#define QTAILQ_RAW_TQE_CIRC(elm, entry)                                 \
        field_at_offset(elm, entry, QTailQLink)
/*
 * Tail queue traversal using pointer arithmetic.
 */
#define QTAILQ_RAW_FOREACH(elm, head, entry)                            \
        for ((elm) = *QTAILQ_RAW_FIRST(head);                           \
             (elm);                                                     \
             (elm) = *QTAILQ_RAW_NEXT(elm, entry))
/*
 * Tail queue insertion using pointer arithmetic.
 */
#define QTAILQ_RAW_INSERT_TAIL(head, elm, entry) do {                          \
        *QTAILQ_RAW_NEXT(elm, entry) = NULL;                                   \
        QTAILQ_RAW_TQE_CIRC(elm, entry)->tql_prev = QTAILQ_RAW_TQH_CIRC(head)->tql_prev; \
        QTAILQ_RAW_TQH_CIRC(head)->tql_prev->tql_next = (elm);                 \
        QTAILQ_RAW_TQH_CIRC(head)->tql_prev = QTAILQ_RAW_TQE_CIRC(elm, entry); \
} while (/*CONSTCOND*/0)

#define QLIST_RAW_FIRST(head)                                           \
        field_at_offset(head, 0, void *)

#define QLIST_RAW_NEXT(elm, entry)                                      \
        field_at_offset(elm, entry, void *)

#define QLIST_RAW_PREVIOUS(elm, entry)                                  \
        field_at_offset(elm, entry + sizeof(void *), void *)

#define QLIST_RAW_FOREACH(elm, head, entry)                             \
        for ((elm) = *QLIST_RAW_FIRST(head);                            \
             (elm);                                                     \
             (elm) = *QLIST_RAW_NEXT(elm, entry))

#define QLIST_RAW_INSERT_AFTER(head, prev, elem, entry) do {            \
        *QLIST_RAW_NEXT(prev, entry) = elem;                            \
        *QLIST_RAW_PREVIOUS(elem, entry) = QLIST_RAW_NEXT(prev, entry); \
        *QLIST_RAW_NEXT(elem, entry) = NULL;                            \
} while (0)

#define QLIST_RAW_INSERT_HEAD(head, elm, entry) do {                    \
        void *first = *QLIST_RAW_FIRST(head);                           \
        *QLIST_RAW_FIRST(head) = elm;                                   \
        *QLIST_RAW_PREVIOUS(elm, entry) = QLIST_RAW_FIRST(head);        \
        if (first) {                                                    \
            *QLIST_RAW_NEXT(elm, entry) = first;                        \
            *QLIST_RAW_PREVIOUS(first, entry) = QLIST_RAW_NEXT(elm, entry); \
        } else {                                                        \
            *QLIST_RAW_NEXT(elm, entry) = NULL;                         \
        }                                                               \
} while (0)

#endif /* QEMU_SYS_QUEUE_H */
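
/*
 * Usage sketch (not part of the original header): with the circular
 * QTailQLink representation, reverse traversal no longer needs the name of
 * the head struct. "Entry" and its fields are hypothetical.
 */
#if 0   /* example only */
typedef struct Entry {
    int value;
    QTAILQ_ENTRY(Entry) node;
} Entry;

static QTAILQ_HEAD(EntryHead, Entry) entries =
    QTAILQ_HEAD_INITIALIZER(entries);

static void walk_backwards(void)
{
    Entry *e;
    /* old API: QTAILQ_FOREACH_REVERSE(e, &entries, EntryHead, node) */
    QTAILQ_FOREACH_REVERSE(e, &entries, node) {
        /* visit e->value, last element first */
    }
}
#endif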

@ -1,39 +1,195 @@
/*
 * QEMU 64-bit address ranges
 *
 * Copyright (c) 2015-2016 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef QEMU_RANGE_H
#define QEMU_RANGE_H

#include "unicorn/platform.h"
#include <qemu/typedefs.h>
#include "qemu/queue.h"

/*
 * Operations on 64 bit address ranges.
 * Notes:
 * - ranges must not wrap around 0, but can include the last byte ~0x0LL.
 * - this can not represent a full 0 to ~0x0LL range.
 * - Ranges must not wrap around 0, but can include UINT64_MAX.
 */

/* A structure representing a range of addresses. */
struct Range {
    uint64_t begin; /* First byte of the range, or 0 if empty. */
    uint64_t end;   /* 1 + the last byte. 0 if range empty or ends at ~0x0LL. */
    /*
     * Do not access members directly, use the functions!
     * A non-empty range has @lob <= @upb.
     * An empty range has @lob == @upb + 1.
     */
    uint64_t lob;        /* inclusive lower bound */
    uint64_t upb;        /* inclusive upper bound */
};

static inline void range_invariant(const Range *range)
{
    assert(range->lob <= range->upb || range->lob == range->upb + 1);
}

/* Compound literal encoding the empty range */
#define range_empty ((Range){ .lob = 1, .upb = 0 })

/* Is @range empty? */
static inline bool range_is_empty(const Range *range)
{
    range_invariant(range);
    return range->lob > range->upb;
}

/* Does @range contain @val? */
static inline bool range_contains(const Range *range, uint64_t val)
{
    return val >= range->lob && val <= range->upb;
}

/* Initialize @range to the empty range */
static inline void range_make_empty(Range *range)
{
    *range = range_empty;
    assert(range_is_empty(range));
}

/*
 * Initialize @range to span the interval [@lob,@upb].
 * Both bounds are inclusive.
 * The interval must not be empty, i.e. @lob must be less than or
 * equal @upb.
 */
static inline void range_set_bounds(Range *range, uint64_t lob, uint64_t upb)
{
    range->lob = lob;
    range->upb = upb;
    assert(!range_is_empty(range));
}

/*
 * Initialize @range to span the interval [@lob,@upb_plus1).
 * The lower bound is inclusive, the upper bound is exclusive.
 * Zero @upb_plus1 is special: if @lob is also zero, set @range to the
 * empty range.  Else, set @range to [@lob,UINT64_MAX].
 */
static inline void range_set_bounds1(Range *range,
                                     uint64_t lob, uint64_t upb_plus1)
{
    if (!lob && !upb_plus1) {
        *range = range_empty;
    } else {
        range->lob = lob;
        range->upb = upb_plus1 - 1;
    }
    range_invariant(range);
}

/* Return @range's lower bound.  @range must not be empty. */
static inline uint64_t range_lob(Range *range)
{
    assert(!range_is_empty(range));
    return range->lob;
}

/* Return @range's upper bound.  @range must not be empty. */
static inline uint64_t range_upb(Range *range)
{
    assert(!range_is_empty(range));
    return range->upb;
}

/*
 * Initialize @range to span the interval [@lob,@lob + @size - 1].
 * @size may be 0. If the range would overflow, returns -ERANGE, otherwise
 * 0.
 */
static inline int QEMU_WARN_UNUSED_RESULT range_init(Range *range, uint64_t lob,
                                                     uint64_t size)
{
    if (lob + size < lob) {
        return -ERANGE;
    }
    range->lob = lob;
    range->upb = lob + size - 1;
    range_invariant(range);
    return 0;
}

/*
 * Initialize @range to span the interval [@lob,@lob + @size - 1].
 * @size may be 0. Range must not overflow.
 */
static inline void range_init_nofail(Range *range, uint64_t lob, uint64_t size)
{
    range->lob = lob;
    range->upb = lob + size - 1;
    range_invariant(range);
}

/*
 * Get the size of @range.
 */
static inline uint64_t range_size(const Range *range)
{
    return range->upb - range->lob + 1;
}

/*
 * Check if @range1 overlaps with @range2. If one of the ranges is empty,
 * the result is always "false".
 */
static inline bool range_overlaps_range(const Range *range1,
                                        const Range *range2)
{
    if (range_is_empty(range1) || range_is_empty(range2)) {
        return false;
    }
    return !(range2->upb < range1->lob || range1->upb < range2->lob);
}

/*
 * Check if @range1 contains @range2. If one of the ranges is empty,
 * the result is always "false".
 */
static inline bool range_contains_range(const Range *range1,
                                        const Range *range2)
{
    if (range_is_empty(range1) || range_is_empty(range2)) {
        return false;
    }
    return range1->lob <= range2->lob && range1->upb >= range2->upb;
}

/*
 * Extend @range to the smallest interval that includes @extend_by, too.
 */
static inline void range_extend(Range *range, Range *extend_by)
{
    if (!extend_by->begin && !extend_by->end) {
    if (range_is_empty(extend_by)) {
        return;
    }
    if (!range->begin && !range->end) {
    if (range_is_empty(range)) {
        *range = *extend_by;
        return;
    }
    if (range->begin > extend_by->begin) {
        range->begin = extend_by->begin;
    if (range->lob > extend_by->lob) {
        range->lob = extend_by->lob;
    }
    /* Compare last byte in case region ends at ~0x0LL */
    if (range->end - 1 < extend_by->end - 1) {
        range->end = extend_by->end;
    if (range->upb < extend_by->upb) {
        range->upb = extend_by->upb;
    }
    range_invariant(range);
}

/* Get last byte of a range from offset + length.
@ -61,75 +217,6 @@ static inline int ranges_overlap(uint64_t first1, uint64_t len1,
    return !(last2 < first1 || last1 < first2);
}

/* 0,1 can merge with 1,2 but don't overlap */
static inline bool ranges_can_merge(Range *range1, Range *range2)
{
    return !(range1->end < range2->begin || range2->end < range1->begin);
}

static inline int range_merge(Range *range1, Range *range2)
{
    if (ranges_can_merge(range1, range2)) {
        if (range1->end < range2->end) {
            range1->end = range2->end;
        }
        if (range1->begin > range2->begin) {
            range1->begin = range2->begin;
        }
        return 0;
    }

    return -1;
}

static inline GList *g_list_insert_sorted_merged(GList *list,
                                                 gpointer data,
                                                 GCompareFunc func)
{
    GList *l, *next = NULL;
    Range *r, *nextr;

    if (!list) {
        list = g_list_insert_sorted(list, data, func);
        return list;
    }

    nextr = data;
    l = list;
    while (l && l != next && nextr) {
        r = l->data;
        if (ranges_can_merge(r, nextr)) {
            range_merge(r, nextr);
            l = g_list_remove_link(l, next);
            next = g_list_next(l);
            if (next) {
                nextr = next->data;
            } else {
                nextr = NULL;
            }
        } else {
            l = g_list_next(l);
        }
    }

    if (!l) {
        list = g_list_insert_sorted(list, data, func);
    }

    return list;
}

static inline gint range_compare(gconstpointer a, gconstpointer b)
{
    Range *ra = (Range *)a, *rb = (Range *)b;
    if (ra->begin == rb->begin && ra->end == rb->end) {
        return 0;
    } else if (range_get_last(ra->begin, ra->end) <
               range_get_last(rb->begin, rb->end)) {
        return -1;
    } else {
        return 1;
    }
}
GList *range_list_insert(GList *list, Range *data);

#endif
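
/*
 * Usage sketch (not part of the original header): the inclusive lob/upb
 * representation can describe the full [0, UINT64_MAX] span, which the old
 * begin/end-plus-one encoding could not.
 */
#if 0   /* example only */
static void range_example(void)
{
    Range r;

    range_init_nofail(&r, 0x1000, 0x1000);   /* covers [0x1000, 0x1fff] */
    assert(range_contains(&r, 0x1234));
    assert(range_size(&r) == 0x1000);

    Range all;
    range_set_bounds(&all, 0, UINT64_MAX);   /* whole 64-bit address space */
    assert(range_contains_range(&all, &r));
}
#endif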

@ -1,6 +1,7 @@
#ifndef __QEMU_THREAD_POSIX_H
#define __QEMU_THREAD_POSIX_H 1
#include "pthread.h"
#ifndef QEMU_THREAD_POSIX_H
#define QEMU_THREAD_POSIX_H

#include <pthread.h>
#include <semaphore.h>

struct QemuThread {

@ -1,6 +1,7 @@
#ifndef __QEMU_THREAD_WIN32_H
#define __QEMU_THREAD_WIN32_H 1
#include "windows.h"
#ifndef QEMU_THREAD_WIN32_H
#define QEMU_THREAD_WIN32_H

#include <windows.h>

typedef struct QemuThreadData QemuThreadData;
struct QemuThread {
@ -9,6 +10,6 @@ struct QemuThread {
};

/* Only valid for joinable threads. */
HANDLE qemu_thread_get_handle(QemuThread *thread);
HANDLE qemu_thread_get_handle(struct QemuThread *thread);

#endif

@ -1,11 +1,13 @@
#ifndef __QEMU_THREAD_H
#define __QEMU_THREAD_H 1
#ifndef QEMU_THREAD_H
#define QEMU_THREAD_H

#include "unicorn/platform.h"
#include "qemu/processor.h"

struct uc_struct;
typedef struct QemuThread QemuThread;

#ifdef _WIN32
#if defined(_WIN32) && !defined(__MINGW32__)
#include "qemu/thread-win32.h"
#else
#include "qemu/thread-posix.h"
@ -14,8 +16,6 @@ typedef struct QemuThread QemuThread;
#define QEMU_THREAD_JOINABLE 0
#define QEMU_THREAD_DETACHED 1

struct uc_struct;
// return -1 on error, 0 on success
int qemu_thread_create(struct uc_struct *uc, QemuThread *thread, const char *name,
                       void *(*start_routine)(void *),
                       void *arg, int mode);

@ -1,8 +1,10 @@
#ifndef QEMU_TIMER_H
#define QEMU_TIMER_H

#include "qemu/typedefs.h"
#include "qemu-common.h"
#include "qemu/bitops.h"
#include "qemu/host-utils.h"

#define NANOSECONDS_PER_SECOND 1000000000LL

/* timers */

@ -18,32 +20,57 @@
 * @QEMU_CLOCK_REALTIME: Real time clock
 *
 * The real time clock should be used only for stuff which does not
 * change the virtual machine state, as it is run even if the virtual
 * machine is stopped. The real time clock has a frequency of 1000
 * Hz.
 * change the virtual machine state, as it runs even if the virtual
 * machine is stopped.
 *
 * @QEMU_CLOCK_VIRTUAL: virtual clock
 *
 * The virtual clock is only run during the emulation. It is stopped
 * when the virtual machine is stopped. Virtual timers use a high
 * precision clock, usually cpu cycles (use ticks_per_sec).
 * The virtual clock only runs during the emulation. It stops
 * when the virtual machine is stopped.
 *
 * @QEMU_CLOCK_HOST: host clock
 *
 * The host clock should be use for device models that emulate accurate
 * The host clock should be used for device models that emulate accurate
 * real time sources. It will continue to run when the virtual machine
 * is suspended, and it will reflect system time changes the host may
 * undergo (e.g. due to NTP). The host clock has the same precision as
 * the virtual clock.
 * undergo (e.g. due to NTP).
 *
 * @QEMU_CLOCK_VIRTUAL_RT: realtime clock used for icount warp
 *
 * Outside icount mode, this clock is the same as @QEMU_CLOCK_VIRTUAL.
 * In icount mode, this clock counts nanoseconds while the virtual
 * machine is running.  It is used to increase @QEMU_CLOCK_VIRTUAL
 * while the CPUs are sleeping and thus not executing instructions.
 */

typedef enum {
    QEMU_CLOCK_REALTIME = 0,
    QEMU_CLOCK_VIRTUAL = 1,
    QEMU_CLOCK_HOST = 2,
    QEMU_CLOCK_VIRTUAL_RT = 3,
    QEMU_CLOCK_MAX
} QEMUClockType;

/**
 * QEMU Timer attributes:
 *
 * An individual timer may be given one or multiple attributes when initialized.
 * Each attribute corresponds to one bit. Attributes modify the processing
 * of timers when they fire.
 *
 * The following attributes are available:
 *
 * QEMU_TIMER_ATTR_EXTERNAL: drives external subsystem
 * QEMU_TIMER_ATTR_ALL: mask for all existing attributes
 *
 * Timers with the EXTERNAL attribute are not recorded in record/replay (rr)
 * mode, so they can be used for subsystems that operate outside the guest
 * core. Applicable only to the virtual clock type.
 */

#define QEMU_TIMER_ATTR_EXTERNAL ((int)BIT(0))
#define QEMU_TIMER_ATTR_ALL      0xffffffff

typedef struct QEMUTimerList QEMUTimerList;

struct QEMUTimerListGroup {
@ -51,7 +78,7 @@ struct QEMUTimerListGroup {
};

typedef void QEMUTimerCB(void *opaque);
typedef void QEMUTimerListNotifyCB(void *opaque);
typedef void QEMUTimerListNotifyCB(void *opaque, QEMUClockType type);

struct QEMUTimer {
    int64_t expire_time;        /* in nanoseconds */
@ -59,13 +86,10 @@ struct QEMUTimer {
    QEMUTimerCB *cb;
    void *opaque;
    QEMUTimer *next;
    int attributes;
    int scale;
};

/*
 * QEMUClockType
 */

/*
 * qemu_clock_get_ns;
 * @type: the clock type
@ -105,6 +129,599 @@ static inline int64_t qemu_clock_get_us(QEMUClockType type)
    return qemu_clock_get_ns(type) / SCALE_US;
}
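
/*
 * Sketch (not part of the original header): timing a stretch of host work
 * at different scales; SCALE_MS is assumed to be defined alongside SCALE_US.
 */
#if 0   /* example only */
static int64_t time_something_ms(void)
{
    int64_t t0 = qemu_clock_get_ns(QEMU_CLOCK_HOST);
    /* ... work to be measured ... */
    return (qemu_clock_get_ns(QEMU_CLOCK_HOST) - t0) / SCALE_MS;
}
#endif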
|
||||
|
||||
/**
|
||||
* qemu_clock_has_timers:
|
||||
* @type: the clock type
|
||||
*
|
||||
* Determines whether a clock's default timer list
|
||||
* has timers attached
|
||||
*
|
||||
* Note that this function should not be used when other threads also access
|
||||
* the timer list. The return value may be outdated by the time it is acted
|
||||
* upon.
|
||||
*
|
||||
* Returns: true if the clock's default timer list
|
||||
* has timers attached
|
||||
*/
|
||||
bool qemu_clock_has_timers(QEMUClockType type);
|
||||
|
||||
/**
|
||||
* qemu_clock_expired:
|
||||
* @type: the clock type
|
||||
*
|
||||
* Determines whether a clock's default timer list
|
||||
* has an expired timer.
|
||||
*
|
||||
* Returns: true if the clock's default timer list has
|
||||
* an expired timer
|
||||
*/
|
||||
bool qemu_clock_expired(QEMUClockType type);
|
||||
|
||||
/**
|
||||
* qemu_clock_use_for_deadline:
|
||||
* @type: the clock type
|
||||
*
|
||||
* Determine whether a clock should be used for deadline
|
||||
* calculations. Some clocks, for instance vm_clock with
|
||||
* use_icount set, do not count in nanoseconds. Such clocks
|
||||
* are not used for deadline calculations, and are presumed
|
||||
* to interrupt any poll using qemu_notify/aio_notify
|
||||
* etc.
|
||||
*
|
||||
* Returns: true if the clock runs in nanoseconds and
|
||||
* should be used for a deadline.
|
||||
*/
|
||||
bool qemu_clock_use_for_deadline(QEMUClockType type);
|
||||
|
||||
/**
|
||||
* qemu_clock_deadline_ns_all:
|
||||
* @type: the clock type
|
||||
* @attr_mask: mask for the timer attributes that are included
|
||||
* in deadline calculation
|
||||
*
|
||||
* Calculate the deadline across all timer lists associated
|
||||
* with a clock (as opposed to just the default one)
|
||||
* in nanoseconds, or -1 if no timer is set to expire.
|
||||
*
|
||||
* Returns: time until expiry in nanoseconds or -1
|
||||
*/
|
||||
int64_t qemu_clock_deadline_ns_all(QEMUClockType type, int attr_mask);

/**
 * qemu_clock_get_main_loop_timerlist:
 * @type: the clock type
 *
 * Return the default timer list associated with a clock.
 *
 * Returns: the default timer list
 */
QEMUTimerList *qemu_clock_get_main_loop_timerlist(QEMUClockType type);

/**
 * qemu_clock_notify:
 * @type: the clock type
 *
 * Call the notifier callback connected with the default timer
 * list linked to the clock, or qemu_notify() if none.
 */
void qemu_clock_notify(QEMUClockType type);

/**
 * qemu_clock_enable:
 * @type: the clock type
 * @enabled: true to enable, false to disable
 *
 * Enable or disable a clock.
 * Disabling the clock will wait for related timerlists to stop
 * executing qemu_run_timers.  Thus, this function should not
 * be used from the callback of a timer that is based on @type.
 * Doing so would cause a deadlock.
 *
 * Caller should hold BQL.
 */
void qemu_clock_enable(QEMUClockType type, bool enabled);

/**
 * qemu_start_warp_timer:
 *
 * Starts a timer for virtual clock update
 */
void qemu_start_warp_timer(void);

/**
 * qemu_clock_run_timers:
 * @type: clock on which to operate
 *
 * Run all the timers associated with the default timer list
 * of a clock.
 *
 * Returns: true if any timer ran.
 */
bool qemu_clock_run_timers(QEMUClockType type);

/**
 * qemu_clock_run_all_timers:
 *
 * Run all the timers associated with the default timer list
 * of every clock.
 *
 * Returns: true if any timer ran.
 */
bool qemu_clock_run_all_timers(void);


/*
 * QEMUTimerList
 */

/**
 * timerlist_new:
 * @type: the clock type to associate with the timerlist
 * @cb: the callback to call on notification
 * @opaque: the opaque pointer to pass to the callback
 *
 * Create a new timerlist associated with the clock of
 * type @type.
 *
 * Returns: a pointer to the QEMUTimerList created
 */
QEMUTimerList *timerlist_new(QEMUClockType type,
                             QEMUTimerListNotifyCB *cb, void *opaque);
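
/*
 * Usage sketch (editor's note): an AioContext-style owner creating its own
 * timer list with a notify callback; the callback name and its wake-up
 * mechanism are hypothetical.
 *
 *     static void my_notify_cb(void *opaque, QEMUClockType type)
 *     {
 *         // kick the thread that polls this list, e.g. via an EventNotifier
 *     }
 *
 *     QEMUTimerList *tl = timerlist_new(QEMU_CLOCK_VIRTUAL,
 *                                       my_notify_cb, my_ctx);
 */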

/**
 * timerlist_free:
 * @timer_list: the timer list to free
 *
 * Frees a timer_list.  It must have no active timers.
 */
void timerlist_free(QEMUTimerList *timer_list);

/**
 * timerlist_has_timers:
 * @timer_list: the timer list to operate on
 *
 * Determine whether a timer list has active timers
 *
 * Note that this function should not be used when other threads also access
 * the timer list.  The return value may be outdated by the time it is acted
 * upon.
 *
 * Returns: true if the timer list has timers.
 */
bool timerlist_has_timers(QEMUTimerList *timer_list);

/**
 * timerlist_expired:
 * @timer_list: the timer list to operate on
 *
 * Determine whether a timer list has any timers which
 * are expired.
 *
 * Returns: true if the timer list has timers which
 * have expired.
 */
bool timerlist_expired(QEMUTimerList *timer_list);

/**
 * timerlist_deadline_ns:
 * @timer_list: the timer list to operate on
 *
 * Determine the deadline for a timer_list, i.e.
 * the number of nanoseconds until the first timer
 * expires.  Return -1 if there are no timers.
 *
 * Returns: the number of nanoseconds until the earliest
 * timer expires, or -1 if none
 */
int64_t timerlist_deadline_ns(QEMUTimerList *timer_list);

/**
 * timerlist_get_clock:
 * @timer_list: the timer list to operate on
 *
 * Determine the clock type associated with a timer list.
 *
 * Returns: the clock type associated with the
 * timer list.
 */
QEMUClockType timerlist_get_clock(QEMUTimerList *timer_list);

/**
 * timerlist_run_timers:
 * @timer_list: the timer list to use
 *
 * Call all expired timers associated with the timer list.
 *
 * Returns: true if any timer expired
 */
bool timerlist_run_timers(QEMUTimerList *timer_list);

/**
 * timerlist_notify:
 * @timer_list: the timer list to use
 *
 * Call the notifier callback associated with the timer list.
 */
void timerlist_notify(QEMUTimerList *timer_list);
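
/*
 * Usage sketch (editor's note): the poll-and-dispatch pattern these
 * primitives support, as a hypothetical event-loop fragment.
 *
 *     int64_t deadline = timerlist_deadline_ns(tl);
 *     // ... sleep/poll until the deadline (or indefinitely if -1) ...
 *     if (timerlist_expired(tl)) {
 *         timerlist_run_timers(tl);   // runs every expired callback
 *     }
 */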

/*
 * QEMUTimerListGroup
 */

/**
 * timerlistgroup_init:
 * @tlg: the timer list group
 * @cb: the callback to call when a notify is required
 * @opaque: the opaque pointer to be passed to the callback.
 *
 * Initialise a timer list group.  This must already be
 * allocated in memory and zeroed.  The notifier callback is
 * called whenever a clock in the timer list group is
 * reenabled or whenever a timer associated with any timer
 * list is modified.  If @cb is specified as NULL, qemu_notify()
 * is used instead.
 */
void timerlistgroup_init(QEMUTimerListGroup *tlg,
                         QEMUTimerListNotifyCB *cb, void *opaque);

/**
 * timerlistgroup_deinit:
 * @tlg: the timer list group
 *
 * Deinitialise a timer list group.  This must already be
 * initialised.  Note the memory is not freed.
 */
void timerlistgroup_deinit(QEMUTimerListGroup *tlg);

/**
 * timerlistgroup_run_timers:
 * @tlg: the timer list group
 *
 * Run the timers associated with a timer list group.
 * This will run timers on multiple clocks.
 *
 * Returns: true if any timer callback ran
 */
bool timerlistgroup_run_timers(QEMUTimerListGroup *tlg);

/**
 * timerlistgroup_deadline_ns:
 * @tlg: the timer list group
 *
 * Determine the deadline of the soonest timer to
 * expire associated with any timer list linked to
 * the timer list group.  Only clocks suitable for
 * deadline calculation are included.
 *
 * Returns: the deadline in nanoseconds or -1 if no
 * timers are to expire.
 */
int64_t timerlistgroup_deadline_ns(QEMUTimerListGroup *tlg);

/*
 * QEMUTimer
 */

/**
 * timer_init_full:
 * @ts: the timer to be initialised
 * @timer_list_group: (optional) the timer list group to attach the timer to
 * @type: the clock type to use
 * @scale: the scale value for the timer
 * @attributes: 0, or one or more OR'ed QEMU_TIMER_ATTR_<id> values
 * @cb: the callback to be called when the timer expires
 * @opaque: the opaque pointer to be passed to the callback
 *
 * Initialise a timer with the given scale and attributes,
 * and associate it with the timer list for the given clock @type in
 * @timer_list_group (or the default timer list group, if NULL).
 * The caller is responsible for allocating the memory.
 *
 * No explicit deinit call is needed; simply make sure the timer
 * is not on any list, using timer_del.
 */
void timer_init_full(QEMUTimer *ts,
                     QEMUTimerListGroup *timer_list_group, QEMUClockType type,
                     int scale, int attributes,
                     QEMUTimerCB *cb, void *opaque);

/**
 * timer_init:
 * @ts: the timer to be initialised
 * @type: the clock to associate with the timer
 * @scale: the scale value for the timer
 * @cb: the callback to call when the timer expires
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialize a timer with the given scale on the default timer list
 * associated with the clock.
 * See timer_init_full for details.
 */
static inline void timer_init(QEMUTimer *ts, QEMUClockType type, int scale,
                              QEMUTimerCB *cb, void *opaque)
{
    // timer_init_full(ts, NULL, type, scale, 0, cb, opaque);
}

/**
 * timer_init_ns:
 * @ts: the timer to be initialised
 * @type: the clock to associate with the timer
 * @cb: the callback to call when the timer expires
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialize a timer with nanosecond scale on the default timer list
 * associated with the clock.
 * See timer_init_full for details.
 */
static inline void timer_init_ns(QEMUTimer *ts, QEMUClockType type,
                                 QEMUTimerCB *cb, void *opaque)
{
    timer_init(ts, type, SCALE_NS, cb, opaque);
}

/**
 * timer_init_us:
 * @ts: the timer to be initialised
 * @type: the clock to associate with the timer
 * @cb: the callback to call when the timer expires
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialize a timer with microsecond scale on the default timer list
 * associated with the clock.
 * See timer_init_full for details.
 */
static inline void timer_init_us(QEMUTimer *ts, QEMUClockType type,
                                 QEMUTimerCB *cb, void *opaque)
{
    timer_init(ts, type, SCALE_US, cb, opaque);
}

/**
 * timer_init_ms:
 * @ts: the timer to be initialised
 * @type: the clock to associate with the timer
 * @cb: the callback to call when the timer expires
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialize a timer with millisecond scale on the default timer list
 * associated with the clock.
 * See timer_init_full for details.
 */
static inline void timer_init_ms(QEMUTimer *ts, QEMUClockType type,
                                 QEMUTimerCB *cb, void *opaque)
{
    timer_init(ts, type, SCALE_MS, cb, opaque);
}
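
/*
 * Usage sketch (editor's note): embedding a statically allocated timer in a
 * device state struct; the names are hypothetical.  Note that in this port
 * the timer_init_full() call inside timer_init() is commented out, so these
 * initializers are effectively no-ops here.
 *
 *     struct MyDevice {
 *         QEMUTimer frame_timer;
 *     };
 *
 *     timer_init_ns(&dev->frame_timer, QEMU_CLOCK_VIRTUAL, frame_cb, dev);
 */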

/**
 * timer_new_full:
 * @timer_list_group: (optional) the timer list group to attach the timer to
 * @type: the clock type to use
 * @scale: the scale value for the timer
 * @attributes: 0, or one or more OR'ed QEMU_TIMER_ATTR_<id> values
 * @cb: the callback to be called when the timer expires
 * @opaque: the opaque pointer to be passed to the callback
 *
 * Create a new timer with the given scale and attributes,
 * and associate it with the timer list for the given clock @type in
 * @timer_list_group (or the default timer list group, if NULL).
 * The memory is allocated by the function.
 *
 * This is not the preferred interface unless you know you
 * are going to call timer_free.  Use timer_init or timer_init_full instead.
 *
 * The default timer list has one special feature: in icount mode,
 * %QEMU_CLOCK_VIRTUAL timers are run in the vCPU thread.  This is
 * not true of other timer lists, which are typically associated
 * with an AioContext---each of them runs its timer callbacks in its own
 * AioContext thread.
 *
 * Returns: a pointer to the timer
 */
static inline QEMUTimer *timer_new_full(QEMUTimerListGroup *timer_list_group,
                                        QEMUClockType type,
                                        int scale, int attributes,
                                        QEMUTimerCB *cb, void *opaque)
{
    QEMUTimer *ts = g_malloc0(sizeof(QEMUTimer));
    // timer_init_full(ts, timer_list_group, type, scale, attributes, cb, opaque);
    return ts;
}

/**
 * timer_new:
 * @type: the clock type to use
 * @scale: the scale value for the timer
 * @cb: the callback to be called when the timer expires
 * @opaque: the opaque pointer to be passed to the callback
 *
 * Create a new timer with the given scale,
 * and associate it with the default timer list for the clock type @type.
 * See timer_new_full for details.
 *
 * Returns: a pointer to the timer
 */
static inline QEMUTimer *timer_new(QEMUClockType type, int scale,
                                   QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(NULL, type, scale, 0, cb, opaque);
}

/**
 * timer_new_ns:
 * @type: the clock type to associate with the timer
 * @cb: the callback to call when the timer expires
 * @opaque: the opaque pointer to pass to the callback
 *
 * Create a new timer with nanosecond scale on the default timer list
 * associated with the clock.
 * See timer_new_full for details.
 *
 * Returns: a pointer to the newly created timer
 */
static inline QEMUTimer *timer_new_ns(QEMUClockType type, QEMUTimerCB *cb,
                                      void *opaque)
{
    return timer_new(type, SCALE_NS, cb, opaque);
}

/**
 * timer_new_us:
 * @type: the clock type to associate with the timer
 * @cb: the callback to call when the timer expires
 * @opaque: the opaque pointer to pass to the callback
 *
 * Create a new timer with microsecond scale on the default timer list
 * associated with the clock.
 * See timer_new_full for details.
 *
 * Returns: a pointer to the newly created timer
 */
static inline QEMUTimer *timer_new_us(QEMUClockType type, QEMUTimerCB *cb,
                                      void *opaque)
{
    return timer_new(type, SCALE_US, cb, opaque);
}

/**
 * timer_new_ms:
 * @type: the clock type to associate with the timer
 * @cb: the callback to call when the timer expires
 * @opaque: the opaque pointer to pass to the callback
 *
 * Create a new timer with millisecond scale on the default timer list
 * associated with the clock.
 * See timer_new_full for details.
 *
 * Returns: a pointer to the newly created timer
 */
static inline QEMUTimer *timer_new_ms(QEMUClockType type, QEMUTimerCB *cb,
                                      void *opaque)
{
    return timer_new(type, SCALE_MS, cb, opaque);
}
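
/*
 * Usage sketch (editor's note): the full heap-allocated timer lifecycle,
 * with hypothetical names, assuming qemu_clock_get_ms() from the elided
 * part of this header.
 *
 *     QEMUTimer *t = timer_new_ms(QEMU_CLOCK_VIRTUAL, my_cb, dev);
 *     timer_mod(t, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 100);  // +100 ms
 *     ...
 *     timer_del(t);    // take it off the active list
 *     timer_free(t);   // then release the memory
 */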

/**
 * timer_deinit:
 * @ts: the timer to be de-initialised
 *
 * Deassociate the timer from any timerlist.  You should
 * call timer_del first.  After this call, any further
 * timer_del call cannot cause dangling pointer accesses
 * even if the previously used timerlist is freed.
 */
void timer_deinit(QEMUTimer *ts);

/**
 * timer_free:
 * @ts: the timer
 *
 * Free a timer (it must not be on the active list)
 */
static inline void timer_free(QEMUTimer *ts)
{
    g_free(ts);
}

/**
 * timer_del:
 * @ts: the timer
 *
 * Delete a timer from the active list.
 *
 * This function is thread-safe but the timer and its timer list must not be
 * freed while this function is running.
 */
void timer_del(QEMUTimer *ts);

/**
 * timer_mod_ns:
 * @ts: the timer
 * @expire_time: the expiry time in nanoseconds
 *
 * Modify a timer to expire at @expire_time
 *
 * This function is thread-safe but the timer and its timer list must not be
 * freed while this function is running.
 */
void timer_mod_ns(QEMUTimer *ts, int64_t expire_time);

/**
 * timer_mod_anticipate_ns:
 * @ts: the timer
 * @expire_time: the expiry time in nanoseconds
 *
 * Modify a timer to expire at @expire_time or the current time,
 * whichever comes earlier.
 *
 * This function is thread-safe but the timer and its timer list must not be
 * freed while this function is running.
 */
void timer_mod_anticipate_ns(QEMUTimer *ts, int64_t expire_time);

/**
 * timer_mod:
 * @ts: the timer
 * @expire_time: the expiry time in the units associated with the timer
 *
 * Modify a timer to expire at @expire_time, taking into
 * account the scale associated with the timer.
 *
 * This function is thread-safe but the timer and its timer list must not be
 * freed while this function is running.
 */
void timer_mod(QEMUTimer *ts, int64_t expire_time);

/**
 * timer_mod_anticipate:
 * @ts: the timer
 * @expire_time: the expiry time in the units associated with the timer
 *
 * Modify a timer to expire at @expire_time or the current time, whichever
 * comes earlier, taking into account the scale associated with the timer.
 *
 * This function is thread-safe but the timer and its timer list must not be
 * freed while this function is running.
 */
void timer_mod_anticipate(QEMUTimer *ts, int64_t expire_time);
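
/*
 * Usage sketch (editor's note): on my reading of the semantics above, the
 * _anticipate variants only ever move an armed deadline earlier, never
 * later, which makes them safe for "wake up no later than" logic.
 *
 *     timer_mod(t, when);              // deadline becomes exactly 'when'
 *     timer_mod_anticipate(t, when);   // deadline only moves earlier
 */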

/**
 * timer_pending:
 * @ts: the timer
 *
 * Determines whether a timer is pending (i.e. is on the
 * active list of timers, whether or not it has expired).
 *
 * Returns: true if the timer is pending
 */
bool timer_pending(QEMUTimer *ts);

/**
 * timer_expired:
 * @timer_head: the timer
 * @current_time: the current time
 *
 * Determines whether a timer has expired.
 *
 * Returns: true if the timer has expired
 */
bool timer_expired(QEMUTimer *timer_head, int64_t current_time);
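
/*
 * Usage sketch (editor's note): manually checking a timer against the
 * current clock reading.
 *
 *     int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
 *     if (timer_pending(t) && timer_expired(t, now)) {
 *         // the callback is due; normally the timer list runs it for you
 *     }
 */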

/**
 * timer_expire_time_ns:
 * @ts: the timer
 *
 * Determine the expiry time of a timer
 *
 * Returns: the expiry time in nanoseconds
 */
uint64_t timer_expire_time_ns(QEMUTimer *ts);

/*
 * General utility functions
 */

/**
 * qemu_timeout_ns_to_ms:
 * @ns: nanosecond timeout value
@ -140,7 +757,7 @@ static inline int64_t qemu_soonest_timeout(int64_t timeout1, int64_t timeout2)
 *
 * Initialise the clock & timer infrastructure
 */
void init_clocks(void);
void init_clocks(QEMUTimerListNotifyCB *notify_cb);

int64_t cpu_get_ticks(void);
/* Caller must hold BQL */
@ -148,16 +765,20 @@ void cpu_enable_ticks(void);
/* Caller must hold BQL */
void cpu_disable_ticks(void);

static inline int64_t get_ticks_per_sec(void)
static inline int64_t get_max_clock_jump(void)
{
    return 1000000000LL;
    /* This should be small enough to prevent excessive interrupts from being
     * generated by the RTC on clock jumps, but large enough to avoid frequent
     * unnecessary resets in idle VMs.
     */
    return 60 * NANOSECONDS_PER_SECOND;
}

/*
 * Low level clock functions
 */

/* real time host monotonic timer */
/* get host real time in nanosecond */
static inline int64_t get_clock_realtime(void)
{
    struct timeval tv;
@ -176,29 +797,40 @@ static inline int64_t get_clock(void)
{
    LARGE_INTEGER ti;
    QueryPerformanceCounter(&ti);
    return muldiv64(ti.QuadPart, (uint32_t)get_ticks_per_sec(), (uint32_t)clock_freq);
    return muldiv64(ti.QuadPart, NANOSECONDS_PER_SECOND, clock_freq);
}

#else

extern int use_rt_clock;

static inline int64_t get_clock(void)
{
    return get_clock_realtime();
    if (use_rt_clock) {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    } else {
        /* XXX: using gettimeofday leads to problems if the date
           changes, so it should be avoided. */
        return get_clock_realtime();
    }
}
#endif

/* icount */
int64_t cpu_get_icount_raw(void);
int64_t cpu_get_icount(void);
int64_t cpu_get_clock(void);
int64_t cpu_get_clock_offset(void);
int64_t cpu_icount_to_ns(int64_t icount);
void cpu_update_icount(CPUState *cpu);

/*******************************************/
/* host CPU ticks (if available) */

#if defined(_ARCH_PPC)

static inline int64_t cpu_get_real_ticks(void)
static inline int64_t cpu_get_host_ticks(void)
{
    int64_t retval;
#ifdef _ARCH_PPC64
@ -224,7 +856,7 @@ static inline int64_t cpu_get_real_ticks(void)

#elif defined(__i386__)

static inline int64_t cpu_get_real_ticks(void)
static inline int64_t cpu_get_host_ticks(void)
{
#ifdef _MSC_VER
    return __rdtsc();
@ -237,7 +869,7 @@ static inline int64_t cpu_get_real_ticks(void)

#elif defined(__x86_64__)

static inline int64_t cpu_get_real_ticks(void)
static inline int64_t cpu_get_host_ticks(void)
{
#ifdef _MSC_VER
    return __rdtsc();
@ -254,25 +886,16 @@ static inline int64_t cpu_get_real_ticks(void)

#elif defined(__hppa__)

static inline int64_t cpu_get_real_ticks(void)
static inline int64_t cpu_get_host_ticks(void)
{
    int val;
    asm volatile ("mfctl %%cr16, %0" : "=r"(val));
    return val;
}

#elif defined(__ia64)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
    return val;
}

#elif defined(__s390__)

static inline int64_t cpu_get_real_ticks(void)
static inline int64_t cpu_get_host_ticks(void)
{
    int64_t val;
    asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
@ -281,7 +904,7 @@ static inline int64_t cpu_get_real_ticks(void)

#elif defined(__sparc__)

static inline int64_t cpu_get_real_ticks (void)
static inline int64_t cpu_get_host_ticks (void)
{
#if defined(_LP64)
    uint64_t rval;
@ -319,7 +942,7 @@ static inline int64_t cpu_get_real_ticks (void)
                  : "=r" (value)); \
}

static inline int64_t cpu_get_real_ticks(void)
static inline int64_t cpu_get_host_ticks(void)
{
    /* On kernels >= 2.6.25 rdhwr <reg>, $2 and $3 are emulated */
    uint32_t count;
@ -335,7 +958,7 @@ static inline int64_t cpu_get_real_ticks(void)

#elif defined(__alpha__)

static inline int64_t cpu_get_real_ticks(void)
static inline int64_t cpu_get_host_ticks(void)
{
    uint64_t cc;
    uint32_t cur, ofs;
@ -350,22 +973,12 @@ static inline int64_t cpu_get_real_ticks(void)
/* The host CPU doesn't have an easily accessible cycle counter.
   Just return a monotonically increasing value.  This will be
   totally wrong, but hopefully better than nothing. */
static inline int64_t cpu_get_real_ticks (void)
static inline int64_t cpu_get_host_ticks(void)
{
    static int64_t ticks = 0;
    return ticks++;
    return get_clock();
}
#endif

#ifdef CONFIG_PROFILER
static inline int64_t profile_getclock(void)
{
    return cpu_get_real_ticks();
}

extern int64_t qemu_time, qemu_time_start;
extern int64_t tlb_flush_time;
extern int64_t dev_time;
#endif
void init_get_clock(void);

#endif
@ -1,80 +1,47 @@
#ifndef QEMU_TYPEDEFS_H
#define QEMU_TYPEDEFS_H

/* A load of opaque types so that device init declarations don't have to
   pull in all the real definitions. */
/*
 * This header is for selectively avoiding #include just to get a
 * typedef name.
 *
 * Declaring a typedef name in its "obvious" place can result in
 * inclusion cycles, in particular for complete struct and union
 * types that need more types for their members.  It can also result
 * in headers pulling in many more headers, slowing down builds.
 *
 * You can break such cycles and unwanted dependencies by declaring
 * the typedef name here.
 *
 * For struct types used in only a few headers, judicious use of the
 * struct tag instead of the typedef name is commonly preferable.
 */

/*
 * Incomplete struct types
 * Please keep this list in case-insensitive alphabetical order.
 */
typedef struct AddressSpace AddressSpace;
typedef struct CPUAddressSpace CPUAddressSpace;
typedef struct CPUState CPUState;
typedef struct FlatView FlatView;
typedef struct IOMMUMemoryRegion IOMMUMemoryRegion;
typedef struct MemoryListener MemoryListener;
typedef struct MemoryMappingList MemoryMappingList;
typedef struct MemoryRegion MemoryRegion;
typedef struct MemoryRegionCache MemoryRegionCache;
typedef struct MemoryRegionSection MemoryRegionSection;
typedef struct QEMUTimer QEMUTimer;
typedef struct QEMUTimerListGroup QEMUTimerListGroup;
typedef struct QEMUFile QEMUFile;
typedef struct QEMUBH QEMUBH;

typedef struct AioContext AioContext;

typedef struct Visitor Visitor;

typedef struct MigrationParams MigrationParams;

typedef struct Property Property;
typedef struct PropertyInfo PropertyInfo;
typedef struct CompatProperty CompatProperty;
typedef struct DeviceState DeviceState;
typedef struct BusState BusState;
typedef struct BusClass BusClass;

typedef struct AddressSpace AddressSpace;
typedef struct MemoryRegion MemoryRegion;
typedef struct MemoryRegionSection MemoryRegionSection;
typedef struct MemoryListener MemoryListener;

typedef struct MemoryMappingList MemoryMappingList;

typedef struct QEMUMachine QEMUMachine;
typedef struct MachineClass MachineClass;
typedef struct MachineState MachineState;
typedef struct NICInfo NICInfo;
typedef struct HCIInfo HCIInfo;
typedef struct AudioState AudioState;
typedef struct BlockBackend BlockBackend;
typedef struct BlockDriverState BlockDriverState;
typedef struct DriveInfo DriveInfo;
typedef struct DisplayState DisplayState;
typedef struct DisplayChangeListener DisplayChangeListener;
typedef struct DisplaySurface DisplaySurface;
typedef struct PixelFormat PixelFormat;
typedef struct QemuConsole QemuConsole;
typedef struct CharDriverState CharDriverState;
typedef struct MACAddr MACAddr;
typedef struct NetClientState NetClientState;
typedef struct I2CBus I2CBus;
typedef struct ISABus ISABus;
typedef struct ISADevice ISADevice;
typedef struct SMBusDevice SMBusDevice;
typedef struct PCIHostState PCIHostState;
typedef struct PCIExpressHost PCIExpressHost;
typedef struct PCIBus PCIBus;
typedef struct PCIDevice PCIDevice;
typedef struct PCIExpressDevice PCIExpressDevice;
typedef struct PCIBridge PCIBridge;
typedef struct PCIEAERMsg PCIEAERMsg;
typedef struct PCIEAERLog PCIEAERLog;
typedef struct PCIEAERErr PCIEAERErr;
typedef struct PCIEPort PCIEPort;
typedef struct PCIESlot PCIESlot;
typedef struct MSIMessage MSIMessage;
typedef struct SerialState SerialState;
typedef struct PCMCIACardState PCMCIACardState;
typedef struct MouseTransformInfo MouseTransformInfo;
typedef struct uWireSlave uWireSlave;
typedef struct I2SCodec I2SCodec;
typedef struct SSIBus SSIBus;
typedef struct EventNotifier EventNotifier;
typedef struct VirtIODevice VirtIODevice;
typedef struct QEMUSGList QEMUSGList;
typedef struct QEMUSizedBuffer QEMUSizedBuffer;
typedef struct SHPCDevice SHPCDevice;
typedef struct FWCfgState FWCfgState;
typedef struct PcGuestInfo PcGuestInfo;
typedef struct RAMBlock RAMBlock;
typedef struct Range Range;
typedef struct AdapterInfo AdapterInfo;

/*
 * Pointer types
 * Such typedefs should be limited to cases where the typedef's users
 * are oblivious of its "pointer-ness".
 * Please keep this list in case-insensitive alphabetical order.
 */
typedef struct IRQState *qemu_irq;

#endif /* QEMU_TYPEDEFS_H */

20
qemu/include/qemu/units.h
Normal file
@ -0,0 +1,20 @@
/*
 * IEC binary prefixes definitions
 *
 * Copyright (C) 2015 Nikunj A Dadhania, IBM Corporation
 * Copyright (C) 2018 Philippe Mathieu-Daudé <f4bug@amsat.org>
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#ifndef QEMU_UNITS_H
#define QEMU_UNITS_H

#define KiB (INT64_C(1) << 10)
#define MiB (INT64_C(1) << 20)
#define GiB (INT64_C(1) << 30)
#define TiB (INT64_C(1) << 40)
#define PiB (INT64_C(1) << 50)
#define EiB (INT64_C(1) << 60)
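
/*
 * Usage sketch (editor's note): these expand to int64_t-typed powers of two,
 * so they compose safely in size arithmetic, e.g.
 *
 *     uint64_t ram_size = 2 * GiB;
 *     uint64_t pages    = ram_size / (4 * KiB);
 */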

#endif

129
qemu/include/qemu/xxhash.h
Normal file
@ -0,0 +1,129 @@

/*
 * xxHash - Fast Hash algorithm
 * Copyright (C) 2012-2016, Yann Collet
 *
 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * + Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * + Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following disclaimer
 *   in the documentation and/or other materials provided with the
 *   distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You can contact the author at :
 * - xxHash source repository : https://github.com/Cyan4973/xxHash
 */

#ifndef QEMU_XXHASH_H
#define QEMU_XXHASH_H

#include "qemu/bitops.h"

#define PRIME32_1 2654435761U
#define PRIME32_2 2246822519U
#define PRIME32_3 3266489917U
#define PRIME32_4  668265263U
#define PRIME32_5  374761393U

#define QEMU_XXHASH_SEED 1

/*
 * xxhash32, customized for input variables that are not guaranteed to be
 * contiguous in memory.
 */
static inline uint32_t
qemu_xxhash7(uint64_t ab, uint64_t cd, uint32_t e, uint32_t f, uint32_t g)
{
#ifdef _WIN32
    /* Accumulate in 64 bits first: PRIME32_1 + PRIME32_2 wraps a uint32_t,
     * which MSVC flags as a constant overflow. */
    uint64_t v1x = QEMU_XXHASH_SEED;
    v1x += PRIME32_1;
    v1x += PRIME32_2;
    uint32_t v1 = v1x;
#else
    uint32_t v1 = QEMU_XXHASH_SEED + PRIME32_1 + PRIME32_2;
#endif
    uint32_t v2 = QEMU_XXHASH_SEED + PRIME32_2;
    uint32_t v3 = QEMU_XXHASH_SEED + 0;
    uint32_t v4 = QEMU_XXHASH_SEED - PRIME32_1;
    uint32_t a = ab;
    uint32_t b = ab >> 32;
    uint32_t c = cd;
    uint32_t d = cd >> 32;
    uint32_t h32;

    v1 += a * PRIME32_2;
    v1 = rol32(v1, 13);
    v1 *= PRIME32_1;

    v2 += b * PRIME32_2;
    v2 = rol32(v2, 13);
    v2 *= PRIME32_1;

    v3 += c * PRIME32_2;
    v3 = rol32(v3, 13);
    v3 *= PRIME32_1;

    v4 += d * PRIME32_2;
    v4 = rol32(v4, 13);
    v4 *= PRIME32_1;

    h32 = rol32(v1, 1) + rol32(v2, 7) + rol32(v3, 12) + rol32(v4, 18);
    h32 += 28;

    h32 += e * PRIME32_3;
    h32 = rol32(h32, 17) * PRIME32_4;

    h32 += f * PRIME32_3;
    h32 = rol32(h32, 17) * PRIME32_4;

    h32 += g * PRIME32_3;
    h32 = rol32(h32, 17) * PRIME32_4;

    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;
}

static inline uint32_t qemu_xxhash2(uint64_t ab)
{
    return qemu_xxhash7(ab, 0, 0, 0, 0);
}

static inline uint32_t qemu_xxhash4(uint64_t ab, uint64_t cd)
{
    return qemu_xxhash7(ab, cd, 0, 0, 0);
}

static inline uint32_t qemu_xxhash5(uint64_t ab, uint64_t cd, uint32_t e)
{
    return qemu_xxhash7(ab, cd, e, 0, 0);
}

static inline uint32_t qemu_xxhash6(uint64_t ab, uint64_t cd, uint32_t e,
                                    uint32_t f)
{
    return qemu_xxhash7(ab, cd, e, f, 0);
}
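
/*
 * Usage sketch (editor's note): these helpers hash a small fixed set of
 * scalars into a 32-bit value, e.g. deriving a hash-table bucket from two
 * 64-bit inputs; the variable names here are hypothetical.
 *
 *     uint32_t h = qemu_xxhash4((uint64_t)phys_pc, (uint64_t)pc);
 */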

#endif /* QEMU_XXHASH_H */