import Unicorn2
This commit is contained in:
@ -1,10 +0,0 @@
|
||||
util-obj-y = cutils.o qemu-timer-common.o
|
||||
util-obj-$(CONFIG_WIN32) += oslib-win32.o qemu-thread-win32.o
|
||||
util-obj-$(CONFIG_POSIX) += oslib-posix.o qemu-thread-posix.o
|
||||
util-obj-y += module.o
|
||||
util-obj-y += bitmap.o bitops.o
|
||||
util-obj-y += error.o
|
||||
util-obj-y += aes.o
|
||||
util-obj-y += crc32c.o
|
||||
util-obj-y += host-utils.o
|
||||
util-obj-y += getauxval.o
|
1059
qemu/util/aes.c
1059
qemu/util/aes.c
File diff suppressed because it is too large
Load Diff
@ -9,10 +9,153 @@
|
||||
* Version 2.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/bitops.h"
|
||||
#include "qemu/bitmap.h"
|
||||
#include "qemu/atomic.h"
|
||||
|
||||
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
|
||||
/*
|
||||
* bitmaps provide an array of bits, implemented using an
|
||||
* array of unsigned longs. The number of valid bits in a
|
||||
* given bitmap does _not_ need to be an exact multiple of
|
||||
* BITS_PER_LONG.
|
||||
*
|
||||
* The possible unused bits in the last, partially used word
|
||||
* of a bitmap are 'don't care'. The implementation makes
|
||||
* no particular effort to keep them zero. It ensures that
|
||||
* their value will not affect the results of any operation.
|
||||
* The bitmap operations that return Boolean (bitmap_empty,
|
||||
* for example) or scalar (bitmap_weight, for example) results
|
||||
* carefully filter out these unused bits from impacting their
|
||||
* results.
|
||||
*
|
||||
* These operations actually hold to a slightly stronger rule:
|
||||
* if you don't input any bitmaps to these ops that have some
|
||||
* unused bits set, then they won't output any set unused bits
|
||||
* in output bitmaps.
|
||||
*
|
||||
* The byte ordering of bitmaps is more natural on little
|
||||
* endian architectures.
|
||||
*/
|
||||
|
||||
int slow_bitmap_empty(const unsigned long *bitmap, long bits)
|
||||
{
|
||||
long k, lim = bits/BITS_PER_LONG;
|
||||
|
||||
for (k = 0; k < lim; ++k) {
|
||||
if (bitmap[k]) {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
if (bits % BITS_PER_LONG) {
|
||||
if (bitmap[k] & BITMAP_LAST_WORD_MASK(bits)) {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int slow_bitmap_full(const unsigned long *bitmap, long bits)
|
||||
{
|
||||
long k, lim = bits/BITS_PER_LONG;
|
||||
|
||||
for (k = 0; k < lim; ++k) {
|
||||
if (~bitmap[k]) {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
if (bits % BITS_PER_LONG) {
|
||||
if (~bitmap[k] & BITMAP_LAST_WORD_MASK(bits)) {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int slow_bitmap_equal(const unsigned long *bitmap1,
|
||||
const unsigned long *bitmap2, long bits)
|
||||
{
|
||||
long k, lim = bits/BITS_PER_LONG;
|
||||
|
||||
for (k = 0; k < lim; ++k) {
|
||||
if (bitmap1[k] != bitmap2[k]) {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
if (bits % BITS_PER_LONG) {
|
||||
if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
void slow_bitmap_complement(unsigned long *dst, const unsigned long *src,
|
||||
long bits)
|
||||
{
|
||||
long k, lim = bits/BITS_PER_LONG;
|
||||
|
||||
for (k = 0; k < lim; ++k) {
|
||||
dst[k] = ~src[k];
|
||||
}
|
||||
|
||||
if (bits % BITS_PER_LONG) {
|
||||
dst[k] = ~src[k] & BITMAP_LAST_WORD_MASK(bits);
|
||||
}
|
||||
}
|
||||
|
||||
/* dst = bitmap1 & bitmap2 over @bits bits; return non-zero iff any
 * word of the result is non-zero. */
int slow_bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
                    const unsigned long *bitmap2, long bits)
{
    const long nwords = BITS_TO_LONGS(bits);
    unsigned long any = 0;
    long w;

    for (w = 0; w < nwords; w++) {
        dst[w] = bitmap1[w] & bitmap2[w];
        any |= dst[w];
    }
    return any != 0;
}
|
||||
|
||||
/* dst = bitmap1 | bitmap2 over @bits bits (whole words, including any
 * trailing slack bits, consistent with its siblings). */
void slow_bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
                    const unsigned long *bitmap2, long bits)
{
    const long nwords = BITS_TO_LONGS(bits);
    long w;

    for (w = 0; w < nwords; w++) {
        dst[w] = bitmap1[w] | bitmap2[w];
    }
}
|
||||
|
||||
/* dst = bitmap1 ^ bitmap2 over @bits bits (whole-word granularity). */
void slow_bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
                     const unsigned long *bitmap2, long bits)
{
    const long nwords = BITS_TO_LONGS(bits);
    long w;

    for (w = 0; w < nwords; w++) {
        dst[w] = bitmap1[w] ^ bitmap2[w];
    }
}
|
||||
|
||||
/* dst = bitmap1 & ~bitmap2 over @bits bits; return non-zero iff the
 * result has any bit set. */
int slow_bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
                       const unsigned long *bitmap2, long bits)
{
    const long nwords = BITS_TO_LONGS(bits);
    unsigned long any = 0;
    long w;

    for (w = 0; w < nwords; w++) {
        dst[w] = bitmap1[w] & ~bitmap2[w];
        any |= dst[w];
    }
    return any != 0;
}
|
||||
|
||||
void qemu_bitmap_set(unsigned long *map, long start, long nr)
|
||||
{
|
||||
@ -21,6 +164,8 @@ void qemu_bitmap_set(unsigned long *map, long start, long nr)
|
||||
int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
|
||||
unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
|
||||
|
||||
assert(start >= 0 && nr >= 0);
|
||||
|
||||
while (nr - bits_to_set >= 0) {
|
||||
*p |= mask_to_set;
|
||||
nr -= bits_to_set;
|
||||
@ -34,6 +179,45 @@ void qemu_bitmap_set(unsigned long *map, long start, long nr)
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Atomically set @nr bits in @map starting at bit @start.
 *
 * Partial first/last words are set with atomic_or(); full words in the
 * middle use plain stores, with a final smp_mb() so those stores are
 * ordered even when the trailing atomic_or() (and its implied barrier)
 * is skipped.
 */
void bitmap_set_atomic(unsigned long *map, long start, long nr)
{
    unsigned long *p = map + BIT_WORD(start);
    const long size = start + nr;
    int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
    unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

    assert(start >= 0 && nr >= 0);

    /* First word */
    if (nr - bits_to_set > 0) {
        atomic_or(p, mask_to_set);
        nr -= bits_to_set;
        bits_to_set = BITS_PER_LONG;
        mask_to_set = ~0UL;
        p++;
    }

    /* Full words */
    if (bits_to_set == BITS_PER_LONG) {
        while (nr >= BITS_PER_LONG) {
            *p = ~0UL;
            nr -= BITS_PER_LONG;
            p++;
        }
    }

    /* Last word */
    if (nr) {
        mask_to_set &= BITMAP_LAST_WORD_MASK(size);
        atomic_or(p, mask_to_set);
    } else {
        /* If we avoided the full barrier in atomic_or(), issue a
         * barrier to account for the assignments in the while loop.
         */
        smp_mb();
    }
}
|
||||
|
||||
void qemu_bitmap_clear(unsigned long *map, long start, long nr)
|
||||
{
|
||||
unsigned long *p = map + BIT_WORD(start);
|
||||
@ -41,6 +225,8 @@ void qemu_bitmap_clear(unsigned long *map, long start, long nr)
|
||||
int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
|
||||
unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
|
||||
|
||||
assert(start >= 0 && nr >= 0);
|
||||
|
||||
while (nr - bits_to_clear >= 0) {
|
||||
*p &= ~mask_to_clear;
|
||||
nr -= bits_to_clear;
|
||||
@ -53,3 +239,253 @@ void qemu_bitmap_clear(unsigned long *map, long start, long nr)
|
||||
*p &= ~mask_to_clear;
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Atomically clear @nr bits in @map starting at bit @start; return
 * true iff any of the cleared bits was previously set.
 *
 * Partial first/last words use atomic_fetch_and(); full words in the
 * middle are tested and cleared with plain accesses, with an smp_mb()
 * at the end when no trailing atomic op was issued and nothing was
 * found dirty.
 */
bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr)
{
    unsigned long *p = map + BIT_WORD(start);
    const long size = start + nr;
    int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
    unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
    unsigned long dirty = 0;     /* accumulates previously-set bits */
    unsigned long old_bits;

    assert(start >= 0 && nr >= 0);

    /* First word */
    if (nr - bits_to_clear > 0) {
        old_bits = atomic_fetch_and(p, ~mask_to_clear);
        dirty |= old_bits & mask_to_clear;
        nr -= bits_to_clear;
        bits_to_clear = BITS_PER_LONG;
        mask_to_clear = ~0UL;
        p++;
    }

    /* Full words */
    if (bits_to_clear == BITS_PER_LONG) {
        while (nr >= BITS_PER_LONG) {
            if (*p) {
                old_bits = *p;
                *p = 0;
                dirty |= old_bits;
            }
            nr -= BITS_PER_LONG;
            p++;
        }
    }

    /* Last word */
    if (nr) {
        mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
        old_bits = atomic_fetch_and(p, ~mask_to_clear);
        dirty |= old_bits & mask_to_clear;
    } else {
        /* No trailing atomic op supplied a barrier; pair the plain
         * stores above with a full barrier, as in bitmap_set_atomic().
         */
        if (!dirty) {
            smp_mb();
        }
    }

    return dirty != 0;
}
|
||||
|
||||
/*
 * Copy @nr bits (rounded up to whole words) from @src to @dst,
 * zeroing @src as we go.
 *
 * NOTE(review): despite the name, the load/store pairs here are plain
 * (non-atomic) accesses — confirm callers do not rely on word-level
 * atomicity against concurrent writers.
 */
void bitmap_copy_and_clear_atomic(unsigned long *dst, unsigned long *src,
                                  long nr)
{
    while (nr > 0) {
        *dst = *src;
        *src = 0;
        dst++;
        src++;
        nr -= BITS_PER_LONG;
    }
}
|
||||
|
||||
#define ALIGN_MASK(x,mask) (((x)+(mask))&~(mask))
|
||||
|
||||
/**
|
||||
* bitmap_find_next_zero_area - find a contiguous aligned zero area
|
||||
* @map: The address to base the search on
|
||||
* @size: The bitmap size in bits
|
||||
* @start: The bitnumber to start searching at
|
||||
* @nr: The number of zeroed bits we're looking for
|
||||
* @align_mask: Alignment mask for zero area
|
||||
*
|
||||
* The @align_mask should be one less than a power of 2; the effect is that
|
||||
* the bit offset of all zero areas this function finds is multiples of that
|
||||
* power of 2. A @align_mask of 0 means no alignment is required.
|
||||
*/
|
||||
unsigned long bitmap_find_next_zero_area(unsigned long *map,
                                         unsigned long size,
                                         unsigned long start,
                                         unsigned long nr,
                                         unsigned long align_mask)
{
    unsigned long index, end, i;
again:
    index = find_next_zero_bit(map, size, start);

    /* Align allocation */
    index = ALIGN_MASK(index, align_mask);

    end = index + nr;
    if (end > size) {
        /* Out of room: return value is > size, which callers treat as
         * "not found". */
        return end;
    }
    /* Any set bit within [index, end) invalidates this candidate;
     * restart the search just past it. */
    i = find_next_bit(map, end, index);
    if (i < end) {
        start = i + 1;
        goto again;
    }
    return index;
}
|
||||
|
||||
int slow_bitmap_intersects(const unsigned long *bitmap1,
|
||||
const unsigned long *bitmap2, long bits)
|
||||
{
|
||||
long k, lim = bits/BITS_PER_LONG;
|
||||
|
||||
for (k = 0; k < lim; ++k) {
|
||||
if (bitmap1[k] & bitmap2[k]) {
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
if (bits % BITS_PER_LONG) {
|
||||
if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) {
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
long slow_bitmap_count_one(const unsigned long *bitmap, long nbits)
|
||||
{
|
||||
long k, lim = nbits / BITS_PER_LONG, result = 0;
|
||||
|
||||
for (k = 0; k < lim; k++) {
|
||||
result += ctpopl(bitmap[k]);
|
||||
}
|
||||
|
||||
if (nbits % BITS_PER_LONG) {
|
||||
result += ctpopl(bitmap[k] & BITMAP_LAST_WORD_MASK(nbits));
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/*
 * Convert a bitmap between host and little-endian word layout.
 * The transform is its own inverse (per-word byteswap on big-endian
 * hosts, plain copy on little-endian), hence one helper serves both
 * directions.
 */
static void bitmap_to_from_le(unsigned long *dst,
                              const unsigned long *src, long nbits)
{
    long len = BITS_TO_LONGS(nbits);

#ifdef HOST_WORDS_BIGENDIAN
    long index;

    for (index = 0; index < len; index++) {
# if HOST_LONG_BITS == 64
        dst[index] = bswap64(src[index]);
# else
        dst[index] = bswap32(src[index]);
# endif
    }
#else
    /* Host words are already little-endian: straight copy. */
    memcpy(dst, src, len * sizeof(unsigned long));
#endif
}
|
||||
|
||||
/* Convert @nbits bits from little-endian word layout to host layout. */
void bitmap_from_le(unsigned long *dst, const unsigned long *src,
                    long nbits)
{
    bitmap_to_from_le(dst, src, nbits);
}
|
||||
|
||||
/* Convert @nbits bits from host word layout to little-endian layout. */
void bitmap_to_le(unsigned long *dst, const unsigned long *src,
                  long nbits)
{
    bitmap_to_from_le(dst, src, nbits);
}
|
||||
|
||||
/*
|
||||
* Copy "src" bitmap with a positive offset and put it into the "dst"
|
||||
* bitmap. The caller needs to make sure the bitmap size of "src"
|
||||
* is bigger than (shift + nbits).
|
||||
*/
|
||||
void bitmap_copy_with_src_offset(unsigned long *dst, const unsigned long *src,
                                 unsigned long shift, unsigned long nbits)
{
    unsigned long left_mask, right_mask, last_mask;

    /* Proper shift src pointer to the first word to copy from */
    src += BIT_WORD(shift);
    shift %= BITS_PER_LONG;

    if (!shift) {
        /* Fast path */
        bitmap_copy(dst, src, nbits);
        return;
    }

    /* right_mask selects the low @shift bits (taken from the *next*
     * source word); left_mask the remaining high bits of the current
     * word. */
    right_mask = (1ul << shift) - 1;
    left_mask = ~right_mask;

    /* Each destination word is stitched from two adjacent src words. */
    while (nbits >= BITS_PER_LONG) {
        *dst = (*src & left_mask) >> shift;
        *dst |= (src[1] & right_mask) << (BITS_PER_LONG - shift);
        dst++;
        src++;
        nbits -= BITS_PER_LONG;
    }

    if (nbits > BITS_PER_LONG - shift) {
        /* Tail spans two source words. */
        *dst = (*src & left_mask) >> shift;
        nbits -= BITS_PER_LONG - shift;
        last_mask = (1ul << nbits) - 1;
        *dst |= (src[1] & last_mask) << (BITS_PER_LONG - shift);
    } else if (nbits) {
        /* Tail fits entirely in the current source word. */
        last_mask = (1ul << nbits) - 1;
        *dst = (*src >> shift) & last_mask;
    }
}
|
||||
|
||||
/*
|
||||
* Copy "src" bitmap into the "dst" bitmap with an offset in the
|
||||
* "dst". The caller needs to make sure the bitmap size of "dst" is
|
||||
* bigger than (shift + nbits).
|
||||
*/
|
||||
void bitmap_copy_with_dst_offset(unsigned long *dst, const unsigned long *src,
                                 unsigned long shift, unsigned long nbits)
{
    unsigned long left_mask, right_mask, last_mask;

    /* Proper shift dst pointer to the first word to copy from */
    dst += BIT_WORD(shift);
    shift %= BITS_PER_LONG;

    if (!shift) {
        /* Fast path */
        bitmap_copy(dst, src, nbits);
        return;
    }

    /* right_mask selects the source bits that land in the current dst
     * word; left_mask those spilling into the next one. */
    right_mask = (1ul << (BITS_PER_LONG - shift)) - 1;
    left_mask = ~right_mask;

    /* Preserve the low @shift bits already present in the first dst
     * word; subsequent words are fully overwritten. */
    *dst &= (1ul << shift) - 1;
    while (nbits >= BITS_PER_LONG) {
        *dst |= (*src & right_mask) << shift;
        dst[1] = (*src & left_mask) >> (BITS_PER_LONG - shift);
        dst++;
        src++;
        nbits -= BITS_PER_LONG;
    }

    if (nbits > BITS_PER_LONG - shift) {
        /* Tail spills into the following destination word. */
        *dst |= (*src & right_mask) << shift;
        nbits -= BITS_PER_LONG - shift;
        last_mask = ((1ul << nbits) - 1) << (BITS_PER_LONG - shift);
        dst[1] = (*src & last_mask) >> (BITS_PER_LONG - shift);
    } else if (nbits) {
        /* Tail fits in the current destination word. */
        last_mask = (1ul << nbits) - 1;
        *dst |= (*src & last_mask) << shift;
    }
}
|
||||
|
@ -11,17 +11,16 @@
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/bitops.h"
|
||||
|
||||
#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
|
||||
|
||||
/*
|
||||
* Find the next set bit in a memory region.
|
||||
*/
|
||||
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
|
||||
unsigned long offset)
|
||||
unsigned long offset)
|
||||
{
|
||||
const unsigned long *p = addr + BITOP_WORD(offset);
|
||||
const unsigned long *p = addr + BIT_WORD(offset);
|
||||
unsigned long result = offset & ~(BITS_PER_LONG-1);
|
||||
unsigned long tmp;
|
||||
|
||||
@ -84,9 +83,9 @@ found_middle:
|
||||
* Linus' asm-alpha/bitops.h.
|
||||
*/
|
||||
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
|
||||
unsigned long offset)
|
||||
unsigned long offset)
|
||||
{
|
||||
const unsigned long *p = addr + BITOP_WORD(offset);
|
||||
const unsigned long *p = addr + BIT_WORD(offset);
|
||||
unsigned long result = offset & ~(BITS_PER_LONG-1);
|
||||
unsigned long tmp;
|
||||
|
||||
@ -127,3 +126,32 @@ found_first:
|
||||
found_middle:
|
||||
return result + ctzl(~tmp);
|
||||
}
|
||||
|
||||
/*
 * Return the index of the highest set bit below @size in @addr,
 * or @size if no bit is set.
 */
unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
{
    unsigned long words;
    unsigned long tmp;

    /* Start at final word. */
    words = size / BITS_PER_LONG;

    /* Partial final word? Mask off bits at and above @size. */
    if (size & (BITS_PER_LONG-1)) {
        tmp = (addr[words] & (~0UL >> (BITS_PER_LONG
                                       - (size & (BITS_PER_LONG-1)))));
        if (tmp) {
            goto found;
        }
    }

    /* Scan whole words from high to low. */
    while (words) {
        tmp = addr[--words];
        if (tmp) {
        found:
            /* clzl() locates the most significant set bit of tmp. */
            return words * BITS_PER_LONG + BITS_PER_LONG - 1 - clzl(tmp);
        }
    }

    /* Not found */
    return size;
}
|
||||
|
189
qemu/util/cacheinfo.c
Normal file
189
qemu/util/cacheinfo.c
Normal file
@ -0,0 +1,189 @@
|
||||
/*
|
||||
* cacheinfo.c - helpers to query the host about its caches
|
||||
*
|
||||
* Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
|
||||
* License: GNU GPL, version 2 or later.
|
||||
* See the COPYING file in the top-level directory.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/host-utils.h"
|
||||
#include "qemu/atomic.h"
|
||||
|
||||
#include <uc_priv.h>
|
||||
|
||||
/*
|
||||
* Operating system specific detection mechanisms.
|
||||
*/
|
||||
|
||||
#if defined(_WIN32)
|
||||
|
||||
/*
 * Windows: query the L1 cache line sizes via
 * GetLogicalProcessorInformation(). Leaves *isize/*dsize untouched on
 * any failure.
 */
static void sys_cache_info(int *isize, int *dsize)
{
    SYSTEM_LOGICAL_PROCESSOR_INFORMATION *buf;
    DWORD size = 0;
    BOOL success;
    size_t i, n;

    /* Check for the required buffer size first. Note that if the zero
       size we use for the probe results in success, then there is no
       data available; fail in that case. */
    success = GetLogicalProcessorInformation(0, &size);
    if (success || GetLastError() != ERROR_INSUFFICIENT_BUFFER) {
        return;
    }

    /* Round the byte count down to a whole number of entries. */
    n = size / sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION);
    size = n * sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION);
    buf = g_new0(SYSTEM_LOGICAL_PROCESSOR_INFORMATION, n);
    if (!GetLogicalProcessorInformation(buf, &size)) {
        goto fail;
    }

    /* Scan for level-1 cache descriptors of each type. */
    for (i = 0; i < n; i++) {
        if (buf[i].Relationship == RelationCache
            && buf[i].Cache.Level == 1) {
            switch (buf[i].Cache.Type) {
            case CacheUnified:
                *isize = *dsize = buf[i].Cache.LineSize;
                break;
            case CacheInstruction:
                *isize = buf[i].Cache.LineSize;
                break;
            case CacheData:
                *dsize = buf[i].Cache.LineSize;
                break;
            default:
                break;
            }
        }
    }
    /* Normal exit falls through into the cleanup label on purpose. */
fail:
    g_free(buf);
}
|
||||
|
||||
#elif defined(__APPLE__)
|
||||
# include <sys/sysctl.h>
|
||||
static void sys_cache_info(int *isize, int *dsize)
|
||||
{
|
||||
/* There's only a single sysctl for both I/D cache line sizes. */
|
||||
long size;
|
||||
size_t len = sizeof(size);
|
||||
if (!sysctlbyname("hw.cachelinesize", &size, &len, NULL, 0)) {
|
||||
*isize = *dsize = size;
|
||||
}
|
||||
}
|
||||
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
|
||||
# include <sys/sysctl.h>
|
||||
static void sys_cache_info(int *isize, int *dsize)
|
||||
{
|
||||
/* There's only a single sysctl for both I/D cache line sizes. */
|
||||
int size;
|
||||
size_t len = sizeof(size);
|
||||
if (!sysctlbyname("machdep.cacheline_size", &size, &len, NULL, 0)) {
|
||||
*isize = *dsize = size;
|
||||
}
|
||||
}
|
||||
#else
|
||||
/* POSIX */
|
||||
|
||||
/* Generic POSIX fallback: query the L1 line sizes via sysconf() where
 * the _SC_LEVEL1_*CACHE_LINESIZE extensions exist (e.g. glibc).
 * Values are only written when the query yields a positive result. */
static void sys_cache_info(int *isize, int *dsize)
{
# ifdef _SC_LEVEL1_ICACHE_LINESIZE
    int tmp_isize = (int) sysconf(_SC_LEVEL1_ICACHE_LINESIZE);
    if (tmp_isize > 0) {
        *isize = tmp_isize;
    }
# endif
# ifdef _SC_LEVEL1_DCACHE_LINESIZE
    int tmp_dsize = (int) sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
    if (tmp_dsize > 0) {
        *dsize = tmp_dsize;
    }
# endif
}
|
||||
#endif /* sys_cache_info */
|
||||
|
||||
/*
|
||||
* Architecture (+ OS) specific detection mechanisms.
|
||||
*/
|
||||
|
||||
#if defined(__aarch64__)
|
||||
|
||||
/* AArch64: fill in any line size the OS probe missed by reading the
 * userspace-accessible cache-type register. */
static void arch_cache_info(int *isize, int *dsize)
{
    if (*isize == 0 || *dsize == 0) {
        uint64_t ctr;

        /* The real cache geometry is in CCSIDR_EL1/CLIDR_EL1/CSSELR_EL1,
           but (at least under Linux) these are marked protected by the
           kernel. However, CTR_EL0 contains the minimum linesize in the
           entire hierarchy, and is used by userspace cache flushing. */
        asm volatile("mrs\t%0, ctr_el0" : "=r"(ctr));
        if (*isize == 0) {
            /* IminLine (bits [3:0]): log2 of line size in 4-byte words. */
            *isize = 4 << (ctr & 0xf);
        }
        if (*dsize == 0) {
            /* DminLine (bits [19:16]): same encoding. */
            *dsize = 4 << ((ctr >> 16) & 0xf);
        }
    }
}
|
||||
|
||||
#elif defined(_ARCH_PPC) && defined(__linux__)
|
||||
# include "elf.h"
|
||||
|
||||
static void arch_cache_info(int *isize, int *dsize)
|
||||
{
|
||||
if (*isize == 0) {
|
||||
*isize = qemu_getauxval(AT_ICACHEBSIZE);
|
||||
}
|
||||
if (*dsize == 0) {
|
||||
*dsize = qemu_getauxval(AT_DCACHEBSIZE);
|
||||
}
|
||||
}
|
||||
|
||||
#else
|
||||
/* No architecture-specific probe available; leave values untouched. */
static void arch_cache_info(int *isize, int *dsize) { }
|
||||
#endif /* arch_cache_info */
|
||||
|
||||
/*
|
||||
* ... and if all else fails ...
|
||||
*/
|
||||
|
||||
/* Last-resort defaults for the cache line sizes. */
static void fallback_cache_info(int *isize, int *dsize)
{
    /* If we can only find one of the two, assume they're the same. */
    if (*isize && !*dsize) {
        *dsize = *isize;
    } else if (!*isize && *dsize) {
        *isize = *dsize;
    } else if (!*isize && !*dsize) {
#if defined(_ARCH_PPC)
        /* For PPC, we're going to use the icache size computed for
           flush_icache_range. Which means that we must use the
           architecture minimum. */
        *isize = *dsize = 16;
#else
        /* Otherwise, 64 bytes is not uncommon. */
        *isize = *dsize = 64;
#endif
    }
}
|
||||
|
||||
/*
 * Populate uc->qemu_icache_linesize by probing, in order: the OS, the
 * architecture, and finally a hard-coded fallback.
 */
void init_cache_info(struct uc_struct *uc)
{
    int isize = 0, dsize = 0;

    sys_cache_info(&isize, &dsize);
    arch_cache_info(&isize, &dsize);
    fallback_cache_info(&isize, &dsize);

    /* Line sizes must be powers of two; fallback guarantees non-zero. */
    assert((isize & (isize - 1)) == 0);
    assert((dsize & (dsize - 1)) == 0);

    /* NOTE(review): only the icache line size is stored; dsize is
     * computed but unused here — confirm that is intentional. */
    uc->qemu_icache_linesize = isize;
}
|
@ -25,7 +25,7 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#include "qemu-common.h"
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/crc32c.h"
|
||||
|
||||
/*
|
||||
@ -113,3 +113,86 @@ uint32_t crc32c(uint32_t crc, const uint8_t *data, unsigned int length)
|
||||
return crc^0xffffffff;
|
||||
}
|
||||
|
||||
/*
|
||||
* This is the CRC-32 table
|
||||
* Generated with:
|
||||
* width = 32 bits
|
||||
* poly = 0xEDB88320
|
||||
*/
|
||||
|
||||
/*
 * Standard (reflected) CRC-32 lookup table, polynomial 0xEDB88320.
 */
static const uint32_t crc32_table[256] = {
    0x00000000U, 0x77073096U, 0xEE0E612CU, 0x990951BAU,
    0x076DC419U, 0x706AF48FU, 0xE963A535U, 0x9E6495A3U,
    0x0EDB8832U, 0x79DCB8A4U, 0xE0D5E91EU, 0x97D2D988U,
    0x09B64C2BU, 0x7EB17CBDU, 0xE7B82D07U, 0x90BF1D91U,
    0x1DB71064U, 0x6AB020F2U, 0xF3B97148U, 0x84BE41DEU,
    0x1ADAD47DU, 0x6DDDE4EBU, 0xF4D4B551U, 0x83D385C7U,
    0x136C9856U, 0x646BA8C0U, 0xFD62F97AU, 0x8A65C9ECU,
    0x14015C4FU, 0x63066CD9U, 0xFA0F3D63U, 0x8D080DF5U,
    0x3B6E20C8U, 0x4C69105EU, 0xD56041E4U, 0xA2677172U,
    0x3C03E4D1U, 0x4B04D447U, 0xD20D85FDU, 0xA50AB56BU,
    0x35B5A8FAU, 0x42B2986CU, 0xDBBBC9D6U, 0xACBCF940U,
    0x32D86CE3U, 0x45DF5C75U, 0xDCD60DCFU, 0xABD13D59U,
    0x26D930ACU, 0x51DE003AU, 0xC8D75180U, 0xBFD06116U,
    0x21B4F4B5U, 0x56B3C423U, 0xCFBA9599U, 0xB8BDA50FU,
    0x2802B89EU, 0x5F058808U, 0xC60CD9B2U, 0xB10BE924U,
    0x2F6F7C87U, 0x58684C11U, 0xC1611DABU, 0xB6662D3DU,
    0x76DC4190U, 0x01DB7106U, 0x98D220BCU, 0xEFD5102AU,
    0x71B18589U, 0x06B6B51FU, 0x9FBFE4A5U, 0xE8B8D433U,
    0x7807C9A2U, 0x0F00F934U, 0x9609A88EU, 0xE10E9818U,
    0x7F6A0DBBU, 0x086D3D2DU, 0x91646C97U, 0xE6635C01U,
    0x6B6B51F4U, 0x1C6C6162U, 0x856530D8U, 0xF262004EU,
    0x6C0695EDU, 0x1B01A57BU, 0x8208F4C1U, 0xF50FC457U,
    0x65B0D9C6U, 0x12B7E950U, 0x8BBEB8EAU, 0xFCB9887CU,
    0x62DD1DDFU, 0x15DA2D49U, 0x8CD37CF3U, 0xFBD44C65U,
    0x4DB26158U, 0x3AB551CEU, 0xA3BC0074U, 0xD4BB30E2U,
    0x4ADFA541U, 0x3DD895D7U, 0xA4D1C46DU, 0xD3D6F4FBU,
    0x4369E96AU, 0x346ED9FCU, 0xAD678846U, 0xDA60B8D0U,
    0x44042D73U, 0x33031DE5U, 0xAA0A4C5FU, 0xDD0D7CC9U,
    0x5005713CU, 0x270241AAU, 0xBE0B1010U, 0xC90C2086U,
    0x5768B525U, 0x206F85B3U, 0xB966D409U, 0xCE61E49FU,
    0x5EDEF90EU, 0x29D9C998U, 0xB0D09822U, 0xC7D7A8B4U,
    0x59B33D17U, 0x2EB40D81U, 0xB7BD5C3BU, 0xC0BA6CADU,
    0xEDB88320U, 0x9ABFB3B6U, 0x03B6E20CU, 0x74B1D29AU,
    0xEAD54739U, 0x9DD277AFU, 0x04DB2615U, 0x73DC1683U,
    0xE3630B12U, 0x94643B84U, 0x0D6D6A3EU, 0x7A6A5AA8U,
    0xE40ECF0BU, 0x9309FF9DU, 0x0A00AE27U, 0x7D079EB1U,
    0xF00F9344U, 0x8708A3D2U, 0x1E01F268U, 0x6906C2FEU,
    0xF762575DU, 0x806567CBU, 0x196C3671U, 0x6E6B06E7U,
    0xFED41B76U, 0x89D32BE0U, 0x10DA7A5AU, 0x67DD4ACCU,
    0xF9B9DF6FU, 0x8EBEEFF9U, 0x17B7BE43U, 0x60B08ED5U,
    0xD6D6A3E8U, 0xA1D1937EU, 0x38D8C2C4U, 0x4FDFF252U,
    0xD1BB67F1U, 0xA6BC5767U, 0x3FB506DDU, 0x48B2364BU,
    0xD80D2BDAU, 0xAF0A1B4CU, 0x36034AF6U, 0x41047A60U,
    0xDF60EFC3U, 0xA867DF55U, 0x316E8EEFU, 0x4669BE79U,
    0xCB61B38CU, 0xBC66831AU, 0x256FD2A0U, 0x5268E236U,
    0xCC0C7795U, 0xBB0B4703U, 0x220216B9U, 0x5505262FU,
    0xC5BA3BBEU, 0xB2BD0B28U, 0x2BB45A92U, 0x5CB36A04U,
    0xC2D7FFA7U, 0xB5D0CF31U, 0x2CD99E8BU, 0x5BDEAE1DU,
    0x9B64C2B0U, 0xEC63F226U, 0x756AA39CU, 0x026D930AU,
    0x9C0906A9U, 0xEB0E363FU, 0x72076785U, 0x05005713U,
    0x95BF4A82U, 0xE2B87A14U, 0x7BB12BAEU, 0x0CB61B38U,
    0x92D28E9BU, 0xE5D5BE0DU, 0x7CDCEFB7U, 0x0BDBDF21U,
    0x86D3D2D4U, 0xF1D4E242U, 0x68DDB3F8U, 0x1FDA836EU,
    0x81BE16CDU, 0xF6B9265BU, 0x6FB077E1U, 0x18B74777U,
    0x88085AE6U, 0xFF0F6A70U, 0x66063BCAU, 0x11010B5CU,
    0x8F659EFFU, 0xF862AE69U, 0x616BFFD3U, 0x166CCF45U,
    0xA00AE278U, 0xD70DD2EEU, 0x4E048354U, 0x3903B3C2U,
    0xA7672661U, 0xD06016F7U, 0x4969474DU, 0x3E6E77DBU,
    0xAED16A4AU, 0xD9D65ADCU, 0x40DF0B66U, 0x37D83BF0U,
    0xA9BCAE53U, 0xDEBB9EC5U, 0x47B2CF7FU, 0x30B5FFE9U,
    0xBDBDF21CU, 0xCABAC28AU, 0x53B39330U, 0x24B4A3A6U,
    0xBAD03605U, 0xCDD70693U, 0x54DE5729U, 0x23D967BFU,
    0xB3667A2EU, 0xC4614AB8U, 0x5D681B02U, 0x2A6F2B94U,
    0xB40BBE37U, 0xC30C8EA1U, 0x5A05DF1BU, 0x2D02EF8DU,
};

/*
 * Compute the standard CRC-32 (IEEE 802.3, reflected, poly 0xEDB88320)
 * of @length bytes at @data. Pass a previous result as @crc to continue
 * incrementally; start with @crc == 0.
 *
 * Fix: the loop index was a signed 'int' compared against the unsigned
 * 'length' (-Wsign-compare, and overflow for length > INT_MAX); use an
 * unsigned index matching the parameter type.
 */
uint32_t crc32(uint32_t crc, const uint8_t *data, unsigned int length)
{
    unsigned int i;

    crc = ~crc;
    for (i = 0; i < length; i++) {
        crc = (crc >> 8) ^ crc32_table[(crc ^ data[i]) & 0xff];
    }
    return ~crc;
}
|
||||
|
@ -21,12 +21,10 @@
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
#include "qemu-common.h"
|
||||
#include "qemu/host-utils.h"
|
||||
#include <math.h>
|
||||
#include <limits.h>
|
||||
#include <errno.h>
|
||||
|
||||
#include <string.h>
|
||||
|
||||
#include "qemu/cutils.h"
|
||||
|
||||
void pstrcpy(char *buf, int buf_size, const char *str)
|
||||
{
|
||||
@ -54,104 +52,3 @@ char *pstrcat(char *buf, int buf_size, const char *s)
|
||||
pstrcpy(buf + len, buf_size - len, s);
|
||||
return buf;
|
||||
}
|
||||
|
||||
/*
 * Return 1 iff @str begins with @val. On a match, if @ptr is non-NULL,
 * store a pointer to the remainder of @str (just past the prefix).
 */
int strstart(const char *str, const char *val, const char **ptr)
{
    const char *s = str;
    const char *prefix = val;

    for (; *prefix != '\0'; s++, prefix++) {
        if (*s != *prefix) {
            return 0;
        }
    }
    if (ptr) {
        *ptr = s;
    }
    return 1;
}
|
||||
|
||||
/* "Find last set": 1-based position of the most significant set bit.
 * NOTE(review): the result for i == 0 depends on clz32(0) — presumably
 * 32, giving 0; confirm against clz32's definition. */
int qemu_fls(int i)
{
    return 32 - clz32(i);
}
|
||||
|
||||
/*
 * Map a size-suffix character (B/K/M/G/T/P/E, case-insensitive via
 * qemu_toupper) to its multiplier in powers of @unit.
 * Returns -1 for an unrecognized suffix.
 */
static int64_t suffix_mul(char suffix, int64_t unit)
{
    switch (qemu_toupper(suffix)) {
    case STRTOSZ_DEFSUFFIX_B:
        return 1;
    case STRTOSZ_DEFSUFFIX_KB:
        return unit;
    case STRTOSZ_DEFSUFFIX_MB:
        return unit * unit;
    case STRTOSZ_DEFSUFFIX_GB:
        return unit * unit * unit;
    case STRTOSZ_DEFSUFFIX_TB:
        return unit * unit * unit * unit;
    case STRTOSZ_DEFSUFFIX_PB:
        return unit * unit * unit * unit * unit;
    case STRTOSZ_DEFSUFFIX_EB:
        return unit * unit * unit * unit * unit * unit;
    }
    return -1;
}
|
||||
|
||||
/*
|
||||
* Convert string to bytes, allowing either B/b for bytes, K/k for KB,
|
||||
* M/m for MB, G/g for GB or T/t for TB. End pointer will be returned
|
||||
* in *end, if not NULL. Return -ERANGE on overflow, Return -EINVAL on
|
||||
* other error.
|
||||
*/
|
||||
/*
 * Convert string to bytes, allowing either B/b for bytes, K/k for KB,
 * M/m for MB, G/g for GB or T/t for TB. End pointer will be returned
 * in *end, if not NULL. Return -ERANGE on overflow, -EINVAL on other
 * error. A fractional value (e.g. "1.5G") requires a multiplier > 1.
 */
int64_t strtosz_suffix_unit(const char *nptr, char **end,
                            const char default_suffix, int64_t unit)
{
    int64_t retval = -EINVAL;
    char *endptr;
    unsigned char c;
    int mul_required = 0;     /* set when the value has a fraction */
    double val, mul, integral, fraction;

    errno = 0;
    val = strtod(nptr, &endptr);
    if (isnan(val) || endptr == nptr || errno != 0) {
        goto fail;
    }
    fraction = modf(val, &integral);
    if (fraction != 0) {
        mul_required = 1;
    }
    /* Try the character after the number as a suffix; otherwise fall
     * back to @default_suffix (which must be valid). */
    c = *endptr;
    mul = (double)suffix_mul(c, unit);
    if (mul >= 0) {
        endptr++;
    } else {
        mul = (double)suffix_mul(default_suffix, unit);
        assert(mul >= 0);
    }
    /* Reject fractional byte counts, e.g. "1.5B". */
    if (mul == 1 && mul_required) {
        goto fail;
    }
    /* NOTE(review): overflow is checked in double precision, so values
     * within ~2^53 of INT64_MAX may round — confirm acceptable. */
    if ((val * mul >= (double)INT64_MAX) || val < 0) {
        retval = -ERANGE;
        goto fail;
    }
    retval = (int64_t)(val * mul);

fail:
    /* On every path report how far parsing got. */
    if (end) {
        *end = endptr;
    }

    return retval;
}
|
||||
|
||||
/* strtosz_suffix_unit() with the conventional 1024-based units. */
int64_t strtosz_suffix(const char *nptr, char **end, const char default_suffix)
{
    return strtosz_suffix_unit(nptr, end, default_suffix, 1024);
}
|
||||
|
||||
/* Parse a size string, treating a bare number as megabytes. */
int64_t strtosz(const char *nptr, char **end)
{
    return strtosz_suffix(nptr, end, STRTOSZ_DEFSUFFIX_MB);
}
|
||||
|
@ -1,129 +0,0 @@
|
||||
/*
|
||||
* QEMU Error Objects
|
||||
*
|
||||
* Copyright IBM, Corp. 2011
|
||||
*
|
||||
* Authors:
|
||||
* Anthony Liguori <aliguori@us.ibm.com>
|
||||
*
|
||||
* This work is licensed under the terms of the GNU LGPL, version 2. See
|
||||
* the COPYING.LIB file in the top-level directory.
|
||||
*/
|
||||
|
||||
#include "qemu-common.h"
|
||||
#include "qapi/error.h"
|
||||
|
||||
struct Error
|
||||
{
|
||||
char *msg;
|
||||
ErrorClass err_class;
|
||||
};
|
||||
|
||||
Error *error_abort;
|
||||
|
||||
/*
 * Allocate a new Error with class @err_class and a printf-formatted
 * message, and store it in *@errp.
 * A NULL @errp means the caller ignores errors; *@errp must be NULL on
 * entry (errors are never overwritten). errno is preserved across the
 * call so callers can report it afterwards.
 */
void error_set(Error **errp, ErrorClass err_class, const char *fmt, ...)
{
    Error *err;
    va_list ap;
    int saved_errno = errno;

    if (errp == NULL) {
        return;
    }
    assert(*errp == NULL);

    err = g_malloc0(sizeof(*err));

    va_start(ap, fmt);
    err->msg = g_strdup_vprintf(fmt, ap);
    va_end(ap);
    err->err_class = err_class;

    if (errp == &error_abort) {
        /* NOTE(review): abort() on error_abort is deliberately disabled
         * here — confirm this divergence from upstream is intentional. */
        // abort();
    }

    *errp = err;

    errno = saved_errno;
}
|
||||
|
||||
/*
 * Like error_set(), but append ": strerror(@os_errno)" to the message
 * when @os_errno is non-zero. errno is preserved across the call.
 */
void error_set_errno(Error **errp, int os_errno, ErrorClass err_class,
                     const char *fmt, ...)
{
    Error *err;
    char *msg1;
    va_list ap;
    int saved_errno = errno;

    if (errp == NULL) {
        return;
    }
    assert(*errp == NULL);

    err = g_malloc0(sizeof(*err));

    va_start(ap, fmt);
    msg1 = g_strdup_vprintf(fmt, ap);
    if (os_errno != 0) {
        /* Replace msg1 with "msg1: <errno text>". */
        err->msg = g_strdup_printf("%s: %s", msg1, strerror(os_errno));
        g_free(msg1);
    } else {
        err->msg = msg1;
    }
    va_end(ap);
    err->err_class = err_class;

    if (errp == &error_abort) {
        /* NOTE(review): abort() deliberately disabled — confirm. */
        // abort();
    }

    *errp = err;

    errno = saved_errno;
}
|
||||
|
||||
/* Convenience wrapper: report a failure to open @filename with the
 * system error @os_errno. */
void error_setg_file_open(Error **errp, int os_errno, const char *filename)
{
    error_setg_errno(errp, os_errno, "Could not open '%s'", filename);
}
|
||||
|
||||
Error *error_copy(const Error *err)
|
||||
{
|
||||
Error *err_new;
|
||||
|
||||
err_new = g_malloc0(sizeof(*err));
|
||||
err_new->msg = g_strdup(err->msg);
|
||||
err_new->err_class = err->err_class;
|
||||
|
||||
return err_new;
|
||||
}
|
||||
|
||||
/* Return the classification of @err; @err must be non-NULL. */
ErrorClass error_get_class(const Error *err)
{
    return err->err_class;
}
|
||||
|
||||
const char *error_get_pretty(Error *err)
|
||||
{
|
||||
return err->msg;
|
||||
}
|
||||
|
||||
void error_free(Error *err)
|
||||
{
|
||||
if (err) {
|
||||
g_free(err->msg);
|
||||
g_free(err);
|
||||
}
|
||||
}
|
||||
|
||||
void error_propagate(Error **dst_errp, Error *local_err)
|
||||
{
|
||||
if (local_err && dst_errp == &error_abort) {
|
||||
// abort();
|
||||
} else if (dst_errp && !*dst_errp) {
|
||||
*dst_errp = local_err;
|
||||
} else if (local_err) {
|
||||
error_free(local_err);
|
||||
}
|
||||
}
|
@ -22,7 +22,6 @@
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "qemu-common.h"
|
||||
#include "qemu/osdep.h"
|
||||
|
||||
#ifdef CONFIG_GETAUXVAL
|
||||
@ -76,7 +75,7 @@ static const ElfW_auxv_t *qemu_init_auxval(void)
|
||||
auxv = a = g_realloc(a, size);
|
||||
r = read(fd, (char *)a + ofs, ofs);
|
||||
} while (r == ofs);
|
||||
}
|
||||
}
|
||||
|
||||
close(fd);
|
||||
return a;
|
||||
@ -99,16 +98,6 @@ unsigned long qemu_getauxval(unsigned long type)
|
||||
return 0;
|
||||
}
|
||||
|
||||
#elif defined(__FreeBSD__)
|
||||
#include <sys/auxv.h>
|
||||
|
||||
unsigned long qemu_getauxval(unsigned long type)
|
||||
{
|
||||
unsigned long aux = 0;
|
||||
elf_aux_info(type, &aux, sizeof(aux));
|
||||
return aux;
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
unsigned long qemu_getauxval(unsigned long type)
|
||||
|
81
qemu/util/guest-random.c
Normal file
81
qemu/util/guest-random.c
Normal file
@ -0,0 +1,81 @@
|
||||
/*
|
||||
* QEMU guest-visible random functions
|
||||
*
|
||||
* Copyright 2019 Linaro, Ltd.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation; either version 2 of the License, or (at your option)
|
||||
* any later version.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/cutils.h"
|
||||
//#include "qapi/error.h"
|
||||
#include "qemu/guest-random.h"
|
||||
#include "crypto/random.h"
|
||||
//#include "sysemu/replay.h"
|
||||
|
||||
|
||||
#ifndef _MSC_VER
|
||||
static __thread GRand *thread_rand;
|
||||
#endif
|
||||
static bool deterministic = true;
|
||||
|
||||
|
||||
static int glib_random_bytes(void *buf, size_t len)
|
||||
{
|
||||
#ifndef _MSC_VER
|
||||
GRand *rand = thread_rand;
|
||||
size_t i;
|
||||
uint32_t x;
|
||||
|
||||
if (unlikely(rand == NULL)) {
|
||||
/* Thread not initialized for a cpu, or main w/o -seed. */
|
||||
thread_rand = rand = g_rand_new();
|
||||
}
|
||||
|
||||
for (i = 0; i + 4 <= len; i += 4) {
|
||||
x = g_rand_int(rand);
|
||||
__builtin_memcpy(buf + i, &x, 4);
|
||||
}
|
||||
if (i < len) {
|
||||
x = g_rand_int(rand);
|
||||
__builtin_memcpy(buf + i, &x, i - len);
|
||||
}
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
int qemu_guest_getrandom(void *buf, size_t len)
|
||||
{
|
||||
return glib_random_bytes(buf, len);
|
||||
}
|
||||
|
||||
void qemu_guest_getrandom_nofail(void *buf, size_t len)
|
||||
{
|
||||
(void)qemu_guest_getrandom(buf, len);
|
||||
}
|
||||
|
||||
uint64_t qemu_guest_random_seed_thread_part1(void)
|
||||
{
|
||||
if (deterministic) {
|
||||
uint64_t ret;
|
||||
glib_random_bytes(&ret, sizeof(ret));
|
||||
return ret;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void qemu_guest_random_seed_thread_part2(uint64_t seed)
|
||||
{
|
||||
#ifndef _MSC_VER
|
||||
g_assert(thread_rand == NULL);
|
||||
if (deterministic) {
|
||||
thread_rand =
|
||||
g_rand_new_with_seed_array((const guint32 *)&seed,
|
||||
sizeof(seed) / sizeof(guint32));
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
@ -23,8 +23,7 @@
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <stdlib.h>
|
||||
#include "unicorn/platform.h"
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/host-utils.h"
|
||||
|
||||
#ifndef CONFIG_INT128
|
||||
@ -54,10 +53,10 @@ static inline void mul64(uint64_t *plow, uint64_t *phigh,
|
||||
rh.ll = (uint64_t)a0.l.high * b0.l.high;
|
||||
|
||||
c = (uint64_t)rl.l.high + rm.l.low + rn.l.low;
|
||||
rl.l.high = (uint32_t)c;
|
||||
rl.l.high = c;
|
||||
c >>= 32;
|
||||
c = c + rm.l.high + rn.l.high + rh.l.low;
|
||||
rh.l.low = (uint32_t)c;
|
||||
rh.l.low = c;
|
||||
rh.l.high += (uint32_t)(c >> 32);
|
||||
|
||||
*plow = rl.ll;
|
||||
@ -160,8 +159,69 @@ int divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
|
||||
|
||||
return overflow;
|
||||
}
|
||||
#else
|
||||
// avoid empty object file
|
||||
void dummy_func(void);
|
||||
void dummy_func(void) {}
|
||||
#endif
|
||||
|
||||
/**
|
||||
* urshift - 128-bit Unsigned Right Shift.
|
||||
* @plow: in/out - lower 64-bit integer.
|
||||
* @phigh: in/out - higher 64-bit integer.
|
||||
* @shift: in - bytes to shift, between 0 and 127.
|
||||
*
|
||||
* Result is zero-extended and stored in plow/phigh, which are
|
||||
* input/output variables. Shift values outside the range will
|
||||
* be mod to 128. In other words, the caller is responsible to
|
||||
* verify/assert both the shift range and plow/phigh pointers.
|
||||
*/
|
||||
void urshift(uint64_t *plow, uint64_t *phigh, int32_t shift)
|
||||
{
|
||||
shift &= 127;
|
||||
if (shift == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
uint64_t h = *phigh >> (shift & 63);
|
||||
if (shift >= 64) {
|
||||
*plow = h;
|
||||
*phigh = 0;
|
||||
} else {
|
||||
*plow = (*plow >> (shift & 63)) | (*phigh << (64 - (shift & 63)));
|
||||
*phigh = h;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* ulshift - 128-bit Unsigned Left Shift.
|
||||
* @plow: in/out - lower 64-bit integer.
|
||||
* @phigh: in/out - higher 64-bit integer.
|
||||
* @shift: in - bytes to shift, between 0 and 127.
|
||||
* @overflow: out - true if any 1-bit is shifted out.
|
||||
*
|
||||
* Result is zero-extended and stored in plow/phigh, which are
|
||||
* input/output variables. Shift values outside the range will
|
||||
* be mod to 128. In other words, the caller is responsible to
|
||||
* verify/assert both the shift range and plow/phigh pointers.
|
||||
*/
|
||||
void ulshift(uint64_t *plow, uint64_t *phigh, int32_t shift, bool *overflow)
|
||||
{
|
||||
uint64_t low = *plow;
|
||||
uint64_t high = *phigh;
|
||||
|
||||
shift &= 127;
|
||||
if (shift == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
/* check if any bit will be shifted out */
|
||||
urshift(&low, &high, 128 - shift);
|
||||
if (low | high) {
|
||||
*overflow = true;
|
||||
}
|
||||
|
||||
if (shift >= 64) {
|
||||
*phigh = *plow << (shift & 63);
|
||||
*plow = 0;
|
||||
} else {
|
||||
*phigh = (*plow >> (64 - (shift & 63))) | (*phigh << (shift & 63));
|
||||
*plow = *plow << shift;
|
||||
}
|
||||
}
|
||||
|
@ -1,57 +0,0 @@
|
||||
/*
|
||||
* QEMU Module Infrastructure
|
||||
*
|
||||
* Copyright IBM, Corp. 2009
|
||||
*
|
||||
* Authors:
|
||||
* Anthony Liguori <aliguori@us.ibm.com>
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2. See
|
||||
* the COPYING file in the top-level directory.
|
||||
*
|
||||
* Contributions after 2012-01-13 are licensed under the terms of the
|
||||
* GNU GPL, version 2 or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include "qemu-common.h"
|
||||
#include "qemu/queue.h"
|
||||
|
||||
#include "uc_priv.h"
|
||||
|
||||
static void init_lists(struct uc_struct *uc)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < MODULE_INIT_MAX; i++) {
|
||||
QTAILQ_INIT(&uc->init_type_list[i]);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static ModuleTypeList *find_type(struct uc_struct *uc, module_init_type type)
|
||||
{
|
||||
ModuleTypeList *l;
|
||||
|
||||
init_lists(uc);
|
||||
|
||||
l = &uc->init_type_list[type];
|
||||
|
||||
return l;
|
||||
}
|
||||
|
||||
static void module_load(module_init_type type)
|
||||
{
|
||||
}
|
||||
|
||||
void module_call_init(struct uc_struct *uc, module_init_type type)
|
||||
{
|
||||
ModuleTypeList *l;
|
||||
ModuleEntry *e;
|
||||
|
||||
module_load(type);
|
||||
l = find_type(uc, type);
|
||||
|
||||
QTAILQ_FOREACH(e, l, node) {
|
||||
e->init();
|
||||
}
|
||||
}
|
90
qemu/util/osdep.c
Normal file
90
qemu/util/osdep.c
Normal file
@ -0,0 +1,90 @@
|
||||
/*
|
||||
* QEMU low level functions
|
||||
*
|
||||
* Copyright (c) 2003 Fabrice Bellard
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
#include "qemu/osdep.h"
|
||||
|
||||
/* Needed early for CONFIG_BSD etc. */
|
||||
|
||||
#ifdef CONFIG_SOLARIS
|
||||
#include <sys/statvfs.h>
|
||||
/* See MySQL bug #7156 (http://bugs.mysql.com/bug.php?id=7156) for
|
||||
discussion about Solaris header problems */
|
||||
extern int madvise(char *, size_t, int);
|
||||
#endif
|
||||
|
||||
#include "qemu-common.h"
|
||||
#include "qemu/cutils.h"
|
||||
|
||||
int qemu_madvise(void *addr, size_t len, int advice)
|
||||
{
|
||||
if (advice == QEMU_MADV_INVALID) {
|
||||
errno = EINVAL;
|
||||
return -1;
|
||||
}
|
||||
#if defined(CONFIG_MADVISE)
|
||||
return madvise(addr, len, advice);
|
||||
#elif defined(CONFIG_POSIX_MADVISE)
|
||||
return posix_madvise(addr, len, advice);
|
||||
#else
|
||||
errno = EINVAL;
|
||||
return -1;
|
||||
#endif
|
||||
}
|
||||
|
||||
static int qemu_mprotect__osdep(void *addr, size_t size, int prot)
|
||||
{
|
||||
#ifdef _WIN32
|
||||
DWORD old_protect;
|
||||
|
||||
if (!VirtualProtect(addr, size, prot, &old_protect)) {
|
||||
// g_autofree gchar *emsg = g_win32_error_message(GetLastError());
|
||||
// error_report("%s: VirtualProtect failed: %s", __func__, emsg);
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
#else
|
||||
if (mprotect(addr, size, prot)) {
|
||||
// error_report("%s: mprotect failed: %s", __func__, strerror(errno));
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
int qemu_mprotect_rwx(void *addr, size_t size)
|
||||
{
|
||||
#ifdef _WIN32
|
||||
return qemu_mprotect__osdep(addr, size, PAGE_EXECUTE_READWRITE);
|
||||
#else
|
||||
return qemu_mprotect__osdep(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
|
||||
#endif
|
||||
}
|
||||
|
||||
int qemu_mprotect_none(void *addr, size_t size)
|
||||
{
|
||||
#ifdef _WIN32
|
||||
return qemu_mprotect__osdep(addr, size, PAGE_NOACCESS);
|
||||
#else
|
||||
return qemu_mprotect__osdep(addr, size, PROT_NONE);
|
||||
#endif
|
||||
}
|
@ -26,40 +26,23 @@
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#if defined(__linux__) && (defined(__x86_64__) || defined(__arm__))
|
||||
/* Use 2 MiB alignment so transparent hugepages can be used by KVM.
|
||||
Valgrind does not support alignments larger than 1 MiB,
|
||||
therefore we need special code which handles running on Valgrind. */
|
||||
# define QEMU_VMALLOC_ALIGN (512 * 4096)
|
||||
#elif defined(__linux__) && defined(__s390x__)
|
||||
/* Use 1 MiB (segment size) alignment so gmap can be used by KVM. */
|
||||
# define QEMU_VMALLOC_ALIGN (256 * 4096)
|
||||
#else
|
||||
# define QEMU_VMALLOC_ALIGN getpagesize()
|
||||
#endif
|
||||
#define HUGETLBFS_MAGIC 0x958458f6
|
||||
|
||||
#include "unicorn/platform.h"
|
||||
#include "config-host.h"
|
||||
#include "sysemu/sysemu.h"
|
||||
#include <sys/mman.h>
|
||||
#include <libgen.h>
|
||||
#include <setjmp.h>
|
||||
#ifdef __HAIKU__
|
||||
#include <posix/signal.h>
|
||||
#else
|
||||
#include <sys/signal.h>
|
||||
#endif
|
||||
#include <uc_priv.h>
|
||||
#include "qemu/osdep.h"
|
||||
|
||||
#ifdef CONFIG_LINUX
|
||||
#if !defined(__CYGWIN__)
|
||||
#include <sys/syscall.h>
|
||||
#endif
|
||||
#include <sys/vfs.h>
|
||||
#endif
|
||||
#include <linux/mman.h>
|
||||
#else /* !CONFIG_LINUX */
|
||||
#define MAP_SYNC 0x0
|
||||
#define MAP_SHARED_VALIDATE 0x0
|
||||
#endif /* CONFIG_LINUX */
|
||||
|
||||
#ifdef __FreeBSD__
|
||||
#include <sys/sysctl.h>
|
||||
#ifndef __MINGW32__
|
||||
static void *qemu_ram_mmap(struct uc_struct *uc,
|
||||
size_t size,
|
||||
size_t align,
|
||||
bool shared);
|
||||
|
||||
static void qemu_ram_munmap(struct uc_struct *uc, void *ptr, size_t size);
|
||||
#endif
|
||||
|
||||
void *qemu_oom_check(void *ptr)
|
||||
@ -79,7 +62,7 @@ void *qemu_try_memalign(size_t alignment, size_t size)
|
||||
alignment = sizeof(void*);
|
||||
}
|
||||
|
||||
#if defined(_POSIX_C_SOURCE) && !defined(__sun__)
|
||||
#if defined(CONFIG_POSIX_MEMALIGN)
|
||||
int ret;
|
||||
ret = posix_memalign(&ptr, alignment, size);
|
||||
if (ret != 0) {
|
||||
@ -88,9 +71,12 @@ void *qemu_try_memalign(size_t alignment, size_t size)
|
||||
}
|
||||
#elif defined(CONFIG_BSD)
|
||||
ptr = valloc(size);
|
||||
#elif defined(__MINGW32__)
|
||||
ptr = __mingw_aligned_malloc(size, alignment);
|
||||
#else
|
||||
ptr = memalign(alignment, size);
|
||||
#endif
|
||||
//trace_qemu_memalign(alignment, size, ptr);
|
||||
return ptr;
|
||||
}
|
||||
|
||||
@ -99,14 +85,32 @@ void *qemu_memalign(size_t alignment, size_t size)
|
||||
return qemu_oom_check(qemu_try_memalign(alignment, size));
|
||||
}
|
||||
|
||||
/* alloc shared memory pages */
|
||||
void *qemu_anon_ram_alloc(size_t size, uint64_t *alignment)
|
||||
#ifdef __MINGW32__
|
||||
static int get_allocation_granularity(void)
|
||||
{
|
||||
SYSTEM_INFO system_info;
|
||||
|
||||
GetSystemInfo(&system_info);
|
||||
return system_info.dwAllocationGranularity;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* alloc shared memory pages */
|
||||
void *qemu_anon_ram_alloc(struct uc_struct *uc, size_t size, uint64_t *alignment)
|
||||
{
|
||||
#ifdef __MINGW32__
|
||||
void *ptr;
|
||||
|
||||
ptr = VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE);
|
||||
// trace_qemu_anon_ram_alloc(size, ptr);
|
||||
|
||||
if (ptr && alignment) {
|
||||
*alignment = MAX(get_allocation_granularity(), getpagesize());
|
||||
}
|
||||
return ptr;
|
||||
#else
|
||||
size_t align = QEMU_VMALLOC_ALIGN;
|
||||
size_t total = size + align - getpagesize();
|
||||
void *ptr = mmap(0, total, PROT_READ | PROT_WRITE,
|
||||
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
|
||||
size_t offset = QEMU_ALIGN_UP((uintptr_t)ptr, align) - (uintptr_t)ptr;
|
||||
void *ptr = qemu_ram_mmap(uc, size, align, false);
|
||||
|
||||
if (ptr == MAP_FAILED) {
|
||||
return NULL;
|
||||
@ -115,27 +119,156 @@ void *qemu_anon_ram_alloc(size_t size, uint64_t *alignment)
|
||||
if (alignment) {
|
||||
*alignment = align;
|
||||
}
|
||||
ptr += offset;
|
||||
total -= offset;
|
||||
|
||||
//trace_qemu_anon_ram_alloc(size, ptr);
|
||||
return ptr;
|
||||
#endif
|
||||
}
|
||||
|
||||
void qemu_vfree(void *ptr)
|
||||
{
|
||||
#ifdef __MINGW32__
|
||||
if (ptr) {
|
||||
VirtualFree(ptr, 0, MEM_RELEASE);
|
||||
}
|
||||
#else
|
||||
//trace_qemu_vfree(ptr);
|
||||
free(ptr);
|
||||
#endif
|
||||
}
|
||||
|
||||
void qemu_anon_ram_free(struct uc_struct *uc, void *ptr, size_t size)
|
||||
{
|
||||
#ifdef __MINGW32__
|
||||
if (ptr) {
|
||||
VirtualFree(ptr, 0, MEM_RELEASE);
|
||||
}
|
||||
#else
|
||||
//trace_qemu_anon_ram_free(ptr, size);
|
||||
qemu_ram_munmap(uc, ptr, size);
|
||||
#endif
|
||||
}
|
||||
|
||||
#if defined(__powerpc64__) && defined(__linux__)
|
||||
static size_t qemu_fd_getpagesize(struct uc_struct *uc)
|
||||
{
|
||||
#ifdef CONFIG_LINUX
|
||||
#ifdef __sparc__
|
||||
/* SPARC Linux needs greater alignment than the pagesize */
|
||||
return QEMU_VMALLOC_ALIGN;
|
||||
#endif
|
||||
#endif
|
||||
|
||||
return uc->qemu_real_host_page_size;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef __MINGW32__
|
||||
static void *qemu_ram_mmap(struct uc_struct *uc,
|
||||
size_t size,
|
||||
size_t align,
|
||||
bool shared)
|
||||
{
|
||||
int flags;
|
||||
int map_sync_flags = 0;
|
||||
int guardfd;
|
||||
size_t offset;
|
||||
size_t pagesize;
|
||||
size_t total;
|
||||
void *guardptr;
|
||||
void *ptr;
|
||||
|
||||
/*
|
||||
* Note: this always allocates at least one extra page of virtual address
|
||||
* space, even if size is already aligned.
|
||||
*/
|
||||
total = size + align;
|
||||
|
||||
#if defined(__powerpc64__) && defined(__linux__)
|
||||
/* On ppc64 mappings in the same segment (aka slice) must share the same
|
||||
* page size. Since we will be re-allocating part of this segment
|
||||
* from the supplied fd, we should make sure to use the same page size, to
|
||||
* this end we mmap the supplied fd. In this case, set MAP_NORESERVE to
|
||||
* avoid allocating backing store memory.
|
||||
* We do this unless we are using the system page size, in which case
|
||||
* anonymous memory is OK.
|
||||
*/
|
||||
flags = MAP_PRIVATE;
|
||||
pagesize = qemu_fd_getpagesize(uc);
|
||||
if (pagesize == uc->qemu_real_host_page_size) {
|
||||
guardfd = -1;
|
||||
flags |= MAP_ANONYMOUS;
|
||||
} else {
|
||||
guardfd = -1;
|
||||
flags |= MAP_NORESERVE;
|
||||
}
|
||||
#else
|
||||
guardfd = -1;
|
||||
pagesize = uc->qemu_real_host_page_size;
|
||||
flags = MAP_PRIVATE | MAP_ANONYMOUS;
|
||||
#endif
|
||||
|
||||
guardptr = mmap(0, total, PROT_NONE, flags, guardfd, 0);
|
||||
|
||||
if (guardptr == MAP_FAILED) {
|
||||
return MAP_FAILED;
|
||||
}
|
||||
|
||||
assert(is_power_of_2(align));
|
||||
/* Always align to host page size */
|
||||
assert(align >= pagesize);
|
||||
|
||||
flags = MAP_FIXED;
|
||||
flags |= MAP_ANONYMOUS;
|
||||
flags |= shared ? MAP_SHARED : MAP_PRIVATE;
|
||||
|
||||
offset = QEMU_ALIGN_UP((uintptr_t)guardptr, align) - (uintptr_t)guardptr;
|
||||
|
||||
ptr = mmap(guardptr + offset, size, PROT_READ | PROT_WRITE,
|
||||
flags | map_sync_flags, -1, 0);
|
||||
|
||||
if (ptr == MAP_FAILED && map_sync_flags) {
|
||||
/*
|
||||
* if map failed with MAP_SHARED_VALIDATE | MAP_SYNC,
|
||||
* we will remove these flags to handle compatibility.
|
||||
*/
|
||||
ptr = mmap(guardptr + offset, size, PROT_READ | PROT_WRITE,
|
||||
flags, -1, 0);
|
||||
}
|
||||
|
||||
if (ptr == MAP_FAILED) {
|
||||
munmap(guardptr, total);
|
||||
return MAP_FAILED;
|
||||
}
|
||||
|
||||
if (offset > 0) {
|
||||
munmap(ptr - offset, offset);
|
||||
munmap(guardptr, offset);
|
||||
}
|
||||
if (total > size) {
|
||||
munmap(ptr + size, total - size);
|
||||
|
||||
/*
|
||||
* Leave a single PROT_NONE page allocated after the RAM block, to serve as
|
||||
* a guard page guarding against potential buffer overflows.
|
||||
*/
|
||||
total -= offset;
|
||||
if (total > size + pagesize) {
|
||||
munmap(ptr + size + pagesize, total - size - pagesize);
|
||||
}
|
||||
|
||||
return ptr;
|
||||
}
|
||||
|
||||
void qemu_vfree(void *ptr)
|
||||
static void qemu_ram_munmap(struct uc_struct *uc, void *ptr, size_t size)
|
||||
{
|
||||
free(ptr);
|
||||
}
|
||||
size_t pagesize;
|
||||
|
||||
void qemu_anon_ram_free(void *ptr, size_t size)
|
||||
{
|
||||
if (ptr) {
|
||||
munmap(ptr, size);
|
||||
/* Unmap both the RAM block and the guard page */
|
||||
#if defined(__powerpc64__) && defined(__linux__)
|
||||
pagesize = qemu_fd_getpagesize(uc);
|
||||
#else
|
||||
pagesize = uc->qemu_real_host_page_size;
|
||||
#endif
|
||||
munmap(ptr, size + pagesize);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
@ -2,7 +2,7 @@
|
||||
* os-win32.c
|
||||
*
|
||||
* Copyright (c) 2003-2008 Fabrice Bellard
|
||||
* Copyright (c) 2010 Red Hat, Inc.
|
||||
* Copyright (c) 2010-2016 Red Hat, Inc.
|
||||
*
|
||||
* QEMU library functions for win32 which are shared between QEMU and
|
||||
* the QEMU tools.
|
||||
@ -25,21 +25,18 @@
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*
|
||||
* The implementation of g_poll (functions poll_rest, g_poll) at the end of
|
||||
* this file are based on code from GNOME glib-2 and use a different license,
|
||||
* see the license comment there.
|
||||
*/
|
||||
#include <winsock2.h>
|
||||
|
||||
#include <uc_priv.h>
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include <windows.h>
|
||||
|
||||
#include <stdlib.h>
|
||||
#include "config-host.h"
|
||||
#include "qemu-common.h"
|
||||
#include "sysemu/sysemu.h"
|
||||
|
||||
/* this must come after including "trace.h" */
|
||||
/* The pragmas are to fix this issue: https://connect.microsoft.com/VisualStudio/feedback/details/976983 */
|
||||
#pragma warning(push)
|
||||
#pragma warning(disable : 4091)
|
||||
#include <shlobj.h>
|
||||
#pragma warning(pop)
|
||||
|
||||
void *qemu_oom_check(void *ptr)
|
||||
{
|
||||
if (ptr == NULL) {
|
||||
@ -57,7 +54,7 @@ void *qemu_try_memalign(size_t alignment, size_t size)
|
||||
abort();
|
||||
}
|
||||
ptr = VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE);
|
||||
// trace_qemu_memalign(alignment, size, ptr);
|
||||
//trace_qemu_memalign(alignment, size, ptr);
|
||||
return ptr;
|
||||
}
|
||||
|
||||
@ -65,36 +62,45 @@ void *qemu_memalign(size_t alignment, size_t size)
|
||||
{
|
||||
return qemu_oom_check(qemu_try_memalign(alignment, size));
|
||||
}
|
||||
|
||||
void *qemu_anon_ram_alloc(size_t size, uint64_t *align)
|
||||
|
||||
static int get_allocation_granularity(void)
|
||||
{
|
||||
SYSTEM_INFO system_info;
|
||||
|
||||
GetSystemInfo(&system_info);
|
||||
return system_info.dwAllocationGranularity;
|
||||
}
|
||||
|
||||
void *qemu_anon_ram_alloc(struct uc_struct *uc, size_t size, uint64_t *align)
|
||||
{
|
||||
void *ptr;
|
||||
|
||||
/* FIXME: this is not exactly optimal solution since VirtualAlloc
|
||||
has 64Kb granularity, but at least it guarantees us that the
|
||||
memory is page aligned. */
|
||||
ptr = VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE);
|
||||
// trace_qemu_anon_ram_alloc(size, ptr);
|
||||
|
||||
if (ptr && align) {
|
||||
*align = MAX(get_allocation_granularity(), getpagesize());
|
||||
}
|
||||
return ptr;
|
||||
}
|
||||
|
||||
void qemu_vfree(void *ptr)
|
||||
{
|
||||
// trace_qemu_vfree(ptr);
|
||||
//trace_qemu_vfree(ptr);
|
||||
if (ptr) {
|
||||
VirtualFree(ptr, 0, MEM_RELEASE);
|
||||
}
|
||||
}
|
||||
|
||||
void qemu_anon_ram_free(void *ptr, size_t size)
|
||||
void qemu_anon_ram_free(struct uc_struct *uc, void *ptr, size_t size)
|
||||
{
|
||||
// trace_qemu_anon_ram_free(ptr, size);
|
||||
//trace_qemu_anon_ram_free(ptr, size);
|
||||
if (ptr) {
|
||||
VirtualFree(ptr, 0, MEM_RELEASE);
|
||||
}
|
||||
}
|
||||
|
||||
size_t getpagesize(void)
|
||||
int getpagesize(void)
|
||||
{
|
||||
SYSTEM_INFO system_info;
|
||||
|
||||
|
16
qemu/util/pagesize.c
Normal file
16
qemu/util/pagesize.c
Normal file
@ -0,0 +1,16 @@
|
||||
/*
|
||||
* pagesize.c - query the host about its page size
|
||||
*
|
||||
* Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
|
||||
* License: GNU GPL, version 2 or later.
|
||||
* See the COPYING file in the top-level directory.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
|
||||
#include <uc_priv.h>
|
||||
|
||||
void init_real_host_page_size(struct uc_struct *uc)
|
||||
{
|
||||
uc->qemu_real_host_page_size = getpagesize();
|
||||
}
|
219
qemu/util/qdist.c
Normal file
219
qemu/util/qdist.c
Normal file
@ -0,0 +1,219 @@
|
||||
/*
|
||||
* qdist.c - QEMU helpers for handling frequency distributions of data.
|
||||
*
|
||||
* Copyright (C) 2016, Emilio G. Cota <cota@braap.org>
|
||||
*
|
||||
* License: GNU GPL, version 2 or later.
|
||||
* See the COPYING file in the top-level directory.
|
||||
*/
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/qdist.h"
|
||||
|
||||
#include <math.h>
|
||||
#ifndef NAN
|
||||
#define NAN (0.0 / 0.0)
|
||||
#endif
|
||||
|
||||
#define QDIST_EMPTY_STR "(empty)"
|
||||
|
||||
void qdist_init(struct qdist *dist)
|
||||
{
|
||||
dist->entries = g_new(struct qdist_entry, 1);
|
||||
dist->size = 1;
|
||||
dist->n = 0;
|
||||
}
|
||||
|
||||
void qdist_destroy(struct qdist *dist)
|
||||
{
|
||||
g_free(dist->entries);
|
||||
}
|
||||
|
||||
static inline int qdist_cmp_double(double a, double b)
|
||||
{
|
||||
if (a > b) {
|
||||
return 1;
|
||||
} else if (a < b) {
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int qdist_cmp(const void *ap, const void *bp)
|
||||
{
|
||||
const struct qdist_entry *a = ap;
|
||||
const struct qdist_entry *b = bp;
|
||||
|
||||
return qdist_cmp_double(a->x, b->x);
|
||||
}
|
||||
|
||||
void qdist_add(struct qdist *dist, double x, long count)
|
||||
{
|
||||
struct qdist_entry *entry = NULL;
|
||||
|
||||
if (dist->n) {
|
||||
struct qdist_entry e;
|
||||
|
||||
e.x = x;
|
||||
entry = bsearch(&e, dist->entries, dist->n, sizeof(e), qdist_cmp);
|
||||
}
|
||||
|
||||
if (entry) {
|
||||
entry->count += count;
|
||||
return;
|
||||
}
|
||||
|
||||
if (unlikely(dist->n == dist->size)) {
|
||||
dist->size *= 2;
|
||||
dist->entries = g_renew(struct qdist_entry, dist->entries, dist->size);
|
||||
}
|
||||
dist->n++;
|
||||
entry = &dist->entries[dist->n - 1];
|
||||
entry->x = x;
|
||||
entry->count = count;
|
||||
qsort(dist->entries, dist->n, sizeof(*entry), qdist_cmp);
|
||||
}
|
||||
|
||||
void qdist_inc(struct qdist *dist, double x)
|
||||
{
|
||||
qdist_add(dist, x, 1);
|
||||
}
|
||||
|
||||
/*
|
||||
* Bin the distribution in @from into @n bins of consecutive, non-overlapping
|
||||
* intervals, copying the result to @to.
|
||||
*
|
||||
* This function is internal to qdist: only this file and test code should
|
||||
* ever call it.
|
||||
*
|
||||
* Note: calling this function on an already-binned qdist is a bug.
|
||||
*
|
||||
* If @n == 0 or @from->n == 1, use @from->n.
|
||||
*/
|
||||
void qdist_bin__internal(struct qdist *to, const struct qdist *from, size_t n)
|
||||
{
|
||||
double xmin, xmax;
|
||||
double step;
|
||||
size_t i, j;
|
||||
|
||||
qdist_init(to);
|
||||
|
||||
if (from->n == 0) {
|
||||
return;
|
||||
}
|
||||
if (n == 0 || from->n == 1) {
|
||||
n = from->n;
|
||||
}
|
||||
|
||||
/* set equally-sized bins between @from's left and right */
|
||||
xmin = qdist_xmin(from);
|
||||
xmax = qdist_xmax(from);
|
||||
step = (xmax - xmin) / n;
|
||||
|
||||
if (n == from->n) {
|
||||
/* if @from's entries are equally spaced, no need to re-bin */
|
||||
for (i = 0; i < from->n; i++) {
|
||||
if (from->entries[i].x != xmin + i * step) {
|
||||
goto rebin;
|
||||
}
|
||||
}
|
||||
/* they're equally spaced, so copy the dist and bail out */
|
||||
to->entries = g_renew(struct qdist_entry, to->entries, n);
|
||||
to->n = from->n;
|
||||
memcpy(to->entries, from->entries, sizeof(*to->entries) * to->n);
|
||||
return;
|
||||
}
|
||||
|
||||
rebin:
|
||||
j = 0;
|
||||
for (i = 0; i < n; i++) {
|
||||
double x;
|
||||
double left, right;
|
||||
|
||||
left = xmin + i * step;
|
||||
right = xmin + (i + 1) * step;
|
||||
|
||||
/* Add x, even if it might not get any counts later */
|
||||
x = left;
|
||||
qdist_add(to, x, 0);
|
||||
|
||||
/*
|
||||
* To avoid double-counting we capture [left, right) ranges, except for
|
||||
* the righmost bin, which captures a [left, right] range.
|
||||
*/
|
||||
while (j < from->n && (from->entries[j].x < right || i == n - 1)) {
|
||||
struct qdist_entry *o = &from->entries[j];
|
||||
|
||||
qdist_add(to, x, o->count);
|
||||
j++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static inline double qdist_x(const struct qdist *dist, int index)
|
||||
{
|
||||
if (dist->n == 0) {
|
||||
return NAN;
|
||||
}
|
||||
return dist->entries[index].x;
|
||||
}
|
||||
|
||||
double qdist_xmin(const struct qdist *dist)
|
||||
{
|
||||
return qdist_x(dist, 0);
|
||||
}
|
||||
|
||||
double qdist_xmax(const struct qdist *dist)
|
||||
{
|
||||
return qdist_x(dist, dist->n - 1);
|
||||
}
|
||||
|
||||
size_t qdist_unique_entries(const struct qdist *dist)
|
||||
{
|
||||
return dist->n;
|
||||
}
|
||||
|
||||
unsigned long qdist_sample_count(const struct qdist *dist)
|
||||
{
|
||||
unsigned long count = 0;
|
||||
size_t i;
|
||||
|
||||
for (i = 0; i < dist->n; i++) {
|
||||
struct qdist_entry *e = &dist->entries[i];
|
||||
|
||||
count += e->count;
|
||||
}
|
||||
return count;
|
||||
}
|
||||
|
||||
static double qdist_pairwise_avg(const struct qdist *dist, size_t index,
|
||||
size_t n, unsigned long count)
|
||||
{
|
||||
/* amortize the recursion by using a base case > 2 */
|
||||
if (n <= 8) {
|
||||
size_t i;
|
||||
double ret = 0;
|
||||
|
||||
for (i = 0; i < n; i++) {
|
||||
struct qdist_entry *e = &dist->entries[index + i];
|
||||
|
||||
ret += e->x * e->count / count;
|
||||
}
|
||||
return ret;
|
||||
} else {
|
||||
size_t n2 = n / 2;
|
||||
|
||||
return qdist_pairwise_avg(dist, index, n2, count) +
|
||||
qdist_pairwise_avg(dist, index + n2, n - n2, count);
|
||||
}
|
||||
}
|
||||
|
||||
double qdist_avg(const struct qdist *dist)
|
||||
{
|
||||
unsigned long count;
|
||||
|
||||
count = qdist_sample_count(dist);
|
||||
if (!count) {
|
||||
return NAN;
|
||||
}
|
||||
return qdist_pairwise_avg(dist, 0, dist->n, count);
|
||||
}
|
@ -1,80 +0,0 @@
|
||||
/*
|
||||
* Error reporting
|
||||
*
|
||||
* Copyright (C) 2010 Red Hat Inc.
|
||||
*
|
||||
* Authors:
|
||||
* Markus Armbruster <armbru@redhat.com>,
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
||||
* See the COPYING file in the top-level directory.
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdarg.h>
|
||||
#include <string.h>
|
||||
|
||||
static const char *progname;
|
||||
|
||||
/*
|
||||
* Set the program name for error_print_loc().
|
||||
*/
|
||||
void error_set_progname(const char *argv0)
|
||||
{
|
||||
const char *p = strrchr(argv0, '/');
|
||||
progname = p ? p + 1 : argv0;
|
||||
}
|
||||
|
||||
const char *error_get_progname(void)
|
||||
{
|
||||
return progname;
|
||||
}
|
||||
|
||||
/*
|
||||
* Print current location to current monitor if we have one, else to stderr.
|
||||
*/
|
||||
static void error_print_loc(void)
|
||||
{
|
||||
}
|
||||
|
||||
/*
|
||||
* Print an error message to current monitor if we have one, else to stderr.
|
||||
* Format arguments like vsprintf(). The result should not contain
|
||||
* newlines.
|
||||
* Prepend the current location and append a newline.
|
||||
* It's wrong to call this in a QMP monitor. Use qerror_report() there.
|
||||
*/
|
||||
#ifdef _WIN32
|
||||
void error_vreport(const char *fmt, va_list ap)
|
||||
{
|
||||
error_print_loc();
|
||||
vfprintf(stderr, fmt, ap);
|
||||
fprintf(stderr, "\n");
|
||||
}
|
||||
#else
|
||||
void error_vreport(const char *fmt, va_list ap)
|
||||
{
|
||||
GTimeVal tv;
|
||||
gchar *timestr;
|
||||
|
||||
error_print_loc();
|
||||
error_vprintf(fmt, ap);
|
||||
error_printf("\n");
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Print an error message to current monitor if we have one, else to stderr.
|
||||
* Format arguments like sprintf(). The result should not contain
|
||||
* newlines.
|
||||
* Prepend the current location and append a newline.
|
||||
* It's wrong to call this in a QMP monitor. Use qerror_report() there.
|
||||
*/
|
||||
void error_report(const char *fmt, ...)
|
||||
{
|
||||
va_list ap;
|
||||
|
||||
va_start(ap, fmt);
|
||||
error_vreport(fmt, ap);
|
||||
va_end(ap);
|
||||
}
|
@ -12,18 +12,9 @@
|
||||
*/
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
#include <errno.h>
|
||||
#include <time.h>
|
||||
#include <signal.h>
|
||||
#include "unicorn/platform.h"
|
||||
#include <string.h>
|
||||
#include <limits.h>
|
||||
#ifdef __linux__
|
||||
#include <sys/syscall.h>
|
||||
#include <linux/futex.h>
|
||||
#endif
|
||||
#include "qemu/thread.h"
|
||||
#include "qemu/atomic.h"
|
||||
|
||||
static void error_exit(int err, const char *msg)
|
||||
{
|
||||
@ -35,7 +26,9 @@ int qemu_thread_create(struct uc_struct *uc, QemuThread *thread, const char *nam
|
||||
void *(*start_routine)(void*),
|
||||
void *arg, int mode)
|
||||
{
|
||||
#ifndef __MINGW32__
|
||||
sigset_t set, oldset;
|
||||
#endif
|
||||
int err;
|
||||
pthread_attr_t attr;
|
||||
|
||||
@ -52,8 +45,9 @@ int qemu_thread_create(struct uc_struct *uc, QemuThread *thread, const char *nam
|
||||
}
|
||||
}
|
||||
|
||||
/* Leave signal handling to the iothread. */
|
||||
#ifndef __MINGW32__
|
||||
sigfillset(&set);
|
||||
#endif
|
||||
pthread_sigmask(SIG_SETMASK, &set, &oldset);
|
||||
err = pthread_create(&thread->thread, &attr, start_routine, arg);
|
||||
if (err) {
|
||||
|
@ -21,23 +21,38 @@
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/timer.h"
|
||||
|
||||
/***********************************************************/
|
||||
/* real time host monotonic timer */
|
||||
|
||||
#ifdef _WIN32
|
||||
|
||||
int64_t clock_freq;
|
||||
|
||||
INITIALIZER(init_get_clock)
|
||||
void init_get_clock(void)
|
||||
{
|
||||
LARGE_INTEGER freq;
|
||||
int ret;
|
||||
ret = QueryPerformanceFrequency(&freq);
|
||||
int ret = QueryPerformanceFrequency(&freq);
|
||||
if (ret == 0) {
|
||||
fprintf(stderr, "Could not calibrate ticks\n");
|
||||
exit(1);
|
||||
}
|
||||
clock_freq = freq.QuadPart;
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
int use_rt_clock;
|
||||
|
||||
void init_get_clock(void)
|
||||
{
|
||||
struct timespec ts;
|
||||
|
||||
use_rt_clock = 0;
|
||||
if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
|
||||
use_rt_clock = 1;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
34
qemu/util/qemu-timer.c
Normal file
34
qemu/util/qemu-timer.c
Normal file
@ -0,0 +1,34 @@
|
||||
/*
|
||||
* QEMU System Emulator
|
||||
*
|
||||
* Copyright (c) 2003-2008 Fabrice Bellard
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/timer.h"
|
||||
#include "uc_priv.h"
|
||||
#include "sysemu/cpus.h"
|
||||
#include "qemu/queue.h"
|
||||
|
||||
/*
 * Return the current time of clock @type, in nanoseconds.
 * NOTE(review): @type is ignored in this port — every clock type resolves
 * to the host monotonic clock via get_clock(); confirm no caller relies
 * on virtual/rt clock distinctions.
 */
int64_t qemu_clock_get_ns(QEMUClockType type)
{
    return get_clock();
}
|
761
qemu/util/qht.c
Normal file
761
qemu/util/qht.c
Normal file
@ -0,0 +1,761 @@
|
||||
/*
|
||||
* qht.c - QEMU Hash Table, designed to scale for read-mostly workloads.
|
||||
*
|
||||
* Copyright (C) 2016, Emilio G. Cota <cota@braap.org>
|
||||
*
|
||||
* License: GNU GPL, version 2 or later.
|
||||
* See the COPYING file in the top-level directory.
|
||||
*
|
||||
* Assumptions:
|
||||
* - NULL cannot be inserted/removed as a pointer value.
|
||||
* - Trying to insert an already-existing hash-pointer pair is OK. However,
|
||||
* it is not OK to insert into the same hash table different hash-pointer
|
||||
* pairs that have the same pointer value, but not the hashes.
|
||||
* - Lookups are performed under an RCU read-critical section; removals
|
||||
* must wait for a grace period to elapse before freeing removed objects.
|
||||
*
|
||||
* Features:
|
||||
* - Reads (i.e. lookups and iterators) can be concurrent with other reads.
|
||||
* Lookups that are concurrent with writes to the same bucket will retry
|
||||
* via a seqlock; iterators acquire all bucket locks and therefore can be
|
||||
* concurrent with lookups and are serialized wrt writers.
|
||||
* - Writes (i.e. insertions/removals) can be concurrent with writes to
|
||||
* different buckets; writes to the same bucket are serialized through a lock.
|
||||
* - Optional auto-resizing: the hash table resizes up if the load surpasses
|
||||
* a certain threshold. Resizing is done concurrently with readers; writes
|
||||
* are serialized with the resize operation.
|
||||
*
|
||||
* The key structure is the bucket, which is cacheline-sized. Buckets
|
||||
* contain a few hash values and pointers; the u32 hash values are stored in
|
||||
* full so that resizing is fast. Having this structure instead of directly
|
||||
* chaining items has two advantages:
|
||||
* - Failed lookups fail fast, and touch a minimum number of cache lines.
|
||||
* - Resizing the hash table with concurrent lookups is easy.
|
||||
*
|
||||
* There are two types of buckets:
|
||||
* 1. "head" buckets are the ones allocated in the array of buckets in qht_map.
|
||||
* 2. all "non-head" buckets (i.e. all others) are members of a chain that
|
||||
* starts from a head bucket.
|
||||
* Note that the seqlock and spinlock of a head bucket applies to all buckets
|
||||
* chained to it; these two fields are unused in non-head buckets.
|
||||
*
|
||||
* On removals, we move the last valid item in the chain to the position of the
|
||||
* just-removed entry. This makes lookups slightly faster, since the moment an
|
||||
* invalid entry is found, the (failed) lookup is over.
|
||||
*
|
||||
* Resizing is done by taking all bucket spinlocks (so that no other writers can
|
||||
* race with us) and then copying all entries into a new hash map. Then, the
|
||||
* ht->map pointer is set, and the old map is freed once no RCU readers can see
|
||||
* it anymore.
|
||||
*
|
||||
* Writers check for concurrent resizes by comparing ht->map before and after
|
||||
 * acquiring their bucket lock. If they don't match, a resize has occurred
|
||||
* while the bucket spinlock was being acquired.
|
||||
*
|
||||
* Related Work:
|
||||
* - Idea of cacheline-sized buckets with full hashes taken from:
|
||||
* David, Guerraoui & Trigonakis, "Asynchronized Concurrency:
|
||||
* The Secret to Scaling Concurrent Search Data Structures", ASPLOS'15.
|
||||
* - Why not RCU-based hash tables? They would allow us to get rid of the
|
||||
* seqlock, but resizing would take forever since RCU read critical
|
||||
* sections in QEMU take quite a long time.
|
||||
* More info on relativistic hash tables:
|
||||
* + Triplett, McKenney & Walpole, "Resizable, Scalable, Concurrent Hash
|
||||
* Tables via Relativistic Programming", USENIX ATC'11.
|
||||
* + Corbet, "Relativistic hash tables, part 1: Algorithms", @ lwn.net, 2014.
|
||||
* https://lwn.net/Articles/612021/
|
||||
*/
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/qht.h"
|
||||
#include "qemu/atomic.h"
|
||||
//#include "qemu/rcu.h"
|
||||
|
||||
//#define QHT_DEBUG
|
||||
|
||||
/*
|
||||
* We want to avoid false sharing of cache lines. Most systems have 64-byte
|
||||
* cache lines so we go with it for simplicity.
|
||||
*
|
||||
* Note that systems with smaller cache lines will be fine (the struct is
|
||||
* almost 64-bytes); systems with larger cache lines might suffer from
|
||||
* some false sharing.
|
||||
*/
|
||||
#define QHT_BUCKET_ALIGN 64
|
||||
|
||||
/* define these to keep sizeof(qht_bucket) within QHT_BUCKET_ALIGN */
|
||||
#if HOST_LONG_BITS == 32
|
||||
#define QHT_BUCKET_ENTRIES 6
|
||||
#else /* 64-bit */
|
||||
#define QHT_BUCKET_ENTRIES 4
|
||||
#endif
|
||||
|
||||
/* Dispatch kind for the generic bucket iterator (see qht_bucket_iter()). */
enum qht_iter_type {
    QHT_ITER_VOID, /* do nothing; use retvoid */
    QHT_ITER_RM, /* remove element if retbool returns true */
};

/* An iteration callback plus the tag telling which union member is valid. */
struct qht_iter {
    union {
        qht_iter_func_t retvoid; /* valid when type == QHT_ITER_VOID */
        qht_iter_bool_func_t retbool; /* valid when type == QHT_ITER_RM */
    } f;
    enum qht_iter_type type;
};
|
||||
|
||||
/*
|
||||
* Note: reading partially-updated pointers in @pointers could lead to
|
||||
* segfaults. We thus access them with atomic_read/set; this guarantees
|
||||
* that the compiler makes all those accesses atomic. We also need the
|
||||
* volatile-like behavior in atomic_read, since otherwise the compiler
|
||||
* might refetch the pointer.
|
||||
* atomic_read's are of course not necessary when the bucket lock is held.
|
||||
*
|
||||
* If both ht->lock and b->lock are grabbed, ht->lock should always
|
||||
* be grabbed first.
|
||||
*/
|
||||
/*
 * A cacheline-sized bucket: parallel arrays of full hashes and pointers,
 * plus a link to an optional overflow bucket. A NULL pointer entry marks
 * an empty slot.
 */
struct qht_bucket {
    uint32_t hashes[QHT_BUCKET_ENTRIES]; /* full hashes, kept for fast resize */
    void *pointers[QHT_BUCKET_ENTRIES];  /* NULL == empty slot */
    struct qht_bucket *next;             /* overflow chain, or NULL */
} QEMU_ALIGNED(QHT_BUCKET_ALIGN);

/* QHT_BUCKET_ENTRIES is chosen so a bucket fits in one cache line. */
QEMU_BUILD_BUG_ON(sizeof(struct qht_bucket) > QHT_BUCKET_ALIGN);
|
||||
|
||||
/**
|
||||
* struct qht_map - structure to track an array of buckets
|
||||
* @rcu: used by RCU. Keep it as the top field in the struct to help valgrind
|
||||
* find the whole struct.
|
||||
* @buckets: array of head buckets. It is constant once the map is created.
|
||||
* @n_buckets: number of head buckets. It is constant once the map is created.
|
||||
* @n_added_buckets: number of added (i.e. "non-head") buckets
|
||||
* @n_added_buckets_threshold: threshold to trigger an upward resize once the
|
||||
* number of added buckets surpasses it.
|
||||
*
|
||||
* Buckets are tracked in what we call a "map", i.e. this structure.
|
||||
*/
|
||||
struct qht_map {
    struct qht_bucket *buckets;       /* array of head buckets */
    size_t n_buckets;                 /* always a power of two */
    size_t n_added_buckets;           /* overflow ("non-head") buckets in use */
    size_t n_added_buckets_threshold; /* grow once n_added_buckets exceeds this */
};
|
||||
|
||||
/* trigger a resize when n_added_buckets > n_buckets / div */
|
||||
#define QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV 8
|
||||
|
||||
static void qht_do_resize_reset(struct uc_struct *uc, struct qht *ht, struct qht_map *new,
|
||||
bool reset);
|
||||
static void qht_grow_maybe(struct uc_struct *uc, struct qht *ht);
|
||||
|
||||
#define qht_debug_assert(X) do { (void)(X); } while (0)
|
||||
|
||||
/*
 * Convert an expected element count into a head-bucket count (power of two).
 * NOTE(review): n_elems < QHT_BUCKET_ENTRIES yields pow2ceil(0) — confirm
 * pow2ceil() returns a sane nonzero value for 0 before passing tiny sizes.
 */
static inline size_t qht_elems_to_buckets(size_t n_elems)
{
    return pow2ceil(n_elems / QHT_BUCKET_ENTRIES);
}
|
||||
|
||||
static inline void qht_head_init(struct qht_bucket *b)
|
||||
{
|
||||
memset(b, 0, sizeof(*b));
|
||||
}
|
||||
|
||||
/* Map @hash to its head bucket; relies on n_buckets being a power of two. */
static inline
struct qht_bucket *qht_map_to_bucket(const struct qht_map *map, uint32_t hash)
{
    return &map->buckets[hash & (map->n_buckets - 1)];
}
|
||||
|
||||
/*
|
||||
* Grab all bucket locks, and set @pmap after making sure the map isn't stale.
|
||||
*
|
||||
* Pairs with qht_map_unlock_buckets(), hence the pass-by-reference.
|
||||
*
|
||||
* Note: callers cannot have ht->lock held.
|
||||
*/
|
||||
static inline
|
||||
void qht_map_lock_buckets__no_stale(struct qht *ht, struct qht_map **pmap)
|
||||
{
|
||||
struct qht_map *map;
|
||||
map = ht->map;
|
||||
*pmap = map;
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Get a head bucket and lock it, making sure its parent map is not stale.
|
||||
* @pmap is filled with a pointer to the bucket's parent map.
|
||||
*/
|
||||
/*
 * Get a head bucket and lock it, making sure its parent map is not stale.
 * @pmap is filled with a pointer to the bucket's parent map.
 * Locking was removed in this single-threaded port; the name is kept for
 * parity with upstream.
 */
static inline
struct qht_bucket *qht_bucket_lock__no_stale(struct qht *ht, uint32_t hash,
                                             struct qht_map **pmap)
{
    struct qht_bucket *b;
    struct qht_map *map;

    map = ht->map;
    b = qht_map_to_bucket(map, hash);
    *pmap = map;
    return b;
}
|
||||
|
||||
/* True once enough overflow buckets have accumulated to warrant growing. */
static inline bool qht_map_needs_resize(const struct qht_map *map)
{
    return map->n_added_buckets > map->n_added_buckets_threshold;
}
|
||||
|
||||
static inline void qht_chain_destroy(const struct qht_bucket *head)
|
||||
{
|
||||
struct qht_bucket *curr = head->next;
|
||||
struct qht_bucket *prev;
|
||||
|
||||
while (curr) {
|
||||
prev = curr;
|
||||
curr = curr->next;
|
||||
qemu_vfree(prev);
|
||||
}
|
||||
}
|
||||
|
||||
/* pass only an orphan map */
|
||||
/*
 * Free @map, its bucket array and all overflow buckets.
 * Pass only an orphan map (one no longer reachable through any qht).
 */
static void qht_map_destroy(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_chain_destroy(&map->buckets[i]);
    }
    qemu_vfree(map->buckets);
    g_free(map);
}
|
||||
|
||||
/*
 * Allocate a map with @n_buckets head buckets (@n_buckets must be a power
 * of two, see qht_map_to_bucket()). Buckets are cacheline-aligned and
 * zeroed. The resize threshold is n_buckets / QHT_NR_ADDED_BUCKETS_
 * THRESHOLD_DIV, clamped to at least 1 so tiny tables can still grow.
 */
static struct qht_map *qht_map_create(size_t n_buckets)
{
    struct qht_map *map;
    size_t i;

    map = g_malloc(sizeof(*map));
    map->n_buckets = n_buckets;

    map->n_added_buckets = 0;
    map->n_added_buckets_threshold = n_buckets /
        QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV;

    /* let tiny hash tables to at least add one non-head bucket */
    if (unlikely(map->n_added_buckets_threshold == 0)) {
        map->n_added_buckets_threshold = 1;
    }

    map->buckets = qemu_memalign(QHT_BUCKET_ALIGN,
                                 sizeof(*map->buckets) * n_buckets);
    for (i = 0; i < n_buckets; i++) {
        qht_head_init(&map->buckets[i]);
    }
    return map;
}
|
||||
|
||||
/*
 * Initialize @ht, sized for roughly @n_elems entries.
 * @cmp: default comparison callback for lookups/inserts; must be non-NULL.
 * @mode: QHT_MODE_* flags (e.g. QHT_MODE_AUTO_RESIZE).
 */
void qht_init(struct qht *ht, qht_cmp_func_t cmp, size_t n_elems,
              unsigned int mode)
{
    struct qht_map *map;
    size_t n_buckets = qht_elems_to_buckets(n_elems);

    g_assert(cmp);
    ht->cmp = cmp;
    ht->mode = mode;
    map = qht_map_create(n_buckets);
    ht->map = map;
}
|
||||
|
||||
/*
 * Free all memory owned by @ht.
 * Call only when there are no readers/writers left.
 */
void qht_destroy(struct qht *ht)
{
    qht_map_destroy(ht->map);
    /* poison the struct so use-after-destroy fails fast */
    memset(ht, 0, sizeof(*ht));
}
|
||||
|
||||
static void qht_bucket_reset__locked(struct qht_bucket *head)
|
||||
{
|
||||
struct qht_bucket *b = head;
|
||||
int i;
|
||||
|
||||
do {
|
||||
for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
|
||||
if (b->pointers[i] == NULL) {
|
||||
goto done;
|
||||
}
|
||||
b->hashes[i] = 0;
|
||||
b->pointers[i] = NULL;
|
||||
}
|
||||
b = b->next;
|
||||
} while (b);
|
||||
done:
|
||||
return;
|
||||
}
|
||||
|
||||
/*
 * Clear every entry in @map.
 * call with all bucket locks held
 */
static void qht_map_reset__all_locked(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_reset__locked(&map->buckets[i]);
    }
}
|
||||
|
||||
/* Empty the hash table without changing its bucket count. */
void qht_reset(struct qht *ht)
{
    struct qht_map *map;

    qht_map_lock_buckets__no_stale(ht, &map);
    qht_map_reset__all_locked(map);
}
|
||||
|
||||
/* Resize to @new without clearing entries; see qht_do_resize_reset(). */
static inline void qht_do_resize(struct uc_struct *uc, struct qht *ht, struct qht_map *new)
{
    qht_do_resize_reset(uc, ht, new, false);
}
|
||||
|
||||
/* Clear all entries, then resize to @new; see qht_do_resize_reset(). */
static inline void qht_do_resize_and_reset(struct uc_struct *uc, struct qht *ht, struct qht_map *new)
{
    qht_do_resize_reset(uc, ht, new, true);
}
|
||||
|
||||
/*
 * Reset the table and, if @n_elems maps to a different bucket count,
 * resize it as well. Returns true iff a resize took place.
 */
bool qht_reset_size(struct uc_struct *uc, struct qht *ht, size_t n_elems)
{
    struct qht_map *new = NULL;
    struct qht_map *map;
    size_t n_buckets;

    n_buckets = qht_elems_to_buckets(n_elems);

    map = ht->map;
    if (n_buckets != map->n_buckets) {
        new = qht_map_create(n_buckets);
    }
    /* new == NULL makes qht_do_resize_reset() reset without resizing */
    qht_do_resize_and_reset(uc, ht, new);

    return !!new;
}
|
||||
|
||||
/*
 * Scan the whole chain starting at @head for an entry whose hash equals
 * @hash and for which @func(uc, entry, userp) returns true.
 * Returns the matching pointer, or NULL if there is no match.
 */
static inline
void *qht_do_lookup(struct uc_struct *uc, const struct qht_bucket *head, qht_lookup_func_t func,
                    const void *userp, uint32_t hash)
{
    const struct qht_bucket *b = head;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->hashes[i] == hash) {
                void *p = b->pointers[i];

                /* a NULL slot is skipped, not treated as end-of-chain */
                if (likely(p) && likely(func(uc, p, userp))) {
                    return p;
                }
            }
        }
        b = b->next;
    } while (b);

    return NULL;
}
|
||||
|
||||
/*
 * Look up @hash using a caller-supplied comparison @func instead of
 * ht->cmp. @userp is passed through to @func. Returns the matching
 * pointer, or NULL if not found.
 */
void *qht_lookup_custom(struct uc_struct *uc, const struct qht *ht, const void *userp, uint32_t hash,
                        qht_lookup_func_t func)
{
    const struct qht_bucket *b;
    const struct qht_map *map;
    void *ret;

    map = ht->map;
    b = qht_map_to_bucket(map, hash);

    ret = qht_do_lookup(uc, b, func, userp, hash);
    return ret;
}
|
||||
|
||||
/* Look up @hash with the table's default comparison function. */
void *qht_lookup(struct uc_struct *uc, const struct qht *ht, const void *userp, uint32_t hash)
{
    return qht_lookup_custom(uc, ht, userp, hash, ht->cmp);
}
|
||||
|
||||
/*
 * call with head->lock held
 * @ht is const since it is only used for ht->cmp()
 *
 * Insert @p with @hash into the chain at @head.
 * Returns the already-present pointer if an equal entry exists (nothing is
 * inserted), or NULL on successful insertion. When the chain is full a new
 * overflow bucket is allocated; map->n_added_buckets is bumped and
 * *needs_resize is set once the grow threshold is crossed.
 */
static void *qht_insert__locked(struct uc_struct *uc, const struct qht *ht, struct qht_map *map,
                                struct qht_bucket *head, void *p, uint32_t hash,
                                bool *needs_resize)
{
    struct qht_bucket *b = head;
    struct qht_bucket *prev = NULL;
    struct qht_bucket *new = NULL;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i]) {
                /* duplicate? entries are packed, so scan stops at first NULL */
                if (unlikely(b->hashes[i] == hash &&
                             ht->cmp(uc, b->pointers[i], p))) {
                    return b->pointers[i];
                }
            } else {
                goto found;
            }
        }
        prev = b;
        b = b->next;
    } while (b);

    /* chain is full: append a fresh zeroed overflow bucket */
    b = qemu_memalign(QHT_BUCKET_ALIGN, sizeof(*b));
    memset(b, 0, sizeof(*b));
    new = b;
    i = 0;
    map->n_added_buckets++;
    if (unlikely(qht_map_needs_resize(map)) && needs_resize) {
        *needs_resize = true;
    }

 found:
    /* found an empty key: acquire the seqlock and write */
    if (new) {
        /* link the new bucket only now that it is fully initialized */
        prev->next = b;
    }
    b->hashes[i] = hash;
    b->pointers[i] = p;
    return NULL;
}
|
||||
|
||||
/*
 * Double the number of head buckets if the map still needs it.
 * The noinline attribute keeps this cold path out of qht_insert();
 * MSVC has no equivalent spelling, hence the #ifdef.
 */
#ifdef _MSC_VER
static void qht_grow_maybe(struct uc_struct *uc, struct qht *ht)
#else
static __attribute__((noinline)) void qht_grow_maybe(struct uc_struct *uc, struct qht *ht)
#endif
{
    struct qht_map *map;

    map = ht->map;
    /* another thread might have just performed the resize we were after */
    if (qht_map_needs_resize(map)) {
        struct qht_map *new = qht_map_create(map->n_buckets * 2);

        qht_do_resize(uc, ht, new);
    }
}
|
||||
|
||||
/*
 * Insert @p with @hash into @ht.
 * Returns true on success. Returns false if an equal entry already
 * exists; in that case, if @existing is non-NULL it is set to the
 * pre-existing pointer. NULL values of @p are not supported.
 * May trigger an automatic grow when QHT_MODE_AUTO_RESIZE is set.
 */
bool qht_insert(struct uc_struct *uc, struct qht *ht, void *p, uint32_t hash, void **existing)
{
    struct qht_bucket *b;
    struct qht_map *map;
    bool needs_resize = false;
    void *prev;

    /* NULL pointers are not supported */
    qht_debug_assert(p);

    b = qht_bucket_lock__no_stale(ht, hash, &map);
    prev = qht_insert__locked(uc, ht, map, b, p, hash, &needs_resize);

    if (unlikely(needs_resize) && ht->mode & QHT_MODE_AUTO_RESIZE) {
        qht_grow_maybe(uc, ht);
    }
    if (likely(prev == NULL)) {
        return true;
    }
    if (existing) {
        *existing = prev;
    }
    return false;
}
|
||||
|
||||
static inline bool qht_entry_is_last(const struct qht_bucket *b, int pos)
|
||||
{
|
||||
if (pos == QHT_BUCKET_ENTRIES - 1) {
|
||||
if (b->next == NULL) {
|
||||
return true;
|
||||
}
|
||||
return b->next->pointers[0] == NULL;
|
||||
}
|
||||
return b->pointers[pos + 1] == NULL;
|
||||
}
|
||||
|
||||
/*
 * Move entry @from[j] into slot @to[i], clearing @from[j].
 * Both slots must currently hold valid entries, and they must differ.
 */
static void
qht_entry_move(struct qht_bucket *to, int i, struct qht_bucket *from, int j)
{
    qht_debug_assert(!(to == from && i == j));
    qht_debug_assert(to->pointers[i]);
    qht_debug_assert(from->pointers[j]);

    to->hashes[i] = from->hashes[j];
    to->pointers[i] = from->pointers[j];
    from->hashes[j] = 0;
    from->pointers[j] = NULL;
}
|
||||
|
||||
/*
 * Find the last valid entry in @orig, and swap it with @orig[pos], which has
 * just been invalidated. This keeps the chain's entries packed, so lookups
 * can stop at the first empty slot.
 */
static inline void qht_bucket_remove_entry(struct qht_bucket *orig, int pos)
{
    struct qht_bucket *b = orig;
    struct qht_bucket *prev = NULL;
    int i;

    /* fast path: removing the chain's final entry needs no move */
    if (qht_entry_is_last(orig, pos)) {
        orig->hashes[pos] = 0;
        orig->pointers[pos] = NULL;
        return;
    }
    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i]) {
                continue;
            }
            /* first empty slot found; last valid entry precedes it */
            if (i > 0) {
                qht_entry_move(orig, pos, b, i - 1);
                return;
            }
            /* empty slot is the head of @b: last entry ends the previous bucket */
            qht_debug_assert(prev);
            qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1);
            return;
        }
        prev = b;
        b = b->next;
    } while (b);
    /* no free entries other than orig[pos], so swap it with the last one */
    qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1);
}
|
||||
|
||||
/*
 * Remove the entry equal (by pointer identity) to @p with @hash from the
 * chain at @head. Returns true if the entry was found and removed.
 * call with b->lock held
 */
static inline
bool qht_remove__locked(struct qht_bucket *head, const void *p, uint32_t hash)
{
    struct qht_bucket *b = head;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            void *q = b->pointers[i];

            /* entries are packed: first NULL slot ends the chain */
            if (unlikely(q == NULL)) {
                return false;
            }
            if (q == p) {
                qht_debug_assert(b->hashes[i] == hash);
                qht_bucket_remove_entry(b, i);
                return true;
            }
        }
        b = b->next;
    } while (b);
    return false;
}
|
||||
|
||||
/*
 * Remove pointer @p with @hash from @ht.
 * Returns true if an entry was removed, false if @p was not present.
 */
bool qht_remove(struct qht *ht, const void *p, uint32_t hash)
{
    struct qht_bucket *b;
    struct qht_map *map;
    bool ret;

    /* NULL pointers are not supported */
    qht_debug_assert(p);

    b = qht_bucket_lock__no_stale(ht, hash, &map);
    ret = qht_remove__locked(b, p, hash);
    return ret;
}
|
||||
|
||||
/*
 * Apply @iter to every entry in the chain at @head; stops at the first
 * empty slot (entries are packed). For QHT_ITER_RM, entries for which the
 * callback returns true are removed in place, and the freshly back-filled
 * slot is revisited.
 * NOTE(review): the retbool callback is invoked without @uc while retvoid
 * receives it — confirm this matches the qht_iter_bool_func_t typedef.
 */
static inline void qht_bucket_iter(struct uc_struct *uc, struct qht_bucket *head,
                                   const struct qht_iter *iter, void *userp)
{
    struct qht_bucket *b = head;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                return;
            }
            switch (iter->type) {
            case QHT_ITER_VOID:
                iter->f.retvoid(uc, b->pointers[i], b->hashes[i], userp);
                break;
            case QHT_ITER_RM:
                if (iter->f.retbool(b->pointers[i], b->hashes[i], userp)) {
                    /* replace i with the last valid element in the bucket */
                    qht_bucket_remove_entry(b, i);
                    /* reevaluate i, since it just got replaced */
                    i--;
                    continue;
                }
                break;
            default:
                g_assert_not_reached();
            }
        }
        b = b->next;
    } while (b);
}
|
||||
|
||||
/*
 * Apply @iter to every entry in @map.
 * call with all of the map's locks held
 */
static inline void qht_map_iter__all_locked(struct uc_struct *uc, struct qht_map *map,
                                            const struct qht_iter *iter,
                                            void *userp)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_iter(uc, &map->buckets[i], iter, userp);
    }
}
|
||||
|
||||
/* Common driver for qht_iter() and qht_iter_remove(). */
static inline void
do_qht_iter(struct uc_struct *uc, struct qht *ht, const struct qht_iter *iter, void *userp)
{
    struct qht_map *map;

    map = ht->map;
    qht_map_iter__all_locked(uc, map, iter, userp);
}
|
||||
|
||||
/* Call @func(uc, entry, hash, userp) for every entry in @ht. */
void qht_iter(struct uc_struct *uc, struct qht *ht, qht_iter_func_t func, void *userp)
{
    const struct qht_iter iter = {
        .f.retvoid = func,
        .type = QHT_ITER_VOID,
    };

    do_qht_iter(uc, ht, &iter, userp);
}
|
||||
|
||||
/*
 * Visit every entry in @ht, removing those for which @func returns true.
 * The caller is responsible for freeing removed objects.
 */
void qht_iter_remove(struct uc_struct *uc, struct qht *ht, qht_iter_bool_func_t func, void *userp)
{
    const struct qht_iter iter = {
        .f.retbool = func,
        .type = QHT_ITER_RM,
    };

    do_qht_iter(uc, ht, &iter, userp);
}
|
||||
|
||||
/* Userp payload for qht_map_copy(): the table plus its destination map. */
struct qht_map_copy_data {
    struct qht *ht;
    struct qht_map *new;
};
|
||||
|
||||
/*
 * Iterator callback used during resize: re-insert @p/@hash into the
 * destination map carried in @userp (a struct qht_map_copy_data).
 */
static void qht_map_copy(struct uc_struct *uc, void *p, uint32_t hash, void *userp)
{
    struct qht_map_copy_data *data = userp;
    struct qht *ht = data->ht;
    struct qht_map *new = data->new;
    struct qht_bucket *b = qht_map_to_bucket(new, hash);

    /* no need to acquire b->lock because no thread has seen this map yet */
    qht_insert__locked(uc, ht, new, b, p, hash, NULL);
}
|
||||
|
||||
/*
 * Atomically perform a resize and/or reset.
 * Call with ht->lock held.
 * If @reset, all entries of the current map are cleared first.
 * If @new is NULL, only the (optional) reset is performed; otherwise all
 * surviving entries are copied into @new, ht->map is switched over, and
 * the old map is destroyed.
 */
static void qht_do_resize_reset(struct uc_struct *uc, struct qht *ht, struct qht_map *new, bool reset)
{
    struct qht_map *old;
    const struct qht_iter iter = {
        .f.retvoid = qht_map_copy,
        .type = QHT_ITER_VOID,
    };
    struct qht_map_copy_data data;

    old = ht->map;

    if (reset) {
        qht_map_reset__all_locked(old);
    }

    if (new == NULL) {
        return;
    }

    /* resizing to the same bucket count would be a no-op copy: forbid it */
    g_assert(new->n_buckets != old->n_buckets);
    data.ht = ht;
    data.new = new;
    qht_map_iter__all_locked(uc, old, &iter, &data);

    ht->map = new;
    qht_map_destroy(old);
}
|
||||
|
||||
/*
 * Resize @ht so it can comfortably hold @n_elems entries.
 * Returns true iff the bucket count actually changed (a resize happened).
 *
 * Fix: the result flag was declared `size_t ret = false;` — a boolean
 * stored in a size type; declare it `bool` to match the return type.
 */
bool qht_resize(struct uc_struct *uc, struct qht *ht, size_t n_elems)
{
    size_t n_buckets = qht_elems_to_buckets(n_elems);
    bool ret = false;

    if (n_buckets != ht->map->n_buckets) {
        struct qht_map *new;

        new = qht_map_create(n_buckets);
        qht_do_resize(uc, ht, new);
        ret = true;
    }

    return ret;
}
|
||||
|
||||
/* pass @stats to qht_statistics_destroy() when done */
|
||||
void qht_statistics_init(const struct qht *ht, struct qht_stats *stats)
|
||||
{
|
||||
const struct qht_map *map;
|
||||
int i;
|
||||
|
||||
map = ht->map;
|
||||
|
||||
stats->used_head_buckets = 0;
|
||||
stats->entries = 0;
|
||||
qdist_init(&stats->chain);
|
||||
qdist_init(&stats->occupancy);
|
||||
/* bail out if the qht has not yet been initialized */
|
||||
if (unlikely(map == NULL)) {
|
||||
stats->head_buckets = 0;
|
||||
return;
|
||||
}
|
||||
stats->head_buckets = map->n_buckets;
|
||||
|
||||
for (i = 0; i < map->n_buckets; i++) {
|
||||
const struct qht_bucket *head = &map->buckets[i];
|
||||
const struct qht_bucket *b;
|
||||
size_t buckets;
|
||||
size_t entries;
|
||||
int j;
|
||||
|
||||
buckets = 0;
|
||||
entries = 0;
|
||||
b = head;
|
||||
do {
|
||||
for (j = 0; j < QHT_BUCKET_ENTRIES; j++) {
|
||||
if (b->pointers[j] == NULL) {
|
||||
break;
|
||||
}
|
||||
entries++;
|
||||
}
|
||||
buckets++;
|
||||
b = b->next;
|
||||
} while (b);
|
||||
|
||||
if (entries) {
|
||||
qdist_inc(&stats->chain, buckets);
|
||||
qdist_inc(&stats->occupancy,
|
||||
(double)entries / QHT_BUCKET_ENTRIES / buckets);
|
||||
stats->used_head_buckets++;
|
||||
stats->entries += entries;
|
||||
} else {
|
||||
qdist_inc(&stats->occupancy, 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Release the qdist allocations made by qht_statistics_init(). */
void qht_statistics_destroy(struct qht_stats *stats)
{
    qdist_destroy(&stats->occupancy);
    qdist_destroy(&stats->chain);
}
|
78
qemu/util/range.c
Normal file
78
qemu/util/range.c
Normal file
@ -0,0 +1,78 @@
|
||||
/*
|
||||
* QEMU 64-bit address ranges
|
||||
*
|
||||
* Copyright (c) 2015-2016 Red Hat, Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/range.h"
|
||||
|
||||
/*
 * Return -1 if @a < @b, 1 if @a > @b, and 0 if they touch or overlap.
 * Both @a and @b must not be empty.
 */
static inline int range_compare(Range *a, Range *b)
{
    assert(!range_is_empty(a) && !range_is_empty(b));

    /* Careful, avoid wraparound: compute "lob - 1 > upb" only when lob > 0 */
    if (b->lob && b->lob - 1 > a->upb) {
        return -1;
    }
    if (a->lob && a->lob - 1 > b->upb) {
        return 1;
    }
    return 0;
}
|
||||
|
||||
/*
 * Insert @data into @list of ranges; caller no longer owns @data.
 * @list is kept sorted and coalesced: ranges that touch or overlap @data
 * (and each other, after extension) are merged into one element.
 * Returns the (possibly new) head of the list.
 */
GList *range_list_insert(GList *list, Range *data)
{
    GList *l;

    assert(!range_is_empty(data));

    /* Skip all list elements strictly less than data */
    for (l = list; l && range_compare(l->data, data) < 0; l = l->next) {
    }

    if (!l || range_compare(l->data, data) > 0) {
        /* Rest of the list (if any) is strictly greater than @data */
        return g_list_insert_before(list, l, data);
    }

    /* Current list element overlaps @data, merge the two */
    range_extend(l->data, data);
    g_free(data);

    /* Merge any subsequent list elements that now also overlap */
    while (l->next && range_compare(l->data, l->next->data) == 0) {
#ifndef NDEBUG
        GList *new_l;
#endif

        range_extend(l->data, l->next->data);
        g_free(l->next->data);
        /* deleting a non-head link cannot change the list head */
#ifndef NDEBUG
        new_l = g_list_delete_link(list, l->next);
#else
        g_list_delete_link(list, l->next);
#endif
        assert(new_l == list);
    }

    return list;
}
|
Reference in New Issue
Block a user