From 97b92d8861deccc22428962dd40191248d26860e Mon Sep 17 00:00:00 2001 From: Nguyen Anh Quynh Date: Mon, 6 Dec 2021 04:19:37 +0800 Subject: [PATCH 01/38] initial systemz support --- CMakeLists.txt | 72 ++++++++++++++++++++++++++++++++++++-- README.md | 2 +- include/uc_priv.h | 1 + include/unicorn/unicorn.h | 2 ++ qemu/configure | 2 +- qemu/include/qemu-common.h | 2 ++ symbols.sh | 2 +- uc.c | 25 +++++++++++++ 8 files changed, 103 insertions(+), 5 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index b561cf5c..38e7b535 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -17,7 +17,8 @@ option(UNICORN_BUILD_SHARED "Build shared instead of static library" ON) if (NOT UNICORN_ARCH) # build all architectures - set(UNICORN_ARCH "x86 arm aarch64 riscv mips sparc m68k ppc") + # set(UNICORN_ARCH "x86 arm aarch64 riscv mips sparc m68k ppc s390x") + set(UNICORN_ARCH "s390x") endif() string(TOUPPER ${UNICORN_ARCH} UNICORN_ARCH) @@ -197,6 +198,9 @@ else() if (UNICORN_HAS_RISCV) set (EXTRA_CFLAGS "${EXTRA_CFLAGS}-DUNICORN_HAS_RISCV ") endif() + if (UNICORN_HAS_S390X) + set (EXTRA_CFLAGS "${EXTRA_CFLAGS}-DUNICORN_HAS_S390X ") + endif() set (EXTRA_CFLAGS "${EXTRA_CFLAGS}-fPIC") if(ANDROID_ABI) @@ -232,6 +236,9 @@ else() if (UNICORN_HAS_RISCV) set (TARGET_LIST "${TARGET_LIST}riscv32-softmmu, riscv64-softmmu, ") endif() + if (UNICORN_HAS_S390X) + set (TARGET_LIST "${TARGET_LIST}s390x-softmmu, ") + endif() set (TARGET_LIST "${TARGET_LIST} ") # GEN config-host.mak & target directories @@ -325,6 +332,12 @@ else() OUTPUT_FILE ${CMAKE_BINARY_DIR}/riscv64-softmmu/config-target.h ) endif() + if (UNICORN_HAS_S390X) + execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/scripts/create_config + INPUT_FILE ${CMAKE_BINARY_DIR}/s390x-softmmu/config-target.mak + OUTPUT_FILE ${CMAKE_BINARY_DIR}/s390x-softmmu/config-target.h + ) + endif() add_compile_options( ${UNICORN_CFLAGS} -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/tcg/${UNICORN_TARGET_ARCH} @@ -950,6 +963,54 @@ else() endif() endif() +if (UNICORN_HAS_S390X) +add_library(s390x-softmmu + ${UNICORN_ARCH_COMMON} + + qemu/hw/s390x/s390-skeys.c + + qemu/target/s390x/cc_helper.c + qemu/target/s390x/cpu.c + qemu/target/s390x/cpu_features.c + qemu/target/s390x/cpu_models.c + qemu/target/s390x/crypto_helper.c + qemu/target/s390x/excp_helper.c + qemu/target/s390x/fpu_helper.c + qemu/target/s390x/helper.c + qemu/target/s390x/interrupt.c + qemu/target/s390x/int_helper.c + qemu/target/s390x/ioinst.c + qemu/target/s390x/mem_helper.c + qemu/target/s390x/misc_helper.c + qemu/target/s390x/mmu_helper.c + qemu/target/s390x/sigp.c + qemu/target/s390x/tcg-stub.c + qemu/target/s390x/translate.c + # qemu/target/s390x/translate_vx.inc.c + qemu/target/s390x/vec_fpu_helper.c + qemu/target/s390x/vec_helper.c + qemu/target/s390x/vec_int_helper.c + qemu/target/s390x/vec_string_helper.c + qemu/target/s390x/unicorn.c +) + +if(MSVC) + target_compile_options(s390x-softmmu PRIVATE + -DNEED_CPU_H + /FIs390x.h + /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/s390x-softmmu + /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/s390x + ) +else() + target_compile_options(s390x-softmmu PRIVATE + -DNEED_CPU_H + -include s390x.h + -I${CMAKE_BINARY_DIR}/s390x-softmmu + -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/s390x + ) +endif() +endif() + set(UNICORN_SRCS uc.c @@ -1095,6 +1156,13 @@ if (UNICORN_HAS_RISCV) target_link_libraries(riscv64-softmmu unicorn-common) set(UNICORN_TEST_FILE ${UNICORN_TEST_FILE} test_riscv) endif() +if (UNICORN_HAS_S390X) + set(UNICORN_COMPILE_OPTIONS ${UNICORN_COMPILE_OPTIONS} -DUNICORN_HAS_S390X) + 
set(UNICORN_LINK_LIBRARIES ${UNICORN_LINK_LIBRARIES} s390x-softmmu) + set(UNICORN_SAMPLE_FILE ${UNICORN_SAMPLE_FILE} sample_s390x) + target_link_libraries(s390x-softmmu unicorn-common) + set(UNICORN_TEST_FILE ${UNICORN_TEST_FILE} test_s390x) +endif() target_compile_options(unicorn PRIVATE ${UNICORN_COMPILE_OPTIONS} @@ -1146,7 +1214,7 @@ endif() if(UNICORN_FUZZ) - set(UNICORN_FUZZ_SUFFIX "arm_arm;arm_armbe;arm_thumb;arm64_arm;arm64_armbe;m68k_be;mips_32be;mips_32le;sparc_32be;x86_16;x86_32;x86_64") + set(UNICORN_FUZZ_SUFFIX "arm_arm;arm_armbe;arm_thumb;arm64_arm;arm64_armbe;m68k_be;mips_32be;mips_32le;sparc_32be;x86_16;x86_32;x86_64;s390x") set(SAMPLES_LIB ${SAMPLES_LIB} rt) foreach(SUFFIX ${UNICORN_FUZZ_SUFFIX}) add_executable(fuzz_emu_${SUFFIX} diff --git a/README.md b/README.md index 19a71e76..27124a34 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ Unicorn is a lightweight, multi-platform, multi-architecture CPU emulator framew Unicorn offers some unparalleled features: -- Multi-architecture: ARM, ARM64 (ARMv8), M68K, MIPS, PowerPC, RISCV, SPARC, and X86 (16, 32, 64-bit) +- Multi-architecture: ARM, ARM64 (ARMv8), M68K, MIPS, PowerPC, RISCV, SPARC, S390X and X86 (16, 32, 64-bit) - Clean/simple/lightweight/intuitive architecture-neutral API - Implemented in pure C language, with bindings for Crystal, Clojure, Visual Basic, Perl, Rust, Ruby, Python, Java, .NET, Go, Delphi/Free Pascal, Haskell, Pharo, and Lua. - Native support for Windows & *nix (with Mac OSX, Linux, Android, *BSD & Solaris confirmed) diff --git a/include/uc_priv.h b/include/uc_priv.h index 8c564396..4866e6b3 100644 --- a/include/uc_priv.h +++ b/include/uc_priv.h @@ -23,6 +23,7 @@ #define UC_MODE_SPARC_MASK (UC_MODE_SPARC32|UC_MODE_SPARC64|UC_MODE_BIG_ENDIAN) #define UC_MODE_M68K_MASK (UC_MODE_BIG_ENDIAN) #define UC_MODE_RISCV_MASK (UC_MODE_RISCV32|UC_MODE_RISCV64|UC_MODE_LITTLE_ENDIAN) +#define UC_MODE_S390X_MASK (UC_MODE_BIG_ENDIAN) #define ARR_SIZE(a) (sizeof(a)/sizeof(a[0])) diff --git a/include/unicorn/unicorn.h b/include/unicorn/unicorn.h index d7780e56..940f3081 100644 --- a/include/unicorn/unicorn.h +++ b/include/unicorn/unicorn.h @@ -34,6 +34,7 @@ typedef size_t uc_hook; #include "sparc.h" #include "ppc.h" #include "riscv.h" +#include "s390x.h" #ifdef __GNUC__ #define DEFAULT_VISIBILITY __attribute__((visibility("default"))) @@ -98,6 +99,7 @@ typedef enum uc_arch { UC_ARCH_SPARC, // Sparc architecture UC_ARCH_M68K, // M68K architecture UC_ARCH_RISCV, // RISCV architecture + UC_ARCH_S390X, // S390X architecture UC_ARCH_MAX, } uc_arch; diff --git a/qemu/configure b/qemu/configure index 851530c0..b67e75a9 100755 --- a/qemu/configure +++ b/qemu/configure @@ -852,7 +852,7 @@ QEMU_CFLAGS="$CPU_CFLAGS $QEMU_CFLAGS" default_target_list="aarch64eb-softmmu aarch64-softmmu armeb-softmmu \ arm-softmmu m68k-softmmu mips64el-softmmu mips64-softmmu mipsel-softmmu \ mips-softmmu ppc64-softmmu ppc-softmmu sparc64-softmmu sparc-softmmu \ - x86_64-softmmu riscv32-softmmu riscv64-softmmu" + x86_64-softmmu riscv32-softmmu riscv64-softmmu s390x-softmmu" if test x"$show_help" = x"yes" ; then cat << EOF diff --git a/qemu/include/qemu-common.h b/qemu/include/qemu-common.h index 5b6a6cba..1cda0555 100644 --- a/qemu/include/qemu-common.h +++ b/qemu/include/qemu-common.h @@ -78,4 +78,6 @@ void os_setup_early_signal_handling(void); void page_size_init(struct uc_struct *uc); +CPUState *qemu_get_cpu(struct uc_struct *uc, int index); + #endif diff --git a/symbols.sh b/symbols.sh index 19997fb4..f04ca8a3 100755 --- a/symbols.sh +++ 
b/symbols.sh @@ -6268,7 +6268,7 @@ ppc_irq_reset \ ppc64_SYMBOLS=${ppc_SYMBOLS} -ARCHS="x86_64 arm armeb aarch64 aarch64eb riscv32 riscv64 mips mipsel mips64 mips64el sparc sparc64 m68k ppc ppc64" +ARCHS="x86_64 arm armeb aarch64 aarch64eb riscv32 riscv64 mips mipsel mips64 mips64el sparc sparc64 m68k ppc ppc64 s390x" for arch in $ARCHS; do diff --git a/uc.c b/uc.c index 3e24674c..41088dbc 100644 --- a/uc.c +++ b/uc.c @@ -23,6 +23,7 @@ #include "qemu/target/sparc/unicorn.h" #include "qemu/target/ppc/unicorn.h" #include "qemu/target/riscv/unicorn.h" +#include "qemu/target/s390x/unicorn.h" #include "qemu/include/qemu/queue.h" @@ -124,6 +125,9 @@ bool uc_arch_supported(uc_arch arch) #endif #ifdef UNICORN_HAS_RISCV case UC_ARCH_RISCV: return true; +#endif +#ifdef UNICORN_HAS_S390X + case UC_ARCH_S390X: return true; #endif /* Invalid or disabled arch */ default: return false; @@ -291,6 +295,16 @@ uc_err uc_open(uc_arch arch, uc_mode mode, uc_engine **result) } break; #endif +#ifdef UNICORN_HAS_S390X + case UC_ARCH_S390X: + if ((mode & ~UC_MODE_S390X_MASK) || + !(mode & UC_MODE_BIG_ENDIAN)) { + free(uc); + return UC_ERR_MODE; + } + uc->init_arch = s390_uc_init; + break; +#endif } @@ -699,6 +713,11 @@ uc_err uc_emu_start(uc_engine* uc, uint64_t begin, uint64_t until, uint64_t time case UC_ARCH_RISCV: uc_reg_write(uc, UC_RISCV_REG_PC, &begin); break; +#endif +#ifdef UNICORN_HAS_S390X + case UC_ARCH_S390X: + uc_reg_write(uc, UC_S390X_REG_PC, &begin); + break; #endif } @@ -1621,6 +1640,12 @@ static void find_context_reg_rw_function(uc_arch arch, uc_mode mode, context_reg rw->context_reg_write = riscv64_context_reg_write; } break; +#endif +#ifdef UNICORN_HAS_S390X + case UC_ARCH_S390X: + rw->context_reg_read = s390_context_reg_read; + rw->context_reg_write = s390_context_reg_write; + break; #endif } From b042a6a01d77da2c42d941eb191e7d03e7581d33 Mon Sep 17 00:00:00 2001 From: Nguyen Anh Quynh Date: Mon, 6 Dec 2021 04:28:13 +0800 Subject: [PATCH 02/38] add missing files --- TODO-s390 | 12 + include/unicorn/s390x.h | 99 + msvc/s390x-softmmu/config-target.h | 8 + qemu/hw/s390x/s390-skeys.c | 282 + qemu/include/hw/s390x/ebcdic.h | 104 + qemu/include/hw/s390x/ioinst.h | 248 + qemu/include/hw/s390x/sclp.h | 221 + qemu/include/hw/s390x/storage-keys.h | 66 + qemu/s390x.h | 1277 ++++ qemu/target/s390x/cc_helper.c | 595 ++ qemu/target/s390x/cpu-param.h | 17 + qemu/target/s390x/cpu-qom.h | 64 + qemu/target/s390x/cpu.c | 301 + qemu/target/s390x/cpu.h | 840 +++ qemu/target/s390x/cpu_features.c | 210 + qemu/target/s390x/cpu_features.h | 91 + qemu/target/s390x/cpu_features_def.h | 25 + qemu/target/s390x/cpu_features_def.inc.h | 370 ++ qemu/target/s390x/cpu_models.c | 579 ++ qemu/target/s390x/cpu_models.h | 109 + qemu/target/s390x/crypto_helper.c | 60 + qemu/target/s390x/excp_helper.c | 600 ++ qemu/target/s390x/fpu_helper.c | 888 +++ qemu/target/s390x/gen-features.c | 986 +++ qemu/target/s390x/gen-features.h | 135 + qemu/target/s390x/helper.c | 358 ++ qemu/target/s390x/helper.h | 358 ++ qemu/target/s390x/insn-data.def | 1371 +++++ qemu/target/s390x/insn-format.def | 81 + qemu/target/s390x/int_helper.c | 148 + qemu/target/s390x/internal.h | 366 ++ qemu/target/s390x/interrupt.c | 233 + qemu/target/s390x/ioinst.c | 788 +++ qemu/target/s390x/mem_helper.c | 2892 +++++++++ qemu/target/s390x/misc_helper.c | 815 +++ qemu/target/s390x/mmu_helper.c | 554 ++ qemu/target/s390x/s390-tod.h | 29 + qemu/target/s390x/sigp.c | 466 ++ qemu/target/s390x/tcg-stub.c | 30 + qemu/target/s390x/tcg_s390x.h | 24 + qemu/target/s390x/translate.c 
| 6938 ++++++++++++++++++++++ qemu/target/s390x/translate_vx.inc.c | 2882 +++++++++ qemu/target/s390x/unicorn.c | 173 + qemu/target/s390x/unicorn.h | 16 + qemu/target/s390x/vec.h | 141 + qemu/target/s390x/vec_fpu_helper.c | 625 ++ qemu/target/s390x/vec_helper.c | 192 + qemu/target/s390x/vec_int_helper.c | 618 ++ qemu/target/s390x/vec_string_helper.c | 473 ++ samples/sample_s390x.c | 83 + tests/unit/test_s390x.c | 8 + 51 files changed, 28849 insertions(+) create mode 100644 TODO-s390 create mode 100644 include/unicorn/s390x.h create mode 100644 msvc/s390x-softmmu/config-target.h create mode 100644 qemu/hw/s390x/s390-skeys.c create mode 100644 qemu/include/hw/s390x/ebcdic.h create mode 100644 qemu/include/hw/s390x/ioinst.h create mode 100644 qemu/include/hw/s390x/sclp.h create mode 100644 qemu/include/hw/s390x/storage-keys.h create mode 100644 qemu/s390x.h create mode 100644 qemu/target/s390x/cc_helper.c create mode 100644 qemu/target/s390x/cpu-param.h create mode 100644 qemu/target/s390x/cpu-qom.h create mode 100644 qemu/target/s390x/cpu.c create mode 100644 qemu/target/s390x/cpu.h create mode 100644 qemu/target/s390x/cpu_features.c create mode 100644 qemu/target/s390x/cpu_features.h create mode 100644 qemu/target/s390x/cpu_features_def.h create mode 100644 qemu/target/s390x/cpu_features_def.inc.h create mode 100644 qemu/target/s390x/cpu_models.c create mode 100644 qemu/target/s390x/cpu_models.h create mode 100644 qemu/target/s390x/crypto_helper.c create mode 100644 qemu/target/s390x/excp_helper.c create mode 100644 qemu/target/s390x/fpu_helper.c create mode 100644 qemu/target/s390x/gen-features.c create mode 100644 qemu/target/s390x/gen-features.h create mode 100644 qemu/target/s390x/helper.c create mode 100644 qemu/target/s390x/helper.h create mode 100644 qemu/target/s390x/insn-data.def create mode 100644 qemu/target/s390x/insn-format.def create mode 100644 qemu/target/s390x/int_helper.c create mode 100644 qemu/target/s390x/internal.h create mode 100644 qemu/target/s390x/interrupt.c create mode 100644 qemu/target/s390x/ioinst.c create mode 100644 qemu/target/s390x/mem_helper.c create mode 100644 qemu/target/s390x/misc_helper.c create mode 100644 qemu/target/s390x/mmu_helper.c create mode 100644 qemu/target/s390x/s390-tod.h create mode 100644 qemu/target/s390x/sigp.c create mode 100644 qemu/target/s390x/tcg-stub.c create mode 100644 qemu/target/s390x/tcg_s390x.h create mode 100644 qemu/target/s390x/translate.c create mode 100644 qemu/target/s390x/translate_vx.inc.c create mode 100644 qemu/target/s390x/unicorn.c create mode 100644 qemu/target/s390x/unicorn.h create mode 100644 qemu/target/s390x/vec.h create mode 100644 qemu/target/s390x/vec_fpu_helper.c create mode 100644 qemu/target/s390x/vec_helper.c create mode 100644 qemu/target/s390x/vec_int_helper.c create mode 100644 qemu/target/s390x/vec_string_helper.c create mode 100644 samples/sample_s390x.c create mode 100644 tests/unit/test_s390x.c diff --git a/TODO-s390 b/TODO-s390 new file mode 100644 index 00000000..a208265e --- /dev/null +++ b/TODO-s390 @@ -0,0 +1,12 @@ +current status: + +- only build s390x arch (see CMakeLists.txt) +- sample_s390x crash, due to qemu/target/s390x/cpu.c :: cpu_s390_init() still has bugs + +Todo: + +- fix qemu/target/s390x/cpu.c, so sample_s390x works +- enable building all arch to fix conflicts +- remove all static vars in qemu/target/s390x/translate.c +- support more registers in qemu/target/s390x/unicorn.c +- find & fix potential memory leaking with valgrind diff --git a/include/unicorn/s390x.h 
b/include/unicorn/s390x.h new file mode 100644 index 00000000..89348290 --- /dev/null +++ b/include/unicorn/s390x.h @@ -0,0 +1,99 @@ +/* Unicorn Engine */ +/* By Nguyen Anh Quynh , 2015-2021 */ + +#ifndef UNICORN_S390X_H +#define UNICORN_S390X_H + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef _MSC_VER +#pragma warning(disable:4201) +#endif + +//> S390X registers +typedef enum uc_s390x_reg { + UC_S390X_REG_INVALID = 0, + //> General purpose registers + UC_S390X_REG_R0, + UC_S390X_REG_R1, + UC_S390X_REG_R2, + UC_S390X_REG_R3, + UC_S390X_REG_R4, + UC_S390X_REG_R5, + UC_S390X_REG_R6, + UC_S390X_REG_R7, + UC_S390X_REG_R8, + UC_S390X_REG_R9, + UC_S390X_REG_R10, + UC_S390X_REG_R11, + UC_S390X_REG_R12, + UC_S390X_REG_R13, + UC_S390X_REG_R14, + UC_S390X_REG_R15, + + //> Floating point registers + UC_S390X_REG_F0, + UC_S390X_REG_F1, + UC_S390X_REG_F2, + UC_S390X_REG_F3, + UC_S390X_REG_F4, + UC_S390X_REG_F5, + UC_S390X_REG_F6, + UC_S390X_REG_F7, + UC_S390X_REG_F8, + UC_S390X_REG_F9, + UC_S390X_REG_F10, + UC_S390X_REG_F11, + UC_S390X_REG_F12, + UC_S390X_REG_F13, + UC_S390X_REG_F14, + UC_S390X_REG_F15, + UC_S390X_REG_F16, + UC_S390X_REG_F17, + UC_S390X_REG_F18, + UC_S390X_REG_F19, + UC_S390X_REG_F20, + UC_S390X_REG_F21, + UC_S390X_REG_F22, + UC_S390X_REG_F23, + UC_S390X_REG_F24, + UC_S390X_REG_F25, + UC_S390X_REG_F26, + UC_S390X_REG_F27, + UC_S390X_REG_F28, + UC_S390X_REG_F29, + UC_S390X_REG_F30, + UC_S390X_REG_F31, + + //> Access registers + UC_S390X_REG_A0, + UC_S390X_REG_A1, + UC_S390X_REG_A2, + UC_S390X_REG_A3, + UC_S390X_REG_A4, + UC_S390X_REG_A5, + UC_S390X_REG_A6, + UC_S390X_REG_A7, + UC_S390X_REG_A8, + UC_S390X_REG_A9, + UC_S390X_REG_A10, + UC_S390X_REG_A11, + UC_S390X_REG_A12, + UC_S390X_REG_A13, + UC_S390X_REG_A14, + UC_S390X_REG_A15, + + UC_S390X_REG_PC, // PC register + + UC_S390X_REG_ENDING, // <-- mark the end of the list or registers + + //> Alias registers +} uc_s390x_reg; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/msvc/s390x-softmmu/config-target.h b/msvc/s390x-softmmu/config-target.h new file mode 100644 index 00000000..5c7a0d19 --- /dev/null +++ b/msvc/s390x-softmmu/config-target.h @@ -0,0 +1,8 @@ +/* Automatically generated by create_config - do not modify */ +#define TARGET_S390X 1 +#define TARGET_NAME "s390x" +#define TARGET_S390X 1 +#define TARGET_SYSTBL_ABI common,64 +#define TARGET_WORDS_BIGENDIAN 1 +#define CONFIG_SOFTMMU 1 +#define TARGET_SUPPORTS_MTTCG 1 diff --git a/qemu/hw/s390x/s390-skeys.c b/qemu/hw/s390x/s390-skeys.c new file mode 100644 index 00000000..3f943f14 --- /dev/null +++ b/qemu/hw/s390x/s390-skeys.c @@ -0,0 +1,282 @@ +/* + * s390 storage key device + * + * Copyright 2015 IBM Corp. + * Author(s): Jason J. Herne + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "target/s390x/cpu.h" +#include "hw/s390x/storage-keys.h" + +#define S390_SKEYS_BUFFER_SIZE (128 * KiB) /* Room for 128k storage keys */ +#define S390_SKEYS_SAVE_FLAG_EOS 0x01 +#define S390_SKEYS_SAVE_FLAG_SKEYS 0x02 +#define S390_SKEYS_SAVE_FLAG_ERROR 0x04 + +#if 0 +S390SKeysState *s390_get_skeys_device(void) +{ + S390SKeysState *ss; + + ss = S390_SKEYS(object_resolve_path_type("", TYPE_S390_SKEYS, NULL)); + assert(ss); + return ss; +} + +void s390_skeys_init(void) +{ + Object *obj; + + obj = object_new(TYPE_QEMU_S390_SKEYS); + object_property_add_child(qdev_get_machine(), TYPE_S390_SKEYS, + obj, NULL); + object_unref(obj); + + qdev_init_nofail(DEVICE(obj)); +} + +static void qemu_s390_skeys_init(Object *obj) +{ + QEMUS390SKeysState *skeys = QEMU_S390_SKEYS(obj); + MachineState *machine = MACHINE(qdev_get_machine()); + + skeys->key_count = machine->ram_size / TARGET_PAGE_SIZE; + skeys->keydata = g_malloc0(skeys->key_count); +} + +static int qemu_s390_skeys_enabled(S390SKeysState *ss) +{ + return 1; +} + +/* + * TODO: for memory hotplug support qemu_s390_skeys_set and qemu_s390_skeys_get + * will have to make sure that the given gfn belongs to a memory region and not + * a memory hole. + */ +static int qemu_s390_skeys_set(S390SKeysState *ss, uint64_t start_gfn, + uint64_t count, uint8_t *keys) +{ + QEMUS390SKeysState *skeydev = QEMU_S390_SKEYS(ss); + int i; + + /* Check for uint64 overflow and access beyond end of key data */ + if (start_gfn + count > skeydev->key_count || start_gfn + count < count) { + error_report("Error: Setting storage keys for page beyond the end " + "of memory: gfn=%" PRIx64 " count=%" PRId64, + start_gfn, count); + return -EINVAL; + } + + for (i = 0; i < count; i++) { + skeydev->keydata[start_gfn + i] = keys[i]; + } + return 0; +} + +static int qemu_s390_skeys_get(S390SKeysState *ss, uint64_t start_gfn, + uint64_t count, uint8_t *keys) +{ + QEMUS390SKeysState *skeydev = QEMU_S390_SKEYS(ss); + int i; + + /* Check for uint64 overflow and access beyond end of key data */ + if (start_gfn + count > skeydev->key_count || start_gfn + count < count) { + error_report("Error: Getting storage keys for page beyond the end " + "of memory: gfn=%" PRIx64 " count=%" PRId64, + start_gfn, count); + return -EINVAL; + } + + for (i = 0; i < count; i++) { + keys[i] = skeydev->keydata[start_gfn + i]; + } + return 0; +} + +static void qemu_s390_skeys_class_init(ObjectClass *oc, void *data) +{ + S390SKeysClass *skeyclass = S390_SKEYS_CLASS(oc); + DeviceClass *dc = DEVICE_CLASS(oc); + + skeyclass->skeys_enabled = qemu_s390_skeys_enabled; + skeyclass->get_skeys = qemu_s390_skeys_get; + skeyclass->set_skeys = qemu_s390_skeys_set; + + /* Reason: Internal device (only one skeys device for the whole memory) */ + dc->user_creatable = false; +} + +static const TypeInfo qemu_s390_skeys_info = { + .name = TYPE_QEMU_S390_SKEYS, + .parent = TYPE_S390_SKEYS, + .instance_init = qemu_s390_skeys_init, + .instance_size = sizeof(QEMUS390SKeysState), + .class_init = qemu_s390_skeys_class_init, + .class_size = sizeof(S390SKeysClass), +}; + +static void write_keys(FILE *f, uint8_t *keys, uint64_t startgfn, + uint64_t count, Error **errp) +{ + uint64_t curpage = startgfn; + uint64_t maxpage = curpage + count - 1; + + for (; curpage <= maxpage; curpage++) { + uint8_t acc = (*keys & 0xF0) >> 4; + int fp = (*keys & 0x08); + int ref = (*keys & 0x04); + int ch = (*keys & 0x02); + int res = (*keys & 0x01); + + fprintf(f, "page=%03" PRIx64 ": key(%d) => 
ACC=%X, FP=%d, REF=%d," + " ch=%d, reserved=%d\n", + curpage, *keys, acc, fp, ref, ch, res); + keys++; + } +} + +static void s390_storage_keys_save(QEMUFile *f, void *opaque) +{ + S390SKeysState *ss = S390_SKEYS(opaque); + S390SKeysClass *skeyclass = S390_SKEYS_GET_CLASS(ss); + uint64_t pages_left = ram_size / TARGET_PAGE_SIZE; + uint64_t read_count, eos = S390_SKEYS_SAVE_FLAG_EOS; + vaddr cur_gfn = 0; + int error = 0; + uint8_t *buf; + + if (!skeyclass->skeys_enabled(ss)) { + goto end_stream; + } + + buf = g_try_malloc(S390_SKEYS_BUFFER_SIZE); + if (!buf) { + error_report("storage key save could not allocate memory"); + goto end_stream; + } + + /* We only support initial memory. Standby memory is not handled yet. */ + qemu_put_be64(f, (cur_gfn * TARGET_PAGE_SIZE) | S390_SKEYS_SAVE_FLAG_SKEYS); + qemu_put_be64(f, pages_left); + + while (pages_left) { + read_count = MIN(pages_left, S390_SKEYS_BUFFER_SIZE); + + if (!error) { + error = skeyclass->get_skeys(ss, cur_gfn, read_count, buf); + if (error) { + /* + * If error: we want to fill the stream with valid data instead + * of stopping early so we pad the stream with 0x00 values and + * use S390_SKEYS_SAVE_FLAG_ERROR to indicate failure to the + * reading side. + */ + error_report("S390_GET_KEYS error %d", error); + memset(buf, 0, S390_SKEYS_BUFFER_SIZE); + eos = S390_SKEYS_SAVE_FLAG_ERROR; + } + } + + qemu_put_buffer(f, buf, read_count); + cur_gfn += read_count; + pages_left -= read_count; + } + + g_free(buf); +end_stream: + qemu_put_be64(f, eos); +} + +static int s390_storage_keys_load(QEMUFile *f, void *opaque, int version_id) +{ + S390SKeysState *ss = S390_SKEYS(opaque); + S390SKeysClass *skeyclass = S390_SKEYS_GET_CLASS(ss); + int ret = 0; + + while (!ret) { + ram_addr_t addr; + int flags; + + addr = qemu_get_be64(f); + flags = addr & ~TARGET_PAGE_MASK; + addr &= TARGET_PAGE_MASK; + + switch (flags) { + case S390_SKEYS_SAVE_FLAG_SKEYS: { + const uint64_t total_count = qemu_get_be64(f); + uint64_t handled_count = 0, cur_count; + uint64_t cur_gfn = addr / TARGET_PAGE_SIZE; + uint8_t *buf = g_try_malloc(S390_SKEYS_BUFFER_SIZE); + + if (!buf) { + error_report("storage key load could not allocate memory"); + ret = -ENOMEM; + break; + } + + while (handled_count < total_count) { + cur_count = MIN(total_count - handled_count, + S390_SKEYS_BUFFER_SIZE); + qemu_get_buffer(f, buf, cur_count); + + ret = skeyclass->set_skeys(ss, cur_gfn, cur_count, buf); + if (ret < 0) { + error_report("S390_SET_KEYS error %d", ret); + break; + } + handled_count += cur_count; + cur_gfn += cur_count; + } + g_free(buf); + break; + } + case S390_SKEYS_SAVE_FLAG_ERROR: { + error_report("Storage key data is incomplete"); + ret = -EINVAL; + break; + } + case S390_SKEYS_SAVE_FLAG_EOS: + /* normal exit */ + return 0; + default: + error_report("Unexpected storage key flag data: %#x", flags); + ret = -EINVAL; + } + } + + return ret; +} + +static void s390_skeys_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->hotpluggable = false; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); +} + +static const TypeInfo s390_skeys_info = { + .name = TYPE_S390_SKEYS, + .parent = TYPE_DEVICE, + .instance_init = NULL, + .instance_size = sizeof(S390SKeysState), + .class_init = s390_skeys_class_init, + .class_size = sizeof(S390SKeysClass), + .abstract = true, +}; + +static void qemu_s390_skeys_register_types(void) +{ + type_register_static(&s390_skeys_info); + type_register_static(&qemu_s390_skeys_info); +} + +type_init(qemu_s390_skeys_register_types) 
+#endif diff --git a/qemu/include/hw/s390x/ebcdic.h b/qemu/include/hw/s390x/ebcdic.h new file mode 100644 index 00000000..69a04cab --- /dev/null +++ b/qemu/include/hw/s390x/ebcdic.h @@ -0,0 +1,104 @@ +/* + * EBCDIC/ASCII conversion Support + * + * Copyright (c) 2011 Alexander Graf + * Copyright IBM, Corp. 2013 + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at your + * option) any later version. See the COPYING file in the top-level directory. + * + */ + +#ifndef EBCDIC_H +#define EBCDIC_H + +/* EBCDIC handling */ +static const uint8_t ebcdic2ascii[] = { + 0x00, 0x01, 0x02, 0x03, 0x07, 0x09, 0x07, 0x7F, + 0x07, 0x07, 0x07, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + 0x10, 0x11, 0x12, 0x13, 0x07, 0x0A, 0x08, 0x07, + 0x18, 0x19, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, + 0x07, 0x07, 0x1C, 0x07, 0x07, 0x0A, 0x17, 0x1B, + 0x07, 0x07, 0x07, 0x07, 0x07, 0x05, 0x06, 0x07, + 0x07, 0x07, 0x16, 0x07, 0x07, 0x07, 0x07, 0x04, + 0x07, 0x07, 0x07, 0x07, 0x14, 0x15, 0x07, 0x1A, + 0x20, 0xFF, 0x83, 0x84, 0x85, 0xA0, 0x07, 0x86, + 0x87, 0xA4, 0x5B, 0x2E, 0x3C, 0x28, 0x2B, 0x21, + 0x26, 0x82, 0x88, 0x89, 0x8A, 0xA1, 0x8C, 0x07, + 0x8D, 0xE1, 0x5D, 0x24, 0x2A, 0x29, 0x3B, 0x5E, + 0x2D, 0x2F, 0x07, 0x8E, 0x07, 0x07, 0x07, 0x8F, + 0x80, 0xA5, 0x07, 0x2C, 0x25, 0x5F, 0x3E, 0x3F, + 0x07, 0x90, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, + 0x70, 0x60, 0x3A, 0x23, 0x40, 0x27, 0x3D, 0x22, + 0x07, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, + 0x68, 0x69, 0xAE, 0xAF, 0x07, 0x07, 0x07, 0xF1, + 0xF8, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70, + 0x71, 0x72, 0xA6, 0xA7, 0x91, 0x07, 0x92, 0x07, + 0xE6, 0x7E, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, + 0x79, 0x7A, 0xAD, 0xAB, 0x07, 0x07, 0x07, 0x07, + 0x9B, 0x9C, 0x9D, 0xFA, 0x07, 0x07, 0x07, 0xAC, + 0xAB, 0x07, 0xAA, 0x7C, 0x07, 0x07, 0x07, 0x07, + 0x7B, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, + 0x48, 0x49, 0x07, 0x93, 0x94, 0x95, 0xA2, 0x07, + 0x7D, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, + 0x51, 0x52, 0x07, 0x96, 0x81, 0x97, 0xA3, 0x98, + 0x5C, 0xF6, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, + 0x59, 0x5A, 0xFD, 0x07, 0x99, 0x07, 0x07, 0x07, + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, + 0x38, 0x39, 0x07, 0x07, 0x9A, 0x07, 0x07, 0x07, +}; + +static const uint8_t ascii2ebcdic[] = { + 0x00, 0x01, 0x02, 0x03, 0x37, 0x2D, 0x2E, 0x2F, + 0x16, 0x05, 0x15, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + 0x10, 0x11, 0x12, 0x13, 0x3C, 0x3D, 0x32, 0x26, + 0x18, 0x19, 0x3F, 0x27, 0x22, 0x1D, 0x1E, 0x1F, + 0x40, 0x5A, 0x7F, 0x7B, 0x5B, 0x6C, 0x50, 0x7D, + 0x4D, 0x5D, 0x5C, 0x4E, 0x6B, 0x60, 0x4B, 0x61, + 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, + 0xF8, 0xF9, 0x7A, 0x5E, 0x4C, 0x7E, 0x6E, 0x6F, + 0x7C, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, + 0xC8, 0xC9, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, + 0xD7, 0xD8, 0xD9, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, + 0xE7, 0xE8, 0xE9, 0xBA, 0xE0, 0xBB, 0xB0, 0x6D, + 0x79, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, + 0x97, 0x98, 0x99, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, + 0xA7, 0xA8, 0xA9, 0xC0, 0x4F, 0xD0, 0xA1, 0x07, + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + 0x3F, 0x3F, 
0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + 0x3F, 0x59, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, + 0x90, 0x3F, 0x3F, 0x3F, 0x3F, 0xEA, 0x3F, 0xFF +}; + +static inline void ebcdic_put(uint8_t *p, const char *ascii, int len) +{ + int i; + + for (i = 0; i < len; i++) { + p[i] = ascii2ebcdic[(uint8_t)ascii[i]]; + } +} + +static inline void ascii_put(uint8_t *p, const char *ebcdic, int len) +{ + int i; + + for (i = 0; i < len; i++) { + p[i] = ebcdic2ascii[(uint8_t)ebcdic[i]]; + } +} + +#endif /* EBCDIC_H */ diff --git a/qemu/include/hw/s390x/ioinst.h b/qemu/include/hw/s390x/ioinst.h new file mode 100644 index 00000000..c6737a30 --- /dev/null +++ b/qemu/include/hw/s390x/ioinst.h @@ -0,0 +1,248 @@ +/* + * S/390 channel I/O instructions + * + * Copyright 2012 IBM Corp. + * Author(s): Cornelia Huck + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. +*/ + +#ifndef S390X_IOINST_H +#define S390X_IOINST_H + +/* + * Channel I/O related definitions, as defined in the Principles + * Of Operation (and taken from the Linux implementation). + */ + +/* subchannel status word (command mode only) */ +typedef struct SCSW { + uint16_t flags; + uint16_t ctrl; + uint32_t cpa; + uint8_t dstat; + uint8_t cstat; + uint16_t count; +} SCSW; +QEMU_BUILD_BUG_MSG(sizeof(SCSW) != 12, "size of SCSW is wrong"); + +#define SCSW_FLAGS_MASK_KEY 0xf000 +#define SCSW_FLAGS_MASK_SCTL 0x0800 +#define SCSW_FLAGS_MASK_ESWF 0x0400 +#define SCSW_FLAGS_MASK_CC 0x0300 +#define SCSW_FLAGS_MASK_FMT 0x0080 +#define SCSW_FLAGS_MASK_PFCH 0x0040 +#define SCSW_FLAGS_MASK_ISIC 0x0020 +#define SCSW_FLAGS_MASK_ALCC 0x0010 +#define SCSW_FLAGS_MASK_SSI 0x0008 +#define SCSW_FLAGS_MASK_ZCC 0x0004 +#define SCSW_FLAGS_MASK_ECTL 0x0002 +#define SCSW_FLAGS_MASK_PNO 0x0001 + +#define SCSW_CTRL_MASK_FCTL 0x7000 +#define SCSW_CTRL_MASK_ACTL 0x0fe0 +#define SCSW_CTRL_MASK_STCTL 0x001f + +#define SCSW_FCTL_CLEAR_FUNC 0x1000 +#define SCSW_FCTL_HALT_FUNC 0x2000 +#define SCSW_FCTL_START_FUNC 0x4000 + +#define SCSW_ACTL_SUSP 0x0020 +#define SCSW_ACTL_DEVICE_ACTIVE 0x0040 +#define SCSW_ACTL_SUBCH_ACTIVE 0x0080 +#define SCSW_ACTL_CLEAR_PEND 0x0100 +#define SCSW_ACTL_HALT_PEND 0x0200 +#define SCSW_ACTL_START_PEND 0x0400 +#define SCSW_ACTL_RESUME_PEND 0x0800 + +#define SCSW_STCTL_STATUS_PEND 0x0001 +#define SCSW_STCTL_SECONDARY 0x0002 +#define SCSW_STCTL_PRIMARY 0x0004 +#define SCSW_STCTL_INTERMEDIATE 0x0008 +#define SCSW_STCTL_ALERT 0x0010 + +#define SCSW_DSTAT_ATTENTION 0x80 +#define SCSW_DSTAT_STAT_MOD 0x40 +#define SCSW_DSTAT_CU_END 0x20 +#define SCSW_DSTAT_BUSY 0x10 +#define SCSW_DSTAT_CHANNEL_END 0x08 +#define SCSW_DSTAT_DEVICE_END 0x04 +#define SCSW_DSTAT_UNIT_CHECK 0x02 +#define SCSW_DSTAT_UNIT_EXCEP 0x01 + +#define SCSW_CSTAT_PCI 0x80 +#define SCSW_CSTAT_INCORR_LEN 0x40 +#define SCSW_CSTAT_PROG_CHECK 0x20 +#define SCSW_CSTAT_PROT_CHECK 0x10 +#define SCSW_CSTAT_DATA_CHECK 0x08 +#define SCSW_CSTAT_CHN_CTRL_CHK 0x04 +#define SCSW_CSTAT_INTF_CTRL_CHK 0x02 +#define SCSW_CSTAT_CHAIN_CHECK 0x01 + +/* path management control word */ +typedef struct PMCW { + uint32_t intparm; + uint16_t flags; + uint16_t devno; + uint8_t lpm; + uint8_t pnom; + uint8_t lpum; + uint8_t pim; + uint16_t mbi; + uint8_t pom; + uint8_t pam; + uint8_t chpid[8]; + uint32_t chars; +} PMCW; +QEMU_BUILD_BUG_MSG(sizeof(PMCW) != 28, "size of PMCW 
is wrong"); + +#define PMCW_FLAGS_MASK_QF 0x8000 +#define PMCW_FLAGS_MASK_W 0x4000 +#define PMCW_FLAGS_MASK_ISC 0x3800 +#define PMCW_FLAGS_MASK_ENA 0x0080 +#define PMCW_FLAGS_MASK_LM 0x0060 +#define PMCW_FLAGS_MASK_MME 0x0018 +#define PMCW_FLAGS_MASK_MP 0x0004 +#define PMCW_FLAGS_MASK_TF 0x0002 +#define PMCW_FLAGS_MASK_DNV 0x0001 +#define PMCW_FLAGS_MASK_INVALID 0x0700 + +#define PMCW_CHARS_MASK_ST 0x00e00000 +#define PMCW_CHARS_MASK_MBFC 0x00000004 +#define PMCW_CHARS_MASK_XMWME 0x00000002 +#define PMCW_CHARS_MASK_CSENSE 0x00000001 +#define PMCW_CHARS_MASK_INVALID 0xff1ffff8 + +/* subchannel information block */ +typedef struct SCHIB { + PMCW pmcw; + SCSW scsw; + uint64_t mba; + uint8_t mda[4]; +} QEMU_PACKED SCHIB; + +/* interruption response block */ +typedef struct IRB { + SCSW scsw; + uint32_t esw[5]; + uint32_t ecw[8]; + uint32_t emw[8]; +} IRB; +QEMU_BUILD_BUG_MSG(sizeof(IRB) != 96, "size of IRB is wrong"); + +/* operation request block */ +typedef struct ORB { + uint32_t intparm; + uint16_t ctrl0; + uint8_t lpm; + uint8_t ctrl1; + uint32_t cpa; +} ORB; +QEMU_BUILD_BUG_MSG(sizeof(ORB) != 12, "size of ORB is wrong"); + +#define ORB_CTRL0_MASK_KEY 0xf000 +#define ORB_CTRL0_MASK_SPND 0x0800 +#define ORB_CTRL0_MASK_STR 0x0400 +#define ORB_CTRL0_MASK_MOD 0x0200 +#define ORB_CTRL0_MASK_SYNC 0x0100 +#define ORB_CTRL0_MASK_FMT 0x0080 +#define ORB_CTRL0_MASK_PFCH 0x0040 +#define ORB_CTRL0_MASK_ISIC 0x0020 +#define ORB_CTRL0_MASK_ALCC 0x0010 +#define ORB_CTRL0_MASK_SSIC 0x0008 +#define ORB_CTRL0_MASK_C64 0x0002 +#define ORB_CTRL0_MASK_I2K 0x0001 +#define ORB_CTRL0_MASK_INVALID 0x0004 + +#define ORB_CTRL1_MASK_ILS 0x80 +#define ORB_CTRL1_MASK_MIDAW 0x40 +#define ORB_CTRL1_MASK_ORBX 0x01 +#define ORB_CTRL1_MASK_INVALID 0x3e + +/* channel command word (type 0) */ +typedef struct CCW0 { + uint8_t cmd_code; + uint8_t cda0; + uint16_t cda1; + uint8_t flags; + uint8_t reserved; + uint16_t count; +} CCW0; +QEMU_BUILD_BUG_MSG(sizeof(CCW0) != 8, "size of CCW0 is wrong"); + +/* channel command word (type 1) */ +typedef struct CCW1 { + uint8_t cmd_code; + uint8_t flags; + uint16_t count; + uint32_t cda; +} CCW1; +QEMU_BUILD_BUG_MSG(sizeof(CCW1) != 8, "size of CCW1 is wrong"); + +#define CCW_FLAG_DC 0x80 +#define CCW_FLAG_CC 0x40 +#define CCW_FLAG_SLI 0x20 +#define CCW_FLAG_SKIP 0x10 +#define CCW_FLAG_PCI 0x08 +#define CCW_FLAG_IDA 0x04 +#define CCW_FLAG_SUSPEND 0x02 +#define CCW_FLAG_MIDA 0x01 + +#define CCW_CMD_NOOP 0x03 +#define CCW_CMD_BASIC_SENSE 0x04 +#define CCW_CMD_TIC 0x08 +#define CCW_CMD_SENSE_ID 0xe4 + +typedef struct CRW { + uint16_t flags; + uint16_t rsid; +} CRW; +QEMU_BUILD_BUG_MSG(sizeof(CRW) != 4, "size of CRW is wrong"); + +#define CRW_FLAGS_MASK_S 0x4000 +#define CRW_FLAGS_MASK_R 0x2000 +#define CRW_FLAGS_MASK_C 0x1000 +#define CRW_FLAGS_MASK_RSC 0x0f00 +#define CRW_FLAGS_MASK_A 0x0080 +#define CRW_FLAGS_MASK_ERC 0x003f + +#define CRW_ERC_EVENT 0x00 /* event information pending */ +#define CRW_ERC_AVAIL 0x01 /* available */ +#define CRW_ERC_INIT 0x02 /* initialized */ +#define CRW_ERC_TERROR 0x03 /* temporary error */ +#define CRW_ERC_IPI 0x04 /* installed parm initialized */ +#define CRW_ERC_TERM 0x05 /* terminal */ +#define CRW_ERC_PERRN 0x06 /* perm. error, facility not init */ +#define CRW_ERC_PERRI 0x07 /* perm. 
error, facility init */ +#define CRW_ERC_PMOD 0x08 /* installed parameters modified */ +#define CRW_ERC_IPR 0x0A /* installed parameters restored */ + +#define CRW_RSC_SUBCH 0x3 +#define CRW_RSC_CHP 0x4 +#define CRW_RSC_CSS 0xb + +/* I/O interruption code */ +typedef struct IOIntCode { + uint32_t subsys_id; + uint32_t intparm; + uint32_t interrupt_id; +} QEMU_PACKED IOIntCode; + +/* schid disintegration */ +#define IOINST_SCHID_ONE(_schid) ((_schid & 0x00010000) >> 16) +#define IOINST_SCHID_M(_schid) ((_schid & 0x00080000) >> 19) +#define IOINST_SCHID_CSSID(_schid) ((_schid & 0xff000000) >> 24) +#define IOINST_SCHID_SSID(_schid) ((_schid & 0x00060000) >> 17) +#define IOINST_SCHID_NR(_schid) (_schid & 0x0000ffff) + +#define IO_INT_WORD_ISC(_int_word) ((_int_word & 0x38000000) >> 27) +#define ISC_TO_ISC_BITS(_isc) ((0x80 >> _isc) << 24) + +#define IO_INT_WORD_AI 0x80000000 + +int ioinst_disassemble_sch_ident(uint32_t value, int *m, int *cssid, int *ssid, + int *schid); + +#endif diff --git a/qemu/include/hw/s390x/sclp.h b/qemu/include/hw/s390x/sclp.h new file mode 100644 index 00000000..92084838 --- /dev/null +++ b/qemu/include/hw/s390x/sclp.h @@ -0,0 +1,221 @@ +/* + * SCLP Support + * + * Copyright IBM, Corp. 2012 + * + * Authors: + * Christian Borntraeger + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at your + * option) any later version. See the COPYING file in the top-level directory. + * + */ + +#ifndef HW_S390_SCLP_H +#define HW_S390_SCLP_H + +//#include "hw/sysbus.h" +#include "target/s390x/cpu-qom.h" + +#define SCLP_CMD_CODE_MASK 0xffff00ff + +/* SCLP command codes */ +#define SCLP_CMDW_READ_SCP_INFO 0x00020001 +#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001 +#define SCLP_READ_STORAGE_ELEMENT_INFO 0x00040001 +#define SCLP_ATTACH_STORAGE_ELEMENT 0x00080001 +#define SCLP_ASSIGN_STORAGE 0x000D0001 +#define SCLP_UNASSIGN_STORAGE 0x000C0001 +#define SCLP_CMD_READ_EVENT_DATA 0x00770005 +#define SCLP_CMD_WRITE_EVENT_DATA 0x00760005 +#define SCLP_CMD_WRITE_EVENT_MASK 0x00780005 + +/* SCLP Memory hotplug codes */ +#define SCLP_FC_ASSIGN_ATTACH_READ_STOR 0xE00000000000ULL +#define SCLP_STARTING_SUBINCREMENT_ID 0x10001 +#define SCLP_INCREMENT_UNIT 0x10000 +#define MAX_STORAGE_INCREMENTS 1020 + +/* CPU hotplug SCLP codes */ +#define SCLP_HAS_CPU_INFO 0x0C00000000000000ULL +#define SCLP_CMDW_READ_CPU_INFO 0x00010001 +#define SCLP_CMDW_CONFIGURE_CPU 0x00110001 +#define SCLP_CMDW_DECONFIGURE_CPU 0x00100001 + +/* SCLP PCI codes */ +#define SCLP_HAS_IOA_RECONFIG 0x0000000040000000ULL +#define SCLP_CMDW_CONFIGURE_IOA 0x001a0001 +#define SCLP_CMDW_DECONFIGURE_IOA 0x001b0001 +#define SCLP_RECONFIG_PCI_ATYPE 2 + +/* SCLP response codes */ +#define SCLP_RC_NORMAL_READ_COMPLETION 0x0010 +#define SCLP_RC_NORMAL_COMPLETION 0x0020 +#define SCLP_RC_SCCB_BOUNDARY_VIOLATION 0x0100 +#define SCLP_RC_NO_ACTION_REQUIRED 0x0120 +#define SCLP_RC_INVALID_SCLP_COMMAND 0x01f0 +#define SCLP_RC_CONTAINED_EQUIPMENT_CHECK 0x0340 +#define SCLP_RC_INSUFFICIENT_SCCB_LENGTH 0x0300 +#define SCLP_RC_STANDBY_READ_COMPLETION 0x0410 +#define SCLP_RC_ADAPTER_IN_RESERVED_STATE 0x05f0 +#define SCLP_RC_ADAPTER_TYPE_NOT_RECOGNIZED 0x06f0 +#define SCLP_RC_ADAPTER_ID_NOT_RECOGNIZED 0x09f0 +#define SCLP_RC_INVALID_FUNCTION 0x40f0 +#define SCLP_RC_NO_EVENT_BUFFERS_STORED 0x60f0 +#define SCLP_RC_INVALID_SELECTION_MASK 0x70f0 +#define SCLP_RC_INCONSISTENT_LENGTHS 0x72f0 +#define SCLP_RC_EVENT_BUFFER_SYNTAX_ERROR 0x73f0 +#define SCLP_RC_INVALID_MASK_LENGTH 0x74f0 + + +/* Service Call Control Block (SCCB) and its 
elements */ + +#define SCCB_SIZE 4096 + +#define SCLP_VARIABLE_LENGTH_RESPONSE 0x80 +#define SCLP_EVENT_BUFFER_ACCEPTED 0x80 + +#define SCLP_FC_NORMAL_WRITE 0 + +/* + * Normally packed structures are not the right thing to do, since all code + * must take care of endianness. We cannot use ldl_phys and friends for two + * reasons, though: + * - some of the embedded structures below the SCCB can appear multiple times + * at different locations, so there is no fixed offset + * - we work on a private copy of the SCCB, since there are several length + * fields, that would cause a security nightmare if we allow the guest to + * alter the structure while we parse it. We cannot use ldl_p and friends + * either without doing pointer arithmetics + * So we have to double check that all users of sclp data structures use the + * right endianness wrappers. + */ +typedef struct SCCBHeader { + uint16_t length; + uint8_t function_code; + uint8_t control_mask[3]; + uint16_t response_code; +} QEMU_PACKED SCCBHeader; + +#define SCCB_DATA_LEN (SCCB_SIZE - sizeof(SCCBHeader)) +#define SCCB_CPU_FEATURE_LEN 6 + +/* CPU information */ +typedef struct CPUEntry { + uint8_t address; + uint8_t reserved0; + uint8_t features[SCCB_CPU_FEATURE_LEN]; + uint8_t reserved2[6]; + uint8_t type; + uint8_t reserved1; +} QEMU_PACKED CPUEntry; + +typedef struct ReadInfo { + SCCBHeader h; + uint16_t rnmax; + uint8_t rnsize; + uint8_t _reserved1[16 - 11]; /* 11-15 */ + uint16_t entries_cpu; /* 16-17 */ + uint16_t offset_cpu; /* 18-19 */ + uint8_t _reserved2[24 - 20]; /* 20-23 */ + uint8_t loadparm[8]; /* 24-31 */ + uint8_t _reserved3[48 - 32]; /* 32-47 */ + uint64_t facilities; /* 48-55 */ + uint8_t _reserved0[76 - 56]; /* 56-75 */ + uint32_t ibc_val; + uint8_t conf_char[99 - 80]; /* 80-98 */ + uint8_t mha_pow; + uint32_t rnsize2; + uint64_t rnmax2; + uint8_t _reserved6[116 - 112]; /* 112-115 */ + uint8_t conf_char_ext[120 - 116]; /* 116-119 */ + uint16_t highest_cpu; + uint8_t _reserved5[124 - 122]; /* 122-123 */ + uint32_t hmfai; + struct CPUEntry entries[]; +} QEMU_PACKED ReadInfo; + +typedef struct ReadCpuInfo { + SCCBHeader h; + uint16_t nr_configured; /* 8-9 */ + uint16_t offset_configured; /* 10-11 */ + uint16_t nr_standby; /* 12-13 */ + uint16_t offset_standby; /* 14-15 */ + uint8_t reserved0[24-16]; /* 16-23 */ + struct CPUEntry entries[]; +} QEMU_PACKED ReadCpuInfo; + +typedef struct ReadStorageElementInfo { + SCCBHeader h; + uint16_t max_id; + uint16_t assigned; + uint16_t standby; + uint8_t _reserved0[16 - 14]; /* 14-15 */ + uint32_t entries[]; +} QEMU_PACKED ReadStorageElementInfo; + +typedef struct AttachStorageElement { + SCCBHeader h; + uint8_t _reserved0[10 - 8]; /* 8-9 */ + uint16_t assigned; + uint8_t _reserved1[16 - 12]; /* 12-15 */ + uint32_t entries[]; +} QEMU_PACKED AttachStorageElement; + +typedef struct AssignStorage { + SCCBHeader h; + uint16_t rn; +} QEMU_PACKED AssignStorage; + +typedef struct IoaCfgSccb { + SCCBHeader header; + uint8_t atype; + uint8_t reserved1; + uint16_t reserved2; + uint32_t aid; +} QEMU_PACKED IoaCfgSccb; + +typedef struct SCCB { + SCCBHeader h; + char data[SCCB_DATA_LEN]; + } QEMU_PACKED SCCB; + +#define TYPE_SCLP "sclp" +#define SCLP(obj) OBJECT_CHECK(SCLPDevice, (obj), TYPE_SCLP) +#define SCLP_CLASS(oc) OBJECT_CLASS_CHECK(SCLPDeviceClass, (oc), TYPE_SCLP) +#define SCLP_GET_CLASS(obj) OBJECT_GET_CLASS(SCLPDeviceClass, (obj), TYPE_SCLP) + +typedef struct SCLPEventFacility SCLPEventFacility; + +typedef struct SCLPDevice { + /* private */ + CPUState parent_obj; + SCLPEventFacility 
*event_facility; + int increment_size; + + /* public */ +} SCLPDevice; + +typedef struct SCLPDeviceClass { + /* private */ + DeviceClass parent_class; + void (*read_SCP_info)(SCLPDevice *sclp, SCCB *sccb); + void (*read_cpu_info)(SCLPDevice *sclp, SCCB *sccb); + + /* public */ + void (*execute)(SCLPDevice *sclp, SCCB *sccb, uint32_t code); + void (*service_interrupt)(SCLPDevice *sclp, uint32_t sccb); +} SCLPDeviceClass; + +static inline int sccb_data_len(SCCB *sccb) +{ + return be16_to_cpu(sccb->h.length) - sizeof(sccb->h); +} + + +void s390_sclp_init(void); +void sclp_service_interrupt(uint32_t sccb); +void raise_irq_cpu_hotplug(void); +int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code); + +#endif diff --git a/qemu/include/hw/s390x/storage-keys.h b/qemu/include/hw/s390x/storage-keys.h new file mode 100644 index 00000000..427b6774 --- /dev/null +++ b/qemu/include/hw/s390x/storage-keys.h @@ -0,0 +1,66 @@ +/* + * s390 storage key device + * + * Copyright 2015 IBM Corp. + * Author(s): Jason J. Herne + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. + */ + +#ifndef S390_STORAGE_KEYS_H +#define S390_STORAGE_KEYS_H + +/* +#define TYPE_S390_SKEYS "s390-skeys" +#define S390_SKEYS(obj) \ + OBJECT_CHECK(S390SKeysState, (obj), TYPE_S390_SKEYS) + +#define S390_CPU(obj) \ + OBJECT_CHECK(S390CPU, (obj), TYPE_S390_CPU) +#define S390_CPU(obj) ((S390CPU *)obj) +*/ + +typedef struct S390SKeysState { + CPUState parent_obj; +} S390SKeysState; + +/* +#define S390_SKEYS_CLASS(klass) \ + OBJECT_CLASS_CHECK(S390SKeysClass, (klass), TYPE_S390_SKEYS) +*/ +#define S390_SKEYS_CLASS(klass) ((S390SKeysClass *)klass) + +/* +#define S390_SKEYS_GET_CLASS(obj) \ + OBJECT_GET_CLASS(S390SKeysClass, (obj), TYPE_S390_SKEYS) +*/ +#define S390_SKEYS_GET_CLASS(obj) (&((S390CPU *)obj)->skey) + + +typedef struct S390SKeysClass { + CPUClass parent_class; + int (*skeys_enabled)(S390SKeysState *ks); + int (*get_skeys)(S390SKeysState *ks, uint64_t start_gfn, uint64_t count, + uint8_t *keys); + int (*set_skeys)(S390SKeysState *ks, uint64_t start_gfn, uint64_t count, + uint8_t *keys); +} S390SKeysClass; + +#define TYPE_KVM_S390_SKEYS "s390-skeys-kvm" +#define TYPE_QEMU_S390_SKEYS "s390-skeys-qemu" +#define QEMU_S390_SKEYS(obj) \ + OBJECT_CHECK(QEMUS390SKeysState, (obj), TYPE_QEMU_S390_SKEYS) + +typedef struct QEMUS390SKeysState { + S390SKeysState parent_obj; + uint8_t *keydata; + uint32_t key_count; +} QEMUS390SKeysState; + +void s390_skeys_init(void); + +S390SKeysState *s390_get_skeys_device(void); + +#endif /* S390_STORAGE_KEYS_H */ diff --git a/qemu/s390x.h b/qemu/s390x.h new file mode 100644 index 00000000..ffa6c59e --- /dev/null +++ b/qemu/s390x.h @@ -0,0 +1,1277 @@ +/* Autogen header for Unicorn Engine - DONOT MODIFY */ +#ifndef UNICORN_AUTOGEN_s390x_H +#define UNICORN_AUTOGEN_s390x_H +#ifndef UNICORN_ARCH_POSTFIX +#define UNICORN_ARCH_POSTFIX _s390x +#endif +#define use_idiv_instructions use_idiv_instructions_s390x +#define arm_arch arm_arch_s390x +#define tb_target_set_jmp_target tb_target_set_jmp_target_s390x +#define have_bmi1 have_bmi1_s390x +#define have_popcnt have_popcnt_s390x +#define have_avx1 have_avx1_s390x +#define have_avx2 have_avx2_s390x +#define have_isa have_isa_s390x +#define have_altivec have_altivec_s390x +#define have_vsx have_vsx_s390x +#define flush_icache_range flush_icache_range_s390x +#define s390_facilities s390_facilities_s390x +#define tcg_dump_op tcg_dump_op_s390x 
+#define tcg_dump_ops tcg_dump_ops_s390x +#define tcg_gen_and_i64 tcg_gen_and_i64_s390x +#define tcg_gen_discard_i64 tcg_gen_discard_i64_s390x +#define tcg_gen_ld16s_i64 tcg_gen_ld16s_i64_s390x +#define tcg_gen_ld16u_i64 tcg_gen_ld16u_i64_s390x +#define tcg_gen_ld32s_i64 tcg_gen_ld32s_i64_s390x +#define tcg_gen_ld32u_i64 tcg_gen_ld32u_i64_s390x +#define tcg_gen_ld8s_i64 tcg_gen_ld8s_i64_s390x +#define tcg_gen_ld8u_i64 tcg_gen_ld8u_i64_s390x +#define tcg_gen_ld_i64 tcg_gen_ld_i64_s390x +#define tcg_gen_mov_i64 tcg_gen_mov_i64_s390x +#define tcg_gen_movi_i64 tcg_gen_movi_i64_s390x +#define tcg_gen_mul_i64 tcg_gen_mul_i64_s390x +#define tcg_gen_or_i64 tcg_gen_or_i64_s390x +#define tcg_gen_sar_i64 tcg_gen_sar_i64_s390x +#define tcg_gen_shl_i64 tcg_gen_shl_i64_s390x +#define tcg_gen_shr_i64 tcg_gen_shr_i64_s390x +#define tcg_gen_st_i64 tcg_gen_st_i64_s390x +#define tcg_gen_xor_i64 tcg_gen_xor_i64_s390x +#define cpu_icount_to_ns cpu_icount_to_ns_s390x +#define cpu_is_stopped cpu_is_stopped_s390x +#define cpu_get_ticks cpu_get_ticks_s390x +#define cpu_get_clock cpu_get_clock_s390x +#define cpu_resume cpu_resume_s390x +#define qemu_init_vcpu qemu_init_vcpu_s390x +#define cpu_stop_current cpu_stop_current_s390x +#define resume_all_vcpus resume_all_vcpus_s390x +#define vm_start vm_start_s390x +#define address_space_dispatch_compact address_space_dispatch_compact_s390x +#define flatview_translate flatview_translate_s390x +#define address_space_translate_for_iotlb address_space_translate_for_iotlb_s390x +//#define qemu_get_cpu qemu_get_cpu_s390x +#define cpu_address_space_init cpu_address_space_init_s390x +#define cpu_get_address_space cpu_get_address_space_s390x +#define cpu_exec_unrealizefn cpu_exec_unrealizefn_s390x +#define cpu_exec_initfn cpu_exec_initfn_s390x +#define cpu_exec_realizefn cpu_exec_realizefn_s390x +#define tb_invalidate_phys_addr tb_invalidate_phys_addr_s390x +#define cpu_watchpoint_insert cpu_watchpoint_insert_s390x +#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_s390x +#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_s390x +#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_s390x +#define cpu_breakpoint_insert cpu_breakpoint_insert_s390x +#define cpu_breakpoint_remove cpu_breakpoint_remove_s390x +#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_s390x +#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_s390x +#define cpu_abort cpu_abort_s390x +#define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_s390x +#define memory_region_section_get_iotlb memory_region_section_get_iotlb_s390x +#define flatview_add_to_dispatch flatview_add_to_dispatch_s390x +#define qemu_ram_get_host_addr qemu_ram_get_host_addr_s390x +#define qemu_ram_get_offset qemu_ram_get_offset_s390x +#define qemu_ram_get_used_length qemu_ram_get_used_length_s390x +#define qemu_ram_is_shared qemu_ram_is_shared_s390x +#define qemu_ram_pagesize qemu_ram_pagesize_s390x +#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_s390x +#define qemu_ram_alloc qemu_ram_alloc_s390x +#define qemu_ram_free qemu_ram_free_s390x +#define qemu_map_ram_ptr qemu_map_ram_ptr_s390x +#define qemu_ram_block_host_offset qemu_ram_block_host_offset_s390x +#define qemu_ram_block_from_host qemu_ram_block_from_host_s390x +#define qemu_ram_addr_from_host qemu_ram_addr_from_host_s390x +#define cpu_check_watchpoint cpu_check_watchpoint_s390x +#define iotlb_to_section iotlb_to_section_s390x +#define address_space_dispatch_new 
address_space_dispatch_new_s390x +#define address_space_dispatch_free address_space_dispatch_free_s390x +#define flatview_read_continue flatview_read_continue_s390x +#define address_space_read_full address_space_read_full_s390x +#define address_space_write address_space_write_s390x +#define address_space_rw address_space_rw_s390x +#define cpu_physical_memory_rw cpu_physical_memory_rw_s390x +#define address_space_write_rom address_space_write_rom_s390x +#define cpu_flush_icache_range cpu_flush_icache_range_s390x +#define cpu_exec_init_all cpu_exec_init_all_s390x +#define address_space_access_valid address_space_access_valid_s390x +#define address_space_map address_space_map_s390x +#define address_space_unmap address_space_unmap_s390x +#define cpu_physical_memory_map cpu_physical_memory_map_s390x +#define cpu_physical_memory_unmap cpu_physical_memory_unmap_s390x +#define cpu_memory_rw_debug cpu_memory_rw_debug_s390x +#define qemu_target_page_size qemu_target_page_size_s390x +#define qemu_target_page_bits qemu_target_page_bits_s390x +#define qemu_target_page_bits_min qemu_target_page_bits_min_s390x +#define target_words_bigendian target_words_bigendian_s390x +#define cpu_physical_memory_is_io cpu_physical_memory_is_io_s390x +#define ram_block_discard_range ram_block_discard_range_s390x +#define ramblock_is_pmem ramblock_is_pmem_s390x +#define page_size_init page_size_init_s390x +#define set_preferred_target_page_bits set_preferred_target_page_bits_s390x +#define finalize_target_page_bits finalize_target_page_bits_s390x +#define cpu_outb cpu_outb_s390x +#define cpu_outw cpu_outw_s390x +#define cpu_outl cpu_outl_s390x +#define cpu_inb cpu_inb_s390x +#define cpu_inw cpu_inw_s390x +#define cpu_inl cpu_inl_s390x +#define memory_map memory_map_s390x +#define memory_map_io memory_map_io_s390x +#define memory_map_ptr memory_map_ptr_s390x +#define memory_unmap memory_unmap_s390x +#define memory_free memory_free_s390x +#define flatview_unref flatview_unref_s390x +#define address_space_get_flatview address_space_get_flatview_s390x +#define memory_region_transaction_begin memory_region_transaction_begin_s390x +#define memory_region_transaction_commit memory_region_transaction_commit_s390x +#define memory_region_init memory_region_init_s390x +#define memory_region_access_valid memory_region_access_valid_s390x +#define memory_region_dispatch_read memory_region_dispatch_read_s390x +#define memory_region_dispatch_write memory_region_dispatch_write_s390x +#define memory_region_init_io memory_region_init_io_s390x +#define memory_region_init_ram_ptr memory_region_init_ram_ptr_s390x +#define memory_region_size memory_region_size_s390x +#define memory_region_set_readonly memory_region_set_readonly_s390x +#define memory_region_get_ram_ptr memory_region_get_ram_ptr_s390x +#define memory_region_from_host memory_region_from_host_s390x +#define memory_region_get_ram_addr memory_region_get_ram_addr_s390x +#define memory_region_add_subregion memory_region_add_subregion_s390x +#define memory_region_del_subregion memory_region_del_subregion_s390x +#define memory_region_find memory_region_find_s390x +#define memory_listener_register memory_listener_register_s390x +#define memory_listener_unregister memory_listener_unregister_s390x +#define address_space_remove_listeners address_space_remove_listeners_s390x +#define address_space_init address_space_init_s390x +#define address_space_destroy address_space_destroy_s390x +#define memory_region_init_ram memory_region_init_ram_s390x +#define memory_mapping_list_add_merge_sorted 
memory_mapping_list_add_merge_sorted_s390x +#define exec_inline_op exec_inline_op_s390x +#define floatx80_default_nan floatx80_default_nan_s390x +#define float_raise float_raise_s390x +#define float16_is_quiet_nan float16_is_quiet_nan_s390x +#define float16_is_signaling_nan float16_is_signaling_nan_s390x +#define float32_is_quiet_nan float32_is_quiet_nan_s390x +#define float32_is_signaling_nan float32_is_signaling_nan_s390x +#define float64_is_quiet_nan float64_is_quiet_nan_s390x +#define float64_is_signaling_nan float64_is_signaling_nan_s390x +#define floatx80_is_quiet_nan floatx80_is_quiet_nan_s390x +#define floatx80_is_signaling_nan floatx80_is_signaling_nan_s390x +#define floatx80_silence_nan floatx80_silence_nan_s390x +#define propagateFloatx80NaN propagateFloatx80NaN_s390x +#define float128_is_quiet_nan float128_is_quiet_nan_s390x +#define float128_is_signaling_nan float128_is_signaling_nan_s390x +#define float128_silence_nan float128_silence_nan_s390x +#define float16_add float16_add_s390x +#define float16_sub float16_sub_s390x +#define float32_add float32_add_s390x +#define float32_sub float32_sub_s390x +#define float64_add float64_add_s390x +#define float64_sub float64_sub_s390x +#define float16_mul float16_mul_s390x +#define float32_mul float32_mul_s390x +#define float64_mul float64_mul_s390x +#define float16_muladd float16_muladd_s390x +#define float32_muladd float32_muladd_s390x +#define float64_muladd float64_muladd_s390x +#define float16_div float16_div_s390x +#define float32_div float32_div_s390x +#define float64_div float64_div_s390x +#define float16_to_float32 float16_to_float32_s390x +#define float16_to_float64 float16_to_float64_s390x +#define float32_to_float16 float32_to_float16_s390x +#define float32_to_float64 float32_to_float64_s390x +#define float64_to_float16 float64_to_float16_s390x +#define float64_to_float32 float64_to_float32_s390x +#define float16_round_to_int float16_round_to_int_s390x +#define float32_round_to_int float32_round_to_int_s390x +#define float64_round_to_int float64_round_to_int_s390x +#define float16_to_int16_scalbn float16_to_int16_scalbn_s390x +#define float16_to_int32_scalbn float16_to_int32_scalbn_s390x +#define float16_to_int64_scalbn float16_to_int64_scalbn_s390x +#define float32_to_int16_scalbn float32_to_int16_scalbn_s390x +#define float32_to_int32_scalbn float32_to_int32_scalbn_s390x +#define float32_to_int64_scalbn float32_to_int64_scalbn_s390x +#define float64_to_int16_scalbn float64_to_int16_scalbn_s390x +#define float64_to_int32_scalbn float64_to_int32_scalbn_s390x +#define float64_to_int64_scalbn float64_to_int64_scalbn_s390x +#define float16_to_int16 float16_to_int16_s390x +#define float16_to_int32 float16_to_int32_s390x +#define float16_to_int64 float16_to_int64_s390x +#define float32_to_int16 float32_to_int16_s390x +#define float32_to_int32 float32_to_int32_s390x +#define float32_to_int64 float32_to_int64_s390x +#define float64_to_int16 float64_to_int16_s390x +#define float64_to_int32 float64_to_int32_s390x +#define float64_to_int64 float64_to_int64_s390x +#define float16_to_int16_round_to_zero float16_to_int16_round_to_zero_s390x +#define float16_to_int32_round_to_zero float16_to_int32_round_to_zero_s390x +#define float16_to_int64_round_to_zero float16_to_int64_round_to_zero_s390x +#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_s390x +#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_s390x +#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_s390x +#define 
float64_to_int16_round_to_zero float64_to_int16_round_to_zero_s390x +#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_s390x +#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_s390x +#define float16_to_uint16_scalbn float16_to_uint16_scalbn_s390x +#define float16_to_uint32_scalbn float16_to_uint32_scalbn_s390x +#define float16_to_uint64_scalbn float16_to_uint64_scalbn_s390x +#define float32_to_uint16_scalbn float32_to_uint16_scalbn_s390x +#define float32_to_uint32_scalbn float32_to_uint32_scalbn_s390x +#define float32_to_uint64_scalbn float32_to_uint64_scalbn_s390x +#define float64_to_uint16_scalbn float64_to_uint16_scalbn_s390x +#define float64_to_uint32_scalbn float64_to_uint32_scalbn_s390x +#define float64_to_uint64_scalbn float64_to_uint64_scalbn_s390x +#define float16_to_uint16 float16_to_uint16_s390x +#define float16_to_uint32 float16_to_uint32_s390x +#define float16_to_uint64 float16_to_uint64_s390x +#define float32_to_uint16 float32_to_uint16_s390x +#define float32_to_uint32 float32_to_uint32_s390x +#define float32_to_uint64 float32_to_uint64_s390x +#define float64_to_uint16 float64_to_uint16_s390x +#define float64_to_uint32 float64_to_uint32_s390x +#define float64_to_uint64 float64_to_uint64_s390x +#define float16_to_uint16_round_to_zero float16_to_uint16_round_to_zero_s390x +#define float16_to_uint32_round_to_zero float16_to_uint32_round_to_zero_s390x +#define float16_to_uint64_round_to_zero float16_to_uint64_round_to_zero_s390x +#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_s390x +#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_s390x +#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_s390x +#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_s390x +#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_s390x +#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_s390x +#define int64_to_float16_scalbn int64_to_float16_scalbn_s390x +#define int32_to_float16_scalbn int32_to_float16_scalbn_s390x +#define int16_to_float16_scalbn int16_to_float16_scalbn_s390x +#define int64_to_float16 int64_to_float16_s390x +#define int32_to_float16 int32_to_float16_s390x +#define int16_to_float16 int16_to_float16_s390x +#define int64_to_float32_scalbn int64_to_float32_scalbn_s390x +#define int32_to_float32_scalbn int32_to_float32_scalbn_s390x +#define int16_to_float32_scalbn int16_to_float32_scalbn_s390x +#define int64_to_float32 int64_to_float32_s390x +#define int32_to_float32 int32_to_float32_s390x +#define int16_to_float32 int16_to_float32_s390x +#define int64_to_float64_scalbn int64_to_float64_scalbn_s390x +#define int32_to_float64_scalbn int32_to_float64_scalbn_s390x +#define int16_to_float64_scalbn int16_to_float64_scalbn_s390x +#define int64_to_float64 int64_to_float64_s390x +#define int32_to_float64 int32_to_float64_s390x +#define int16_to_float64 int16_to_float64_s390x +#define uint64_to_float16_scalbn uint64_to_float16_scalbn_s390x +#define uint32_to_float16_scalbn uint32_to_float16_scalbn_s390x +#define uint16_to_float16_scalbn uint16_to_float16_scalbn_s390x +#define uint64_to_float16 uint64_to_float16_s390x +#define uint32_to_float16 uint32_to_float16_s390x +#define uint16_to_float16 uint16_to_float16_s390x +#define uint64_to_float32_scalbn uint64_to_float32_scalbn_s390x +#define uint32_to_float32_scalbn uint32_to_float32_scalbn_s390x +#define uint16_to_float32_scalbn uint16_to_float32_scalbn_s390x +#define uint64_to_float32 
uint64_to_float32_s390x +#define uint32_to_float32 uint32_to_float32_s390x +#define uint16_to_float32 uint16_to_float32_s390x +#define uint64_to_float64_scalbn uint64_to_float64_scalbn_s390x +#define uint32_to_float64_scalbn uint32_to_float64_scalbn_s390x +#define uint16_to_float64_scalbn uint16_to_float64_scalbn_s390x +#define uint64_to_float64 uint64_to_float64_s390x +#define uint32_to_float64 uint32_to_float64_s390x +#define uint16_to_float64 uint16_to_float64_s390x +#define float16_min float16_min_s390x +#define float16_minnum float16_minnum_s390x +#define float16_minnummag float16_minnummag_s390x +#define float16_max float16_max_s390x +#define float16_maxnum float16_maxnum_s390x +#define float16_maxnummag float16_maxnummag_s390x +#define float32_min float32_min_s390x +#define float32_minnum float32_minnum_s390x +#define float32_minnummag float32_minnummag_s390x +#define float32_max float32_max_s390x +#define float32_maxnum float32_maxnum_s390x +#define float32_maxnummag float32_maxnummag_s390x +#define float64_min float64_min_s390x +#define float64_minnum float64_minnum_s390x +#define float64_minnummag float64_minnummag_s390x +#define float64_max float64_max_s390x +#define float64_maxnum float64_maxnum_s390x +#define float64_maxnummag float64_maxnummag_s390x +#define float16_compare float16_compare_s390x +#define float16_compare_quiet float16_compare_quiet_s390x +#define float32_compare float32_compare_s390x +#define float32_compare_quiet float32_compare_quiet_s390x +#define float64_compare float64_compare_s390x +#define float64_compare_quiet float64_compare_quiet_s390x +#define float16_scalbn float16_scalbn_s390x +#define float32_scalbn float32_scalbn_s390x +#define float64_scalbn float64_scalbn_s390x +#define float16_sqrt float16_sqrt_s390x +#define float32_sqrt float32_sqrt_s390x +#define float64_sqrt float64_sqrt_s390x +#define float16_default_nan float16_default_nan_s390x +#define float32_default_nan float32_default_nan_s390x +#define float64_default_nan float64_default_nan_s390x +#define float128_default_nan float128_default_nan_s390x +#define float16_silence_nan float16_silence_nan_s390x +#define float32_silence_nan float32_silence_nan_s390x +#define float64_silence_nan float64_silence_nan_s390x +#define float16_squash_input_denormal float16_squash_input_denormal_s390x +#define float32_squash_input_denormal float32_squash_input_denormal_s390x +#define float64_squash_input_denormal float64_squash_input_denormal_s390x +#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_s390x +#define roundAndPackFloatx80 roundAndPackFloatx80_s390x +#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_s390x +#define int32_to_floatx80 int32_to_floatx80_s390x +#define int32_to_float128 int32_to_float128_s390x +#define int64_to_floatx80 int64_to_floatx80_s390x +#define int64_to_float128 int64_to_float128_s390x +#define uint64_to_float128 uint64_to_float128_s390x +#define float32_to_floatx80 float32_to_floatx80_s390x +#define float32_to_float128 float32_to_float128_s390x +#define float32_rem float32_rem_s390x +#define float32_exp2 float32_exp2_s390x +#define float32_log2 float32_log2_s390x +#define float32_eq float32_eq_s390x +#define float32_le float32_le_s390x +#define float32_lt float32_lt_s390x +#define float32_unordered float32_unordered_s390x +#define float32_eq_quiet float32_eq_quiet_s390x +#define float32_le_quiet float32_le_quiet_s390x +#define float32_lt_quiet float32_lt_quiet_s390x +#define float32_unordered_quiet float32_unordered_quiet_s390x +#define 
float64_to_floatx80 float64_to_floatx80_s390x +#define float64_to_float128 float64_to_float128_s390x +#define float64_rem float64_rem_s390x +#define float64_log2 float64_log2_s390x +#define float64_eq float64_eq_s390x +#define float64_le float64_le_s390x +#define float64_lt float64_lt_s390x +#define float64_unordered float64_unordered_s390x +#define float64_eq_quiet float64_eq_quiet_s390x +#define float64_le_quiet float64_le_quiet_s390x +#define float64_lt_quiet float64_lt_quiet_s390x +#define float64_unordered_quiet float64_unordered_quiet_s390x +#define floatx80_to_int32 floatx80_to_int32_s390x +#define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_s390x +#define floatx80_to_int64 floatx80_to_int64_s390x +#define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_s390x +#define floatx80_to_float32 floatx80_to_float32_s390x +#define floatx80_to_float64 floatx80_to_float64_s390x +#define floatx80_to_float128 floatx80_to_float128_s390x +#define floatx80_round floatx80_round_s390x +#define floatx80_round_to_int floatx80_round_to_int_s390x +#define floatx80_add floatx80_add_s390x +#define floatx80_sub floatx80_sub_s390x +#define floatx80_mul floatx80_mul_s390x +#define floatx80_div floatx80_div_s390x +#define floatx80_rem floatx80_rem_s390x +#define floatx80_sqrt floatx80_sqrt_s390x +#define floatx80_eq floatx80_eq_s390x +#define floatx80_le floatx80_le_s390x +#define floatx80_lt floatx80_lt_s390x +#define floatx80_unordered floatx80_unordered_s390x +#define floatx80_eq_quiet floatx80_eq_quiet_s390x +#define floatx80_le_quiet floatx80_le_quiet_s390x +#define floatx80_lt_quiet floatx80_lt_quiet_s390x +#define floatx80_unordered_quiet floatx80_unordered_quiet_s390x +#define float128_to_int32 float128_to_int32_s390x +#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_s390x +#define float128_to_int64 float128_to_int64_s390x +#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_s390x +#define float128_to_uint64 float128_to_uint64_s390x +#define float128_to_uint64_round_to_zero float128_to_uint64_round_to_zero_s390x +#define float128_to_uint32_round_to_zero float128_to_uint32_round_to_zero_s390x +#define float128_to_uint32 float128_to_uint32_s390x +#define float128_to_float32 float128_to_float32_s390x +#define float128_to_float64 float128_to_float64_s390x +#define float128_to_floatx80 float128_to_floatx80_s390x +#define float128_round_to_int float128_round_to_int_s390x +#define float128_add float128_add_s390x +#define float128_sub float128_sub_s390x +#define float128_mul float128_mul_s390x +#define float128_div float128_div_s390x +#define float128_rem float128_rem_s390x +#define float128_sqrt float128_sqrt_s390x +#define float128_eq float128_eq_s390x +#define float128_le float128_le_s390x +#define float128_lt float128_lt_s390x +#define float128_unordered float128_unordered_s390x +#define float128_eq_quiet float128_eq_quiet_s390x +#define float128_le_quiet float128_le_quiet_s390x +#define float128_lt_quiet float128_lt_quiet_s390x +#define float128_unordered_quiet float128_unordered_quiet_s390x +#define floatx80_compare floatx80_compare_s390x +#define floatx80_compare_quiet floatx80_compare_quiet_s390x +#define float128_compare float128_compare_s390x +#define float128_compare_quiet float128_compare_quiet_s390x +#define floatx80_scalbn floatx80_scalbn_s390x +#define float128_scalbn float128_scalbn_s390x +#define softfloat_init softfloat_init_s390x +#define tcg_optimize tcg_optimize_s390x +#define gen_new_label gen_new_label_s390x 
+#define tcg_can_emit_vec_op tcg_can_emit_vec_op_s390x +#define tcg_expand_vec_op tcg_expand_vec_op_s390x +#define tcg_register_jit tcg_register_jit_s390x +#define tcg_tb_insert tcg_tb_insert_s390x +#define tcg_tb_remove tcg_tb_remove_s390x +#define tcg_tb_lookup tcg_tb_lookup_s390x +#define tcg_tb_foreach tcg_tb_foreach_s390x +#define tcg_nb_tbs tcg_nb_tbs_s390x +#define tcg_region_reset_all tcg_region_reset_all_s390x +#define tcg_region_init tcg_region_init_s390x +#define tcg_code_size tcg_code_size_s390x +#define tcg_code_capacity tcg_code_capacity_s390x +#define tcg_tb_phys_invalidate_count tcg_tb_phys_invalidate_count_s390x +#define tcg_malloc_internal tcg_malloc_internal_s390x +#define tcg_pool_reset tcg_pool_reset_s390x +#define tcg_context_init tcg_context_init_s390x +#define tcg_tb_alloc tcg_tb_alloc_s390x +#define tcg_prologue_init tcg_prologue_init_s390x +#define tcg_func_start tcg_func_start_s390x +#define tcg_set_frame tcg_set_frame_s390x +#define tcg_global_mem_new_internal tcg_global_mem_new_internal_s390x +#define tcg_temp_new_internal tcg_temp_new_internal_s390x +#define tcg_temp_new_vec tcg_temp_new_vec_s390x +#define tcg_temp_new_vec_matching tcg_temp_new_vec_matching_s390x +#define tcg_temp_free_internal tcg_temp_free_internal_s390x +#define tcg_const_i32 tcg_const_i32_s390x +#define tcg_const_i64 tcg_const_i64_s390x +#define tcg_const_local_i32 tcg_const_local_i32_s390x +#define tcg_const_local_i64 tcg_const_local_i64_s390x +#define tcg_op_supported tcg_op_supported_s390x +#define tcg_gen_callN tcg_gen_callN_s390x +#define tcg_op_remove tcg_op_remove_s390x +#define tcg_emit_op tcg_emit_op_s390x +#define tcg_op_insert_before tcg_op_insert_before_s390x +#define tcg_op_insert_after tcg_op_insert_after_s390x +#define tcg_cpu_exec_time tcg_cpu_exec_time_s390x +#define tcg_gen_code tcg_gen_code_s390x +#define tcg_gen_op1 tcg_gen_op1_s390x +#define tcg_gen_op2 tcg_gen_op2_s390x +#define tcg_gen_op3 tcg_gen_op3_s390x +#define tcg_gen_op4 tcg_gen_op4_s390x +#define tcg_gen_op5 tcg_gen_op5_s390x +#define tcg_gen_op6 tcg_gen_op6_s390x +#define tcg_gen_mb tcg_gen_mb_s390x +#define tcg_gen_addi_i32 tcg_gen_addi_i32_s390x +#define tcg_gen_subfi_i32 tcg_gen_subfi_i32_s390x +#define tcg_gen_subi_i32 tcg_gen_subi_i32_s390x +#define tcg_gen_andi_i32 tcg_gen_andi_i32_s390x +#define tcg_gen_ori_i32 tcg_gen_ori_i32_s390x +#define tcg_gen_xori_i32 tcg_gen_xori_i32_s390x +#define tcg_gen_shli_i32 tcg_gen_shli_i32_s390x +#define tcg_gen_shri_i32 tcg_gen_shri_i32_s390x +#define tcg_gen_sari_i32 tcg_gen_sari_i32_s390x +#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_s390x +#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_s390x +#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_s390x +#define tcg_gen_setcondi_i32 tcg_gen_setcondi_i32_s390x +#define tcg_gen_muli_i32 tcg_gen_muli_i32_s390x +#define tcg_gen_div_i32 tcg_gen_div_i32_s390x +#define tcg_gen_rem_i32 tcg_gen_rem_i32_s390x +#define tcg_gen_divu_i32 tcg_gen_divu_i32_s390x +#define tcg_gen_remu_i32 tcg_gen_remu_i32_s390x +#define tcg_gen_andc_i32 tcg_gen_andc_i32_s390x +#define tcg_gen_eqv_i32 tcg_gen_eqv_i32_s390x +#define tcg_gen_nand_i32 tcg_gen_nand_i32_s390x +#define tcg_gen_nor_i32 tcg_gen_nor_i32_s390x +#define tcg_gen_orc_i32 tcg_gen_orc_i32_s390x +#define tcg_gen_clz_i32 tcg_gen_clz_i32_s390x +#define tcg_gen_clzi_i32 tcg_gen_clzi_i32_s390x +#define tcg_gen_ctz_i32 tcg_gen_ctz_i32_s390x +#define tcg_gen_ctzi_i32 tcg_gen_ctzi_i32_s390x +#define tcg_gen_clrsb_i32 tcg_gen_clrsb_i32_s390x +#define tcg_gen_ctpop_i32 tcg_gen_ctpop_i32_s390x 
+#define tcg_gen_rotl_i32 tcg_gen_rotl_i32_s390x +#define tcg_gen_rotli_i32 tcg_gen_rotli_i32_s390x +#define tcg_gen_rotr_i32 tcg_gen_rotr_i32_s390x +#define tcg_gen_rotri_i32 tcg_gen_rotri_i32_s390x +#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_s390x +#define tcg_gen_deposit_z_i32 tcg_gen_deposit_z_i32_s390x +#define tcg_gen_extract_i32 tcg_gen_extract_i32_s390x +#define tcg_gen_sextract_i32 tcg_gen_sextract_i32_s390x +#define tcg_gen_extract2_i32 tcg_gen_extract2_i32_s390x +#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_s390x +#define tcg_gen_add2_i32 tcg_gen_add2_i32_s390x +#define tcg_gen_sub2_i32 tcg_gen_sub2_i32_s390x +#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_s390x +#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_s390x +#define tcg_gen_mulsu2_i32 tcg_gen_mulsu2_i32_s390x +#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_s390x +#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_s390x +#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_s390x +#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_s390x +#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_s390x +#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_s390x +#define tcg_gen_smin_i32 tcg_gen_smin_i32_s390x +#define tcg_gen_umin_i32 tcg_gen_umin_i32_s390x +#define tcg_gen_smax_i32 tcg_gen_smax_i32_s390x +#define tcg_gen_umax_i32 tcg_gen_umax_i32_s390x +#define tcg_gen_abs_i32 tcg_gen_abs_i32_s390x +#define tcg_gen_addi_i64 tcg_gen_addi_i64_s390x +#define tcg_gen_subfi_i64 tcg_gen_subfi_i64_s390x +#define tcg_gen_subi_i64 tcg_gen_subi_i64_s390x +#define tcg_gen_andi_i64 tcg_gen_andi_i64_s390x +#define tcg_gen_ori_i64 tcg_gen_ori_i64_s390x +#define tcg_gen_xori_i64 tcg_gen_xori_i64_s390x +#define tcg_gen_shli_i64 tcg_gen_shli_i64_s390x +#define tcg_gen_shri_i64 tcg_gen_shri_i64_s390x +#define tcg_gen_sari_i64 tcg_gen_sari_i64_s390x +#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_s390x +#define tcg_gen_brcondi_i64 tcg_gen_brcondi_i64_s390x +#define tcg_gen_setcond_i64 tcg_gen_setcond_i64_s390x +#define tcg_gen_setcondi_i64 tcg_gen_setcondi_i64_s390x +#define tcg_gen_muli_i64 tcg_gen_muli_i64_s390x +#define tcg_gen_div_i64 tcg_gen_div_i64_s390x +#define tcg_gen_rem_i64 tcg_gen_rem_i64_s390x +#define tcg_gen_divu_i64 tcg_gen_divu_i64_s390x +#define tcg_gen_remu_i64 tcg_gen_remu_i64_s390x +#define tcg_gen_ext8s_i64 tcg_gen_ext8s_i64_s390x +#define tcg_gen_ext16s_i64 tcg_gen_ext16s_i64_s390x +#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_s390x +#define tcg_gen_ext8u_i64 tcg_gen_ext8u_i64_s390x +#define tcg_gen_ext16u_i64 tcg_gen_ext16u_i64_s390x +#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_s390x +#define tcg_gen_bswap16_i64 tcg_gen_bswap16_i64_s390x +#define tcg_gen_bswap32_i64 tcg_gen_bswap32_i64_s390x +#define tcg_gen_bswap64_i64 tcg_gen_bswap64_i64_s390x +#define tcg_gen_not_i64 tcg_gen_not_i64_s390x +#define tcg_gen_andc_i64 tcg_gen_andc_i64_s390x +#define tcg_gen_eqv_i64 tcg_gen_eqv_i64_s390x +#define tcg_gen_nand_i64 tcg_gen_nand_i64_s390x +#define tcg_gen_nor_i64 tcg_gen_nor_i64_s390x +#define tcg_gen_orc_i64 tcg_gen_orc_i64_s390x +#define tcg_gen_clz_i64 tcg_gen_clz_i64_s390x +#define tcg_gen_clzi_i64 tcg_gen_clzi_i64_s390x +#define tcg_gen_ctz_i64 tcg_gen_ctz_i64_s390x +#define tcg_gen_ctzi_i64 tcg_gen_ctzi_i64_s390x +#define tcg_gen_clrsb_i64 tcg_gen_clrsb_i64_s390x +#define tcg_gen_ctpop_i64 tcg_gen_ctpop_i64_s390x +#define tcg_gen_rotl_i64 tcg_gen_rotl_i64_s390x +#define tcg_gen_rotli_i64 tcg_gen_rotli_i64_s390x +#define tcg_gen_rotr_i64 tcg_gen_rotr_i64_s390x +#define tcg_gen_rotri_i64 tcg_gen_rotri_i64_s390x +#define tcg_gen_deposit_i64 
tcg_gen_deposit_i64_s390x +#define tcg_gen_deposit_z_i64 tcg_gen_deposit_z_i64_s390x +#define tcg_gen_extract_i64 tcg_gen_extract_i64_s390x +#define tcg_gen_sextract_i64 tcg_gen_sextract_i64_s390x +#define tcg_gen_extract2_i64 tcg_gen_extract2_i64_s390x +#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_s390x +#define tcg_gen_add2_i64 tcg_gen_add2_i64_s390x +#define tcg_gen_sub2_i64 tcg_gen_sub2_i64_s390x +#define tcg_gen_mulu2_i64 tcg_gen_mulu2_i64_s390x +#define tcg_gen_muls2_i64 tcg_gen_muls2_i64_s390x +#define tcg_gen_mulsu2_i64 tcg_gen_mulsu2_i64_s390x +#define tcg_gen_smin_i64 tcg_gen_smin_i64_s390x +#define tcg_gen_umin_i64 tcg_gen_umin_i64_s390x +#define tcg_gen_smax_i64 tcg_gen_smax_i64_s390x +#define tcg_gen_umax_i64 tcg_gen_umax_i64_s390x +#define tcg_gen_abs_i64 tcg_gen_abs_i64_s390x +#define tcg_gen_extrl_i64_i32 tcg_gen_extrl_i64_i32_s390x +#define tcg_gen_extrh_i64_i32 tcg_gen_extrh_i64_i32_s390x +#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_s390x +#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_s390x +#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_s390x +#define tcg_gen_extr_i64_i32 tcg_gen_extr_i64_i32_s390x +#define tcg_gen_extr32_i64 tcg_gen_extr32_i64_s390x +#define tcg_gen_exit_tb tcg_gen_exit_tb_s390x +#define tcg_gen_goto_tb tcg_gen_goto_tb_s390x +#define tcg_gen_lookup_and_goto_ptr tcg_gen_lookup_and_goto_ptr_s390x +#define check_exit_request check_exit_request_s390x +#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_s390x +#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_s390x +#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_s390x +#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_s390x +#define tcg_gen_atomic_cmpxchg_i32 tcg_gen_atomic_cmpxchg_i32_s390x +#define tcg_gen_atomic_cmpxchg_i64 tcg_gen_atomic_cmpxchg_i64_s390x +#define tcg_gen_atomic_fetch_add_i32 tcg_gen_atomic_fetch_add_i32_s390x +#define tcg_gen_atomic_fetch_add_i64 tcg_gen_atomic_fetch_add_i64_s390x +#define tcg_gen_atomic_fetch_and_i32 tcg_gen_atomic_fetch_and_i32_s390x +#define tcg_gen_atomic_fetch_and_i64 tcg_gen_atomic_fetch_and_i64_s390x +#define tcg_gen_atomic_fetch_or_i32 tcg_gen_atomic_fetch_or_i32_s390x +#define tcg_gen_atomic_fetch_or_i64 tcg_gen_atomic_fetch_or_i64_s390x +#define tcg_gen_atomic_fetch_xor_i32 tcg_gen_atomic_fetch_xor_i32_s390x +#define tcg_gen_atomic_fetch_xor_i64 tcg_gen_atomic_fetch_xor_i64_s390x +#define tcg_gen_atomic_fetch_smin_i32 tcg_gen_atomic_fetch_smin_i32_s390x +#define tcg_gen_atomic_fetch_smin_i64 tcg_gen_atomic_fetch_smin_i64_s390x +#define tcg_gen_atomic_fetch_umin_i32 tcg_gen_atomic_fetch_umin_i32_s390x +#define tcg_gen_atomic_fetch_umin_i64 tcg_gen_atomic_fetch_umin_i64_s390x +#define tcg_gen_atomic_fetch_smax_i32 tcg_gen_atomic_fetch_smax_i32_s390x +#define tcg_gen_atomic_fetch_smax_i64 tcg_gen_atomic_fetch_smax_i64_s390x +#define tcg_gen_atomic_fetch_umax_i32 tcg_gen_atomic_fetch_umax_i32_s390x +#define tcg_gen_atomic_fetch_umax_i64 tcg_gen_atomic_fetch_umax_i64_s390x +#define tcg_gen_atomic_add_fetch_i32 tcg_gen_atomic_add_fetch_i32_s390x +#define tcg_gen_atomic_add_fetch_i64 tcg_gen_atomic_add_fetch_i64_s390x +#define tcg_gen_atomic_and_fetch_i32 tcg_gen_atomic_and_fetch_i32_s390x +#define tcg_gen_atomic_and_fetch_i64 tcg_gen_atomic_and_fetch_i64_s390x +#define tcg_gen_atomic_or_fetch_i32 tcg_gen_atomic_or_fetch_i32_s390x +#define tcg_gen_atomic_or_fetch_i64 tcg_gen_atomic_or_fetch_i64_s390x +#define tcg_gen_atomic_xor_fetch_i32 tcg_gen_atomic_xor_fetch_i32_s390x +#define tcg_gen_atomic_xor_fetch_i64 tcg_gen_atomic_xor_fetch_i64_s390x +#define 
tcg_gen_atomic_smin_fetch_i32 tcg_gen_atomic_smin_fetch_i32_s390x +#define tcg_gen_atomic_smin_fetch_i64 tcg_gen_atomic_smin_fetch_i64_s390x +#define tcg_gen_atomic_umin_fetch_i32 tcg_gen_atomic_umin_fetch_i32_s390x +#define tcg_gen_atomic_umin_fetch_i64 tcg_gen_atomic_umin_fetch_i64_s390x +#define tcg_gen_atomic_smax_fetch_i32 tcg_gen_atomic_smax_fetch_i32_s390x +#define tcg_gen_atomic_smax_fetch_i64 tcg_gen_atomic_smax_fetch_i64_s390x +#define tcg_gen_atomic_umax_fetch_i32 tcg_gen_atomic_umax_fetch_i32_s390x +#define tcg_gen_atomic_umax_fetch_i64 tcg_gen_atomic_umax_fetch_i64_s390x +#define tcg_gen_atomic_xchg_i32 tcg_gen_atomic_xchg_i32_s390x +#define tcg_gen_atomic_xchg_i64 tcg_gen_atomic_xchg_i64_s390x +#define simd_desc simd_desc_s390x +#define tcg_gen_gvec_2_ool tcg_gen_gvec_2_ool_s390x +#define tcg_gen_gvec_2i_ool tcg_gen_gvec_2i_ool_s390x +#define tcg_gen_gvec_3_ool tcg_gen_gvec_3_ool_s390x +#define tcg_gen_gvec_4_ool tcg_gen_gvec_4_ool_s390x +#define tcg_gen_gvec_5_ool tcg_gen_gvec_5_ool_s390x +#define tcg_gen_gvec_2_ptr tcg_gen_gvec_2_ptr_s390x +#define tcg_gen_gvec_3_ptr tcg_gen_gvec_3_ptr_s390x +#define tcg_gen_gvec_4_ptr tcg_gen_gvec_4_ptr_s390x +#define tcg_gen_gvec_5_ptr tcg_gen_gvec_5_ptr_s390x +#define tcg_gen_gvec_2 tcg_gen_gvec_2_s390x +#define tcg_gen_gvec_2i tcg_gen_gvec_2i_s390x +#define tcg_gen_gvec_2s tcg_gen_gvec_2s_s390x +#define tcg_gen_gvec_3 tcg_gen_gvec_3_s390x +#define tcg_gen_gvec_3i tcg_gen_gvec_3i_s390x +#define tcg_gen_gvec_4 tcg_gen_gvec_4_s390x +#define tcg_gen_gvec_mov tcg_gen_gvec_mov_s390x +#define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_s390x +#define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_s390x +#define tcg_gen_gvec_dup_mem tcg_gen_gvec_dup_mem_s390x +#define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_s390x +#define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_s390x +#define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_s390x +#define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_s390x +#define tcg_gen_gvec_not tcg_gen_gvec_not_s390x +#define tcg_gen_vec_add8_i64 tcg_gen_vec_add8_i64_s390x +#define tcg_gen_vec_add16_i64 tcg_gen_vec_add16_i64_s390x +#define tcg_gen_vec_add32_i64 tcg_gen_vec_add32_i64_s390x +#define tcg_gen_gvec_add tcg_gen_gvec_add_s390x +#define tcg_gen_gvec_adds tcg_gen_gvec_adds_s390x +#define tcg_gen_gvec_addi tcg_gen_gvec_addi_s390x +#define tcg_gen_gvec_subs tcg_gen_gvec_subs_s390x +#define tcg_gen_vec_sub8_i64 tcg_gen_vec_sub8_i64_s390x +#define tcg_gen_vec_sub16_i64 tcg_gen_vec_sub16_i64_s390x +#define tcg_gen_vec_sub32_i64 tcg_gen_vec_sub32_i64_s390x +#define tcg_gen_gvec_sub tcg_gen_gvec_sub_s390x +#define tcg_gen_gvec_mul tcg_gen_gvec_mul_s390x +#define tcg_gen_gvec_muls tcg_gen_gvec_muls_s390x +#define tcg_gen_gvec_muli tcg_gen_gvec_muli_s390x +#define tcg_gen_gvec_ssadd tcg_gen_gvec_ssadd_s390x +#define tcg_gen_gvec_sssub tcg_gen_gvec_sssub_s390x +#define tcg_gen_gvec_usadd tcg_gen_gvec_usadd_s390x +#define tcg_gen_gvec_ussub tcg_gen_gvec_ussub_s390x +#define tcg_gen_gvec_smin tcg_gen_gvec_smin_s390x +#define tcg_gen_gvec_umin tcg_gen_gvec_umin_s390x +#define tcg_gen_gvec_smax tcg_gen_gvec_smax_s390x +#define tcg_gen_gvec_umax tcg_gen_gvec_umax_s390x +#define tcg_gen_vec_neg8_i64 tcg_gen_vec_neg8_i64_s390x +#define tcg_gen_vec_neg16_i64 tcg_gen_vec_neg16_i64_s390x +#define tcg_gen_vec_neg32_i64 tcg_gen_vec_neg32_i64_s390x +#define tcg_gen_gvec_neg tcg_gen_gvec_neg_s390x +#define tcg_gen_gvec_abs tcg_gen_gvec_abs_s390x +#define tcg_gen_gvec_and tcg_gen_gvec_and_s390x +#define tcg_gen_gvec_or tcg_gen_gvec_or_s390x +#define 
tcg_gen_gvec_xor tcg_gen_gvec_xor_s390x +#define tcg_gen_gvec_andc tcg_gen_gvec_andc_s390x +#define tcg_gen_gvec_orc tcg_gen_gvec_orc_s390x +#define tcg_gen_gvec_nand tcg_gen_gvec_nand_s390x +#define tcg_gen_gvec_nor tcg_gen_gvec_nor_s390x +#define tcg_gen_gvec_eqv tcg_gen_gvec_eqv_s390x +#define tcg_gen_gvec_ands tcg_gen_gvec_ands_s390x +#define tcg_gen_gvec_andi tcg_gen_gvec_andi_s390x +#define tcg_gen_gvec_xors tcg_gen_gvec_xors_s390x +#define tcg_gen_gvec_xori tcg_gen_gvec_xori_s390x +#define tcg_gen_gvec_ors tcg_gen_gvec_ors_s390x +#define tcg_gen_gvec_ori tcg_gen_gvec_ori_s390x +#define tcg_gen_vec_shl8i_i64 tcg_gen_vec_shl8i_i64_s390x +#define tcg_gen_vec_shl16i_i64 tcg_gen_vec_shl16i_i64_s390x +#define tcg_gen_gvec_shli tcg_gen_gvec_shli_s390x +#define tcg_gen_vec_shr8i_i64 tcg_gen_vec_shr8i_i64_s390x +#define tcg_gen_vec_shr16i_i64 tcg_gen_vec_shr16i_i64_s390x +#define tcg_gen_gvec_shri tcg_gen_gvec_shri_s390x +#define tcg_gen_vec_sar8i_i64 tcg_gen_vec_sar8i_i64_s390x +#define tcg_gen_vec_sar16i_i64 tcg_gen_vec_sar16i_i64_s390x +#define tcg_gen_gvec_sari tcg_gen_gvec_sari_s390x +#define tcg_gen_gvec_shls tcg_gen_gvec_shls_s390x +#define tcg_gen_gvec_shrs tcg_gen_gvec_shrs_s390x +#define tcg_gen_gvec_sars tcg_gen_gvec_sars_s390x +#define tcg_gen_gvec_shlv tcg_gen_gvec_shlv_s390x +#define tcg_gen_gvec_shrv tcg_gen_gvec_shrv_s390x +#define tcg_gen_gvec_sarv tcg_gen_gvec_sarv_s390x +#define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_s390x +#define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_s390x +#define tcg_can_emit_vecop_list tcg_can_emit_vecop_list_s390x +#define vec_gen_2 vec_gen_2_s390x +#define vec_gen_3 vec_gen_3_s390x +#define vec_gen_4 vec_gen_4_s390x +#define tcg_gen_mov_vec tcg_gen_mov_vec_s390x +#define tcg_const_zeros_vec tcg_const_zeros_vec_s390x +#define tcg_const_ones_vec tcg_const_ones_vec_s390x +#define tcg_const_zeros_vec_matching tcg_const_zeros_vec_matching_s390x +#define tcg_const_ones_vec_matching tcg_const_ones_vec_matching_s390x +#define tcg_gen_dup64i_vec tcg_gen_dup64i_vec_s390x +#define tcg_gen_dup32i_vec tcg_gen_dup32i_vec_s390x +#define tcg_gen_dup16i_vec tcg_gen_dup16i_vec_s390x +#define tcg_gen_dup8i_vec tcg_gen_dup8i_vec_s390x +#define tcg_gen_dupi_vec tcg_gen_dupi_vec_s390x +#define tcg_gen_dup_i64_vec tcg_gen_dup_i64_vec_s390x +#define tcg_gen_dup_i32_vec tcg_gen_dup_i32_vec_s390x +#define tcg_gen_dup_mem_vec tcg_gen_dup_mem_vec_s390x +#define tcg_gen_ld_vec tcg_gen_ld_vec_s390x +#define tcg_gen_st_vec tcg_gen_st_vec_s390x +#define tcg_gen_stl_vec tcg_gen_stl_vec_s390x +#define tcg_gen_and_vec tcg_gen_and_vec_s390x +#define tcg_gen_or_vec tcg_gen_or_vec_s390x +#define tcg_gen_xor_vec tcg_gen_xor_vec_s390x +#define tcg_gen_andc_vec tcg_gen_andc_vec_s390x +#define tcg_gen_orc_vec tcg_gen_orc_vec_s390x +#define tcg_gen_nand_vec tcg_gen_nand_vec_s390x +#define tcg_gen_nor_vec tcg_gen_nor_vec_s390x +#define tcg_gen_eqv_vec tcg_gen_eqv_vec_s390x +#define tcg_gen_not_vec tcg_gen_not_vec_s390x +#define tcg_gen_neg_vec tcg_gen_neg_vec_s390x +#define tcg_gen_abs_vec tcg_gen_abs_vec_s390x +#define tcg_gen_shli_vec tcg_gen_shli_vec_s390x +#define tcg_gen_shri_vec tcg_gen_shri_vec_s390x +#define tcg_gen_sari_vec tcg_gen_sari_vec_s390x +#define tcg_gen_cmp_vec tcg_gen_cmp_vec_s390x +#define tcg_gen_add_vec tcg_gen_add_vec_s390x +#define tcg_gen_sub_vec tcg_gen_sub_vec_s390x +#define tcg_gen_mul_vec tcg_gen_mul_vec_s390x +#define tcg_gen_ssadd_vec tcg_gen_ssadd_vec_s390x +#define tcg_gen_usadd_vec tcg_gen_usadd_vec_s390x +#define tcg_gen_sssub_vec tcg_gen_sssub_vec_s390x 
+#define tcg_gen_ussub_vec tcg_gen_ussub_vec_s390x +#define tcg_gen_smin_vec tcg_gen_smin_vec_s390x +#define tcg_gen_umin_vec tcg_gen_umin_vec_s390x +#define tcg_gen_smax_vec tcg_gen_smax_vec_s390x +#define tcg_gen_umax_vec tcg_gen_umax_vec_s390x +#define tcg_gen_shlv_vec tcg_gen_shlv_vec_s390x +#define tcg_gen_shrv_vec tcg_gen_shrv_vec_s390x +#define tcg_gen_sarv_vec tcg_gen_sarv_vec_s390x +#define tcg_gen_shls_vec tcg_gen_shls_vec_s390x +#define tcg_gen_shrs_vec tcg_gen_shrs_vec_s390x +#define tcg_gen_sars_vec tcg_gen_sars_vec_s390x +#define tcg_gen_bitsel_vec tcg_gen_bitsel_vec_s390x +#define tcg_gen_cmpsel_vec tcg_gen_cmpsel_vec_s390x +#define tb_htable_lookup tb_htable_lookup_s390x +#define tb_set_jmp_target tb_set_jmp_target_s390x +#define cpu_exec cpu_exec_s390x +#define cpu_loop_exit_noexc cpu_loop_exit_noexc_s390x +#define cpu_reloading_memory_map cpu_reloading_memory_map_s390x +#define cpu_loop_exit cpu_loop_exit_s390x +#define cpu_loop_exit_restore cpu_loop_exit_restore_s390x +#define cpu_loop_exit_atomic cpu_loop_exit_atomic_s390x +#define tlb_init tlb_init_s390x +#define tlb_flush_by_mmuidx tlb_flush_by_mmuidx_s390x +#define tlb_flush tlb_flush_s390x +#define tlb_flush_by_mmuidx_all_cpus tlb_flush_by_mmuidx_all_cpus_s390x +#define tlb_flush_all_cpus tlb_flush_all_cpus_s390x +#define tlb_flush_by_mmuidx_all_cpus_synced tlb_flush_by_mmuidx_all_cpus_synced_s390x +#define tlb_flush_all_cpus_synced tlb_flush_all_cpus_synced_s390x +#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_s390x +#define tlb_flush_page tlb_flush_page_s390x +#define tlb_flush_page_by_mmuidx_all_cpus tlb_flush_page_by_mmuidx_all_cpus_s390x +#define tlb_flush_page_all_cpus tlb_flush_page_all_cpus_s390x +#define tlb_flush_page_by_mmuidx_all_cpus_synced tlb_flush_page_by_mmuidx_all_cpus_synced_s390x +#define tlb_flush_page_all_cpus_synced tlb_flush_page_all_cpus_synced_s390x +#define tlb_protect_code tlb_protect_code_s390x +#define tlb_unprotect_code tlb_unprotect_code_s390x +#define tlb_reset_dirty tlb_reset_dirty_s390x +#define tlb_set_dirty tlb_set_dirty_s390x +#define tlb_set_page_with_attrs tlb_set_page_with_attrs_s390x +#define tlb_set_page tlb_set_page_s390x +#define get_page_addr_code_hostp get_page_addr_code_hostp_s390x +#define get_page_addr_code get_page_addr_code_s390x +#define probe_access probe_access_s390x +#define tlb_vaddr_to_host tlb_vaddr_to_host_s390x +#define helper_ret_ldub_mmu helper_ret_ldub_mmu_s390x +#define helper_le_lduw_mmu helper_le_lduw_mmu_s390x +#define helper_be_lduw_mmu helper_be_lduw_mmu_s390x +#define helper_le_ldul_mmu helper_le_ldul_mmu_s390x +#define helper_be_ldul_mmu helper_be_ldul_mmu_s390x +#define helper_le_ldq_mmu helper_le_ldq_mmu_s390x +#define helper_be_ldq_mmu helper_be_ldq_mmu_s390x +#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_s390x +#define helper_le_ldsw_mmu helper_le_ldsw_mmu_s390x +#define helper_be_ldsw_mmu helper_be_ldsw_mmu_s390x +#define helper_le_ldsl_mmu helper_le_ldsl_mmu_s390x +#define helper_be_ldsl_mmu helper_be_ldsl_mmu_s390x +#define cpu_ldub_mmuidx_ra cpu_ldub_mmuidx_ra_s390x +#define cpu_ldsb_mmuidx_ra cpu_ldsb_mmuidx_ra_s390x +#define cpu_lduw_mmuidx_ra cpu_lduw_mmuidx_ra_s390x +#define cpu_ldsw_mmuidx_ra cpu_ldsw_mmuidx_ra_s390x +#define cpu_ldl_mmuidx_ra cpu_ldl_mmuidx_ra_s390x +#define cpu_ldq_mmuidx_ra cpu_ldq_mmuidx_ra_s390x +#define cpu_ldub_data_ra cpu_ldub_data_ra_s390x +#define cpu_ldsb_data_ra cpu_ldsb_data_ra_s390x +#define cpu_lduw_data_ra cpu_lduw_data_ra_s390x +#define cpu_ldsw_data_ra cpu_ldsw_data_ra_s390x +#define 
cpu_ldl_data_ra cpu_ldl_data_ra_s390x +#define cpu_ldq_data_ra cpu_ldq_data_ra_s390x +#define cpu_ldub_data cpu_ldub_data_s390x +#define cpu_ldsb_data cpu_ldsb_data_s390x +#define cpu_lduw_data cpu_lduw_data_s390x +#define cpu_ldsw_data cpu_ldsw_data_s390x +#define cpu_ldl_data cpu_ldl_data_s390x +#define cpu_ldq_data cpu_ldq_data_s390x +#define helper_ret_stb_mmu helper_ret_stb_mmu_s390x +#define helper_le_stw_mmu helper_le_stw_mmu_s390x +#define helper_be_stw_mmu helper_be_stw_mmu_s390x +#define helper_le_stl_mmu helper_le_stl_mmu_s390x +#define helper_be_stl_mmu helper_be_stl_mmu_s390x +#define helper_le_stq_mmu helper_le_stq_mmu_s390x +#define helper_be_stq_mmu helper_be_stq_mmu_s390x +#define cpu_stb_mmuidx_ra cpu_stb_mmuidx_ra_s390x +#define cpu_stw_mmuidx_ra cpu_stw_mmuidx_ra_s390x +#define cpu_stl_mmuidx_ra cpu_stl_mmuidx_ra_s390x +#define cpu_stq_mmuidx_ra cpu_stq_mmuidx_ra_s390x +#define cpu_stb_data_ra cpu_stb_data_ra_s390x +#define cpu_stw_data_ra cpu_stw_data_ra_s390x +#define cpu_stl_data_ra cpu_stl_data_ra_s390x +#define cpu_stq_data_ra cpu_stq_data_ra_s390x +#define cpu_stb_data cpu_stb_data_s390x +#define cpu_stw_data cpu_stw_data_s390x +#define cpu_stl_data cpu_stl_data_s390x +#define cpu_stq_data cpu_stq_data_s390x +#define helper_atomic_cmpxchgb_mmu helper_atomic_cmpxchgb_mmu_s390x +#define helper_atomic_xchgb_mmu helper_atomic_xchgb_mmu_s390x +#define helper_atomic_fetch_addb_mmu helper_atomic_fetch_addb_mmu_s390x +#define helper_atomic_fetch_andb_mmu helper_atomic_fetch_andb_mmu_s390x +#define helper_atomic_fetch_orb_mmu helper_atomic_fetch_orb_mmu_s390x +#define helper_atomic_fetch_xorb_mmu helper_atomic_fetch_xorb_mmu_s390x +#define helper_atomic_add_fetchb_mmu helper_atomic_add_fetchb_mmu_s390x +#define helper_atomic_and_fetchb_mmu helper_atomic_and_fetchb_mmu_s390x +#define helper_atomic_or_fetchb_mmu helper_atomic_or_fetchb_mmu_s390x +#define helper_atomic_xor_fetchb_mmu helper_atomic_xor_fetchb_mmu_s390x +#define helper_atomic_fetch_sminb_mmu helper_atomic_fetch_sminb_mmu_s390x +#define helper_atomic_fetch_uminb_mmu helper_atomic_fetch_uminb_mmu_s390x +#define helper_atomic_fetch_smaxb_mmu helper_atomic_fetch_smaxb_mmu_s390x +#define helper_atomic_fetch_umaxb_mmu helper_atomic_fetch_umaxb_mmu_s390x +#define helper_atomic_smin_fetchb_mmu helper_atomic_smin_fetchb_mmu_s390x +#define helper_atomic_umin_fetchb_mmu helper_atomic_umin_fetchb_mmu_s390x +#define helper_atomic_smax_fetchb_mmu helper_atomic_smax_fetchb_mmu_s390x +#define helper_atomic_umax_fetchb_mmu helper_atomic_umax_fetchb_mmu_s390x +#define helper_atomic_cmpxchgw_le_mmu helper_atomic_cmpxchgw_le_mmu_s390x +#define helper_atomic_xchgw_le_mmu helper_atomic_xchgw_le_mmu_s390x +#define helper_atomic_fetch_addw_le_mmu helper_atomic_fetch_addw_le_mmu_s390x +#define helper_atomic_fetch_andw_le_mmu helper_atomic_fetch_andw_le_mmu_s390x +#define helper_atomic_fetch_orw_le_mmu helper_atomic_fetch_orw_le_mmu_s390x +#define helper_atomic_fetch_xorw_le_mmu helper_atomic_fetch_xorw_le_mmu_s390x +#define helper_atomic_add_fetchw_le_mmu helper_atomic_add_fetchw_le_mmu_s390x +#define helper_atomic_and_fetchw_le_mmu helper_atomic_and_fetchw_le_mmu_s390x +#define helper_atomic_or_fetchw_le_mmu helper_atomic_or_fetchw_le_mmu_s390x +#define helper_atomic_xor_fetchw_le_mmu helper_atomic_xor_fetchw_le_mmu_s390x +#define helper_atomic_fetch_sminw_le_mmu helper_atomic_fetch_sminw_le_mmu_s390x +#define helper_atomic_fetch_uminw_le_mmu helper_atomic_fetch_uminw_le_mmu_s390x +#define helper_atomic_fetch_smaxw_le_mmu 
helper_atomic_fetch_smaxw_le_mmu_s390x +#define helper_atomic_fetch_umaxw_le_mmu helper_atomic_fetch_umaxw_le_mmu_s390x +#define helper_atomic_smin_fetchw_le_mmu helper_atomic_smin_fetchw_le_mmu_s390x +#define helper_atomic_umin_fetchw_le_mmu helper_atomic_umin_fetchw_le_mmu_s390x +#define helper_atomic_smax_fetchw_le_mmu helper_atomic_smax_fetchw_le_mmu_s390x +#define helper_atomic_umax_fetchw_le_mmu helper_atomic_umax_fetchw_le_mmu_s390x +#define helper_atomic_cmpxchgw_be_mmu helper_atomic_cmpxchgw_be_mmu_s390x +#define helper_atomic_xchgw_be_mmu helper_atomic_xchgw_be_mmu_s390x +#define helper_atomic_fetch_andw_be_mmu helper_atomic_fetch_andw_be_mmu_s390x +#define helper_atomic_fetch_orw_be_mmu helper_atomic_fetch_orw_be_mmu_s390x +#define helper_atomic_fetch_xorw_be_mmu helper_atomic_fetch_xorw_be_mmu_s390x +#define helper_atomic_and_fetchw_be_mmu helper_atomic_and_fetchw_be_mmu_s390x +#define helper_atomic_or_fetchw_be_mmu helper_atomic_or_fetchw_be_mmu_s390x +#define helper_atomic_xor_fetchw_be_mmu helper_atomic_xor_fetchw_be_mmu_s390x +#define helper_atomic_fetch_sminw_be_mmu helper_atomic_fetch_sminw_be_mmu_s390x +#define helper_atomic_fetch_uminw_be_mmu helper_atomic_fetch_uminw_be_mmu_s390x +#define helper_atomic_fetch_smaxw_be_mmu helper_atomic_fetch_smaxw_be_mmu_s390x +#define helper_atomic_fetch_umaxw_be_mmu helper_atomic_fetch_umaxw_be_mmu_s390x +#define helper_atomic_smin_fetchw_be_mmu helper_atomic_smin_fetchw_be_mmu_s390x +#define helper_atomic_umin_fetchw_be_mmu helper_atomic_umin_fetchw_be_mmu_s390x +#define helper_atomic_smax_fetchw_be_mmu helper_atomic_smax_fetchw_be_mmu_s390x +#define helper_atomic_umax_fetchw_be_mmu helper_atomic_umax_fetchw_be_mmu_s390x +#define helper_atomic_fetch_addw_be_mmu helper_atomic_fetch_addw_be_mmu_s390x +#define helper_atomic_add_fetchw_be_mmu helper_atomic_add_fetchw_be_mmu_s390x +#define helper_atomic_cmpxchgl_le_mmu helper_atomic_cmpxchgl_le_mmu_s390x +#define helper_atomic_xchgl_le_mmu helper_atomic_xchgl_le_mmu_s390x +#define helper_atomic_fetch_addl_le_mmu helper_atomic_fetch_addl_le_mmu_s390x +#define helper_atomic_fetch_andl_le_mmu helper_atomic_fetch_andl_le_mmu_s390x +#define helper_atomic_fetch_orl_le_mmu helper_atomic_fetch_orl_le_mmu_s390x +#define helper_atomic_fetch_xorl_le_mmu helper_atomic_fetch_xorl_le_mmu_s390x +#define helper_atomic_add_fetchl_le_mmu helper_atomic_add_fetchl_le_mmu_s390x +#define helper_atomic_and_fetchl_le_mmu helper_atomic_and_fetchl_le_mmu_s390x +#define helper_atomic_or_fetchl_le_mmu helper_atomic_or_fetchl_le_mmu_s390x +#define helper_atomic_xor_fetchl_le_mmu helper_atomic_xor_fetchl_le_mmu_s390x +#define helper_atomic_fetch_sminl_le_mmu helper_atomic_fetch_sminl_le_mmu_s390x +#define helper_atomic_fetch_uminl_le_mmu helper_atomic_fetch_uminl_le_mmu_s390x +#define helper_atomic_fetch_smaxl_le_mmu helper_atomic_fetch_smaxl_le_mmu_s390x +#define helper_atomic_fetch_umaxl_le_mmu helper_atomic_fetch_umaxl_le_mmu_s390x +#define helper_atomic_smin_fetchl_le_mmu helper_atomic_smin_fetchl_le_mmu_s390x +#define helper_atomic_umin_fetchl_le_mmu helper_atomic_umin_fetchl_le_mmu_s390x +#define helper_atomic_smax_fetchl_le_mmu helper_atomic_smax_fetchl_le_mmu_s390x +#define helper_atomic_umax_fetchl_le_mmu helper_atomic_umax_fetchl_le_mmu_s390x +#define helper_atomic_cmpxchgl_be_mmu helper_atomic_cmpxchgl_be_mmu_s390x +#define helper_atomic_xchgl_be_mmu helper_atomic_xchgl_be_mmu_s390x +#define helper_atomic_fetch_andl_be_mmu helper_atomic_fetch_andl_be_mmu_s390x +#define helper_atomic_fetch_orl_be_mmu 
helper_atomic_fetch_orl_be_mmu_s390x +#define helper_atomic_fetch_xorl_be_mmu helper_atomic_fetch_xorl_be_mmu_s390x +#define helper_atomic_and_fetchl_be_mmu helper_atomic_and_fetchl_be_mmu_s390x +#define helper_atomic_or_fetchl_be_mmu helper_atomic_or_fetchl_be_mmu_s390x +#define helper_atomic_xor_fetchl_be_mmu helper_atomic_xor_fetchl_be_mmu_s390x +#define helper_atomic_fetch_sminl_be_mmu helper_atomic_fetch_sminl_be_mmu_s390x +#define helper_atomic_fetch_uminl_be_mmu helper_atomic_fetch_uminl_be_mmu_s390x +#define helper_atomic_fetch_smaxl_be_mmu helper_atomic_fetch_smaxl_be_mmu_s390x +#define helper_atomic_fetch_umaxl_be_mmu helper_atomic_fetch_umaxl_be_mmu_s390x +#define helper_atomic_smin_fetchl_be_mmu helper_atomic_smin_fetchl_be_mmu_s390x +#define helper_atomic_umin_fetchl_be_mmu helper_atomic_umin_fetchl_be_mmu_s390x +#define helper_atomic_smax_fetchl_be_mmu helper_atomic_smax_fetchl_be_mmu_s390x +#define helper_atomic_umax_fetchl_be_mmu helper_atomic_umax_fetchl_be_mmu_s390x +#define helper_atomic_fetch_addl_be_mmu helper_atomic_fetch_addl_be_mmu_s390x +#define helper_atomic_add_fetchl_be_mmu helper_atomic_add_fetchl_be_mmu_s390x +#define helper_atomic_cmpxchgq_le_mmu helper_atomic_cmpxchgq_le_mmu_s390x +#define helper_atomic_xchgq_le_mmu helper_atomic_xchgq_le_mmu_s390x +#define helper_atomic_fetch_addq_le_mmu helper_atomic_fetch_addq_le_mmu_s390x +#define helper_atomic_fetch_andq_le_mmu helper_atomic_fetch_andq_le_mmu_s390x +#define helper_atomic_fetch_orq_le_mmu helper_atomic_fetch_orq_le_mmu_s390x +#define helper_atomic_fetch_xorq_le_mmu helper_atomic_fetch_xorq_le_mmu_s390x +#define helper_atomic_add_fetchq_le_mmu helper_atomic_add_fetchq_le_mmu_s390x +#define helper_atomic_and_fetchq_le_mmu helper_atomic_and_fetchq_le_mmu_s390x +#define helper_atomic_or_fetchq_le_mmu helper_atomic_or_fetchq_le_mmu_s390x +#define helper_atomic_xor_fetchq_le_mmu helper_atomic_xor_fetchq_le_mmu_s390x +#define helper_atomic_fetch_sminq_le_mmu helper_atomic_fetch_sminq_le_mmu_s390x +#define helper_atomic_fetch_uminq_le_mmu helper_atomic_fetch_uminq_le_mmu_s390x +#define helper_atomic_fetch_smaxq_le_mmu helper_atomic_fetch_smaxq_le_mmu_s390x +#define helper_atomic_fetch_umaxq_le_mmu helper_atomic_fetch_umaxq_le_mmu_s390x +#define helper_atomic_smin_fetchq_le_mmu helper_atomic_smin_fetchq_le_mmu_s390x +#define helper_atomic_umin_fetchq_le_mmu helper_atomic_umin_fetchq_le_mmu_s390x +#define helper_atomic_smax_fetchq_le_mmu helper_atomic_smax_fetchq_le_mmu_s390x +#define helper_atomic_umax_fetchq_le_mmu helper_atomic_umax_fetchq_le_mmu_s390x +#define helper_atomic_cmpxchgq_be_mmu helper_atomic_cmpxchgq_be_mmu_s390x +#define helper_atomic_xchgq_be_mmu helper_atomic_xchgq_be_mmu_s390x +#define helper_atomic_fetch_andq_be_mmu helper_atomic_fetch_andq_be_mmu_s390x +#define helper_atomic_fetch_orq_be_mmu helper_atomic_fetch_orq_be_mmu_s390x +#define helper_atomic_fetch_xorq_be_mmu helper_atomic_fetch_xorq_be_mmu_s390x +#define helper_atomic_and_fetchq_be_mmu helper_atomic_and_fetchq_be_mmu_s390x +#define helper_atomic_or_fetchq_be_mmu helper_atomic_or_fetchq_be_mmu_s390x +#define helper_atomic_xor_fetchq_be_mmu helper_atomic_xor_fetchq_be_mmu_s390x +#define helper_atomic_fetch_sminq_be_mmu helper_atomic_fetch_sminq_be_mmu_s390x +#define helper_atomic_fetch_uminq_be_mmu helper_atomic_fetch_uminq_be_mmu_s390x +#define helper_atomic_fetch_smaxq_be_mmu helper_atomic_fetch_smaxq_be_mmu_s390x +#define helper_atomic_fetch_umaxq_be_mmu helper_atomic_fetch_umaxq_be_mmu_s390x +#define 
helper_atomic_smin_fetchq_be_mmu helper_atomic_smin_fetchq_be_mmu_s390x +#define helper_atomic_umin_fetchq_be_mmu helper_atomic_umin_fetchq_be_mmu_s390x +#define helper_atomic_smax_fetchq_be_mmu helper_atomic_smax_fetchq_be_mmu_s390x +#define helper_atomic_umax_fetchq_be_mmu helper_atomic_umax_fetchq_be_mmu_s390x +#define helper_atomic_fetch_addq_be_mmu helper_atomic_fetch_addq_be_mmu_s390x +#define helper_atomic_add_fetchq_be_mmu helper_atomic_add_fetchq_be_mmu_s390x +#define helper_atomic_cmpxchgb helper_atomic_cmpxchgb_s390x +#define helper_atomic_xchgb helper_atomic_xchgb_s390x +#define helper_atomic_fetch_addb helper_atomic_fetch_addb_s390x +#define helper_atomic_fetch_andb helper_atomic_fetch_andb_s390x +#define helper_atomic_fetch_orb helper_atomic_fetch_orb_s390x +#define helper_atomic_fetch_xorb helper_atomic_fetch_xorb_s390x +#define helper_atomic_add_fetchb helper_atomic_add_fetchb_s390x +#define helper_atomic_and_fetchb helper_atomic_and_fetchb_s390x +#define helper_atomic_or_fetchb helper_atomic_or_fetchb_s390x +#define helper_atomic_xor_fetchb helper_atomic_xor_fetchb_s390x +#define helper_atomic_fetch_sminb helper_atomic_fetch_sminb_s390x +#define helper_atomic_fetch_uminb helper_atomic_fetch_uminb_s390x +#define helper_atomic_fetch_smaxb helper_atomic_fetch_smaxb_s390x +#define helper_atomic_fetch_umaxb helper_atomic_fetch_umaxb_s390x +#define helper_atomic_smin_fetchb helper_atomic_smin_fetchb_s390x +#define helper_atomic_umin_fetchb helper_atomic_umin_fetchb_s390x +#define helper_atomic_smax_fetchb helper_atomic_smax_fetchb_s390x +#define helper_atomic_umax_fetchb helper_atomic_umax_fetchb_s390x +#define helper_atomic_cmpxchgw_le helper_atomic_cmpxchgw_le_s390x +#define helper_atomic_xchgw_le helper_atomic_xchgw_le_s390x +#define helper_atomic_fetch_addw_le helper_atomic_fetch_addw_le_s390x +#define helper_atomic_fetch_andw_le helper_atomic_fetch_andw_le_s390x +#define helper_atomic_fetch_orw_le helper_atomic_fetch_orw_le_s390x +#define helper_atomic_fetch_xorw_le helper_atomic_fetch_xorw_le_s390x +#define helper_atomic_add_fetchw_le helper_atomic_add_fetchw_le_s390x +#define helper_atomic_and_fetchw_le helper_atomic_and_fetchw_le_s390x +#define helper_atomic_or_fetchw_le helper_atomic_or_fetchw_le_s390x +#define helper_atomic_xor_fetchw_le helper_atomic_xor_fetchw_le_s390x +#define helper_atomic_fetch_sminw_le helper_atomic_fetch_sminw_le_s390x +#define helper_atomic_fetch_uminw_le helper_atomic_fetch_uminw_le_s390x +#define helper_atomic_fetch_smaxw_le helper_atomic_fetch_smaxw_le_s390x +#define helper_atomic_fetch_umaxw_le helper_atomic_fetch_umaxw_le_s390x +#define helper_atomic_smin_fetchw_le helper_atomic_smin_fetchw_le_s390x +#define helper_atomic_umin_fetchw_le helper_atomic_umin_fetchw_le_s390x +#define helper_atomic_smax_fetchw_le helper_atomic_smax_fetchw_le_s390x +#define helper_atomic_umax_fetchw_le helper_atomic_umax_fetchw_le_s390x +#define helper_atomic_cmpxchgw_be helper_atomic_cmpxchgw_be_s390x +#define helper_atomic_xchgw_be helper_atomic_xchgw_be_s390x +#define helper_atomic_fetch_andw_be helper_atomic_fetch_andw_be_s390x +#define helper_atomic_fetch_orw_be helper_atomic_fetch_orw_be_s390x +#define helper_atomic_fetch_xorw_be helper_atomic_fetch_xorw_be_s390x +#define helper_atomic_and_fetchw_be helper_atomic_and_fetchw_be_s390x +#define helper_atomic_or_fetchw_be helper_atomic_or_fetchw_be_s390x +#define helper_atomic_xor_fetchw_be helper_atomic_xor_fetchw_be_s390x +#define helper_atomic_fetch_sminw_be helper_atomic_fetch_sminw_be_s390x +#define 
helper_atomic_fetch_uminw_be helper_atomic_fetch_uminw_be_s390x +#define helper_atomic_fetch_smaxw_be helper_atomic_fetch_smaxw_be_s390x +#define helper_atomic_fetch_umaxw_be helper_atomic_fetch_umaxw_be_s390x +#define helper_atomic_smin_fetchw_be helper_atomic_smin_fetchw_be_s390x +#define helper_atomic_umin_fetchw_be helper_atomic_umin_fetchw_be_s390x +#define helper_atomic_smax_fetchw_be helper_atomic_smax_fetchw_be_s390x +#define helper_atomic_umax_fetchw_be helper_atomic_umax_fetchw_be_s390x +#define helper_atomic_fetch_addw_be helper_atomic_fetch_addw_be_s390x +#define helper_atomic_add_fetchw_be helper_atomic_add_fetchw_be_s390x +#define helper_atomic_cmpxchgl_le helper_atomic_cmpxchgl_le_s390x +#define helper_atomic_xchgl_le helper_atomic_xchgl_le_s390x +#define helper_atomic_fetch_addl_le helper_atomic_fetch_addl_le_s390x +#define helper_atomic_fetch_andl_le helper_atomic_fetch_andl_le_s390x +#define helper_atomic_fetch_orl_le helper_atomic_fetch_orl_le_s390x +#define helper_atomic_fetch_xorl_le helper_atomic_fetch_xorl_le_s390x +#define helper_atomic_add_fetchl_le helper_atomic_add_fetchl_le_s390x +#define helper_atomic_and_fetchl_le helper_atomic_and_fetchl_le_s390x +#define helper_atomic_or_fetchl_le helper_atomic_or_fetchl_le_s390x +#define helper_atomic_xor_fetchl_le helper_atomic_xor_fetchl_le_s390x +#define helper_atomic_fetch_sminl_le helper_atomic_fetch_sminl_le_s390x +#define helper_atomic_fetch_uminl_le helper_atomic_fetch_uminl_le_s390x +#define helper_atomic_fetch_smaxl_le helper_atomic_fetch_smaxl_le_s390x +#define helper_atomic_fetch_umaxl_le helper_atomic_fetch_umaxl_le_s390x +#define helper_atomic_smin_fetchl_le helper_atomic_smin_fetchl_le_s390x +#define helper_atomic_umin_fetchl_le helper_atomic_umin_fetchl_le_s390x +#define helper_atomic_smax_fetchl_le helper_atomic_smax_fetchl_le_s390x +#define helper_atomic_umax_fetchl_le helper_atomic_umax_fetchl_le_s390x +#define helper_atomic_cmpxchgl_be helper_atomic_cmpxchgl_be_s390x +#define helper_atomic_xchgl_be helper_atomic_xchgl_be_s390x +#define helper_atomic_fetch_andl_be helper_atomic_fetch_andl_be_s390x +#define helper_atomic_fetch_orl_be helper_atomic_fetch_orl_be_s390x +#define helper_atomic_fetch_xorl_be helper_atomic_fetch_xorl_be_s390x +#define helper_atomic_and_fetchl_be helper_atomic_and_fetchl_be_s390x +#define helper_atomic_or_fetchl_be helper_atomic_or_fetchl_be_s390x +#define helper_atomic_xor_fetchl_be helper_atomic_xor_fetchl_be_s390x +#define helper_atomic_fetch_sminl_be helper_atomic_fetch_sminl_be_s390x +#define helper_atomic_fetch_uminl_be helper_atomic_fetch_uminl_be_s390x +#define helper_atomic_fetch_smaxl_be helper_atomic_fetch_smaxl_be_s390x +#define helper_atomic_fetch_umaxl_be helper_atomic_fetch_umaxl_be_s390x +#define helper_atomic_smin_fetchl_be helper_atomic_smin_fetchl_be_s390x +#define helper_atomic_umin_fetchl_be helper_atomic_umin_fetchl_be_s390x +#define helper_atomic_smax_fetchl_be helper_atomic_smax_fetchl_be_s390x +#define helper_atomic_umax_fetchl_be helper_atomic_umax_fetchl_be_s390x +#define helper_atomic_fetch_addl_be helper_atomic_fetch_addl_be_s390x +#define helper_atomic_add_fetchl_be helper_atomic_add_fetchl_be_s390x +#define helper_atomic_cmpxchgq_le helper_atomic_cmpxchgq_le_s390x +#define helper_atomic_xchgq_le helper_atomic_xchgq_le_s390x +#define helper_atomic_fetch_addq_le helper_atomic_fetch_addq_le_s390x +#define helper_atomic_fetch_andq_le helper_atomic_fetch_andq_le_s390x +#define helper_atomic_fetch_orq_le helper_atomic_fetch_orq_le_s390x +#define 
helper_atomic_fetch_xorq_le helper_atomic_fetch_xorq_le_s390x +#define helper_atomic_add_fetchq_le helper_atomic_add_fetchq_le_s390x +#define helper_atomic_and_fetchq_le helper_atomic_and_fetchq_le_s390x +#define helper_atomic_or_fetchq_le helper_atomic_or_fetchq_le_s390x +#define helper_atomic_xor_fetchq_le helper_atomic_xor_fetchq_le_s390x +#define helper_atomic_fetch_sminq_le helper_atomic_fetch_sminq_le_s390x +#define helper_atomic_fetch_uminq_le helper_atomic_fetch_uminq_le_s390x +#define helper_atomic_fetch_smaxq_le helper_atomic_fetch_smaxq_le_s390x +#define helper_atomic_fetch_umaxq_le helper_atomic_fetch_umaxq_le_s390x +#define helper_atomic_smin_fetchq_le helper_atomic_smin_fetchq_le_s390x +#define helper_atomic_umin_fetchq_le helper_atomic_umin_fetchq_le_s390x +#define helper_atomic_smax_fetchq_le helper_atomic_smax_fetchq_le_s390x +#define helper_atomic_umax_fetchq_le helper_atomic_umax_fetchq_le_s390x +#define helper_atomic_cmpxchgq_be helper_atomic_cmpxchgq_be_s390x +#define helper_atomic_xchgq_be helper_atomic_xchgq_be_s390x +#define helper_atomic_fetch_andq_be helper_atomic_fetch_andq_be_s390x +#define helper_atomic_fetch_orq_be helper_atomic_fetch_orq_be_s390x +#define helper_atomic_fetch_xorq_be helper_atomic_fetch_xorq_be_s390x +#define helper_atomic_and_fetchq_be helper_atomic_and_fetchq_be_s390x +#define helper_atomic_or_fetchq_be helper_atomic_or_fetchq_be_s390x +#define helper_atomic_xor_fetchq_be helper_atomic_xor_fetchq_be_s390x +#define helper_atomic_fetch_sminq_be helper_atomic_fetch_sminq_be_s390x +#define helper_atomic_fetch_uminq_be helper_atomic_fetch_uminq_be_s390x +#define helper_atomic_fetch_smaxq_be helper_atomic_fetch_smaxq_be_s390x +#define helper_atomic_fetch_umaxq_be helper_atomic_fetch_umaxq_be_s390x +#define helper_atomic_smin_fetchq_be helper_atomic_smin_fetchq_be_s390x +#define helper_atomic_umin_fetchq_be helper_atomic_umin_fetchq_be_s390x +#define helper_atomic_smax_fetchq_be helper_atomic_smax_fetchq_be_s390x +#define helper_atomic_umax_fetchq_be helper_atomic_umax_fetchq_be_s390x +#define helper_atomic_fetch_addq_be helper_atomic_fetch_addq_be_s390x +#define helper_atomic_add_fetchq_be helper_atomic_add_fetchq_be_s390x +#define cpu_ldub_code cpu_ldub_code_s390x +#define cpu_lduw_code cpu_lduw_code_s390x +#define cpu_ldl_code cpu_ldl_code_s390x +#define cpu_ldq_code cpu_ldq_code_s390x +#define helper_div_i32 helper_div_i32_s390x +#define helper_rem_i32 helper_rem_i32_s390x +#define helper_divu_i32 helper_divu_i32_s390x +#define helper_remu_i32 helper_remu_i32_s390x +#define helper_shl_i64 helper_shl_i64_s390x +#define helper_shr_i64 helper_shr_i64_s390x +#define helper_sar_i64 helper_sar_i64_s390x +#define helper_div_i64 helper_div_i64_s390x +#define helper_rem_i64 helper_rem_i64_s390x +#define helper_divu_i64 helper_divu_i64_s390x +#define helper_remu_i64 helper_remu_i64_s390x +#define helper_muluh_i64 helper_muluh_i64_s390x +#define helper_mulsh_i64 helper_mulsh_i64_s390x +#define helper_clz_i32 helper_clz_i32_s390x +#define helper_ctz_i32 helper_ctz_i32_s390x +#define helper_clz_i64 helper_clz_i64_s390x +#define helper_ctz_i64 helper_ctz_i64_s390x +#define helper_clrsb_i32 helper_clrsb_i32_s390x +#define helper_clrsb_i64 helper_clrsb_i64_s390x +#define helper_ctpop_i32 helper_ctpop_i32_s390x +#define helper_ctpop_i64 helper_ctpop_i64_s390x +#define helper_lookup_tb_ptr helper_lookup_tb_ptr_s390x +#define helper_exit_atomic helper_exit_atomic_s390x +#define helper_gvec_add8 helper_gvec_add8_s390x +#define helper_gvec_add16 
helper_gvec_add16_s390x +#define helper_gvec_add32 helper_gvec_add32_s390x +#define helper_gvec_add64 helper_gvec_add64_s390x +#define helper_gvec_adds8 helper_gvec_adds8_s390x +#define helper_gvec_adds16 helper_gvec_adds16_s390x +#define helper_gvec_adds32 helper_gvec_adds32_s390x +#define helper_gvec_adds64 helper_gvec_adds64_s390x +#define helper_gvec_sub8 helper_gvec_sub8_s390x +#define helper_gvec_sub16 helper_gvec_sub16_s390x +#define helper_gvec_sub32 helper_gvec_sub32_s390x +#define helper_gvec_sub64 helper_gvec_sub64_s390x +#define helper_gvec_subs8 helper_gvec_subs8_s390x +#define helper_gvec_subs16 helper_gvec_subs16_s390x +#define helper_gvec_subs32 helper_gvec_subs32_s390x +#define helper_gvec_subs64 helper_gvec_subs64_s390x +#define helper_gvec_mul8 helper_gvec_mul8_s390x +#define helper_gvec_mul16 helper_gvec_mul16_s390x +#define helper_gvec_mul32 helper_gvec_mul32_s390x +#define helper_gvec_mul64 helper_gvec_mul64_s390x +#define helper_gvec_muls8 helper_gvec_muls8_s390x +#define helper_gvec_muls16 helper_gvec_muls16_s390x +#define helper_gvec_muls32 helper_gvec_muls32_s390x +#define helper_gvec_muls64 helper_gvec_muls64_s390x +#define helper_gvec_neg8 helper_gvec_neg8_s390x +#define helper_gvec_neg16 helper_gvec_neg16_s390x +#define helper_gvec_neg32 helper_gvec_neg32_s390x +#define helper_gvec_neg64 helper_gvec_neg64_s390x +#define helper_gvec_abs8 helper_gvec_abs8_s390x +#define helper_gvec_abs16 helper_gvec_abs16_s390x +#define helper_gvec_abs32 helper_gvec_abs32_s390x +#define helper_gvec_abs64 helper_gvec_abs64_s390x +#define helper_gvec_mov helper_gvec_mov_s390x +#define helper_gvec_dup64 helper_gvec_dup64_s390x +#define helper_gvec_dup32 helper_gvec_dup32_s390x +#define helper_gvec_dup16 helper_gvec_dup16_s390x +#define helper_gvec_dup8 helper_gvec_dup8_s390x +#define helper_gvec_not helper_gvec_not_s390x +#define helper_gvec_and helper_gvec_and_s390x +#define helper_gvec_or helper_gvec_or_s390x +#define helper_gvec_xor helper_gvec_xor_s390x +#define helper_gvec_andc helper_gvec_andc_s390x +#define helper_gvec_orc helper_gvec_orc_s390x +#define helper_gvec_nand helper_gvec_nand_s390x +#define helper_gvec_nor helper_gvec_nor_s390x +#define helper_gvec_eqv helper_gvec_eqv_s390x +#define helper_gvec_ands helper_gvec_ands_s390x +#define helper_gvec_xors helper_gvec_xors_s390x +#define helper_gvec_ors helper_gvec_ors_s390x +#define helper_gvec_shl8i helper_gvec_shl8i_s390x +#define helper_gvec_shl16i helper_gvec_shl16i_s390x +#define helper_gvec_shl32i helper_gvec_shl32i_s390x +#define helper_gvec_shl64i helper_gvec_shl64i_s390x +#define helper_gvec_shr8i helper_gvec_shr8i_s390x +#define helper_gvec_shr16i helper_gvec_shr16i_s390x +#define helper_gvec_shr32i helper_gvec_shr32i_s390x +#define helper_gvec_shr64i helper_gvec_shr64i_s390x +#define helper_gvec_sar8i helper_gvec_sar8i_s390x +#define helper_gvec_sar16i helper_gvec_sar16i_s390x +#define helper_gvec_sar32i helper_gvec_sar32i_s390x +#define helper_gvec_sar64i helper_gvec_sar64i_s390x +#define helper_gvec_shl8v helper_gvec_shl8v_s390x +#define helper_gvec_shl16v helper_gvec_shl16v_s390x +#define helper_gvec_shl32v helper_gvec_shl32v_s390x +#define helper_gvec_shl64v helper_gvec_shl64v_s390x +#define helper_gvec_shr8v helper_gvec_shr8v_s390x +#define helper_gvec_shr16v helper_gvec_shr16v_s390x +#define helper_gvec_shr32v helper_gvec_shr32v_s390x +#define helper_gvec_shr64v helper_gvec_shr64v_s390x +#define helper_gvec_sar8v helper_gvec_sar8v_s390x +#define helper_gvec_sar16v helper_gvec_sar16v_s390x +#define 
helper_gvec_sar32v helper_gvec_sar32v_s390x +#define helper_gvec_sar64v helper_gvec_sar64v_s390x +#define helper_gvec_eq8 helper_gvec_eq8_s390x +#define helper_gvec_ne8 helper_gvec_ne8_s390x +#define helper_gvec_lt8 helper_gvec_lt8_s390x +#define helper_gvec_le8 helper_gvec_le8_s390x +#define helper_gvec_ltu8 helper_gvec_ltu8_s390x +#define helper_gvec_leu8 helper_gvec_leu8_s390x +#define helper_gvec_eq16 helper_gvec_eq16_s390x +#define helper_gvec_ne16 helper_gvec_ne16_s390x +#define helper_gvec_lt16 helper_gvec_lt16_s390x +#define helper_gvec_le16 helper_gvec_le16_s390x +#define helper_gvec_ltu16 helper_gvec_ltu16_s390x +#define helper_gvec_leu16 helper_gvec_leu16_s390x +#define helper_gvec_eq32 helper_gvec_eq32_s390x +#define helper_gvec_ne32 helper_gvec_ne32_s390x +#define helper_gvec_lt32 helper_gvec_lt32_s390x +#define helper_gvec_le32 helper_gvec_le32_s390x +#define helper_gvec_ltu32 helper_gvec_ltu32_s390x +#define helper_gvec_leu32 helper_gvec_leu32_s390x +#define helper_gvec_eq64 helper_gvec_eq64_s390x +#define helper_gvec_ne64 helper_gvec_ne64_s390x +#define helper_gvec_lt64 helper_gvec_lt64_s390x +#define helper_gvec_le64 helper_gvec_le64_s390x +#define helper_gvec_ltu64 helper_gvec_ltu64_s390x +#define helper_gvec_leu64 helper_gvec_leu64_s390x +#define helper_gvec_ssadd8 helper_gvec_ssadd8_s390x +#define helper_gvec_ssadd16 helper_gvec_ssadd16_s390x +#define helper_gvec_ssadd32 helper_gvec_ssadd32_s390x +#define helper_gvec_ssadd64 helper_gvec_ssadd64_s390x +#define helper_gvec_sssub8 helper_gvec_sssub8_s390x +#define helper_gvec_sssub16 helper_gvec_sssub16_s390x +#define helper_gvec_sssub32 helper_gvec_sssub32_s390x +#define helper_gvec_sssub64 helper_gvec_sssub64_s390x +#define helper_gvec_usadd8 helper_gvec_usadd8_s390x +#define helper_gvec_usadd16 helper_gvec_usadd16_s390x +#define helper_gvec_usadd32 helper_gvec_usadd32_s390x +#define helper_gvec_usadd64 helper_gvec_usadd64_s390x +#define helper_gvec_ussub8 helper_gvec_ussub8_s390x +#define helper_gvec_ussub16 helper_gvec_ussub16_s390x +#define helper_gvec_ussub32 helper_gvec_ussub32_s390x +#define helper_gvec_ussub64 helper_gvec_ussub64_s390x +#define helper_gvec_smin8 helper_gvec_smin8_s390x +#define helper_gvec_smin16 helper_gvec_smin16_s390x +#define helper_gvec_smin32 helper_gvec_smin32_s390x +#define helper_gvec_smin64 helper_gvec_smin64_s390x +#define helper_gvec_smax8 helper_gvec_smax8_s390x +#define helper_gvec_smax16 helper_gvec_smax16_s390x +#define helper_gvec_smax32 helper_gvec_smax32_s390x +#define helper_gvec_smax64 helper_gvec_smax64_s390x +#define helper_gvec_umin8 helper_gvec_umin8_s390x +#define helper_gvec_umin16 helper_gvec_umin16_s390x +#define helper_gvec_umin32 helper_gvec_umin32_s390x +#define helper_gvec_umin64 helper_gvec_umin64_s390x +#define helper_gvec_umax8 helper_gvec_umax8_s390x +#define helper_gvec_umax16 helper_gvec_umax16_s390x +#define helper_gvec_umax32 helper_gvec_umax32_s390x +#define helper_gvec_umax64 helper_gvec_umax64_s390x +#define helper_gvec_bitsel helper_gvec_bitsel_s390x +#define cpu_restore_state cpu_restore_state_s390x +#define page_collection_lock page_collection_lock_s390x +#define page_collection_unlock page_collection_unlock_s390x +#define free_code_gen_buffer free_code_gen_buffer_s390x +#define tcg_exec_init tcg_exec_init_s390x +#define tb_cleanup tb_cleanup_s390x +#define tb_flush tb_flush_s390x +#define tb_phys_invalidate tb_phys_invalidate_s390x +#define tb_gen_code tb_gen_code_s390x +#define tb_exec_lock tb_exec_lock_s390x +#define tb_exec_unlock 
tb_exec_unlock_s390x
+#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_s390x
+#define tb_invalidate_phys_range tb_invalidate_phys_range_s390x
+#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_s390x
+#define tb_check_watchpoint tb_check_watchpoint_s390x
+#define cpu_io_recompile cpu_io_recompile_s390x
+#define tb_flush_jmp_cache tb_flush_jmp_cache_s390x
+#define tcg_flush_softmmu_tlb tcg_flush_softmmu_tlb_s390x
+#define translator_loop_temp_check translator_loop_temp_check_s390x
+#define translator_loop translator_loop_s390x
+#define helper_atomic_cmpxchgo_le_mmu helper_atomic_cmpxchgo_le_mmu_s390x
+#define helper_atomic_cmpxchgo_be_mmu helper_atomic_cmpxchgo_be_mmu_s390x
+#define helper_atomic_ldo_le_mmu helper_atomic_ldo_le_mmu_s390x
+#define helper_atomic_ldo_be_mmu helper_atomic_ldo_be_mmu_s390x
+#define helper_atomic_sto_le_mmu helper_atomic_sto_le_mmu_s390x
+#define helper_atomic_sto_be_mmu helper_atomic_sto_be_mmu_s390x
+#define unassigned_mem_ops unassigned_mem_ops_s390x
+#define floatx80_infinity floatx80_infinity_s390x
+#define dup_const_func dup_const_func_s390x
+#define gen_helper_raise_exception gen_helper_raise_exception_s390x
+#define gen_helper_raise_interrupt gen_helper_raise_interrupt_s390x
+#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_s390x
+#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_s390x
+#define gen_helper_cpsr_read gen_helper_cpsr_read_s390x
+#define gen_helper_cpsr_write gen_helper_cpsr_write_s390x
+#endif
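Each define above renames one symbol that is shared between Unicorn's per-architecture QEMU builds. Every enabled target is linked into the same libunicorn, so common TCG/softfloat/TLB symbols would otherwise collide at link time; the header is force-included into every s390x translation unit via "-include s390x.h" (or "/FIs390x.h" with MSVC), as wired up in the CMake rules earlier in this patch, and the list is generated (see symbols.sh in this patch) rather than written by hand. A minimal, self-contained sketch of the mechanism follows; it is an illustration only, with `double` standing in for QEMU's `float64` and the define written inline instead of being force-included:

    #include <stdio.h>

    /* In the real build this define lives in s390x.h and is pulled into
     * every s390x object via -include, so it renames both the definition
     * of the shared function and all of its call sites. */
    #define float64_add float64_add_s390x

    static double float64_add(double a, double b)
    {
        /* After preprocessing this defines float64_add_s390x(), so another
         * target's copy of the same function, e.g. float64_add_x86_64, can
         * be linked into the same library without duplicate-symbol errors. */
        return a + b;
    }

    int main(void)
    {
        printf("%g\n", float64_add(1.5, 2.25)); /* calls float64_add_s390x */
        return 0;
    }
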
+#endif + +static uint32_t cc_calc_ltgt_32(int32_t src, int32_t dst) +{ + if (src == dst) { + return 0; + } else if (src < dst) { + return 1; + } else { + return 2; + } +} + +static uint32_t cc_calc_ltgt0_32(int32_t dst) +{ + return cc_calc_ltgt_32(dst, 0); +} + +static uint32_t cc_calc_ltgt_64(int64_t src, int64_t dst) +{ + if (src == dst) { + return 0; + } else if (src < dst) { + return 1; + } else { + return 2; + } +} + +static uint32_t cc_calc_ltgt0_64(int64_t dst) +{ + return cc_calc_ltgt_64(dst, 0); +} + +static uint32_t cc_calc_ltugtu_32(uint32_t src, uint32_t dst) +{ + if (src == dst) { + return 0; + } else if (src < dst) { + return 1; + } else { + return 2; + } +} + +static uint32_t cc_calc_ltugtu_64(uint64_t src, uint64_t dst) +{ + if (src == dst) { + return 0; + } else if (src < dst) { + return 1; + } else { + return 2; + } +} + +static uint32_t cc_calc_tm_32(uint32_t val, uint32_t mask) +{ + uint32_t r = val & mask; + + if (r == 0) { + return 0; + } else if (r == mask) { + return 3; + } else { + return 1; + } +} + +static uint32_t cc_calc_tm_64(uint64_t val, uint64_t mask) +{ + uint64_t r = val & mask; + + if (r == 0) { + return 0; + } else if (r == mask) { + return 3; + } else { + int top = clz64(mask); + if ((int64_t)(val << top) < 0) { + return 2; + } else { + return 1; + } + } +} + +static uint32_t cc_calc_nz(uint64_t dst) +{ + return !!dst; +} + +static uint32_t cc_calc_add_64(int64_t a1, int64_t a2, int64_t ar) +{ + if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) { + return 3; /* overflow */ + } else { + if (ar < 0) { + return 1; + } else if (ar > 0) { + return 2; + } else { + return 0; + } + } +} + +static uint32_t cc_calc_addu_64(uint64_t a1, uint64_t a2, uint64_t ar) +{ + return (ar != 0) + 2 * (ar < a1); +} + +static uint32_t cc_calc_addc_64(uint64_t a1, uint64_t a2, uint64_t ar) +{ + /* Recover a2 + carry_in. */ + uint64_t a2c = ar - a1; + /* Check for a2+carry_in overflow, then a1+a2c overflow. 
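Since ar = a1+a2+carry_in (carry_in is 0 or 1), a2c = ar-a1 recovers a2+carry_in exactly, so a carry out of the full sum must wrap one of the two partial sums; e.g. a1 = 1, a2 = UINT64_MAX, carry_in = 0 gives ar = 0 and (ar < a1) flags the carry.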
*/ + int carry_out = (a2c < a2) || (ar < a1); + + return (ar != 0) + 2 * carry_out; +} + +static uint32_t cc_calc_sub_64(int64_t a1, int64_t a2, int64_t ar) +{ + if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) { + return 3; /* overflow */ + } else { + if (ar < 0) { + return 1; + } else if (ar > 0) { + return 2; + } else { + return 0; + } + } +} + +static uint32_t cc_calc_subu_64(uint64_t a1, uint64_t a2, uint64_t ar) +{ + if (ar == 0) { + return 2; + } else { + if (a2 > a1) { + return 1; + } else { + return 3; + } + } +} + +static uint32_t cc_calc_subb_64(uint64_t a1, uint64_t a2, uint64_t ar) +{ + int borrow_out; + + if (ar != a1 - a2) { /* difference means borrow-in */ + borrow_out = (a2 >= a1); + } else { + borrow_out = (a2 > a1); + } + + return (ar != 0) + 2 * !borrow_out; +} + +static uint32_t cc_calc_abs_64(int64_t dst) +{ + if ((uint64_t)dst == 0x8000000000000000ULL) { + return 3; + } else if (dst) { + return 2; + } else { + return 0; + } +} + +static uint32_t cc_calc_nabs_64(int64_t dst) +{ + return !!dst; +} + +static uint32_t cc_calc_comp_64(int64_t dst) +{ + if ((uint64_t)dst == 0x8000000000000000ULL) { + return 3; + } else if (dst < 0) { + return 1; + } else if (dst > 0) { + return 2; + } else { + return 0; + } +} + + +static uint32_t cc_calc_add_32(int32_t a1, int32_t a2, int32_t ar) +{ + if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) { + return 3; /* overflow */ + } else { + if (ar < 0) { + return 1; + } else if (ar > 0) { + return 2; + } else { + return 0; + } + } +} + +static uint32_t cc_calc_addu_32(uint32_t a1, uint32_t a2, uint32_t ar) +{ + return (ar != 0) + 2 * (ar < a1); +} + +static uint32_t cc_calc_addc_32(uint32_t a1, uint32_t a2, uint32_t ar) +{ + /* Recover a2 + carry_in. */ + uint32_t a2c = ar - a1; + /* Check for a2+carry_in overflow, then a1+a2c overflow. 
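Same recovery scheme as cc_calc_addc_64 above, in 32-bit arithmetic.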
*/ + int carry_out = (a2c < a2) || (ar < a1); + + return (ar != 0) + 2 * carry_out; +} + +static uint32_t cc_calc_sub_32(int32_t a1, int32_t a2, int32_t ar) +{ + if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) { + return 3; /* overflow */ + } else { + if (ar < 0) { + return 1; + } else if (ar > 0) { + return 2; + } else { + return 0; + } + } +} + +static uint32_t cc_calc_subu_32(uint32_t a1, uint32_t a2, uint32_t ar) +{ + if (ar == 0) { + return 2; + } else { + if (a2 > a1) { + return 1; + } else { + return 3; + } + } +} + +static uint32_t cc_calc_subb_32(uint32_t a1, uint32_t a2, uint32_t ar) +{ + int borrow_out; + + if (ar != a1 - a2) { /* difference means borrow-in */ + borrow_out = (a2 >= a1); + } else { + borrow_out = (a2 > a1); + } + + return (ar != 0) + 2 * !borrow_out; +} + +static uint32_t cc_calc_abs_32(int32_t dst) +{ + if ((uint32_t)dst == 0x80000000UL) { + return 3; + } else if (dst) { + return 2; + } else { + return 0; + } +} + +static uint32_t cc_calc_nabs_32(int32_t dst) +{ + return !!dst; +} + +static uint32_t cc_calc_comp_32(int32_t dst) +{ + if ((uint32_t)dst == 0x80000000UL) { + return 3; + } else if (dst < 0) { + return 1; + } else if (dst > 0) { + return 2; + } else { + return 0; + } +} + +/* calculate condition code for insert character under mask insn */ +static uint32_t cc_calc_icm(uint64_t mask, uint64_t val) +{ + if ((val & mask) == 0) { + return 0; + } else { + int top = clz64(mask); + if ((int64_t)(val << top) < 0) { + return 1; + } else { + return 2; + } + } +} + +static uint32_t cc_calc_sla_32(uint32_t src, int shift) +{ + uint32_t mask = ((1U << shift) - 1U) << (32 - shift); + uint32_t sign = 1U << 31; + uint32_t match; + int32_t r; + + /* Check if the sign bit stays the same. */ + if (src & sign) { + match = mask; + } else { + match = 0; + } + if ((src & mask) != match) { + /* Overflow. */ + return 3; + } + + r = ((src << shift) & ~sign) | (src & sign); + if (r == 0) { + return 0; + } else if (r < 0) { + return 1; + } + return 2; +} + +static uint32_t cc_calc_sla_64(uint64_t src, int shift) +{ + uint64_t mask = ((1ULL << shift) - 1ULL) << (64 - shift); + uint64_t sign = 1ULL << 63; + uint64_t match; + int64_t r; + + /* Check if the sign bit stays the same. */ + if (src & sign) { + match = mask; + } else { + match = 0; + } + if ((src & mask) != match) { + /* Overflow. */ + return 3; + } + + r = ((src << shift) & ~sign) | (src & sign); + if (r == 0) { + return 0; + } else if (r < 0) { + return 1; + } + return 2; +} + +static uint32_t cc_calc_flogr(uint64_t dst) +{ + return dst ? 2 : 0; +} + +static uint32_t cc_calc_lcbb(uint64_t dst) +{ + return dst == 16 ? 
0 : 3; +} + +static uint32_t cc_calc_vc(uint64_t low, uint64_t high) +{ + if (high == -1ull && low == -1ull) { + /* all elements match */ + return 0; + } else if (high == 0 && low == 0) { + /* no elements match */ + return 3; + } else { + /* some elements but not all match */ + return 1; + } +} + +static uint32_t do_calc_cc(CPUS390XState *env, uint32_t cc_op, + uint64_t src, uint64_t dst, uint64_t vr) +{ + uint32_t r = 0; + + switch (cc_op) { + case CC_OP_CONST0: + case CC_OP_CONST1: + case CC_OP_CONST2: + case CC_OP_CONST3: + /* cc_op value _is_ cc */ + r = cc_op; + break; + case CC_OP_LTGT0_32: + r = cc_calc_ltgt0_32(dst); + break; + case CC_OP_LTGT0_64: + r = cc_calc_ltgt0_64(dst); + break; + case CC_OP_LTGT_32: + r = cc_calc_ltgt_32(src, dst); + break; + case CC_OP_LTGT_64: + r = cc_calc_ltgt_64(src, dst); + break; + case CC_OP_LTUGTU_32: + r = cc_calc_ltugtu_32(src, dst); + break; + case CC_OP_LTUGTU_64: + r = cc_calc_ltugtu_64(src, dst); + break; + case CC_OP_TM_32: + r = cc_calc_tm_32(src, dst); + break; + case CC_OP_TM_64: + r = cc_calc_tm_64(src, dst); + break; + case CC_OP_NZ: + r = cc_calc_nz(dst); + break; + case CC_OP_ADD_64: + r = cc_calc_add_64(src, dst, vr); + break; + case CC_OP_ADDU_64: + r = cc_calc_addu_64(src, dst, vr); + break; + case CC_OP_ADDC_64: + r = cc_calc_addc_64(src, dst, vr); + break; + case CC_OP_SUB_64: + r = cc_calc_sub_64(src, dst, vr); + break; + case CC_OP_SUBU_64: + r = cc_calc_subu_64(src, dst, vr); + break; + case CC_OP_SUBB_64: + r = cc_calc_subb_64(src, dst, vr); + break; + case CC_OP_ABS_64: + r = cc_calc_abs_64(dst); + break; + case CC_OP_NABS_64: + r = cc_calc_nabs_64(dst); + break; + case CC_OP_COMP_64: + r = cc_calc_comp_64(dst); + break; + + case CC_OP_ADD_32: + r = cc_calc_add_32(src, dst, vr); + break; + case CC_OP_ADDU_32: + r = cc_calc_addu_32(src, dst, vr); + break; + case CC_OP_ADDC_32: + r = cc_calc_addc_32(src, dst, vr); + break; + case CC_OP_SUB_32: + r = cc_calc_sub_32(src, dst, vr); + break; + case CC_OP_SUBU_32: + r = cc_calc_subu_32(src, dst, vr); + break; + case CC_OP_SUBB_32: + r = cc_calc_subb_32(src, dst, vr); + break; + case CC_OP_ABS_32: + r = cc_calc_abs_32(dst); + break; + case CC_OP_NABS_32: + r = cc_calc_nabs_32(dst); + break; + case CC_OP_COMP_32: + r = cc_calc_comp_32(dst); + break; + + case CC_OP_ICM: + r = cc_calc_icm(src, dst); + break; + case CC_OP_SLA_32: + r = cc_calc_sla_32(src, dst); + break; + case CC_OP_SLA_64: + r = cc_calc_sla_64(src, dst); + break; + case CC_OP_FLOGR: + r = cc_calc_flogr(dst); + break; + case CC_OP_LCBB: + r = cc_calc_lcbb(dst); + break; + case CC_OP_VC: + r = cc_calc_vc(src, dst); + break; + + case CC_OP_NZ_F32: + r = set_cc_nz_f32(dst); + break; + case CC_OP_NZ_F64: + r = set_cc_nz_f64(dst); + break; + case CC_OP_NZ_F128: + r = set_cc_nz_f128(make_float128(src, dst)); + break; + + default: + cpu_abort(env_cpu(env), "Unknown CC operation: %s\n", cc_name(cc_op)); + } + + HELPER_LOG("%s: %15s 0x%016lx 0x%016lx 0x%016lx = %d\n", __func__, + cc_name(cc_op), src, dst, vr, r); + return r; +} + +uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst, + uint64_t vr) +{ + return do_calc_cc(env, cc_op, src, dst, vr); +} + +uint32_t HELPER(calc_cc)(CPUS390XState *env, uint32_t cc_op, uint64_t src, + uint64_t dst, uint64_t vr) +{ + return do_calc_cc(env, cc_op, src, dst, vr); +} + +#ifndef CONFIG_USER_ONLY +void HELPER(load_psw)(CPUS390XState *env, uint64_t mask, uint64_t addr) +{ + load_psw(env, mask, addr); + cpu_loop_exit(env_cpu(env)); +} + +void 
HELPER(sacf)(CPUS390XState *env, uint64_t a1) +{ + HELPER_LOG("%s: %16" PRIx64 "\n", __func__, a1); + + switch (a1 & 0xf00) { + case 0x000: + env->psw.mask &= ~PSW_MASK_ASC; + env->psw.mask |= PSW_ASC_PRIMARY; + break; + case 0x100: + env->psw.mask &= ~PSW_MASK_ASC; + env->psw.mask |= PSW_ASC_SECONDARY; + break; + case 0x300: + env->psw.mask &= ~PSW_MASK_ASC; + env->psw.mask |= PSW_ASC_HOME; + break; + default: + HELPER_LOG("unknown sacf mode: %" PRIx64 "\n", a1); + tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC()); + } +} +#endif diff --git a/qemu/target/s390x/cpu-param.h b/qemu/target/s390x/cpu-param.h new file mode 100644 index 00000000..472db648 --- /dev/null +++ b/qemu/target/s390x/cpu-param.h @@ -0,0 +1,17 @@ +/* + * S/390 cpu parameters for qemu. + * + * Copyright (c) 2009 Ulrich Hecht + * SPDX-License-Identifier: GPL-2.0+ + */ + +#ifndef S390_CPU_PARAM_H +#define S390_CPU_PARAM_H 1 + +#define TARGET_LONG_BITS 64 +#define TARGET_PAGE_BITS 12 +#define TARGET_PHYS_ADDR_SPACE_BITS 64 +#define TARGET_VIRT_ADDR_SPACE_BITS 64 +#define NB_MMU_MODES 4 + +#endif diff --git a/qemu/target/s390x/cpu-qom.h b/qemu/target/s390x/cpu-qom.h new file mode 100644 index 00000000..c0515723 --- /dev/null +++ b/qemu/target/s390x/cpu-qom.h @@ -0,0 +1,64 @@ +/* + * QEMU S/390 CPU + * + * Copyright (c) 2012 SUSE LINUX Products GmbH + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * + */ +#ifndef QEMU_S390_CPU_QOM_H +#define QEMU_S390_CPU_QOM_H + +#include "hw/core/cpu.h" + +#define S390_CPU(obj) ((S390CPU *)obj) +#define S390_CPU_CLASS(klass) ((S390CPUClass *)klass) +#define S390_CPU_GET_CLASS(obj) (&((S390CPU *)obj)->cc) + +typedef struct S390CPUModel S390CPUModel; +typedef struct S390CPUDef S390CPUDef; + +typedef enum cpu_reset_type { + S390_CPU_RESET_NORMAL, + S390_CPU_RESET_INITIAL, + S390_CPU_RESET_CLEAR, +} cpu_reset_type; + +/** + * S390CPUClass: + * @parent_realize: The parent class' realize handler. + * @parent_reset: The parent class' reset handler. + * @load_normal: Performs a load normal. + * @cpu_reset: Performs a CPU reset. + * @initial_cpu_reset: Performs an initial CPU reset. + * + * An S/390 CPU model. + */ +typedef struct S390CPUClass { + /*< private >*/ + CPUClass parent_class; + /*< public >*/ + const S390CPUDef *cpu_def; + bool is_static; + // const char *desc; qq + + void (*load_normal)(CPUState *cpu); + void (*reset)(CPUState *cpu, cpu_reset_type type); + void (*parent_reset)(CPUState *cpu); +} S390CPUClass; + +typedef struct S390CPU S390CPU; +typedef struct CPUS390XState CPUS390XState; + +#endif diff --git a/qemu/target/s390x/cpu.c b/qemu/target/s390x/cpu.c new file mode 100644 index 00000000..9f2fbf34 --- /dev/null +++ b/qemu/target/s390x/cpu.c @@ -0,0 +1,301 @@ +/* + * QEMU S/390 CPU + * + * Copyright (c) 2009 Ulrich Hecht + * Copyright (c) 2011 Alexander Graf + * Copyright (c) 2012 SUSE LINUX Products GmbH + * Copyright (c) 2012 IBM Corp. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "internal.h" +#include "qemu/timer.h" +#include "sysemu/sysemu.h" +#include "sysemu/tcg.h" +#include "fpu/softfloat-helpers.h" + +#define CR0_RESET 0xE0UL +#define CR14_RESET 0xC2000000UL + +static void s390_cpu_set_pc(CPUState *cs, vaddr value) +{ + S390CPU *cpu = S390_CPU(cs); + + cpu->env.psw.addr = value; +} + +static bool s390_cpu_has_work(CPUState *cs) +{ + S390CPU *cpu = S390_CPU(cs); + +#if 0 + /* STOPPED cpus can never wake up */ + if (s390_cpu_get_state(cpu) != S390_CPU_STATE_LOAD && + s390_cpu_get_state(cpu) != S390_CPU_STATE_OPERATING) { + return false; + } +#endif + + if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) { + return false; + } + + return s390_cpu_has_int(cpu); +} + +/* S390CPUClass::reset() */ +static void s390_cpu_reset(CPUState *dev, cpu_reset_type type) +{ + CPUState *s = CPU(dev); + S390CPU *cpu = S390_CPU(s); + S390CPUClass *scc = S390_CPU_GET_CLASS(cpu); + CPUS390XState *env = &cpu->env; + + scc->parent_reset(dev); + cpu->env.sigp_order = 0; + s390_cpu_set_state(S390_CPU_STATE_STOPPED, cpu); + + switch (type) { + case S390_CPU_RESET_CLEAR: + memset(env, 0, offsetof(CPUS390XState, start_initial_reset_fields)); + /* fall through */ + case S390_CPU_RESET_INITIAL: + /* initial reset does not clear everything! */ + memset(&env->start_initial_reset_fields, 0, + offsetof(CPUS390XState, start_normal_reset_fields) - + offsetof(CPUS390XState, start_initial_reset_fields)); + + /* architectured initial value for Breaking-Event-Address register */ + env->gbea = 1; + + /* architectured initial values for CR 0 and 14 */ + env->cregs[0] = CR0_RESET; + env->cregs[14] = CR14_RESET; + + /* tininess for underflow is detected before rounding */ + set_float_detect_tininess(float_tininess_before_rounding, + &env->fpu_status); + /* fall through */ + case S390_CPU_RESET_NORMAL: + env->psw.mask &= ~PSW_MASK_RI; + memset(&env->start_normal_reset_fields, 0, + offsetof(CPUS390XState, end_reset_fields) - + offsetof(CPUS390XState, start_normal_reset_fields)); + + env->pfault_token = -1UL; + env->bpbc = false; + break; + default: + g_assert_not_reached(); + } +} + +static void s390_cpu_realizefn(struct uc_struct *uc, CPUState *dev) +{ + CPUState *cs = CPU(dev); + S390CPU *cpu = S390_CPU(dev); + + /* the model has to be realized before qemu_init_vcpu() due to kvm */ + // s390_realize_cpu_model(cs); + + /* sync cs->cpu_index and env->core_id. The latter is needed for TCG. 
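Copying core_id into cpu_index below, before qemu_init_vcpu(), keeps both views of the CPU address consistent.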
*/ + cs->cpu_index = cpu->env.core_id; + + cpu_exec_realizefn(cs); + + qemu_init_vcpu(cs); + + cpu_reset(cs); +} + +static void s390_cpu_initfn(struct uc_struct *uc, CPUState *obj) +{ + CPUState *cs = CPU(obj); + S390CPU *cpu = S390_CPU(obj); + + cpu_set_cpustate_pointers(cpu); + cs->halted = 1; + cs->exception_index = EXCP_HLT; + // s390_cpu_model_register_props(obj); + // cpu->env.tod_timer = + // timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_tod_timer, cpu); + // cpu->env.cpu_timer = + // timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_cpu_timer, cpu); + s390_cpu_set_state(S390_CPU_STATE_STOPPED, cpu); +} + +static unsigned s390_count_running_cpus(void) +{ + return 1; +} + +unsigned int s390_cpu_halt(S390CPU *cpu) +{ + CPUState *cs = CPU(cpu); + + if (!cs->halted) { + cs->halted = 1; + cs->exception_index = EXCP_HLT; + } + + return s390_count_running_cpus(); +} + +void s390_cpu_unhalt(S390CPU *cpu) +{ + CPUState *cs = CPU(cpu); + + if (cs->halted) { + cs->halted = 0; + cs->exception_index = -1; + } +} + +unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu) + { + switch (cpu_state) { + case S390_CPU_STATE_STOPPED: + case S390_CPU_STATE_CHECK_STOP: + /* halt the cpu for common infrastructure */ + s390_cpu_halt(cpu); + break; + case S390_CPU_STATE_OPERATING: + case S390_CPU_STATE_LOAD: + /* + * Starting a CPU with a PSW WAIT bit set: + * KVM: handles this internally and triggers another WAIT exit. + * TCG: will actually try to continue to run. Don't unhalt, will + * be done when the CPU actually has work (an interrupt). + */ + if (!(cpu->env.psw.mask & PSW_MASK_WAIT)) { + s390_cpu_unhalt(cpu); + } + break; + default: + //error_report("Requested CPU state is not a valid S390 CPU state: %u", + // cpu_state); + exit(1); + } + cpu->env.cpu_state = cpu_state; + + return s390_count_running_cpus(); +} + +int s390_set_memory_limit(uint64_t new_limit, uint64_t *hw_limit) +{ + return 0; +} + +void s390_set_max_pagesize(uint64_t pagesize) +{ +} + +void s390_cmma_reset(void) +{ +} + +void s390_crypto_reset(void) +{ +} + +void s390_enable_css_support(S390CPU *cpu) +{ +} + +#if 0 +static void s390_cpu_reset_full(CPUState *dev) +{ + return s390_cpu_reset(dev, S390_CPU_RESET_CLEAR); +} +#endif + +static void s390_cpu_class_init(struct uc_struct *uc, CPUClass *oc) +{ + S390CPUClass *scc = S390_CPU_CLASS(oc); + CPUClass *cc = CPU_CLASS(scc); + + scc->reset = s390_cpu_reset; + cc->has_work = s390_cpu_has_work; + // cc->do_interrupt = s390_cpu_do_interrupt; + cc->set_pc = s390_cpu_set_pc; + cc->get_phys_page_debug = s390_cpu_get_phys_page_debug; + cc->cpu_exec_interrupt = s390_cpu_exec_interrupt; + cc->debug_excp_handler = s390x_cpu_debug_excp_handler; + cc->do_unaligned_access = s390x_cpu_do_unaligned_access; + cc->tcg_initialize = s390x_translate_init; + cc->tlb_fill = s390_cpu_tlb_fill; + + // s390_cpu_model_class_register_props(oc); +} + +S390CPU *cpu_s390_init(struct uc_struct *uc, const char *cpu_model) +{ + S390CPU *cpu; + CPUState *cs; + CPUClass *cc; + // int i; + + cpu = calloc(1, sizeof(*cpu)); + if (cpu == NULL) { + return NULL; + } + + cs = (CPUState *)cpu; + cc = (CPUClass *)&cpu->cc; + cs->cc = cc; + cs->uc = uc; + uc->cpu = (CPUState *)cpu; + + /* init CPUClass */ + cpu_class_init(uc, cc); + + /* init CPUClass */ + s390_cpu_class_init(uc, cc); + + /* init CPUState */ + cpu_common_initfn(uc, cs); + + /* init CPU */ + s390_cpu_initfn(uc, cs); + + /* init specific CPU model */ +/* + for (i = 0; i < ARRAY_SIZE(cpu_models); i++) { + if (strcmp(cpu_model, cpu_models[i].name) == 0) { + 
if (cpu_models[i].class_init) { + cpu_models[i].class_init(uc, cc, uc); + } + if (cpu_models[i].initfn) { + cpu_models[i].initfn(uc, cs); + } + break; + } + } +*/ + + /* realize CPU */ + s390_cpu_realizefn(uc, cs); + + // init address space + cpu_address_space_init(cs, 0, cs->memory); + + return cpu; +} diff --git a/qemu/target/s390x/cpu.h b/qemu/target/s390x/cpu.h new file mode 100644 index 00000000..2548d653 --- /dev/null +++ b/qemu/target/s390x/cpu.h @@ -0,0 +1,840 @@ +/* + * S/390 virtual CPU header + * + * For details on the s390x architecture and used definitions (e.g., + * PSW, PER and DAT (Dynamic Address Translation)), please refer to + * the "z/Architecture Principles of Operations" - a.k.a. PoP. + * + * Copyright (c) 2009 Ulrich Hecht + * Copyright IBM Corp. 2012, 2018 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef S390X_CPU_H +#define S390X_CPU_H + +#include "cpu-qom.h" +#include "cpu_models.h" +#include "exec/cpu-defs.h" +#include "hw/s390x/storage-keys.h" + +#define ELF_MACHINE_UNAME "S390X" + +/* The z/Architecture has a strong memory model with some store-after-load re-ordering */ +#define TCG_GUEST_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD) + +#define TARGET_INSN_START_EXTRA_WORDS 2 + +#define MMU_USER_IDX 0 + +#define S390_MAX_CPUS 248 + +typedef struct PSW { + uint64_t mask; + uint64_t addr; +} PSW; + +struct CPUS390XState { + uint64_t regs[16]; /* GP registers */ + /* + * The floating point registers are part of the vector registers. + * vregs[0][0] -> vregs[15][0] are 16 floating point registers + */ + uint64_t vregs[32][2] QEMU_ALIGNED(16); /* vector registers */ + uint32_t aregs[16]; /* access registers */ + uint64_t gscb[4]; /* guarded storage control */ + uint64_t etoken; /* etoken */ + uint64_t etoken_extension; /* etoken extension */ + + /* Fields up to this point are not cleared by initial CPU reset */ + int start_initial_reset_fields; + + uint32_t fpc; /* floating-point control register */ + uint32_t cc_op; + bool bpbc; /* branch prediction blocking */ + + float_status fpu_status; /* passed to softfloat lib */ + + /* The low part of a 128-bit return, or remainder of a divide. 
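Such results are produced as two doublewords: the high half through the helper's normal return value, the low half through this field.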
*/ + uint64_t retxl; + + PSW psw; + + // S390CrashReason crash_reason; + + uint64_t cc_src; + uint64_t cc_dst; + uint64_t cc_vr; + + uint64_t ex_value; + + uint64_t __excp_addr; + uint64_t psa; + + uint32_t int_pgm_code; + uint32_t int_pgm_ilen; + + uint32_t int_svc_code; + uint32_t int_svc_ilen; + + uint64_t per_address; + uint16_t per_perc_atmid; + + uint64_t cregs[16]; /* control registers */ + + uint64_t ckc; + uint64_t cputm; + uint32_t todpr; + + uint64_t pfault_token; + uint64_t pfault_compare; + uint64_t pfault_select; + + uint64_t gbea; + uint64_t pp; + + /* Fields up to this point are not cleared by normal CPU reset */ + int start_normal_reset_fields; + uint8_t riccb[64]; /* runtime instrumentation control */ + + int pending_int; + uint16_t external_call_addr; + DECLARE_BITMAP(emergency_signals, S390_MAX_CPUS); + + /* Fields up to this point are cleared by a CPU reset */ + int end_reset_fields; + + uint32_t core_id; /* PoP "CPU address", same as cpu_index */ + uint64_t cpuid; + + // QEMUTimer *tod_timer; + + // QEMUTimer *cpu_timer; + + /* + * The cpu state represents the logical state of a cpu. In contrast to other + * architectures, there is a difference between a halt and a stop on s390. + * If all cpus are either stopped (including check stop) or in the disabled + * wait state, the vm can be shut down. + * The acceptable cpu_state values are defined in the CpuInfoS390State + * enum. + */ + uint8_t cpu_state; + + /* currently processed sigp order */ + uint8_t sigp_order; + + // Unicorn engine + struct uc_struct *uc; +}; + +static inline uint64_t *get_freg(CPUS390XState *cs, int nr) +{ + return &cs->vregs[nr][0]; +} + +/** + * S390CPU: + * @env: #CPUS390XState. + * + * An S/390 CPU. + */ +struct S390CPU { + /*< private >*/ + CPUState parent_obj; + /*< public >*/ + + CPUNegativeOffsetState neg; + CPUS390XState env; + S390CPUModel *model; + /* needed for live migration */ + void *irqstate; + uint32_t irqstate_saved_size; + + // unicorn + struct S390CPUClass cc; + struct S390SKeysClass skey; +}; + + +/* distinguish between 24 bit and 31 bit addressing */ +#define HIGH_ORDER_BIT 0x80000000 + +/* Interrupt Codes */ +/* Program Interrupts */ +#define PGM_OPERATION 0x0001 +#define PGM_PRIVILEGED 0x0002 +#define PGM_EXECUTE 0x0003 +#define PGM_PROTECTION 0x0004 +#define PGM_ADDRESSING 0x0005 +#define PGM_SPECIFICATION 0x0006 +#define PGM_DATA 0x0007 +#define PGM_FIXPT_OVERFLOW 0x0008 +#define PGM_FIXPT_DIVIDE 0x0009 +#define PGM_DEC_OVERFLOW 0x000a +#define PGM_DEC_DIVIDE 0x000b +#define PGM_HFP_EXP_OVERFLOW 0x000c +#define PGM_HFP_EXP_UNDERFLOW 0x000d +#define PGM_HFP_SIGNIFICANCE 0x000e +#define PGM_HFP_DIVIDE 0x000f +#define PGM_SEGMENT_TRANS 0x0010 +#define PGM_PAGE_TRANS 0x0011 +#define PGM_TRANS_SPEC 0x0012 +#define PGM_SPECIAL_OP 0x0013 +#define PGM_OPERAND 0x0015 +#define PGM_TRACE_TABLE 0x0016 +#define PGM_VECTOR_PROCESSING 0x001b +#define PGM_SPACE_SWITCH 0x001c +#define PGM_HFP_SQRT 0x001d +#define PGM_PC_TRANS_SPEC 0x001f +#define PGM_AFX_TRANS 0x0020 +#define PGM_ASX_TRANS 0x0021 +#define PGM_LX_TRANS 0x0022 +#define PGM_EX_TRANS 0x0023 +#define PGM_PRIM_AUTH 0x0024 +#define PGM_SEC_AUTH 0x0025 +#define PGM_ALET_SPEC 0x0028 +#define PGM_ALEN_SPEC 0x0029 +#define PGM_ALE_SEQ 0x002a +#define PGM_ASTE_VALID 0x002b +#define PGM_ASTE_SEQ 0x002c +#define PGM_EXT_AUTH 0x002d +#define PGM_STACK_FULL 0x0030 +#define PGM_STACK_EMPTY 0x0031 +#define PGM_STACK_SPEC 0x0032 +#define PGM_STACK_TYPE 0x0033 +#define PGM_STACK_OP 0x0034 +#define PGM_ASCE_TYPE 0x0038 +#define 
PGM_REG_FIRST_TRANS 0x0039 +#define PGM_REG_SEC_TRANS 0x003a +#define PGM_REG_THIRD_TRANS 0x003b +#define PGM_MONITOR 0x0040 +#define PGM_PER 0x0080 +#define PGM_CRYPTO 0x0119 + +/* External Interrupts */ +#define EXT_INTERRUPT_KEY 0x0040 +#define EXT_CLOCK_COMP 0x1004 +#define EXT_CPU_TIMER 0x1005 +#define EXT_MALFUNCTION 0x1200 +#define EXT_EMERGENCY 0x1201 +#define EXT_EXTERNAL_CALL 0x1202 +#define EXT_ETR 0x1406 +#define EXT_SERVICE 0x2401 +#define EXT_VIRTIO 0x2603 + +/* PSW defines */ +#undef PSW_MASK_PER +#undef PSW_MASK_UNUSED_2 +#undef PSW_MASK_UNUSED_3 +#undef PSW_MASK_DAT +#undef PSW_MASK_IO +#undef PSW_MASK_EXT +#undef PSW_MASK_KEY +#undef PSW_SHIFT_KEY +#undef PSW_MASK_MCHECK +#undef PSW_MASK_WAIT +#undef PSW_MASK_PSTATE +#undef PSW_MASK_ASC +#undef PSW_SHIFT_ASC +#undef PSW_MASK_CC +#undef PSW_MASK_PM +#undef PSW_MASK_RI +#undef PSW_SHIFT_MASK_PM +#undef PSW_MASK_64 +#undef PSW_MASK_32 +#undef PSW_MASK_ESA_ADDR + +#define PSW_MASK_PER 0x4000000000000000ULL +#define PSW_MASK_UNUSED_2 0x2000000000000000ULL +#define PSW_MASK_UNUSED_3 0x1000000000000000ULL +#define PSW_MASK_DAT 0x0400000000000000ULL +#define PSW_MASK_IO 0x0200000000000000ULL +#define PSW_MASK_EXT 0x0100000000000000ULL +#define PSW_MASK_KEY 0x00F0000000000000ULL +#define PSW_SHIFT_KEY 52 +#define PSW_MASK_SHORTPSW 0x0008000000000000ULL +#define PSW_MASK_MCHECK 0x0004000000000000ULL +#define PSW_MASK_WAIT 0x0002000000000000ULL +#define PSW_MASK_PSTATE 0x0001000000000000ULL +#define PSW_MASK_ASC 0x0000C00000000000ULL +#define PSW_SHIFT_ASC 46 +#define PSW_MASK_CC 0x0000300000000000ULL +#define PSW_MASK_PM 0x00000F0000000000ULL +#define PSW_SHIFT_MASK_PM 40 +#define PSW_MASK_RI 0x0000008000000000ULL +#define PSW_MASK_64 0x0000000100000000ULL +#define PSW_MASK_32 0x0000000080000000ULL +#define PSW_MASK_SHORT_ADDR 0x000000007fffffffULL +#define PSW_MASK_SHORT_CTRL 0xffffffff80000000ULL + +#undef PSW_ASC_PRIMARY +#undef PSW_ASC_ACCREG +#undef PSW_ASC_SECONDARY +#undef PSW_ASC_HOME + +#define PSW_ASC_PRIMARY 0x0000000000000000ULL +#define PSW_ASC_ACCREG 0x0000400000000000ULL +#define PSW_ASC_SECONDARY 0x0000800000000000ULL +#define PSW_ASC_HOME 0x0000C00000000000ULL + +/* the address space values shifted */ +#define AS_PRIMARY 0 +#define AS_ACCREG 1 +#define AS_SECONDARY 2 +#define AS_HOME 3 + +/* tb flags */ + +#define FLAG_MASK_PSW_SHIFT 31 +#define FLAG_MASK_PER (PSW_MASK_PER >> FLAG_MASK_PSW_SHIFT) +#define FLAG_MASK_DAT (PSW_MASK_DAT >> FLAG_MASK_PSW_SHIFT) +#define FLAG_MASK_PSTATE (PSW_MASK_PSTATE >> FLAG_MASK_PSW_SHIFT) +#define FLAG_MASK_ASC (PSW_MASK_ASC >> FLAG_MASK_PSW_SHIFT) +#define FLAG_MASK_64 (PSW_MASK_64 >> FLAG_MASK_PSW_SHIFT) +#define FLAG_MASK_32 (PSW_MASK_32 >> FLAG_MASK_PSW_SHIFT) +#define FLAG_MASK_PSW (FLAG_MASK_PER | FLAG_MASK_DAT | FLAG_MASK_PSTATE \ + | FLAG_MASK_ASC | FLAG_MASK_64 | FLAG_MASK_32) + +/* we'll use some unused PSW positions to store CR flags in tb flags */ +#define FLAG_MASK_AFP (PSW_MASK_UNUSED_2 >> FLAG_MASK_PSW_SHIFT) +#define FLAG_MASK_VECTOR (PSW_MASK_UNUSED_3 >> FLAG_MASK_PSW_SHIFT) + +/* Control register 0 bits */ +#define CR0_LOWPROT 0x0000000010000000ULL +#define CR0_SECONDARY 0x0000000004000000ULL +#define CR0_EDAT 0x0000000000800000ULL +#define CR0_AFP 0x0000000000040000ULL +#define CR0_VECTOR 0x0000000000020000ULL +#define CR0_IEP 0x0000000000100000ULL +#define CR0_EMERGENCY_SIGNAL_SC 0x0000000000004000ULL +#define CR0_EXTERNAL_CALL_SC 0x0000000000002000ULL +#define CR0_CKC_SC 0x0000000000000800ULL +#define CR0_CPU_TIMER_SC 0x0000000000000400ULL +#define 
CR0_SERVICE_SC 0x0000000000000200ULL + +/* Control register 14 bits */ +#define CR14_CHANNEL_REPORT_SC 0x0000000010000000ULL + +/* MMU */ +#define MMU_PRIMARY_IDX 0 +#define MMU_SECONDARY_IDX 1 +#define MMU_HOME_IDX 2 +#define MMU_REAL_IDX 3 + +static inline int cpu_mmu_index(CPUS390XState *env, bool ifetch) +{ + if (!(env->psw.mask & PSW_MASK_DAT)) { + return MMU_REAL_IDX; + } + + if (ifetch) { + if ((env->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME) { + return MMU_HOME_IDX; + } + return MMU_PRIMARY_IDX; + } + + switch (env->psw.mask & PSW_MASK_ASC) { + case PSW_ASC_PRIMARY: + return MMU_PRIMARY_IDX; + case PSW_ASC_SECONDARY: + return MMU_SECONDARY_IDX; + case PSW_ASC_HOME: + return MMU_HOME_IDX; + case PSW_ASC_ACCREG: + /* Fallthrough: access register mode is not yet supported */ + default: + abort(); + } +} + +static inline void cpu_get_tb_cpu_state(CPUS390XState* env, target_ulong *pc, + target_ulong *cs_base, uint32_t *flags) +{ + *pc = env->psw.addr; + *cs_base = env->ex_value; + *flags = (env->psw.mask >> FLAG_MASK_PSW_SHIFT) & FLAG_MASK_PSW; + if (env->cregs[0] & CR0_AFP) { + *flags |= FLAG_MASK_AFP; + } + if (env->cregs[0] & CR0_VECTOR) { + *flags |= FLAG_MASK_VECTOR; + } +} + +/* PER bits from control register 9 */ +#define PER_CR9_EVENT_BRANCH 0x80000000 +#define PER_CR9_EVENT_IFETCH 0x40000000 +#define PER_CR9_EVENT_STORE 0x20000000 +#define PER_CR9_EVENT_STORE_REAL 0x08000000 +#define PER_CR9_EVENT_NULLIFICATION 0x01000000 +#define PER_CR9_CONTROL_BRANCH_ADDRESS 0x00800000 +#define PER_CR9_CONTROL_ALTERATION 0x00200000 + +/* PER bits from the PER CODE/ATMID/AI in lowcore */ +#define PER_CODE_EVENT_BRANCH 0x8000 +#define PER_CODE_EVENT_IFETCH 0x4000 +#define PER_CODE_EVENT_STORE 0x2000 +#define PER_CODE_EVENT_STORE_REAL 0x0800 +#define PER_CODE_EVENT_NULLIFICATION 0x0100 + +#define EXCP_EXT 1 /* external interrupt */ +#define EXCP_SVC 2 /* supervisor call (syscall) */ +#define EXCP_PGM 3 /* program interruption */ +#define EXCP_RESTART 4 /* restart interrupt */ +#define EXCP_STOP 5 /* stop interrupt */ +#define EXCP_IO 7 /* I/O interrupt */ +#define EXCP_MCHK 8 /* machine check */ + +#define INTERRUPT_EXT_CPU_TIMER (1 << 3) +#define INTERRUPT_EXT_CLOCK_COMPARATOR (1 << 4) +#define INTERRUPT_EXTERNAL_CALL (1 << 5) +#define INTERRUPT_EMERGENCY_SIGNAL (1 << 6) +#define INTERRUPT_RESTART (1 << 7) +#define INTERRUPT_STOP (1 << 8) + +/* Program Status Word. */ +#define S390_PSWM_REGNUM 0 +#define S390_PSWA_REGNUM 1 +/* General Purpose Registers. */ +#define S390_R0_REGNUM 2 +#define S390_R1_REGNUM 3 +#define S390_R2_REGNUM 4 +#define S390_R3_REGNUM 5 +#define S390_R4_REGNUM 6 +#define S390_R5_REGNUM 7 +#define S390_R6_REGNUM 8 +#define S390_R7_REGNUM 9 +#define S390_R8_REGNUM 10 +#define S390_R9_REGNUM 11 +#define S390_R10_REGNUM 12 +#define S390_R11_REGNUM 13 +#define S390_R12_REGNUM 14 +#define S390_R13_REGNUM 15 +#define S390_R14_REGNUM 16 +#define S390_R15_REGNUM 17 +/* Total Core Registers. 
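The numbering above (PSW mask and address first, then r0-r15) mirrors the GDB core register set for s390x.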
*/ +#define S390_NUM_CORE_REGS 18 + +static inline void setcc(S390CPU *cpu, uint64_t cc) +{ + CPUS390XState *env = &cpu->env; + + env->psw.mask &= ~(3ull << 44); + env->psw.mask |= (cc & 3) << 44; + env->cc_op = cc; +} + +/* STSI */ +#define STSI_R0_FC_MASK 0x00000000f0000000ULL +#define STSI_R0_FC_CURRENT 0x0000000000000000ULL +#define STSI_R0_FC_LEVEL_1 0x0000000010000000ULL +#define STSI_R0_FC_LEVEL_2 0x0000000020000000ULL +#define STSI_R0_FC_LEVEL_3 0x0000000030000000ULL +#define STSI_R0_RESERVED_MASK 0x000000000fffff00ULL +#define STSI_R0_SEL1_MASK 0x00000000000000ffULL +#define STSI_R1_RESERVED_MASK 0x00000000ffff0000ULL +#define STSI_R1_SEL2_MASK 0x000000000000ffffULL + +/* Basic Machine Configuration */ +typedef struct SysIB_111 { + uint8_t res1[32]; + uint8_t manuf[16]; + uint8_t type[4]; + uint8_t res2[12]; + uint8_t model[16]; + uint8_t sequence[16]; + uint8_t plant[4]; + uint8_t res3[3996]; +} SysIB_111; +QEMU_BUILD_BUG_ON(sizeof(SysIB_111) != 4096); + +/* Basic Machine CPU */ +typedef struct SysIB_121 { + uint8_t res1[80]; + uint8_t sequence[16]; + uint8_t plant[4]; + uint8_t res2[2]; + uint16_t cpu_addr; + uint8_t res3[3992]; +} SysIB_121; +QEMU_BUILD_BUG_ON(sizeof(SysIB_121) != 4096); + +/* Basic Machine CPUs */ +typedef struct SysIB_122 { + uint8_t res1[32]; + uint32_t capability; + uint16_t total_cpus; + uint16_t conf_cpus; + uint16_t standby_cpus; + uint16_t reserved_cpus; + uint16_t adjustments[2026]; +} SysIB_122; +QEMU_BUILD_BUG_ON(sizeof(SysIB_122) != 4096); + +/* LPAR CPU */ +typedef struct SysIB_221 { + uint8_t res1[80]; + uint8_t sequence[16]; + uint8_t plant[4]; + uint16_t cpu_id; + uint16_t cpu_addr; + uint8_t res3[3992]; +} SysIB_221; +QEMU_BUILD_BUG_ON(sizeof(SysIB_221) != 4096); + +/* LPAR CPUs */ +typedef struct SysIB_222 { + uint8_t res1[32]; + uint16_t lpar_num; + uint8_t res2; + uint8_t lcpuc; + uint16_t total_cpus; + uint16_t conf_cpus; + uint16_t standby_cpus; + uint16_t reserved_cpus; + uint8_t name[8]; + uint32_t caf; + uint8_t res3[16]; + uint16_t dedicated_cpus; + uint16_t shared_cpus; + uint8_t res4[4020]; +} SysIB_222; +QEMU_BUILD_BUG_ON(sizeof(SysIB_222) != 4096); + +/* VM CPUs */ +typedef struct SysIB_322 { + uint8_t res1[31]; + uint8_t count; + struct { + uint8_t res2[4]; + uint16_t total_cpus; + uint16_t conf_cpus; + uint16_t standby_cpus; + uint16_t reserved_cpus; + uint8_t name[8]; + uint32_t caf; + uint8_t cpi[16]; + uint8_t res5[3]; + uint8_t ext_name_encoding; + uint32_t res3; + uint8_t uuid[16]; + } vm[8]; + uint8_t res4[1504]; + uint8_t ext_names[8][256]; +} SysIB_322; +QEMU_BUILD_BUG_ON(sizeof(SysIB_322) != 4096); + +typedef union SysIB { + SysIB_111 sysib_111; + SysIB_121 sysib_121; + SysIB_122 sysib_122; + SysIB_221 sysib_221; + SysIB_222 sysib_222; + SysIB_322 sysib_322; +} SysIB; +QEMU_BUILD_BUG_ON(sizeof(SysIB) != 4096); + +/* MMU defines */ +#define ASCE_ORIGIN (~0xfffULL) /* segment table origin */ +#define ASCE_SUBSPACE 0x200 /* subspace group control */ +#define ASCE_PRIVATE_SPACE 0x100 /* private space control */ +#define ASCE_ALT_EVENT 0x80 /* storage alteration event control */ +#define ASCE_SPACE_SWITCH 0x40 /* space switch event */ +#define ASCE_REAL_SPACE 0x20 /* real space control */ +#define ASCE_TYPE_MASK 0x0c /* asce table type mask */ +#define ASCE_TYPE_REGION1 0x0c /* region first table type */ +#define ASCE_TYPE_REGION2 0x08 /* region second table type */ +#define ASCE_TYPE_REGION3 0x04 /* region third table type */ +#define ASCE_TYPE_SEGMENT 0x00 /* segment table type */ +#define ASCE_TABLE_LENGTH 0x03 /* region 
table length */ + +#define REGION_ENTRY_ORIGIN 0xfffffffffffff000ULL +#define REGION_ENTRY_P 0x0000000000000200ULL +#define REGION_ENTRY_TF 0x00000000000000c0ULL +#define REGION_ENTRY_I 0x0000000000000020ULL +#define REGION_ENTRY_TT 0x000000000000000cULL +#define REGION_ENTRY_TL 0x0000000000000003ULL + +#define REGION_ENTRY_TT_REGION1 0x000000000000000cULL +#define REGION_ENTRY_TT_REGION2 0x0000000000000008ULL +#define REGION_ENTRY_TT_REGION3 0x0000000000000004ULL + +#define REGION3_ENTRY_RFAA 0xffffffff80000000ULL +#define REGION3_ENTRY_AV 0x0000000000010000ULL +#define REGION3_ENTRY_ACC 0x000000000000f000ULL +#define REGION3_ENTRY_F 0x0000000000000800ULL +#define REGION3_ENTRY_FC 0x0000000000000400ULL +#define REGION3_ENTRY_IEP 0x0000000000000100ULL +#define REGION3_ENTRY_CR 0x0000000000000010ULL + +#define SEGMENT_ENTRY_ORIGIN 0xfffffffffffff800ULL +#define SEGMENT_ENTRY_SFAA 0xfffffffffff00000ULL +#define SEGMENT_ENTRY_AV 0x0000000000010000ULL +#define SEGMENT_ENTRY_ACC 0x000000000000f000ULL +#define SEGMENT_ENTRY_F 0x0000000000000800ULL +#define SEGMENT_ENTRY_FC 0x0000000000000400ULL +#define SEGMENT_ENTRY_P 0x0000000000000200ULL +#define SEGMENT_ENTRY_IEP 0x0000000000000100ULL +#define SEGMENT_ENTRY_I 0x0000000000000020ULL +#define SEGMENT_ENTRY_CS 0x0000000000000010ULL +#define SEGMENT_ENTRY_TT 0x000000000000000cULL + +#define SEGMENT_ENTRY_TT_SEGMENT 0x0000000000000000ULL + +#define PAGE_ENTRY_0 0x0000000000000800ULL +#define PAGE_ENTRY_I 0x0000000000000400ULL +#define PAGE_ENTRY_P 0x0000000000000200ULL +#define PAGE_ENTRY_IEP 0x0000000000000100ULL + +#define VADDR_REGION1_TX_MASK 0xffe0000000000000ULL +#define VADDR_REGION2_TX_MASK 0x001ffc0000000000ULL +#define VADDR_REGION3_TX_MASK 0x000003ff80000000ULL +#define VADDR_SEGMENT_TX_MASK 0x000000007ff00000ULL +#define VADDR_PAGE_TX_MASK 0x00000000000ff000ULL + +#define VADDR_REGION1_TX(vaddr) (((vaddr) & VADDR_REGION1_TX_MASK) >> 53) +#define VADDR_REGION2_TX(vaddr) (((vaddr) & VADDR_REGION2_TX_MASK) >> 42) +#define VADDR_REGION3_TX(vaddr) (((vaddr) & VADDR_REGION3_TX_MASK) >> 31) +#define VADDR_SEGMENT_TX(vaddr) (((vaddr) & VADDR_SEGMENT_TX_MASK) >> 20) +#define VADDR_PAGE_TX(vaddr) (((vaddr) & VADDR_PAGE_TX_MASK) >> 12) + +#define VADDR_REGION1_TL(vaddr) (((vaddr) & 0xc000000000000000ULL) >> 62) +#define VADDR_REGION2_TL(vaddr) (((vaddr) & 0x0018000000000000ULL) >> 51) +#define VADDR_REGION3_TL(vaddr) (((vaddr) & 0x0000030000000000ULL) >> 40) +#define VADDR_SEGMENT_TL(vaddr) (((vaddr) & 0x0000000060000000ULL) >> 29) + +#define SK_C (0x1 << 1) +#define SK_R (0x1 << 2) +#define SK_F (0x1 << 3) +#define SK_ACC_MASK (0xf << 4) + +/* SIGP order codes */ +#define SIGP_SENSE 0x01 +#define SIGP_EXTERNAL_CALL 0x02 +#define SIGP_EMERGENCY 0x03 +#define SIGP_START 0x04 +#define SIGP_STOP 0x05 +#define SIGP_RESTART 0x06 +#define SIGP_STOP_STORE_STATUS 0x09 +#define SIGP_INITIAL_CPU_RESET 0x0b +#define SIGP_CPU_RESET 0x0c +#define SIGP_SET_PREFIX 0x0d +#define SIGP_STORE_STATUS_ADDR 0x0e +#define SIGP_SET_ARCH 0x12 +#define SIGP_COND_EMERGENCY 0x13 +#define SIGP_SENSE_RUNNING 0x15 +#define SIGP_STORE_ADTL_STATUS 0x17 + +/* SIGP condition codes */ +#define SIGP_CC_ORDER_CODE_ACCEPTED 0 +#define SIGP_CC_STATUS_STORED 1 +#define SIGP_CC_BUSY 2 +#define SIGP_CC_NOT_OPERATIONAL 3 + +/* SIGP status bits */ +#define SIGP_STAT_EQUIPMENT_CHECK 0x80000000UL +#define SIGP_STAT_NOT_RUNNING 0x00000400UL +#define SIGP_STAT_INCORRECT_STATE 0x00000200UL +#define SIGP_STAT_INVALID_PARAMETER 0x00000100UL +#define SIGP_STAT_EXT_CALL_PENDING 0x00000080UL +#define 
SIGP_STAT_STOPPED 0x00000040UL +#define SIGP_STAT_OPERATOR_INTERV 0x00000020UL +#define SIGP_STAT_CHECK_STOP 0x00000010UL +#define SIGP_STAT_INOPERATIVE 0x00000004UL +#define SIGP_STAT_INVALID_ORDER 0x00000002UL +#define SIGP_STAT_RECEIVER_CHECK 0x00000001UL + +/* SIGP SET ARCHITECTURE modes */ +#define SIGP_MODE_ESA_S390 0 +#define SIGP_MODE_Z_ARCH_TRANS_ALL_PSW 1 +#define SIGP_MODE_Z_ARCH_TRANS_CUR_PSW 2 + +/* SIGP order code mask corresponding to bit positions 56-63 */ +#define SIGP_ORDER_MASK 0x000000ff + +/* machine check interruption code */ + +/* subclasses */ +#define MCIC_SC_SD 0x8000000000000000ULL +#define MCIC_SC_PD 0x4000000000000000ULL +#define MCIC_SC_SR 0x2000000000000000ULL +#define MCIC_SC_CD 0x0800000000000000ULL +#define MCIC_SC_ED 0x0400000000000000ULL +#define MCIC_SC_DG 0x0100000000000000ULL +#define MCIC_SC_W 0x0080000000000000ULL +#define MCIC_SC_CP 0x0040000000000000ULL +#define MCIC_SC_SP 0x0020000000000000ULL +#define MCIC_SC_CK 0x0010000000000000ULL + +/* subclass modifiers */ +#define MCIC_SCM_B 0x0002000000000000ULL +#define MCIC_SCM_DA 0x0000000020000000ULL +#define MCIC_SCM_AP 0x0000000000080000ULL + +/* storage errors */ +#define MCIC_SE_SE 0x0000800000000000ULL +#define MCIC_SE_SC 0x0000400000000000ULL +#define MCIC_SE_KE 0x0000200000000000ULL +#define MCIC_SE_DS 0x0000100000000000ULL +#define MCIC_SE_IE 0x0000000080000000ULL + +/* validity bits */ +#define MCIC_VB_WP 0x0000080000000000ULL +#define MCIC_VB_MS 0x0000040000000000ULL +#define MCIC_VB_PM 0x0000020000000000ULL +#define MCIC_VB_IA 0x0000010000000000ULL +#define MCIC_VB_FA 0x0000008000000000ULL +#define MCIC_VB_VR 0x0000004000000000ULL +#define MCIC_VB_EC 0x0000002000000000ULL +#define MCIC_VB_FP 0x0000001000000000ULL +#define MCIC_VB_GR 0x0000000800000000ULL +#define MCIC_VB_CR 0x0000000400000000ULL +#define MCIC_VB_ST 0x0000000100000000ULL +#define MCIC_VB_AR 0x0000000040000000ULL +#define MCIC_VB_GS 0x0000000008000000ULL +#define MCIC_VB_PR 0x0000000000200000ULL +#define MCIC_VB_FC 0x0000000000100000ULL +#define MCIC_VB_CT 0x0000000000020000ULL +#define MCIC_VB_CC 0x0000000000010000ULL + +static inline uint64_t s390_build_validity_mcic(struct uc_struct *uc) +{ + uint64_t mcic; + + /* + * Indicate all validity bits (no damage) only. Other bits have to be + * added by the caller. 
(storage errors, subclasses and subclass modifiers) + */ + mcic = MCIC_VB_WP | MCIC_VB_MS | MCIC_VB_PM | MCIC_VB_IA | MCIC_VB_FP | + MCIC_VB_GR | MCIC_VB_CR | MCIC_VB_ST | MCIC_VB_AR | MCIC_VB_PR | + MCIC_VB_FC | MCIC_VB_CT | MCIC_VB_CC; + if (s390_has_feat(uc, S390_FEAT_VECTOR)) { + mcic |= MCIC_VB_VR; + } + if (s390_has_feat(uc, S390_FEAT_GUARDED_STORAGE)) { + mcic |= MCIC_VB_GS; + } + return mcic; +} + +static inline void s390_do_cpu_full_reset(CPUState *cs, run_on_cpu_data arg) +{ + cpu_reset(cs); +} + +static inline void s390_do_cpu_reset(CPUState *cs, run_on_cpu_data arg) +{ + S390CPUClass *scc = S390_CPU_GET_CLASS(cs); + + scc->reset(cs, S390_CPU_RESET_NORMAL); +} + +static inline void s390_do_cpu_initial_reset(CPUState *cs, run_on_cpu_data arg) +{ + S390CPUClass *scc = S390_CPU_GET_CLASS(cs); + + scc->reset(cs, S390_CPU_RESET_INITIAL); +} + +static inline void s390_do_cpu_load_normal(CPUState *cs, run_on_cpu_data arg) +{ + S390CPUClass *scc = S390_CPU_GET_CLASS(cs); + + scc->load_normal(cs); +} + + +/* cpu.c */ +void s390_crypto_reset(void); +int s390_set_memory_limit(uint64_t new_limit, uint64_t *hw_limit); +void s390_set_max_pagesize(uint64_t pagesize); +void s390_cmma_reset(void); +void s390_enable_css_support(S390CPU *cpu); + +unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu); + +static inline uint8_t s390_cpu_get_state(S390CPU *cpu) +{ + return cpu->env.cpu_state; +} + + +/* cpu_models.c */ +void s390_cpu_list(void); +#define cpu_list s390_cpu_list +void s390_set_qemu_cpu_model(uint16_t type, uint8_t gen, uint8_t ec_ga, + const S390FeatInit feat_init); + + +/* helper.c */ +#define S390_CPU_TYPE_SUFFIX "-" TYPE_S390_CPU +#define S390_CPU_TYPE_NAME(name) (name S390_CPU_TYPE_SUFFIX) +#define CPU_RESOLVING_TYPE TYPE_S390_CPU + +/* you can call this signal handler from your SIGBUS and SIGSEGV + signal handlers to inform the virtual CPU of exceptions. non zero + is returned if the signal was handled by the virtual CPU. 
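a zero return means the fault was not raised by the emulated CPU and should be handled by the host as usual.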
*/ +int cpu_s390x_signal_handler(int host_signum, void *pinfo, void *puc); +#define cpu_signal_handler cpu_s390x_signal_handler + + +/* interrupt.c */ +void s390_crw_mchk(void); +void s390_io_interrupt(uint16_t subchannel_id, uint16_t subchannel_nr, + uint32_t io_int_parm, uint32_t io_int_word); +#define RA_IGNORED 0 +void s390_program_interrupt(CPUS390XState *env, uint32_t code, uintptr_t ra); +/* service interrupts are floating, therefore we must not pass a cpustate */ +void s390_sclp_extint(uint32_t parm); + +/* mmu_helper.c */ +int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf, + int len, bool is_write); +#define s390_cpu_virt_mem_read(cpu, laddr, ar, dest, len) \ + s390_cpu_virt_mem_rw(cpu, laddr, ar, dest, len, false) +#define s390_cpu_virt_mem_write(cpu, laddr, ar, dest, len) \ + s390_cpu_virt_mem_rw(cpu, laddr, ar, dest, len, true) +#define s390_cpu_virt_mem_check_read(cpu, laddr, ar, len) \ + s390_cpu_virt_mem_rw(cpu, laddr, ar, NULL, len, false) +#define s390_cpu_virt_mem_check_write(cpu, laddr, ar, len) \ + s390_cpu_virt_mem_rw(cpu, laddr, ar, NULL, len, true) +void s390_cpu_virt_mem_handle_exc(S390CPU *cpu, uintptr_t ra); + + +/* sigp.c */ +int s390_cpu_restart(S390CPU *cpu); +void s390_init_sigp(void); + + +/* outside of target/s390x/ */ +S390CPU *s390_cpu_addr2state(uint16_t cpu_addr); + +typedef CPUS390XState CPUArchState; +typedef S390CPU ArchCPU; + +#include "exec/cpu-all.h" + +typedef enum CpuS390State { + S390_CPU_STATE_UNINITIALIZED, + S390_CPU_STATE_STOPPED, + S390_CPU_STATE_CHECK_STOP, + S390_CPU_STATE_OPERATING, + S390_CPU_STATE_LOAD, + S390_CPU_STATE__MAX, +} CpuS390State; + +#endif diff --git a/qemu/target/s390x/cpu_features.c b/qemu/target/s390x/cpu_features.c new file mode 100644 index 00000000..abc46cf9 --- /dev/null +++ b/qemu/target/s390x/cpu_features.c @@ -0,0 +1,210 @@ +/* + * CPU features/facilities for s390x + * + * Copyright IBM Corp. 2016, 2018 + * Copyright Red Hat, Inc. 2019 + * + * Author(s): David Hildenbrand + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. 
+ */ + +#include "qemu/osdep.h" +#include "cpu_features.h" + +#define DEF_FEAT(_FEAT, _NAME, _TYPE, _BIT, _DESC) \ + [S390_FEAT_##_FEAT] = { \ + .name = _NAME, \ + .type = S390_FEAT_TYPE_##_TYPE, \ + .bit = _BIT, \ + .desc = _DESC, \ + }, +static const S390FeatDef s390_features[S390_FEAT_MAX] = { + #include "cpu_features_def.inc.h" +}; +#undef DEF_FEAT + +const S390FeatDef *s390_feat_def(S390Feat feat) +{ + return &s390_features[feat]; +} + +S390Feat s390_feat_by_type_and_bit(S390FeatType type, int bit) +{ + S390Feat feat; + + for (feat = 0; feat < ARRAY_SIZE(s390_features); feat++) { + if (s390_features[feat].type == type && + s390_features[feat].bit == bit) { + return feat; + } + } + return S390_FEAT_MAX; +} + +void s390_init_feat_bitmap(const S390FeatInit init, S390FeatBitmap bitmap) +{ + int i, j; + + for (i = 0; i < (S390_FEAT_MAX / 64 + 1); i++) { + if (init[i]) { + for (j = 0; j < 64; j++) { + if (init[i] & 1ULL << j) { + set_bit(i * 64 + j, bitmap); + } + } + } + } +} + +void s390_fill_feat_block(const S390FeatBitmap features, S390FeatType type, + uint8_t *data) +{ + S390Feat feat; + int bit_nr; + + switch (type) { + case S390_FEAT_TYPE_STFL: + if (test_bit(S390_FEAT_ZARCH, features)) { + /* Features that are always active */ + set_be_bit(2, data); /* z/Architecture */ + set_be_bit(138, data); /* Configuration-z-architectural-mode */ + } + break; + case S390_FEAT_TYPE_PTFF: + case S390_FEAT_TYPE_KMAC: + case S390_FEAT_TYPE_KMC: + case S390_FEAT_TYPE_KM: + case S390_FEAT_TYPE_KIMD: + case S390_FEAT_TYPE_KLMD: + case S390_FEAT_TYPE_PCKMO: + case S390_FEAT_TYPE_KMCTR: + case S390_FEAT_TYPE_KMF: + case S390_FEAT_TYPE_KMO: + case S390_FEAT_TYPE_PCC: + case S390_FEAT_TYPE_PPNO: + case S390_FEAT_TYPE_KMA: + case S390_FEAT_TYPE_KDSA: + case S390_FEAT_TYPE_SORTL: + case S390_FEAT_TYPE_DFLTCC: + set_be_bit(0, data); /* query is always available */ + break; + default: + break; + }; + + feat = find_first_bit(features, S390_FEAT_MAX); + while (feat < S390_FEAT_MAX) { + if (s390_features[feat].type == type) { + bit_nr = s390_features[feat].bit; + /* big endian on uint8_t array */ + set_be_bit(bit_nr, data); + } + feat = find_next_bit(features, S390_FEAT_MAX, feat + 1); + } +} + +void s390_add_from_feat_block(S390FeatBitmap features, S390FeatType type, + uint8_t *data) +{ + int nr_bits, le_bit; + + switch (type) { + case S390_FEAT_TYPE_STFL: + nr_bits = 16384; + break; + case S390_FEAT_TYPE_PLO: + case S390_FEAT_TYPE_SORTL: + case S390_FEAT_TYPE_DFLTCC: + nr_bits = 256; + break; + default: + /* all cpu subfunctions have 128 bit */ + nr_bits = 128; + }; + + le_bit = find_first_bit((unsigned long *) data, nr_bits); + while (le_bit < nr_bits) { + /* convert the bit number to a big endian bit nr */ + S390Feat feat = s390_feat_by_type_and_bit(type, BE_BIT_NR(le_bit)); + /* ignore unknown bits */ + if (feat < S390_FEAT_MAX) { + set_bit(feat, features); + } + le_bit = find_next_bit((unsigned long *) data, nr_bits, le_bit + 1); + } +} + +void s390_feat_bitmap_to_ascii(const S390FeatBitmap features, void *opaque, + void (*fn)(const char *name, void *opaque)) +{ + S390FeatBitmap bitmap, tmp; + S390FeatGroup group; + S390Feat feat; + + bitmap_copy(bitmap, features, S390_FEAT_MAX); + + /* process whole groups first */ + for (group = 0; group < S390_FEAT_GROUP_MAX; group++) { + const S390FeatGroupDef *def = s390_feat_group_def(group); + + bitmap_and(tmp, bitmap, def->feat, S390_FEAT_MAX); + if (bitmap_equal(tmp, def->feat, S390_FEAT_MAX)) { + bitmap_andnot(bitmap, bitmap, def->feat, S390_FEAT_MAX); + 
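/* the whole group is present: its bits were consumed above, report the group by name */ +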
fn(def->name, opaque); + } + } + + /* report leftovers as separate features */ + feat = find_first_bit(bitmap, S390_FEAT_MAX); + while (feat < S390_FEAT_MAX) { + fn(s390_feat_def(feat)->name, opaque); + feat = find_next_bit(bitmap, S390_FEAT_MAX, feat + 1); + }; +} + +#define FEAT_GROUP_INIT(_name, _group, _desc) \ + { \ + .name = _name, \ + .desc = _desc, \ + .init = { S390_FEAT_GROUP_LIST_ ## _group }, \ + } + +/* indexed by feature group number for easy lookup */ +static S390FeatGroupDef s390_feature_groups[] = { + FEAT_GROUP_INIT("plo", PLO, "Perform-locked-operation facility"), + FEAT_GROUP_INIT("tods", TOD_CLOCK_STEERING, "Tod-clock-steering facility"), + FEAT_GROUP_INIT("gen13ptff", GEN13_PTFF, "PTFF enhancements introduced with z13"), + FEAT_GROUP_INIT("msa", MSA, "Message-security-assist facility"), + FEAT_GROUP_INIT("msa1", MSA_EXT_1, "Message-security-assist-extension 1 facility"), + FEAT_GROUP_INIT("msa2", MSA_EXT_2, "Message-security-assist-extension 2 facility"), + FEAT_GROUP_INIT("msa3", MSA_EXT_3, "Message-security-assist-extension 3 facility"), + FEAT_GROUP_INIT("msa4", MSA_EXT_4, "Message-security-assist-extension 4 facility"), + FEAT_GROUP_INIT("msa5", MSA_EXT_5, "Message-security-assist-extension 5 facility"), + FEAT_GROUP_INIT("msa6", MSA_EXT_6, "Message-security-assist-extension 6 facility"), + FEAT_GROUP_INIT("msa7", MSA_EXT_7, "Message-security-assist-extension 7 facility"), + FEAT_GROUP_INIT("msa8", MSA_EXT_8, "Message-security-assist-extension 8 facility"), + FEAT_GROUP_INIT("msa9", MSA_EXT_9, "Message-security-assist-extension 9 facility"), + FEAT_GROUP_INIT("msa9_pckmo", MSA_EXT_9_PCKMO, "Message-security-assist-extension 9 PCKMO subfunctions"), + FEAT_GROUP_INIT("mepochptff", MULTIPLE_EPOCH_PTFF, "PTFF enhancements introduced with Multiple-epoch facility"), + FEAT_GROUP_INIT("esort", ENH_SORT, "Enhanced-sort facility"), + FEAT_GROUP_INIT("deflate", DEFLATE_CONVERSION, "Deflate-conversion facility"), +}; + +const S390FeatGroupDef *s390_feat_group_def(S390FeatGroup group) +{ + return &s390_feature_groups[group]; +} + +void init_groups(void) +{ + int i; + + /* init all bitmaps from generated data initially */ + for (i = 0; i < ARRAY_SIZE(s390_feature_groups); i++) { + s390_init_feat_bitmap(s390_feature_groups[i].init, + s390_feature_groups[i].feat); + } +} diff --git a/qemu/target/s390x/cpu_features.h b/qemu/target/s390x/cpu_features.h new file mode 100644 index 00000000..da695a83 --- /dev/null +++ b/qemu/target/s390x/cpu_features.h @@ -0,0 +1,91 @@ +/* + * CPU features/facilities helper structs and utility functions for s390 + * + * Copyright 2016 IBM Corp. + * + * Author(s): Michael Mueller + * David Hildenbrand + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. 
+ */ + +#ifndef TARGET_S390X_CPU_FEATURES_H +#define TARGET_S390X_CPU_FEATURES_H + +#include "qemu/bitmap.h" +#include "cpu_features_def.h" +#include "gen-features.h" + +/* CPU features are announced via different ways */ +typedef enum { + S390_FEAT_TYPE_STFL, + S390_FEAT_TYPE_SCLP_CONF_CHAR, + S390_FEAT_TYPE_SCLP_CONF_CHAR_EXT, + S390_FEAT_TYPE_SCLP_CPU, + S390_FEAT_TYPE_MISC, + S390_FEAT_TYPE_PLO, + S390_FEAT_TYPE_PTFF, + S390_FEAT_TYPE_KMAC, + S390_FEAT_TYPE_KMC, + S390_FEAT_TYPE_KM, + S390_FEAT_TYPE_KIMD, + S390_FEAT_TYPE_KLMD, + S390_FEAT_TYPE_PCKMO, + S390_FEAT_TYPE_KMCTR, + S390_FEAT_TYPE_KMF, + S390_FEAT_TYPE_KMO, + S390_FEAT_TYPE_PCC, + S390_FEAT_TYPE_PPNO, + S390_FEAT_TYPE_KMA, + S390_FEAT_TYPE_KDSA, + S390_FEAT_TYPE_SORTL, + S390_FEAT_TYPE_DFLTCC, +} S390FeatType; + +/* Definition of a CPU feature */ +typedef struct { + const char *name; /* name exposed to the user */ + const char *desc; /* description exposed to the user */ + S390FeatType type; /* feature type (way of indication)*/ + int bit; /* bit within the feature type area (fixed) */ +} S390FeatDef; + +/* use ordinary bitmap operations to work with features */ +typedef unsigned long S390FeatBitmap[BITS_TO_LONGS(S390_FEAT_MAX)]; + +/* 64bit based bitmap used to init S390FeatBitmap from generated data */ +typedef uint64_t S390FeatInit[S390_FEAT_MAX / 64 + 1]; + +const S390FeatDef *s390_feat_def(S390Feat feat); +S390Feat s390_feat_by_type_and_bit(S390FeatType type, int bit); +void s390_init_feat_bitmap(const S390FeatInit init, S390FeatBitmap bitmap); +void s390_fill_feat_block(const S390FeatBitmap features, S390FeatType type, + uint8_t *data); +void s390_add_from_feat_block(S390FeatBitmap features, S390FeatType type, + uint8_t *data); +void s390_feat_bitmap_to_ascii(const S390FeatBitmap features, void *opaque, + void (*fn)(const char *name, void *opaque)); + +/* Definition of a CPU feature group */ +typedef struct { + const char *name; /* name exposed to the user */ + const char *desc; /* description exposed to the user */ + S390FeatBitmap feat; /* features contained in the group */ + S390FeatInit init; /* used to init feat from generated data */ +} S390FeatGroupDef; + +const S390FeatGroupDef *s390_feat_group_def(S390FeatGroup group); + +#define BE_BIT_NR(BIT) (BIT ^ (BITS_PER_LONG - 1)) + +static inline void set_be_bit(unsigned int bit_nr, uint8_t *array) +{ + array[bit_nr / 8] |= 0x80 >> (bit_nr % 8); +} +static inline bool test_be_bit(unsigned int bit_nr, const uint8_t *array) +{ + return array[bit_nr / 8] & (0x80 >> (bit_nr % 8)); +} +#endif /* TARGET_S390X_CPU_FEATURES_H */ diff --git a/qemu/target/s390x/cpu_features_def.h b/qemu/target/s390x/cpu_features_def.h new file mode 100644 index 00000000..412d356f --- /dev/null +++ b/qemu/target/s390x/cpu_features_def.h @@ -0,0 +1,25 @@ +/* + * CPU features/facilities for s390 + * + * Copyright IBM Corp. 2016, 2018 + * Copyright Red Hat, Inc. 2019 + * + * Author(s): Michael Mueller + * David Hildenbrand + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. + */ + +#ifndef TARGET_S390X_CPU_FEATURES_DEF_H +#define TARGET_S390X_CPU_FEATURES_DEF_H + +#define DEF_FEAT(_FEAT, ...) 
S390_FEAT_##_FEAT, +typedef enum { + #include "cpu_features_def.inc.h" + S390_FEAT_MAX, +} S390Feat; +#undef DEF_FEAT + +#endif /* TARGET_S390X_CPU_FEATURES_DEF_H */ diff --git a/qemu/target/s390x/cpu_features_def.inc.h b/qemu/target/s390x/cpu_features_def.inc.h new file mode 100644 index 00000000..31dff0d8 --- /dev/null +++ b/qemu/target/s390x/cpu_features_def.inc.h @@ -0,0 +1,370 @@ +/* + * RAW s390x CPU feature definitions: + * + * DEF_FEAT(_FEAT, _NAME, _TYPE, _BIT, _DESC): + * - _FEAT: Feature (enum) name used internally (S390_FEAT_##_FEAT) + * - _NAME: Feature name exposed to the user. + * - _TYPE: Feature type (S390_FEAT_TYPE_##_TYPE). + * - _BIT: Feature bit number within feature type block (unused for MISC). + * - _DESC: Feature description, exposed to the user. + * + * Copyright IBM Corp. 2016, 2018 + * Copyright Red Hat, Inc. 2019 + * + * Author(s): Michael Mueller + * David Hildenbrand + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. + */ + +/* Features exposed via the STFL(E) instruction. */ +DEF_FEAT(ESAN3, "esan3", STFL, 0, "Instructions marked as n3") +DEF_FEAT(ZARCH, "zarch", STFL, 1, "z/Architecture architectural mode") +DEF_FEAT(DAT_ENH, "dateh", STFL, 3, "DAT-enhancement facility") +DEF_FEAT(IDTE_SEGMENT, "idtes", STFL, 4, "IDTE selective TLB segment-table clearing") +DEF_FEAT(IDTE_REGION, "idter", STFL, 5, "IDTE selective TLB region-table clearing") +DEF_FEAT(ASN_LX_REUSE, "asnlxr", STFL, 6, "ASN-and-LX reuse facility") +DEF_FEAT(STFLE, "stfle", STFL, 7, "Store-facility-list-extended facility") +DEF_FEAT(EDAT, "edat", STFL, 8, "Enhanced-DAT facility") +DEF_FEAT(SENSE_RUNNING_STATUS, "srs", STFL, 9, "Sense-running-status facility") +DEF_FEAT(CONDITIONAL_SSKE, "csske", STFL, 10, "Conditional-SSKE facility") +DEF_FEAT(CONFIGURATION_TOPOLOGY, "ctop", STFL, 11, "Configuration-topology facility") +DEF_FEAT(AP_QUERY_CONFIG_INFO, "apqci", STFL, 12, "Query AP Configuration Information facility") +DEF_FEAT(IPTE_RANGE, "ipter", STFL, 13, "IPTE-range facility") +DEF_FEAT(NONQ_KEY_SETTING, "nonqks", STFL, 14, "Nonquiescing key-setting facility") +DEF_FEAT(AP_FACILITIES_TEST, "apft", STFL, 15, "AP Facilities Test facility") +DEF_FEAT(EXTENDED_TRANSLATION_2, "etf2", STFL, 16, "Extended-translation facility 2") +DEF_FEAT(MSA, "msa-base", STFL, 17, "Message-security-assist facility (excluding subfunctions)") +DEF_FEAT(LONG_DISPLACEMENT, "ldisp", STFL, 18, "Long-displacement facility") +DEF_FEAT(LONG_DISPLACEMENT_FAST, "ldisphp", STFL, 19, "Long-displacement facility has high performance") +DEF_FEAT(HFP_MADDSUB, "hfpm", STFL, 20, "HFP-multiply-add/subtract facility") +DEF_FEAT(EXTENDED_IMMEDIATE, "eimm", STFL, 21, "Extended-immediate facility") +DEF_FEAT(EXTENDED_TRANSLATION_3, "etf3", STFL, 22, "Extended-translation facility 3") +DEF_FEAT(HFP_UNNORMALIZED_EXT, "hfpue", STFL, 23, "HFP-unnormalized-extension facility") +DEF_FEAT(ETF2_ENH, "etf2eh", STFL, 24, "ETF2-enhancement facility") +DEF_FEAT(STORE_CLOCK_FAST, "stckf", STFL, 25, "Store-clock-fast facility") +DEF_FEAT(PARSING_ENH, "parseh", STFL, 26, "Parsing-enhancement facility") +DEF_FEAT(MOVE_WITH_OPTIONAL_SPEC, "mvcos", STFL, 27, "Move-with-optional-specification facility") +DEF_FEAT(TOD_CLOCK_STEERING, "tods-base", STFL, 28, "TOD-clock-steering facility (excluding subfunctions)") +DEF_FEAT(ETF3_ENH, "etf3eh", STFL, 30, "ETF3-enhancement facility") +DEF_FEAT(EXTRACT_CPU_TIME, "ectg", STFL, 31, "Extract-CPU-time 
facility") +DEF_FEAT(COMPARE_AND_SWAP_AND_STORE, "csst", STFL, 32, "Compare-and-swap-and-store facility") +DEF_FEAT(COMPARE_AND_SWAP_AND_STORE_2, "csst2", STFL, 33, "Compare-and-swap-and-store facility 2") +DEF_FEAT(GENERAL_INSTRUCTIONS_EXT, "ginste", STFL, 34, "General-instructions-extension facility") +DEF_FEAT(EXECUTE_EXT, "exrl", STFL, 35, "Execute-extensions facility") +DEF_FEAT(ENHANCED_MONITOR, "emon", STFL, 36, "Enhanced-monitor facility") +DEF_FEAT(FLOATING_POINT_EXT, "fpe", STFL, 37, "Floating-point extension facility") +DEF_FEAT(ORDER_PRESERVING_COMPRESSION, "opc", STFL, 38, "Order Preserving Compression facility") +DEF_FEAT(SET_PROGRAM_PARAMETERS, "sprogp", STFL, 40, "Set-program-parameters facility") +DEF_FEAT(FLOATING_POINT_SUPPPORT_ENH, "fpseh", STFL, 41, "Floating-point-support-enhancement facilities") +DEF_FEAT(DFP, "dfp", STFL, 42, "DFP (decimal-floating-point) facility") +DEF_FEAT(DFP_FAST, "dfphp", STFL, 43, "DFP (decimal-floating-point) facility has high performance") +DEF_FEAT(PFPO, "pfpo", STFL, 44, "PFPO instruction") +DEF_FEAT(STFLE_45, "stfle45", STFL, 45, "Various facilities introduced with z196") +DEF_FEAT(CMPSC_ENH, "cmpsceh", STFL, 47, "CMPSC-enhancement facility") +DEF_FEAT(DFP_ZONED_CONVERSION, "dfpzc", STFL, 48, "Decimal-floating-point zoned-conversion facility") +DEF_FEAT(STFLE_49, "stfle49", STFL, 49, "Various facilities introduced with zEC12") +DEF_FEAT(CONSTRAINT_TRANSACTIONAL_EXE, "cte", STFL, 50, "Constrained transactional-execution facility") +DEF_FEAT(LOCAL_TLB_CLEARING, "ltlbc", STFL, 51, "Local-TLB-clearing facility") +DEF_FEAT(INTERLOCKED_ACCESS_2, "iacc2", STFL, 52, "Interlocked-access facility 2") +DEF_FEAT(STFLE_53, "stfle53", STFL, 53, "Various facilities introduced with z13") +DEF_FEAT(ENTROPY_ENC_COMP, "eec", STFL, 54, "Entropy encoding compression facility") +DEF_FEAT(MSA_EXT_5, "msa5-base", STFL, 57, "Message-security-assist-extension-5 facility (excluding subfunctions)") +DEF_FEAT(MISC_INSTRUCTION_EXT, "minste2", STFL, 58, "Miscellaneous-instruction-extensions facility 2") +DEF_FEAT(SEMAPHORE_ASSIST, "sema", STFL, 59, "Semaphore-assist facility") +DEF_FEAT(TIME_SLICE_INSTRUMENTATION, "tsi", STFL, 60, "Time-slice Instrumentation facility") +DEF_FEAT(MISC_INSTRUCTION_EXT3, "minste3", STFL, 61, "Miscellaneous-Instruction-Extensions Facility 3") +DEF_FEAT(RUNTIME_INSTRUMENTATION, "ri", STFL, 64, "CPU runtime-instrumentation facility") +DEF_FEAT(AP_QUEUE_INTERRUPT_CONTROL, "apqi", STFL, 65, "AP-Queue interruption facility") +DEF_FEAT(ZPCI, "zpci", STFL, 69, "z/PCI facility") +DEF_FEAT(ADAPTER_EVENT_NOTIFICATION, "aen", STFL, 71, "General-purpose-adapter-event-notification facility") +DEF_FEAT(ADAPTER_INT_SUPPRESSION, "ais", STFL, 72, "General-purpose-adapter-interruption-suppression facility") +DEF_FEAT(TRANSACTIONAL_EXE, "te", STFL, 73, "Transactional-execution facility") +DEF_FEAT(STORE_HYPERVISOR_INFO, "sthyi", STFL, 74, "Store-hypervisor-information facility") +DEF_FEAT(ACCESS_EXCEPTION_FS_INDICATION, "aefsi", STFL, 75, "Access-exception-fetch/store-indication facility") +DEF_FEAT(MSA_EXT_3, "msa3-base", STFL, 76, "Message-security-assist-extension-3 facility (excluding subfunctions)") +DEF_FEAT(MSA_EXT_4, "msa4-base", STFL, 77, "Message-security-assist-extension-4 facility (excluding subfunctions)") +DEF_FEAT(EDAT_2, "edat2", STFL, 78, "Enhanced-DAT facility 2") +DEF_FEAT(DFP_PACKED_CONVERSION, "dfppc", STFL, 80, "Decimal-floating-point packed-conversion facility") +DEF_FEAT(PPA15, "ppa15", STFL, 81, "PPA15 is installed") +DEF_FEAT(BPB, 
"bpb", STFL, 82, "Branch prediction blocking") +DEF_FEAT(VECTOR, "vx", STFL, 129, "Vector facility") +DEF_FEAT(INSTRUCTION_EXEC_PROT, "iep", STFL, 130, "Instruction-execution-protection facility") +DEF_FEAT(SIDE_EFFECT_ACCESS_ESOP2, "sea_esop2", STFL, 131, "Side-effect-access facility and Enhanced-suppression-on-protection facility 2") +DEF_FEAT(GUARDED_STORAGE, "gs", STFL, 133, "Guarded-storage facility") +DEF_FEAT(VECTOR_PACKED_DECIMAL, "vxpd", STFL, 134, "Vector packed decimal facility") +DEF_FEAT(VECTOR_ENH, "vxeh", STFL, 135, "Vector enhancements facility") +DEF_FEAT(MULTIPLE_EPOCH, "mepoch", STFL, 139, "Multiple-epoch facility") +DEF_FEAT(TEST_PENDING_EXT_INTERRUPTION, "tpei", STFL, 144, "Test-pending-external-interruption facility") +DEF_FEAT(INSERT_REFERENCE_BITS_MULT, "irbm", STFL, 145, "Insert-reference-bits-multiple facility") +DEF_FEAT(MSA_EXT_8, "msa8-base", STFL, 146, "Message-security-assist-extension-8 facility (excluding subfunctions)") +DEF_FEAT(CMM_NT, "cmmnt", STFL, 147, "CMM: ESSA-enhancement (no translate) facility") +DEF_FEAT(VECTOR_ENH2, "vxeh2", STFL, 148, "Vector Enhancements facility 2") +DEF_FEAT(ESORT_BASE, "esort-base", STFL, 150, "Enhanced-sort facility (excluding subfunctions)") +DEF_FEAT(DEFLATE_BASE, "deflate-base", STFL, 151, "Deflate-conversion facility (excluding subfunctions)") +DEF_FEAT(VECTOR_PACKED_DECIMAL_ENH, "vxpdeh", STFL, 152, "Vector-Packed-Decimal-Enhancement Facility") +DEF_FEAT(MSA_EXT_9, "msa9-base", STFL, 155, "Message-security-assist-extension-9 facility (excluding subfunctions)") +DEF_FEAT(ETOKEN, "etoken", STFL, 156, "Etoken facility") + +/* Features exposed via SCLP SCCB Byte 80 - 98 (bit numbers relative to byte-80) */ +DEF_FEAT(SIE_GSLS, "gsls", SCLP_CONF_CHAR, 40, "SIE: Guest-storage-limit-suppression facility") +DEF_FEAT(ESOP, "esop", SCLP_CONF_CHAR, 46, "Enhanced-suppression-on-protection facility") +DEF_FEAT(HPMA2, "hpma2", SCLP_CONF_CHAR, 90, "Host page management assist 2 Facility") /* 91-2 */ +DEF_FEAT(SIE_KSS, "kss", SCLP_CONF_CHAR, 151, "SIE: Keyless-subset facility") /* 98-7 */ + +/* Features exposed via SCLP SCCB Byte 116 - 119 (bit numbers relative to byte-116) */ +DEF_FEAT(SIE_64BSCAO, "64bscao", SCLP_CONF_CHAR_EXT, 0, "SIE: 64-bit-SCAO facility") +DEF_FEAT(SIE_CMMA, "cmma", SCLP_CONF_CHAR_EXT, 1, "SIE: Collaborative-memory-management assist") +DEF_FEAT(SIE_PFMFI, "pfmfi", SCLP_CONF_CHAR_EXT, 9, "SIE: PFMF interpretation facility") +DEF_FEAT(SIE_IBS, "ibs", SCLP_CONF_CHAR_EXT, 10, "SIE: Interlock-and-broadcast-suppression facility") + +/* Features exposed via SCLP CPU info. */ +DEF_FEAT(SIE_F2, "sief2", SCLP_CPU, 4, "SIE: interception format 2 (Virtual SIE)") +DEF_FEAT(SIE_SKEY, "skey", SCLP_CPU, 5, "SIE: Storage-key facility") +DEF_FEAT(SIE_GPERE, "gpereh", SCLP_CPU, 10, "SIE: Guest-PER enhancement facility") +DEF_FEAT(SIE_SIIF, "siif", SCLP_CPU, 11, "SIE: Shared IPTE-interlock facility") +DEF_FEAT(SIE_SIGPIF, "sigpif", SCLP_CPU, 12, "SIE: SIGP interpretation facility") +DEF_FEAT(SIE_IB, "ib", SCLP_CPU, 42, "SIE: Intervention bypass facility") +DEF_FEAT(SIE_CEI, "cei", SCLP_CPU, 43, "SIE: Conditional-external-interception facility") + +/* + * Features exposed via no feature bit (but e.g., instruction sensing) + * -> the feature bit number is irrelavant + */ +DEF_FEAT(DAT_ENH_2, "dateh2", MISC, 0, "DAT-enhancement facility 2") +DEF_FEAT(CMM, "cmm", MISC, 0, "Collaborative-memory-management facility") +DEF_FEAT(AP, "ap", MISC, 0, "AP instructions installed") + +/* Features exposed via the PLO instruction. 
*/ +DEF_FEAT(PLO_CL, "plo-cl", PLO, 0, "PLO Compare and load (32 bit in general registers)") +DEF_FEAT(PLO_CLG, "plo-clg", PLO, 1, "PLO Compare and load (64 bit in parameter list)") +DEF_FEAT(PLO_CLGR, "plo-clgr", PLO, 2, "PLO Compare and load (32 bit in general registers)") +DEF_FEAT(PLO_CLX, "plo-clx", PLO, 3, "PLO Compare and load (128 bit in parameter list)") +DEF_FEAT(PLO_CS, "plo-cs", PLO, 4, "PLO Compare and swap (32 bit in general registers)") +DEF_FEAT(PLO_CSG, "plo-csg", PLO, 5, "PLO Compare and swap (64 bit in parameter list)") +DEF_FEAT(PLO_CSGR, "plo-csgr", PLO, 6, "PLO Compare and swap (32 bit in general registers)") +DEF_FEAT(PLO_CSX, "plo-csx", PLO, 7, "PLO Compare and swap (128 bit in parameter list)") +DEF_FEAT(PLO_DCS, "plo-dcs", PLO, 8, "PLO Double compare and swap (32 bit in general registers)") +DEF_FEAT(PLO_DCSG, "plo-dcsg", PLO, 9, "PLO Double compare and swap (64 bit in parameter list)") +DEF_FEAT(PLO_DCSGR, "plo-dcsgr", PLO, 10, "PLO Double compare and swap (32 bit in general registers)") +DEF_FEAT(PLO_DCSX, "plo-dcsx", PLO, 11, "PLO Double compare and swap (128 bit in parameter list)") +DEF_FEAT(PLO_CSST, "plo-csst", PLO, 12, "PLO Compare and swap and store (32 bit in general registers)") +DEF_FEAT(PLO_CSSTG, "plo-csstg", PLO, 13, "PLO Compare and swap and store (64 bit in parameter list)") +DEF_FEAT(PLO_CSSTGR, "plo-csstgr", PLO, 14, "PLO Compare and swap and store (32 bit in general registers)") +DEF_FEAT(PLO_CSSTX, "plo-csstx", PLO, 15, "PLO Compare and swap and store (128 bit in parameter list)") +DEF_FEAT(PLO_CSDST, "plo-csdst", PLO, 16, "PLO Compare and swap and double store (32 bit in general registers)") +DEF_FEAT(PLO_CSDSTG, "plo-csdstg", PLO, 17, "PLO Compare and swap and double store (64 bit in parameter list)") +DEF_FEAT(PLO_CSDSTGR, "plo-csdstgr", PLO, 18, "PLO Compare and swap and double store (32 bit in general registers)") +DEF_FEAT(PLO_CSDSTX, "plo-csdstx", PLO, 19, "PLO Compare and swap and double store (128 bit in parameter list)") +DEF_FEAT(PLO_CSTST, "plo-cstst", PLO, 20, "PLO Compare and swap and triple store (32 bit in general registers)") +DEF_FEAT(PLO_CSTSTG, "plo-cststg", PLO, 21, "PLO Compare and swap and triple store (64 bit in parameter list)") +DEF_FEAT(PLO_CSTSTGR, "plo-cststgr", PLO, 22, "PLO Compare and swap and triple store (32 bit in general registers)") +DEF_FEAT(PLO_CSTSTX, "plo-cststx", PLO, 23, "PLO Compare and swap and triple store (128 bit in parameter list)") + +/* Features exposed via the PTFF instruction. */ +DEF_FEAT(PTFF_QTO, "ptff-qto", PTFF, 1, "PTFF Query TOD Offset") +DEF_FEAT(PTFF_QSI, "ptff-qsi", PTFF, 2, "PTFF Query Steering Information") +DEF_FEAT(PTFF_QPT, "ptff-qpc", PTFF, 3, "PTFF Query Physical Clock") +DEF_FEAT(PTFF_QUI, "ptff-qui", PTFF, 4, "PTFF Query UTC Information") +DEF_FEAT(PTFF_QTOU, "ptff-qtou", PTFF, 5, "PTFF Query TOD Offset User") +DEF_FEAT(PTFF_QSIE, "ptff-qsie", PTFF, 10, "PTFF Query Steering Information Extended") +DEF_FEAT(PTFF_QTOUE, "ptff-qtoue", PTFF, 13, "PTFF Query TOD Offset User Extended") +DEF_FEAT(PTFF_STO, "ptff-sto", PTFF, 65, "PTFF Set TOD Offset") +DEF_FEAT(PTFF_STOU, "ptff-stou", PTFF, 69, "PTFF Set TOD Offset User") +DEF_FEAT(PTFF_STOE, "ptff-stoe", PTFF, 73, "PTFF Set TOD Offset Extended") +DEF_FEAT(PTFF_STOUE, "ptff-stoue", PTFF, 77, "PTFF Set TOD Offset User Extended") + +/* Features exposed via the KMAC instruction. 
*/ +DEF_FEAT(KMAC_DEA, "kmac-dea", KMAC, 1, "KMAC DEA") +DEF_FEAT(KMAC_TDEA_128, "kmac-tdea-128", KMAC, 2, "KMAC TDEA-128") +DEF_FEAT(KMAC_TDEA_192, "kmac-tdea-192", KMAC, 3, "KMAC TDEA-192") +DEF_FEAT(KMAC_EDEA, "kmac-edea", KMAC, 9, "KMAC Encrypted-DEA") +DEF_FEAT(KMAC_ETDEA_128, "kmac-etdea-128", KMAC, 10, "KMAC Encrypted-TDEA-128") +DEF_FEAT(KMAC_ETDEA_192, "kmac-etdea-192", KMAC, 11, "KMAC Encrypted-TDEA-192") +DEF_FEAT(KMAC_AES_128, "kmac-aes-128", KMAC, 18, "KMAC AES-128") +DEF_FEAT(KMAC_AES_192, "kmac-aes-192", KMAC, 19, "KMAC AES-192") +DEF_FEAT(KMAC_AES_256, "kmac-aes-256", KMAC, 20, "KMAC AES-256") +DEF_FEAT(KMAC_EAES_128, "kmac-eaes-128", KMAC, 26, "KMAC Encrypted-AES-128") +DEF_FEAT(KMAC_EAES_192, "kmac-eaes-192", KMAC, 27, "KMAC Encrypted-AES-192") +DEF_FEAT(KMAC_EAES_256, "kmac-eaes-256", KMAC, 28, "KMAC Encrypted-AES-256") + +/* Features exposed via the KMC instruction. */ +DEF_FEAT(KMC_DEA, "kmc-dea", KMC, 1, "KMC DEA") +DEF_FEAT(KMC_TDEA_128, "kmc-tdea-128", KMC, 2, "KMC TDEA-128") +DEF_FEAT(KMC_TDEA_192, "kmc-tdea-192", KMC, 3, "KMC TDEA-192") +DEF_FEAT(KMC_EDEA, "kmc-edea", KMC, 9, "KMC Encrypted-DEA") +DEF_FEAT(KMC_ETDEA_128, "kmc-etdea-128", KMC, 10, "KMC Encrypted-TDEA-128") +DEF_FEAT(KMC_ETDEA_192, "kmc-etdea-192", KMC, 11, "KMC Encrypted-TDEA-192") +DEF_FEAT(KMC_AES_128, "kmc-aes-128", KMC, 18, "KMC AES-128") +DEF_FEAT(KMC_AES_192, "kmc-aes-192", KMC, 19, "KMC AES-192") +DEF_FEAT(KMC_AES_256, "kmc-aes-256", KMC, 20, "KMC AES-256") +DEF_FEAT(KMC_EAES_128, "kmc-eaes-128", KMC, 26, "KMC Encrypted-AES-128") +DEF_FEAT(KMC_EAES_192, "kmc-eaes-192", KMC, 27, "KMC Encrypted-AES-192") +DEF_FEAT(KMC_EAES_256, "kmc-eaes-256", KMC, 28, "KMC Encrypted-AES-256") +DEF_FEAT(KMC_PRNG, "kmc-prng", KMC, 67, "KMC PRNG") + +/* Features exposed via the KM instruction. */ +DEF_FEAT(KM_DEA, "km-dea", KM, 1, "KM DEA") +DEF_FEAT(KM_TDEA_128, "km-tdea-128", KM, 2, "KM TDEA-128") +DEF_FEAT(KM_TDEA_192, "km-tdea-192", KM, 3, "KM TDEA-192") +DEF_FEAT(KM_EDEA, "km-edea", KM, 9, "KM Encrypted-DEA") +DEF_FEAT(KM_ETDEA_128, "km-etdea-128", KM, 10, "KM Encrypted-TDEA-128") +DEF_FEAT(KM_ETDEA_192, "km-etdea-192", KM, 11, "KM Encrypted-TDEA-192") +DEF_FEAT(KM_AES_128, "km-aes-128", KM, 18, "KM AES-128") +DEF_FEAT(KM_AES_192, "km-aes-192", KM, 19, "KM AES-192") +DEF_FEAT(KM_AES_256, "km-aes-256", KM, 20, "KM AES-256") +DEF_FEAT(KM_EAES_128, "km-eaes-128", KM, 26, "KM Encrypted-AES-128") +DEF_FEAT(KM_EAES_192, "km-eaes-192", KM, 27, "KM Encrypted-AES-192") +DEF_FEAT(KM_EAES_256, "km-eaes-256", KM, 28, "KM Encrypted-AES-256") +DEF_FEAT(KM_XTS_AES_128, "km-xts-aes-128", KM, 50, "KM XTS-AES-128") +DEF_FEAT(KM_XTS_AES_256, "km-xts-aes-256", KM, 52, "KM XTS-AES-256") +DEF_FEAT(KM_XTS_EAES_128, "km-xts-eaes-128", KM, 58, "KM XTS-Encrypted-AES-128") +DEF_FEAT(KM_XTS_EAES_256, "km-xts-eaes-256", KM, 60, "KM XTS-Encrypted-AES-256") + +/* Features exposed via the KIMD instruction. 
*/ +DEF_FEAT(KIMD_SHA_1, "kimd-sha-1", KIMD, 1, "KIMD SHA-1") +DEF_FEAT(KIMD_SHA_256, "kimd-sha-256", KIMD, 2, "KIMD SHA-256") +DEF_FEAT(KIMD_SHA_512, "kimd-sha-512", KIMD, 3, "KIMD SHA-512") +DEF_FEAT(KIMD_SHA3_224, "kimd-sha3-224", KIMD, 32, "KIMD SHA3-224") +DEF_FEAT(KIMD_SHA3_256, "kimd-sha3-256", KIMD, 33, "KIMD SHA3-256") +DEF_FEAT(KIMD_SHA3_384, "kimd-sha3-384", KIMD, 34, "KIMD SHA3-384") +DEF_FEAT(KIMD_SHA3_512, "kimd-sha3-512", KIMD, 35, "KIMD SHA3-512") +DEF_FEAT(KIMD_SHAKE_128, "kimd-shake-128", KIMD, 36, "KIMD SHAKE-128") +DEF_FEAT(KIMD_SHAKE_256, "kimd-shake-256", KIMD, 37, "KIMD SHAKE-256") +DEF_FEAT(KIMD_GHASH, "kimd-ghash", KIMD, 65, "KIMD GHASH") + +/* Features exposed via the KLMD instruction. */ +DEF_FEAT(KLMD_SHA_1, "klmd-sha-1", KLMD, 1, "KLMD SHA-1") +DEF_FEAT(KLMD_SHA_256, "klmd-sha-256", KLMD, 2, "KLMD SHA-256") +DEF_FEAT(KLMD_SHA_512, "klmd-sha-512", KLMD, 3, "KLMD SHA-512") +DEF_FEAT(KLMD_SHA3_224, "klmd-sha3-224", KLMD, 32, "KLMD SHA3-224") +DEF_FEAT(KLMD_SHA3_256, "klmd-sha3-256", KLMD, 33, "KLMD SHA3-256") +DEF_FEAT(KLMD_SHA3_384, "klmd-sha3-384", KLMD, 34, "KLMD SHA3-384") +DEF_FEAT(KLMD_SHA3_512, "klmd-sha3-512", KLMD, 35, "KLMD SHA3-512") +DEF_FEAT(KLMD_SHAKE_128, "klmd-shake-128", KLMD, 36, "KLMD SHAKE-128") +DEF_FEAT(KLMD_SHAKE_256, "klmd-shake-256", KLMD, 37, "KLMD SHAKE-256") + +/* Features exposed via the PCKMO instruction. */ +DEF_FEAT(PCKMO_EDEA, "pckmo-edea", PCKMO, 1, "PCKMO Encrypted-DEA-Key") +DEF_FEAT(PCKMO_ETDEA_128, "pckmo-etdea-128", PCKMO, 2, "PCKMO Encrypted-TDEA-128-Key") +DEF_FEAT(PCKMO_ETDEA_256, "pckmo-etdea-192", PCKMO, 3, "PCKMO Encrypted-TDEA-192-Key") +DEF_FEAT(PCKMO_AES_128, "pckmo-aes-128", PCKMO, 18, "PCKMO Encrypted-AES-128-Key") +DEF_FEAT(PCKMO_AES_192, "pckmo-aes-192", PCKMO, 19, "PCKMO Encrypted-AES-192-Key") +DEF_FEAT(PCKMO_AES_256, "pckmo-aes-256", PCKMO, 20, "PCKMO Encrypted-AES-256-Key") +DEF_FEAT(PCKMO_ECC_P256, "pckmo-ecc-p256", PCKMO, 32, "PCKMO Encrypt-ECC-P256-Key") +DEF_FEAT(PCKMO_ECC_P384, "pckmo-ecc-p384", PCKMO, 33, "PCKMO Encrypt-ECC-P384-Key") +DEF_FEAT(PCKMO_ECC_P521, "pckmo-ecc-p521", PCKMO, 34, "PCKMO Encrypt-ECC-P521-Key") +DEF_FEAT(PCKMO_ECC_ED25519, "pckmo-ecc-ed25519", PCKMO, 40 , "PCKMO Encrypt-ECC-Ed25519-Key") +DEF_FEAT(PCKMO_ECC_ED448, "pckmo-ecc-ed448", PCKMO, 41 , "PCKMO Encrypt-ECC-Ed448-Key") + +/* Features exposed via the KMCTR instruction. */ +DEF_FEAT(KMCTR_DEA, "kmctr-dea", KMCTR, 1, "KMCTR DEA") +DEF_FEAT(KMCTR_TDEA_128, "kmctr-tdea-128", KMCTR, 2, "KMCTR TDEA-128") +DEF_FEAT(KMCTR_TDEA_192, "kmctr-tdea-192", KMCTR, 3, "KMCTR TDEA-192") +DEF_FEAT(KMCTR_EDEA, "kmctr-edea", KMCTR, 9, "KMCTR Encrypted-DEA") +DEF_FEAT(KMCTR_ETDEA_128, "kmctr-etdea-128", KMCTR, 10, "KMCTR Encrypted-TDEA-128") +DEF_FEAT(KMCTR_ETDEA_192, "kmctr-etdea-192", KMCTR, 11, "KMCTR Encrypted-TDEA-192") +DEF_FEAT(KMCTR_AES_128, "kmctr-aes-128", KMCTR, 18, "KMCTR AES-128") +DEF_FEAT(KMCTR_AES_192, "kmctr-aes-192", KMCTR, 19, "KMCTR AES-192") +DEF_FEAT(KMCTR_AES_256, "kmctr-aes-256", KMCTR, 20, "KMCTR AES-256") +DEF_FEAT(KMCTR_EAES_128, "kmctr-eaes-128", KMCTR, 26, "KMCTR Encrypted-AES-128") +DEF_FEAT(KMCTR_EAES_192, "kmctr-eaes-192", KMCTR, 27, "KMCTR Encrypted-AES-192") +DEF_FEAT(KMCTR_EAES_256, "kmctr-eaes-256", KMCTR, 28, "KMCTR Encrypted-AES-256") + +/* Features exposed via the KMF instruction. 
*/ +DEF_FEAT(KMF_DEA, "kmf-dea", KMF, 1, "KMF DEA") +DEF_FEAT(KMF_TDEA_128, "kmf-tdea-128", KMF, 2, "KMF TDEA-128") +DEF_FEAT(KMF_TDEA_192, "kmf-tdea-192", KMF, 3, "KMF TDEA-192") +DEF_FEAT(KMF_EDEA, "kmf-edea", KMF, 9, "KMF Encrypted-DEA") +DEF_FEAT(KMF_ETDEA_128, "kmf-etdea-128", KMF, 10, "KMF Encrypted-TDEA-128") +DEF_FEAT(KMF_ETDEA_192, "kmf-etdea-192", KMF, 11, "KMF Encrypted-TDEA-192") +DEF_FEAT(KMF_AES_128, "kmf-aes-128", KMF, 18, "KMF AES-128") +DEF_FEAT(KMF_AES_192, "kmf-aes-192", KMF, 19, "KMF AES-192") +DEF_FEAT(KMF_AES_256, "kmf-aes-256", KMF, 20, "KMF AES-256") +DEF_FEAT(KMF_EAES_128, "kmf-eaes-128", KMF, 26, "KMF Encrypted-AES-128") +DEF_FEAT(KMF_EAES_192, "kmf-eaes-192", KMF, 27, "KMF Encrypted-AES-192") +DEF_FEAT(KMF_EAES_256, "kmf-eaes-256", KMF, 28, "KMF Encrypted-AES-256") + +/* Features exposed via the KMO instruction. */ +DEF_FEAT(KMO_DEA, "kmo-dea", KMO, 1, "KMO DEA") +DEF_FEAT(KMO_TDEA_128, "kmo-tdea-128", KMO, 2, "KMO TDEA-128") +DEF_FEAT(KMO_TDEA_192, "kmo-tdea-192", KMO, 3, "KMO TDEA-192") +DEF_FEAT(KMO_EDEA, "kmo-edea", KMO, 9, "KMO Encrypted-DEA") +DEF_FEAT(KMO_ETDEA_128, "kmo-etdea-128", KMO, 10, "KMO Encrypted-TDEA-128") +DEF_FEAT(KMO_ETDEA_192, "kmo-etdea-192", KMO, 11, "KMO Encrypted-TDEA-192") +DEF_FEAT(KMO_AES_128, "kmo-aes-128", KMO, 18, "KMO AES-128") +DEF_FEAT(KMO_AES_192, "kmo-aes-192", KMO, 19, "KMO AES-192") +DEF_FEAT(KMO_AES_256, "kmo-aes-256", KMO, 20, "KMO AES-256") +DEF_FEAT(KMO_EAES_128, "kmo-eaes-128", KMO, 26, "KMO Encrypted-AES-128") +DEF_FEAT(KMO_EAES_192, "kmo-eaes-192", KMO, 27, "KMO Encrypted-AES-192") +DEF_FEAT(KMO_EAES_256, "kmo-eaes-256", KMO, 28, "KMO Encrypted-AES-256") + +/* Features exposed via the PCC instruction. */ +DEF_FEAT(PCC_CMAC_DEA, "pcc-cmac-dea", PCC, 1, "PCC Compute-Last-Block-CMAC-Using-DEA") +DEF_FEAT(PCC_CMAC_TDEA_128, "pcc-cmac-tdea-128", PCC, 2, "PCC Compute-Last-Block-CMAC-Using-TDEA-128") +DEF_FEAT(PCC_CMAC_TDEA_192, "pcc-cmac-tdea-192", PCC, 3, "PCC Compute-Last-Block-CMAC-Using-TDEA-192") +DEF_FEAT(PCC_CMAC_ETDEA_128, "pcc-cmac-edea", PCC, 9, "PCC Compute-Last-Block-CMAC-Using-Encrypted-DEA") +DEF_FEAT(PCC_CMAC_ETDEA_192, "pcc-cmac-etdea-128", PCC, 10, "PCC Compute-Last-Block-CMAC-Using-Encrypted-TDEA-128") +DEF_FEAT(PCC_CMAC_TDEA, "pcc-cmac-etdea-192", PCC, 11, "PCC Compute-Last-Block-CMAC-Using-EncryptedTDEA-192") +DEF_FEAT(PCC_CMAC_AES_128, "pcc-cmac-aes-128", PCC, 18, "PCC Compute-Last-Block-CMAC-Using-AES-128") +DEF_FEAT(PCC_CMAC_AES_192, "pcc-cmac-aes-192", PCC, 19, "PCC Compute-Last-Block-CMAC-Using-AES-192") +DEF_FEAT(PCC_CMAC_AES_256, "pcc-cmac-eaes-256", PCC, 20, "PCC Compute-Last-Block-CMAC-Using-AES-256") +DEF_FEAT(PCC_CMAC_EAES_128, "pcc-cmac-eaes-128", PCC, 26, "PCC Compute-Last-Block-CMAC-Using-Encrypted-AES-128") +DEF_FEAT(PCC_CMAC_EAES_192, "pcc-cmac-eaes-192", PCC, 27, "PCC Compute-Last-Block-CMAC-Using-Encrypted-AES-192") +DEF_FEAT(PCC_CMAC_EAES_256, "pcc-cmac-eaes-256", PCC, 28, "PCC Compute-Last-Block-CMAC-Using-Encrypted-AES-256") +DEF_FEAT(PCC_XTS_AES_128, "pcc-xts-aes-128", PCC, 50, "PCC Compute-XTS-Parameter-Using-AES-128") +DEF_FEAT(PCC_XTS_AES_256, "pcc-xts-aes-256", PCC, 52, "PCC Compute-XTS-Parameter-Using-AES-256") +DEF_FEAT(PCC_XTS_EAES_128, "pcc-xts-eaes-128", PCC, 58, "PCC Compute-XTS-Parameter-Using-Encrypted-AES-128") +DEF_FEAT(PCC_XTS_EAES_256, "pcc-xts-eaes-256", PCC, 60, "PCC Compute-XTS-Parameter-Using-Encrypted-AES-256") +DEF_FEAT(PCC_SCALAR_MULT_P256, "pcc-scalar-mult-p256", PCC, 64, "PCC Scalar-Multiply-P256") +DEF_FEAT(PCC_SCALAR_MULT_P384, "pcc-scalar-mult-p384", PCC, 
65, "PCC Scalar-Multiply-P384") +DEF_FEAT(PCC_SCALAR_MULT_P512, "pcc-scalar-mult-p521", PCC, 66, "PCC Scalar-Multiply-P521") +DEF_FEAT(PCC_SCALAR_MULT_ED25519, "pcc-scalar-mult-ed25519", PCC, 72, "PCC Scalar-Multiply-Ed25519") +DEF_FEAT(PCC_SCALAR_MULT_ED448, "pcc-scalar-mult-ed448", PCC, 73, "PCC Scalar-Multiply-Ed448") +DEF_FEAT(PCC_SCALAR_MULT_X25519, "pcc-scalar-mult-x25519", PCC, 80, "PCC Scalar-Multiply-X25519") +DEF_FEAT(PCC_SCALAR_MULT_X448, "pcc-scalar-mult-x448", PCC, 81, "PCC Scalar-Multiply-X448") + +/* Features exposed via the PPNO/PRNO instruction. */ +DEF_FEAT(PPNO_SHA_512_DRNG, "ppno-sha-512-drng", PPNO, 3, "PPNO SHA-512-DRNG") +DEF_FEAT(PRNO_TRNG_QRTCR, "prno-trng-qrtcr", PPNO, 112, "PRNO TRNG-Query-Raw-to-Conditioned-Ratio") +DEF_FEAT(PRNO_TRNG, "prno-trng", PPNO, 114, "PRNO TRNG") + +/* Features exposed via the KMA instruction. */ +DEF_FEAT(KMA_GCM_AES_128, "kma-gcm-aes-128", KMA, 18, "KMA GCM-AES-128") +DEF_FEAT(KMA_GCM_AES_192, "kma-gcm-aes-192", KMA, 19, "KMA GCM-AES-192") +DEF_FEAT(KMA_GCM_AES_256, "kma-gcm-aes-256", KMA, 20, "KMA GCM-AES-256") +DEF_FEAT(KMA_GCM_EAES_128, "kma-gcm-eaes-128", KMA, 26, "KMA GCM-Encrypted-AES-128") +DEF_FEAT(KMA_GCM_EAES_192, "kma-gcm-eaes-192", KMA, 27, "KMA GCM-Encrypted-AES-192") +DEF_FEAT(KMA_GCM_EAES_256, "kma-gcm-eaes-256", KMA, 28, "KMA GCM-Encrypted-AES-256") + +/* Features exposed via the KDSA instruction. */ +DEF_FEAT(KDSA_ECDSA_VERIFY_P256, "kdsa-ecdsa-verify-p256", KDSA, 1, "KDSA ECDSA-Verify-P256") +DEF_FEAT(KDSA_ECDSA_VERIFY_P384, "kdsa-ecdsa-verify-p384", KDSA, 2, "KDSA ECDSA-Verify-P384") +DEF_FEAT(KDSA_ECDSA_VERIFY_P512, "kdsa-ecdsa-verify-p521", KDSA, 3, "KDSA ECDSA-Verify-P521") +DEF_FEAT(KDSA_ECDSA_SIGN_P256, "kdsa-ecdsa-sign-p256", KDSA, 9, "KDSA ECDSA-Sign-P256") +DEF_FEAT(KDSA_ECDSA_SIGN_P384, "kdsa-ecdsa-sign-p384", KDSA, 10, "KDSA ECDSA-Sign-P384") +DEF_FEAT(KDSA_ECDSA_SIGN_P512, "kdsa-ecdsa-sign-p521", KDSA, 11, "KDSA ECDSA-Sign-P521") +DEF_FEAT(KDSA_EECDSA_SIGN_P256, "kdsa-eecdsa-sign-p256", KDSA, 17, "KDSA Encrypted-ECDSA-Sign-P256") +DEF_FEAT(KDSA_EECDSA_SIGN_P384, "kdsa-eecdsa-sign-p384", KDSA, 18, "KDSA Encrypted-ECDSA-Sign-P384") +DEF_FEAT(KDSA_EECDSA_SIGN_P512, "kdsa-eecdsa-sign-p521", KDSA, 19, "KDSA Encrypted-ECDSA-Sign-P521") +DEF_FEAT(KDSA_EDDSA_VERIFY_ED25519, "kdsa-eddsa-verify-ed25519", KDSA, 32, "KDSA EdDSA-Verify-Ed25519") +DEF_FEAT(KDSA_EDDSA_VERIFY_ED448, "kdsa-eddsa-verify-ed448", KDSA, 36, "KDSA EdDSA-Verify-Ed448") +DEF_FEAT(KDSA_EDDSA_SIGN_ED25519, "kdsa-eddsa-sign-ed25519", KDSA, 40, "KDSA EdDSA-Sign-Ed25519") +DEF_FEAT(KDSA_EDDSA_SIGN_ED448, "kdsa-eddsa-sign-ed448", KDSA, 44, "KDSA EdDSA-Sign-Ed448") +DEF_FEAT(KDSA_EEDDSA_SIGN_ED25519, "kdsa-eeddsa-sign-ed25519", KDSA, 48, "KDSA Encrypted-EdDSA-Sign-Ed25519") +DEF_FEAT(KDSA_EEDDSA_SIGN_ED448, "kdsa-eeddsa-sign-ed448", KDSA, 52, "KDSA Encrypted-EdDSA-Sign-Ed448") + +/* Features exposed via the SORTL instruction. */ +DEF_FEAT(SORTL_SFLR, "sortl-sflr", SORTL, 1, "SORTL SFLR") +DEF_FEAT(SORTL_SVLR, "sortl-svlr", SORTL, 2, "SORTL SVLR") +DEF_FEAT(SORTL_32, "sortl-32", SORTL, 130, "SORTL 32 input lists") +DEF_FEAT(SORTL_128, "sortl-128", SORTL, 132, "SORTL 128 input lists") +DEF_FEAT(SORTL_F0, "sortl-f0", SORTL, 192, "SORTL format 0 parameter-block") + +/* Features exposed via the DEFLATE instruction. 
*/
+DEF_FEAT(DEFLATE_GHDT, "dfltcc-gdht", DFLTCC, 1, "DFLTCC GDHT")
+DEF_FEAT(DEFLATE_CMPR, "dfltcc-cmpr", DFLTCC, 2, "DFLTCC CMPR")
+DEF_FEAT(DEFLATE_XPND, "dfltcc-xpnd", DFLTCC, 4, "DFLTCC XPND")
+DEF_FEAT(DEFLATE_F0, "dfltcc-f0", DFLTCC, 192, "DFLTCC format 0 parameter-block")
diff --git a/qemu/target/s390x/cpu_models.c b/qemu/target/s390x/cpu_models.c
new file mode 100644
index 00000000..a9bc83f7
--- /dev/null
+++ b/qemu/target/s390x/cpu_models.c
@@ -0,0 +1,579 @@
+/*
+ * CPU models for s390x
+ *
+ * Copyright 2016 IBM Corp.
+ *
+ * Author(s): David Hildenbrand
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internal.h"
+#include "sysemu/tcg.h"
+#include "qemu-common.h"
+//#include "hw/pci/pci.h"
+
+#define CPUDEF_INIT(_type, _gen, _ec_ga, _mha_pow, _hmfai, _name, _desc) \
+    {                                                                    \
+        .name = _name,                                                   \
+        .type = _type,                                                   \
+        .gen = _gen,                                                     \
+        .ec_ga = _ec_ga,                                                 \
+        .mha_pow = _mha_pow,                                             \
+        .hmfai = _hmfai,                                                 \
+        .desc = _desc,                                                   \
+        .base_init = { S390_FEAT_LIST_GEN ## _gen ## _GA ## _ec_ga ## _BASE },       \
+        .default_init = { S390_FEAT_LIST_GEN ## _gen ## _GA ## _ec_ga ## _DEFAULT }, \
+        .full_init = { S390_FEAT_LIST_GEN ## _gen ## _GA ## _ec_ga ## _FULL },       \
+    }
+
+/*
+ * CPU definition list in order of release. Up to generation 14, the base
+ * features of each release have been a superset of those of the previous
+ * release. With generation 15, one base feature and one optional feature
+ * have been deprecated.
+ */
+static S390CPUDef s390_cpu_defs[] = {
+    CPUDEF_INIT(0x2064, 7, 1, 38, 0x00000000U, "z900", "IBM zSeries 900 GA1"),
+    CPUDEF_INIT(0x2064, 7, 2, 38, 0x00000000U, "z900.2", "IBM zSeries 900 GA2"),
+    CPUDEF_INIT(0x2064, 7, 3, 38, 0x00000000U, "z900.3", "IBM zSeries 900 GA3"),
+    CPUDEF_INIT(0x2066, 7, 3, 38, 0x00000000U, "z800", "IBM zSeries 800 GA1"),
+    CPUDEF_INIT(0x2084, 8, 1, 38, 0x00000000U, "z990", "IBM zSeries 990 GA1"),
+    CPUDEF_INIT(0x2084, 8, 2, 38, 0x00000000U, "z990.2", "IBM zSeries 990 GA2"),
+    CPUDEF_INIT(0x2084, 8, 3, 38, 0x00000000U, "z990.3", "IBM zSeries 990 GA3"),
+    CPUDEF_INIT(0x2086, 8, 3, 38, 0x00000000U, "z890", "IBM zSeries 880 GA1"),
+    CPUDEF_INIT(0x2084, 8, 4, 38, 0x00000000U, "z990.4", "IBM zSeries 990 GA4"),
+    CPUDEF_INIT(0x2086, 8, 4, 38, 0x00000000U, "z890.2", "IBM zSeries 880 GA2"),
+    CPUDEF_INIT(0x2084, 8, 5, 38, 0x00000000U, "z990.5", "IBM zSeries 990 GA5"),
+    CPUDEF_INIT(0x2086, 8, 5, 38, 0x00000000U, "z890.3", "IBM zSeries 880 GA3"),
+    CPUDEF_INIT(0x2094, 9, 1, 40, 0x00000000U, "z9EC", "IBM System z9 EC GA1"),
+    CPUDEF_INIT(0x2094, 9, 2, 40, 0x00000000U, "z9EC.2", "IBM System z9 EC GA2"),
+    CPUDEF_INIT(0x2096, 9, 2, 40, 0x00000000U, "z9BC", "IBM System z9 BC GA1"),
+    CPUDEF_INIT(0x2094, 9, 3, 40, 0x00000000U, "z9EC.3", "IBM System z9 EC GA3"),
+    CPUDEF_INIT(0x2096, 9, 3, 40, 0x00000000U, "z9BC.2", "IBM System z9 BC GA2"),
+    CPUDEF_INIT(0x2097, 10, 1, 43, 0x00000000U, "z10EC", "IBM System z10 EC GA1"),
+    CPUDEF_INIT(0x2097, 10, 2, 43, 0x00000000U, "z10EC.2", "IBM System z10 EC GA2"),
+    CPUDEF_INIT(0x2098, 10, 2, 43, 0x00000000U, "z10BC", "IBM System z10 BC GA1"),
+    CPUDEF_INIT(0x2097, 10, 3, 43, 0x00000000U, "z10EC.3", "IBM System z10 EC GA3"),
+    CPUDEF_INIT(0x2098, 10, 3, 43, 0x00000000U, "z10BC.2", "IBM System z10 BC GA2"),
+    CPUDEF_INIT(0x2817, 11, 1, 44, 0x08000000U, "z196", "IBM zEnterprise 196 GA1"),
+    CPUDEF_INIT(0x2817, 11, 2, 44, 0x08000000U, "z196.2", "IBM zEnterprise 196 GA2"),
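+/*
+ * Illustration: with the macro above, the "z900" entry expands roughly to
+ *
+ *     { .name = "z900", .type = 0x2064, .gen = 7, .ec_ga = 1,
+ *       .mha_pow = 38, .hmfai = 0x00000000U, .desc = "IBM zSeries 900 GA1",
+ *       .base_init = { S390_FEAT_LIST_GEN7_GA1_BASE }, ... }
+ *
+ * where the S390_FEAT_LIST_* initializers come from the generated
+ * gen-features.h header.
+ */
+    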
CPUDEF_INIT(0x2818, 11, 2, 44, 0x08000000U, "z114", "IBM zEnterprise 114 GA1"), + CPUDEF_INIT(0x2827, 12, 1, 44, 0x08000000U, "zEC12", "IBM zEnterprise EC12 GA1"), + CPUDEF_INIT(0x2827, 12, 2, 44, 0x08000000U, "zEC12.2", "IBM zEnterprise EC12 GA2"), + CPUDEF_INIT(0x2828, 12, 2, 44, 0x08000000U, "zBC12", "IBM zEnterprise BC12 GA1"), + CPUDEF_INIT(0x2964, 13, 1, 47, 0x08000000U, "z13", "IBM z13 GA1"), + CPUDEF_INIT(0x2964, 13, 2, 47, 0x08000000U, "z13.2", "IBM z13 GA2"), + CPUDEF_INIT(0x2965, 13, 2, 47, 0x08000000U, "z13s", "IBM z13s GA1"), + CPUDEF_INIT(0x3906, 14, 1, 47, 0x08000000U, "z14", "IBM z14 GA1"), + CPUDEF_INIT(0x3906, 14, 2, 47, 0x08000000U, "z14.2", "IBM z14 GA2"), + CPUDEF_INIT(0x3907, 14, 1, 47, 0x08000000U, "z14ZR1", "IBM z14 Model ZR1 GA1"), + CPUDEF_INIT(0x8561, 15, 1, 47, 0x08000000U, "gen15a", "IBM z15 GA1"), + CPUDEF_INIT(0x8562, 15, 1, 47, 0x08000000U, "gen15b", "IBM 8562 GA1"), +}; + +#define QEMU_MAX_CPU_TYPE 0x2964 +#define QEMU_MAX_CPU_GEN 13 +#define QEMU_MAX_CPU_EC_GA 2 +static const S390FeatInit qemu_max_cpu_feat_init = { S390_FEAT_LIST_QEMU_MAX }; +static S390FeatBitmap qemu_max_cpu_feat; + +/* features part of a base model but not relevant for finding a base model */ +S390FeatBitmap ignored_base_feat; + +void s390_cpudef_featoff(uint8_t gen, uint8_t ec_ga, S390Feat feat) +{ + const S390CPUDef *def; + + def = s390_find_cpu_def(0, gen, ec_ga, NULL); + clear_bit(feat, (unsigned long *)&def->default_feat); +} + +void s390_cpudef_featoff_greater(uint8_t gen, uint8_t ec_ga, S390Feat feat) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(s390_cpu_defs); i++) { + const S390CPUDef *def = &s390_cpu_defs[i]; + + if (def->gen < gen) { + continue; + } + if (def->gen == gen && def->ec_ga < ec_ga) { + continue; + } + + clear_bit(feat, (unsigned long *)&def->default_feat); + } +} + +void s390_cpudef_group_featoff_greater(uint8_t gen, uint8_t ec_ga, + S390FeatGroup group) +{ + const S390FeatGroupDef *group_def = s390_feat_group_def(group); + S390FeatBitmap group_def_off; + int i; + + bitmap_complement(group_def_off, group_def->feat, S390_FEAT_MAX); + + for (i = 0; i < ARRAY_SIZE(s390_cpu_defs); i++) { + const S390CPUDef *cpu_def = &s390_cpu_defs[i]; + + if (cpu_def->gen < gen) { + continue; + } + if (cpu_def->gen == gen && cpu_def->ec_ga < ec_ga) { + continue; + } + + bitmap_and((unsigned long *)&cpu_def->default_feat, + cpu_def->default_feat, group_def_off, S390_FEAT_MAX); + } +} + +uint32_t s390_get_hmfai(void) +{ + static S390CPU *cpu; + + if (!cpu) { + cpu = S390_CPU(qemu_get_cpu(NULL, 0)); + } + + if (!cpu || !cpu->model) { + return 0; + } + return cpu->model->def->hmfai; +} + +uint8_t s390_get_mha_pow(void) +{ + static S390CPU *cpu; + + if (!cpu) { + cpu = S390_CPU(qemu_get_cpu(NULL, 0)); + } + + if (!cpu || !cpu->model) { + return 0; + } + return cpu->model->def->mha_pow; +} + +uint32_t s390_get_ibc_val(void) +{ + uint16_t unblocked_ibc, lowest_ibc; + static S390CPU *cpu; + + if (!cpu) { + cpu = S390_CPU(qemu_get_cpu(NULL, 0)); + } + + if (!cpu || !cpu->model) { + return 0; + } + unblocked_ibc = s390_ibc_from_cpu_model(cpu->model); + lowest_ibc = cpu->model->lowest_ibc; + /* the lowest_ibc always has to be <= unblocked_ibc */ + if (!lowest_ibc || lowest_ibc > unblocked_ibc) { + return 0; + } + return ((uint32_t) lowest_ibc << 16) | unblocked_ibc; +} + +void s390_get_feat_block(struct uc_struct *uc, S390FeatType type, uint8_t *data) +{ + S390CPU *cpu = S390_CPU(qemu_get_cpu(uc, 0)); + s390_fill_feat_block(cpu->model->features, type, data); +} + +bool s390_has_feat(struct 
uc_struct *uc, S390Feat feat) +{ + S390CPU *cpu = S390_CPU(qemu_get_cpu(uc, 0)); + if (!cpu->model) { + if (feat == S390_FEAT_ZPCI) { + return true; + } + return false; + } + return test_bit(feat, cpu->model->features); +} + +uint8_t s390_get_gen_for_cpu_type(uint16_t type) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(s390_cpu_defs); i++) { + if (s390_cpu_defs[i].type == type) { + return s390_cpu_defs[i].gen; + } + } + return 0; +} + +const S390CPUDef *s390_find_cpu_def(uint16_t type, uint8_t gen, uint8_t ec_ga, + S390FeatBitmap features) +{ + const S390CPUDef *last_compatible = NULL; + const S390CPUDef *matching_cpu_type = NULL; + int i; + + if (!gen) { + ec_ga = 0; + } + if (!gen && type) { + gen = s390_get_gen_for_cpu_type(type); + } + + for (i = 0; i < ARRAY_SIZE(s390_cpu_defs); i++) { + const S390CPUDef *def = &s390_cpu_defs[i]; + S390FeatBitmap missing; + + /* don't even try newer generations if we know the generation */ + if (gen) { + if (def->gen > gen) { + break; + } else if (def->gen == gen && ec_ga && def->ec_ga > ec_ga) { + break; + } + } + + if (features) { + /* see if the model satisfies the minimum features */ + bitmap_andnot(missing, def->base_feat, features, S390_FEAT_MAX); + /* + * Ignore certain features that are in the base model, but not + * relevant for the search (esp. MSA subfunctions). + */ + bitmap_andnot(missing, missing, ignored_base_feat, S390_FEAT_MAX); + if (!bitmap_empty(missing, S390_FEAT_MAX)) { + break; + } + } + + /* stop the search if we found the exact model */ + if (def->type == type && def->ec_ga == ec_ga) { + return def; + } + /* remember if we've at least seen one with the same cpu type */ + if (def->type == type) { + matching_cpu_type = def; + } + last_compatible = def; + } + /* prefer the model with the same cpu type, esp. don't take the BC for EC */ + if (matching_cpu_type) { + return matching_cpu_type; + } + return last_compatible; +} + +static S390CPUModel *get_max_cpu_model(void); + +static S390CPUModel *get_max_cpu_model(void) +{ + static S390CPUModel max_model; + static bool cached; + + if (cached) { + return &max_model; + } + + max_model.def = s390_find_cpu_def(QEMU_MAX_CPU_TYPE, QEMU_MAX_CPU_GEN, + QEMU_MAX_CPU_EC_GA, NULL); + bitmap_copy(max_model.features, qemu_max_cpu_feat, S390_FEAT_MAX); + cached = true; + return &max_model; +} + +static inline void apply_cpu_model(const S390CPUModel *model) +{ + static S390CPUModel applied_model; + static bool applied; + + /* + * We have the same model for all VCPUs. KVM can only be configured before + * any VCPUs are defined in KVM. 
+ */ + if (applied) { + if (model && memcmp(&applied_model, model, sizeof(S390CPUModel))) { + // error_setg(errp, "Mixed CPU models are not supported on s390x."); + } + return; + } + + applied = true; + if (model) { + applied_model = *model; + } +} + +void s390_realize_cpu_model(CPUState *cs) +{ + S390CPU *cpu = S390_CPU(cs); + const S390CPUModel *max_model; + + if (!cpu->model) { + /* no host model support -> perform compatibility stuff */ + apply_cpu_model(NULL); + return; + } + + max_model = get_max_cpu_model(); + if (!max_model) { + //error_prepend(errp, "CPU models are not available: "); + return; + } + + /* copy over properties that can vary */ + cpu->model->lowest_ibc = max_model->lowest_ibc; + cpu->model->cpu_id = max_model->cpu_id; + cpu->model->cpu_id_format = max_model->cpu_id_format; + cpu->model->cpu_ver = max_model->cpu_ver; + + apply_cpu_model(cpu->model); + + cpu->env.cpuid = s390_cpuid_from_cpu_model(cpu->model); + /* basic mode, write the cpu address into the first 4 bit of the ID */ + cpu->env.cpuid = deposit64(cpu->env.cpuid, 54, 4, cpu->env.core_id); +} + +static void s390_cpu_model_initfn(CPUState *obj) +{ + S390CPU *cpu = S390_CPU(obj); + S390CPUClass *xcc = S390_CPU_GET_CLASS(cpu); + + cpu->model = g_malloc0(sizeof(*cpu->model)); + /* copy the model, so we can modify it */ + cpu->model->def = xcc->cpu_def; + if (xcc->is_static) { + /* base model - features will never change */ + bitmap_copy(cpu->model->features, cpu->model->def->base_feat, + S390_FEAT_MAX); + } else { + /* latest model - features can change */ + bitmap_copy(cpu->model->features, + cpu->model->def->default_feat, S390_FEAT_MAX); + } +} + +static S390CPUDef s390_qemu_cpu_def; +static S390CPUModel s390_qemu_cpu_model; + +/* Set the qemu CPU model (on machine initialization). Must not be called + * once CPUs have been created. + */ +void s390_set_qemu_cpu_model(uint16_t type, uint8_t gen, uint8_t ec_ga, + const S390FeatInit feat_init) +{ + const S390CPUDef *def = s390_find_cpu_def(type, gen, ec_ga, NULL); + + g_assert(def); + //g_assert(QTAILQ_EMPTY_RCU(&cpus)); + + /* TCG emulates some features that can usually not be enabled with + * the emulated machine generation. Make sure they can be enabled + * when using the QEMU model by adding them to full_feat. We have + * to copy the definition to do that. 
+ */ + memcpy(&s390_qemu_cpu_def, def, sizeof(s390_qemu_cpu_def)); + bitmap_or(s390_qemu_cpu_def.full_feat, s390_qemu_cpu_def.full_feat, + qemu_max_cpu_feat, S390_FEAT_MAX); + + /* build the CPU model */ + s390_qemu_cpu_model.def = &s390_qemu_cpu_def; + bitmap_zero(s390_qemu_cpu_model.features, S390_FEAT_MAX); + s390_init_feat_bitmap(feat_init, s390_qemu_cpu_model.features); +} + +static void s390_qemu_cpu_model_initfn(CPUState *obj) +{ + S390CPU *cpu = S390_CPU(obj); + + cpu->model = g_malloc0(sizeof(*cpu->model)); + /* copy the CPU model so we can modify it */ + memcpy(cpu->model, &s390_qemu_cpu_model, sizeof(*cpu->model)); +} + +static void s390_max_cpu_model_initfn(CPUState *obj) +{ + const S390CPUModel *max_model; + S390CPU *cpu = S390_CPU(obj); + + max_model = get_max_cpu_model(); + + cpu->model = g_new(S390CPUModel, 1); + /* copy the CPU model so we can modify it */ + memcpy(cpu->model, max_model, sizeof(*cpu->model)); +} + +static void s390_cpu_model_finalize(CPUState *obj) +{ + S390CPU *cpu = S390_CPU(obj); + + g_free(cpu->model); + cpu->model = NULL; +} + +static void s390_base_cpu_model_class_init(struct uc_struct *uc, CPUClass *oc, void *data) +{ + S390CPUClass *xcc = S390_CPU_CLASS(oc); + + /* all base models are migration safe */ + xcc->cpu_def = (const S390CPUDef *) data; + xcc->is_static = true; + //xcc->desc = xcc->cpu_def->desc; +} + +static void s390_cpu_model_class_init(struct uc_struct *uc, CPUClass *oc, void *data) +{ + S390CPUClass *xcc = S390_CPU_CLASS(oc); + + /* model that can change between QEMU versions */ + xcc->cpu_def = (const S390CPUDef *) data; + //xcc->desc = xcc->cpu_def->desc; +} + +static void s390_qemu_cpu_model_class_init(struct uc_struct *uc, CPUClass *oc, void *data) +{ + //S390CPUClass *xcc = S390_CPU_CLASS(oc); + + //xcc->desc = g_strdup_printf("QEMU Virtual CPU version %s", + // qemu_hw_version()); +} + +static void s390_max_cpu_model_class_init(struct uc_struct *uc, CPUClass *oc, void *data) +{ + //S390CPUClass *xcc = S390_CPU_CLASS(oc); + + /* + * The "max" model is neither static nor migration safe. Under KVM + * it represents the "host" model. Under TCG it represents some kind of + * "qemu" CPU model without compat handling and maybe with some additional + * CPU features that are not yet unlocked in the "qemu" model. + */ + //xcc->desc = + // "Enables all features supported by the accelerator in the current host"; +} + +#if 0 + +/* Generate type name for a cpu model. Caller has to free the string. */ +static char *s390_cpu_type_name(const char *model_name) +{ + return g_strdup_printf(S390_CPU_TYPE_NAME("%s"), model_name); +} + +/* Generate type name for a base cpu model. Caller has to free the string. 
*/
+static char *s390_base_cpu_type_name(const char *model_name)
+{
+    return g_strdup_printf(S390_CPU_TYPE_NAME("%s-base"), model_name);
+}
+
+CPUClass *s390_cpu_class_by_name(const char *name)
+{
+    char *typename = s390_cpu_type_name(name);
+    CPUClass *oc;
+
+    oc = object_class_by_name(typename);
+    g_free(typename);
+    return oc;
+}
+
+static const TypeInfo qemu_s390_cpu_type_info = {
+    //.name = S390_CPU_TYPE_NAME("qemu"),
+    .parent = TYPE_S390_CPU,
+    .instance_init = s390_qemu_cpu_model_initfn,
+    .instance_finalize = s390_cpu_model_finalize,
+    .class_init = s390_qemu_cpu_model_class_init,
+};
+
+static const TypeInfo max_s390_cpu_type_info = {
+    //.name = S390_CPU_TYPE_NAME("max"),
+    .parent = TYPE_S390_CPU,
+    .instance_init = s390_max_cpu_model_initfn,
+    .instance_finalize = s390_cpu_model_finalize,
+    .class_init = s390_max_cpu_model_class_init,
+};
+#endif
+
+static void init_ignored_base_feat(void)
+{
+    static const int feats[] = {
+        /* MSA subfunctions that could not be available on certain machines */
+        S390_FEAT_KMAC_DEA,
+        S390_FEAT_KMAC_TDEA_128,
+        S390_FEAT_KMAC_TDEA_192,
+        S390_FEAT_KMC_DEA,
+        S390_FEAT_KMC_TDEA_128,
+        S390_FEAT_KMC_TDEA_192,
+        S390_FEAT_KM_DEA,
+        S390_FEAT_KM_TDEA_128,
+        S390_FEAT_KM_TDEA_192,
+        S390_FEAT_KIMD_SHA_1,
+        S390_FEAT_KLMD_SHA_1,
+        /* CSSKE is deprecated on newer generations */
+        S390_FEAT_CONDITIONAL_SSKE,
+    };
+    int i;
+
+    for (i = 0; i < ARRAY_SIZE(feats); i++) {
+        set_bit(feats[i], ignored_base_feat);
+    }
+}
+
+static void register_types(void)
+{
+#if 0
+    static const S390FeatInit qemu_latest_init = { S390_FEAT_LIST_QEMU_LATEST };
+    int i;
+
+    init_ignored_base_feat();
+
+    /* init all bitmaps from generated data initially */
+    s390_init_feat_bitmap(qemu_max_cpu_feat_init, qemu_max_cpu_feat);
+    for (i = 0; i < ARRAY_SIZE(s390_cpu_defs); i++) {
+        s390_init_feat_bitmap(s390_cpu_defs[i].base_init,
+                              s390_cpu_defs[i].base_feat);
+        s390_init_feat_bitmap(s390_cpu_defs[i].default_init,
+                              s390_cpu_defs[i].default_feat);
+        s390_init_feat_bitmap(s390_cpu_defs[i].full_init,
+                              s390_cpu_defs[i].full_feat);
+    }
+
+    /* initialize the qemu model with latest definition */
+    s390_set_qemu_cpu_model(QEMU_MAX_CPU_TYPE, QEMU_MAX_CPU_GEN,
+                            QEMU_MAX_CPU_EC_GA, qemu_latest_init);
+
+    for (i = 0; i < ARRAY_SIZE(s390_cpu_defs); i++) {
+        char *base_name = s390_base_cpu_type_name(s390_cpu_defs[i].name);
+        TypeInfo ti_base = {
+            .name = base_name,
+            .parent = TYPE_S390_CPU,
+            .instance_init = s390_cpu_model_initfn,
+            .instance_finalize = s390_cpu_model_finalize,
+            .class_init = s390_base_cpu_model_class_init,
+            .class_data = (void *) &s390_cpu_defs[i],
+        };
+        char *name = s390_cpu_type_name(s390_cpu_defs[i].name);
+        TypeInfo ti = {
+            .name = name,
+            .parent = TYPE_S390_CPU,
+            .instance_init = s390_cpu_model_initfn,
+            .instance_finalize = s390_cpu_model_finalize,
+            .class_init = s390_cpu_model_class_init,
+            .class_data = (void *) &s390_cpu_defs[i],
+        };
+
+        type_register_static(&ti_base);
+        type_register_static(&ti);
+        g_free(base_name);
+        g_free(name);
+    }
+
+    type_register_static(&qemu_s390_cpu_type_info);
+    type_register_static(&max_s390_cpu_type_info);
+#endif
+}
diff --git a/qemu/target/s390x/cpu_models.h b/qemu/target/s390x/cpu_models.h
new file mode 100644
index 00000000..23e53796
--- /dev/null
+++ b/qemu/target/s390x/cpu_models.h
@@ -0,0 +1,109 @@
+/*
+ * CPU models for s390x
+ *
+ * Copyright 2016 IBM Corp.
+ *
+ * Author(s): David Hildenbrand
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#ifndef TARGET_S390X_CPU_MODELS_H
+#define TARGET_S390X_CPU_MODELS_H
+
+#include "cpu_features.h"
+#include "gen-features.h"
+#include "hw/core/cpu.h"
+
+/* static CPU definition */
+struct S390CPUDef {
+    const char *name;       /* name exposed to the user */
+    const char *desc;       /* description exposed to the user */
+    uint8_t gen;            /* hw generation identification */
+    uint16_t type;          /* cpu type identification */
+    uint8_t ec_ga;          /* EC GA version (on which also the BC is based) */
+    uint8_t mha_pow;        /* Maximum Host Address Power, mha = 2^pow - 1 */
+    uint32_t hmfai;         /* hypervisor-managed facilities */
+    /* base/min features, must never be changed between QEMU versions */
+    S390FeatBitmap base_feat;
+    /* used to init base_feat from generated data */
+    S390FeatInit base_init;
+    /* default features, QEMU version specific */
+    S390FeatBitmap default_feat;
+    /* used to init default_feat from generated data */
+    S390FeatInit default_init;
+    /* max allowed features, QEMU version specific */
+    S390FeatBitmap full_feat;
+    /* used to init full_feat from generated data */
+    S390FeatInit full_init;
+};
+
+/* CPU model based on a CPU definition */
+struct S390CPUModel {
+    const S390CPUDef *def;
+    S390FeatBitmap features;
+    /* values copied from the "host" model, can change during migration */
+    uint16_t lowest_ibc;    /* lowest IBC that the hardware supports */
+    uint32_t cpu_id;        /* CPU id */
+    uint8_t cpu_id_format;  /* CPU id format bit */
+    uint8_t cpu_ver;        /* CPU version, usually "ff" for kvm */
+};
+
+/*
+ * CPU ID
+ *
+ * bits 0-7: Zeroes (ff for kvm)
+ * bits 8-31: CPU ID (serial number)
+ * bits 32-47: Machine type
+ * bit  48: CPU ID format
+ * bits 49-63: Zeroes
+ */
+#define cpuid_type(x)   (((x) >> 16) & 0xffff)
+#define cpuid_id(x)     (((x) >> 32) & 0xffffff)
+#define cpuid_ver(x)    (((x) >> 56) & 0xff)
+#define cpuid_format(x) (((x) >> 15) & 0x1)
+
+#define lowest_ibc(x)    (((uint32_t)(x) >> 16) & 0xfff)
+#define unblocked_ibc(x) ((uint32_t)(x) & 0xfff)
+#define has_ibc(x)       (lowest_ibc(x) != 0)
+
+#define S390_GEN_Z10 0xa
+#define ibc_gen(x)   (x == 0 ? 0 : ((x >> 4) + S390_GEN_Z10))
+#define ibc_ec_ga(x) (x & 0xf)
+
+void s390_cpudef_featoff(uint8_t gen, uint8_t ec_ga, S390Feat feat);
+void s390_cpudef_featoff_greater(uint8_t gen, uint8_t ec_ga, S390Feat feat);
+void s390_cpudef_group_featoff_greater(uint8_t gen, uint8_t ec_ga,
+                                       S390FeatGroup group);
+uint32_t s390_get_hmfai(void);
+uint8_t s390_get_mha_pow(void);
+uint32_t s390_get_ibc_val(void);
+static inline uint16_t s390_ibc_from_cpu_model(const S390CPUModel *model)
+{
+    uint16_t ibc = 0;
+
+    if (model->def->gen >= S390_GEN_Z10) {
+        ibc = ((model->def->gen - S390_GEN_Z10) << 4) + model->def->ec_ga;
+    }
+    return ibc;
+}
+void s390_get_feat_block(struct uc_struct *uc, S390FeatType type, uint8_t *data);
+bool s390_has_feat(struct uc_struct *uc, S390Feat feat);
+uint8_t s390_get_gen_for_cpu_type(uint16_t type);
+static inline bool s390_known_cpu_type(uint16_t type)
+{
+    return s390_get_gen_for_cpu_type(type) != 0;
+}
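+/*
+ * Illustration: for the "z13" definition (type 0x2964, gen 13, ec_ga 1),
+ * s390_ibc_from_cpu_model() yields ((13 - 0xa) << 4) + 1 = 0x31. The helper
+ * below then packs, e.g., cpu_ver 0xff, cpu_id 0 and type 0x2964 into
+ * 0xff00000029640000, plus the CPU-ID format bit at bit position 15 for
+ * generations newer than 7.
+ */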
+static inline uint64_t s390_cpuid_from_cpu_model(const S390CPUModel *model)
+{
+    return ((uint64_t)model->cpu_ver << 56) |
+           ((uint64_t)model->cpu_id << 32) |
+           ((uint64_t)model->def->type << 16) |
+           (model->def->gen == 7 ? 0 : (uint64_t)model->cpu_id_format << 15);
+}
+S390CPUDef const *s390_find_cpu_def(uint16_t type, uint8_t gen, uint8_t ec_ga,
+                                    S390FeatBitmap features);
+
+#endif /* TARGET_S390X_CPU_MODELS_H */
diff --git a/qemu/target/s390x/crypto_helper.c b/qemu/target/s390x/crypto_helper.c
new file mode 100644
index 00000000..42f8222e
--- /dev/null
+++ b/qemu/target/s390x/crypto_helper.c
@@ -0,0 +1,60 @@
+/*
+ * s390x crypto helpers
+ *
+ * Copyright (c) 2017 Red Hat Inc
+ *
+ * Authors:
+ *   David Hildenbrand
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "internal.h"
+#include "tcg_s390x.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+
+uint32_t HELPER(msa)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t r3,
+                     uint32_t type)
+{
+    const uintptr_t ra = GETPC();
+    const uint8_t mod = env->regs[0] & 0x80ULL;
+    const uint8_t fc = env->regs[0] & 0x7fULL;
+    uint8_t subfunc[16] = { 0 };
+    uint64_t param_addr;
+    int i;
+
+    switch (type) {
+    case S390_FEAT_TYPE_KMAC:
+    case S390_FEAT_TYPE_KIMD:
+    case S390_FEAT_TYPE_KLMD:
+    case S390_FEAT_TYPE_PCKMO:
+    case S390_FEAT_TYPE_PCC:
+        if (mod) {
+            tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+        }
+        break;
+    }
+
+    s390_get_feat_block(env->uc, type, subfunc);
+    if (!test_be_bit(fc, subfunc)) {
+        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+    }
+
+    switch (fc) {
+    case 0: /* query subfunction */
+        for (i = 0; i < 16; i++) {
+            param_addr = wrap_address(env, env->regs[1] + i);
+            cpu_stb_data_ra(env, param_addr, subfunc[i], ra);
+        }
+        break;
+    default:
+        /* we don't implement any other subfunction yet */
+        g_assert_not_reached();
+    }
+
+    return 0;
+}
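+/*
+ * Illustration: a query call (fc == 0) stores the 16-byte subfunction
+ * bitmap at the address held in general register 1. If, say,
+ * S390_FEAT_KIMD_SHA_1 (KIMD bit 1) is part of the CPU model, the guest
+ * reads 0x40 in byte 0 of the parameter block, per the MSB-first numbering
+ * of test_be_bit()/set_be_bit().
+ */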
diff --git a/qemu/target/s390x/excp_helper.c b/qemu/target/s390x/excp_helper.c
new file mode 100644
index 00000000..83433271
--- /dev/null
+++ b/qemu/target/s390x/excp_helper.c
@@ -0,0 +1,600 @@
+/*
+ * s390x exception / interrupt helpers
+ *
+ * Copyright (c) 2009 Ulrich Hecht
+ * Copyright (c) 2011 Alexander Graf
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internal.h"
+#include "exec/helper-proto.h"
+#include "qemu/timer.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "hw/s390x/ioinst.h"
+//#include "exec/address-spaces.h"
+#include "tcg_s390x.h"
+#include "sysemu/sysemu.h"
+//#include "hw/s390x/s390_flic.h"
+
+void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env,
+                                              uint32_t code, uintptr_t ra)
+{
+    CPUState *cs = env_cpu(env);
+
+    cpu_restore_state(cs, ra, true);
+    //qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
+    //              env->psw.addr);
+    trigger_pgm_exception(env, code);
+    cpu_loop_exit(cs);
+}
+
+void QEMU_NORETURN tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc,
+                                           uintptr_t ra)
+{
+    g_assert(dxc <= 0xff);
+    /* Store the DXC into the lowcore */
+#ifdef UNICORN_ARCH_POSTFIX
+    glue(stl_phys, UNICORN_ARCH_POSTFIX)(env->uc, env_cpu(env)->as,
+        env->psa + offsetof(LowCore, data_exc_code), dxc);
+#else
+    stl_phys(env->uc, env_cpu(env)->as,
+        env->psa + offsetof(LowCore, data_exc_code), dxc);
+#endif
+
+    /* Store the DXC into the FPC if AFP is enabled */
+    if (env->cregs[0] & CR0_AFP) {
+        env->fpc = deposit32(env->fpc, 8, 8, dxc);
+    }
+    tcg_s390_program_interrupt(env, PGM_DATA, ra);
+}
+
+void QEMU_NORETURN tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc,
+                                             uintptr_t ra)
+{
+    g_assert(vxc <= 0xff);
+    /* Always store the VXC into the lowcore, without AFP it is undefined */
+#ifdef UNICORN_ARCH_POSTFIX
+    glue(stl_phys, UNICORN_ARCH_POSTFIX)(env->uc, env_cpu(env)->as,
+        env->psa + offsetof(LowCore, data_exc_code), vxc);
+#else
+    stl_phys(env->uc, env_cpu(env)->as,
+        env->psa + offsetof(LowCore, data_exc_code), vxc);
+#endif
+
+    /* Always store the VXC into the FPC, without AFP it is undefined */
+    env->fpc = deposit32(env->fpc, 8, 8, vxc);
+    tcg_s390_program_interrupt(env, PGM_VECTOR_PROCESSING, ra);
+}
+
+void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc)
+{
+    tcg_s390_data_exception(env, dxc, GETPC());
+}
+
+static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
+{
+    switch (mmu_idx) {
+    case MMU_PRIMARY_IDX:
+        return PSW_ASC_PRIMARY;
+    case MMU_SECONDARY_IDX:
+        return PSW_ASC_SECONDARY;
+    case MMU_HOME_IDX:
+        return PSW_ASC_HOME;
+    default:
+        abort();
+    }
+}
+
+bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+                       MMUAccessType access_type, int mmu_idx,
+                       bool probe, uintptr_t retaddr)
+{
+    S390CPU *cpu = S390_CPU(cs);
+    CPUS390XState *env = &cpu->env;
+    target_ulong vaddr, raddr;
+    uint64_t asc, tec;
+    int prot, excp;
+
+    //qemu_log_mask(CPU_LOG_MMU, "%s: addr 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
+    //              __func__, address, access_type, mmu_idx);
+
+    vaddr = address;
+
+    if (mmu_idx < MMU_REAL_IDX) {
+        asc = cpu_mmu_idx_to_asc(mmu_idx);
+        /* 31-Bit mode */
+        if (!(env->psw.mask & PSW_MASK_64)) {
+            vaddr &= 0x7fffffff;
+        }
+        excp = mmu_translate(env, vaddr, access_type, asc, &raddr, &prot, &tec);
+    } else if (mmu_idx == MMU_REAL_IDX) {
+        /* 31-Bit mode */
+        if (!(env->psw.mask & PSW_MASK_64)) {
+            vaddr &= 0x7fffffff;
+        }
+        excp = mmu_translate_real(env, vaddr, access_type, &raddr, &prot, &tec);
+    } else {
+        g_assert_not_reached();
+    }
+
+    /* check out of RAM access */
+    if (!excp &&
+        !address_space_access_valid(env_cpu(env)->as, raddr,
+                                    TARGET_PAGE_SIZE, access_type,
+                                    MEMTXATTRS_UNSPECIFIED)) {
+        //qemu_log_mask(CPU_LOG_MMU,
+        //              "%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n",
+        //              __func__, (uint64_t)raddr, (uint64_t)ram_size);
+        excp = PGM_ADDRESSING;
+        tec = 0; /* unused */
+    }
+
+    if 
(!excp) { + //qemu_log_mask(CPU_LOG_MMU, + // "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n", + // __func__, (uint64_t)vaddr, (uint64_t)raddr, prot); + tlb_set_page(cs, address & TARGET_PAGE_MASK, raddr, prot, + mmu_idx, TARGET_PAGE_SIZE); + return true; + } + if (probe) { + return false; + } + + if (excp != PGM_ADDRESSING) { +#ifdef UNICORN_ARCH_POSTFIX + glue(stq_phys, UNICORN_ARCH_POSTFIX)(cs->uc, env_cpu(env)->as, + env->psa + offsetof(LowCore, trans_exc_code), tec); +#else + stq_phys(cs->uc, env_cpu(env)->as, + env->psa + offsetof(LowCore, trans_exc_code), tec); +#endif + } + + /* + * For data accesses, ILEN will be filled in from the unwind info, + * within cpu_loop_exit_restore. For code accesses, retaddr == 0, + * and so unwinding will not occur. However, ILEN is also undefined + * for that case -- we choose to set ILEN = 2. + */ + env->int_pgm_ilen = 2; + trigger_pgm_exception(env, excp); + cpu_loop_exit_restore(cs, retaddr); +} + +#if 0 +static void do_program_interrupt(CPUS390XState *env) +{ + uint64_t mask, addr; + LowCore *lowcore; + int ilen = env->int_pgm_ilen; + + assert(ilen == 2 || ilen == 4 || ilen == 6); + + switch (env->int_pgm_code) { + case PGM_PER: + if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) { + break; + } + /* FALL THROUGH */ + case PGM_OPERATION: + case PGM_PRIVILEGED: + case PGM_EXECUTE: + case PGM_PROTECTION: + case PGM_ADDRESSING: + case PGM_SPECIFICATION: + case PGM_DATA: + case PGM_FIXPT_OVERFLOW: + case PGM_FIXPT_DIVIDE: + case PGM_DEC_OVERFLOW: + case PGM_DEC_DIVIDE: + case PGM_HFP_EXP_OVERFLOW: + case PGM_HFP_EXP_UNDERFLOW: + case PGM_HFP_SIGNIFICANCE: + case PGM_HFP_DIVIDE: + case PGM_TRANS_SPEC: + case PGM_SPECIAL_OP: + case PGM_OPERAND: + case PGM_HFP_SQRT: + case PGM_PC_TRANS_SPEC: + case PGM_ALET_SPEC: + case PGM_MONITOR: + /* advance the PSW if our exception is not nullifying */ + env->psw.addr += ilen; + break; + } + + //qemu_log_mask(CPU_LOG_INT, + // "%s: code=0x%x ilen=%d psw: %" PRIx64 " %" PRIx64 "\n", + // __func__, env->int_pgm_code, ilen, env->psw.mask, + // env->psw.addr); + + lowcore = cpu_map_lowcore(env); + + /* Signal PER events with the exception. */ + if (env->per_perc_atmid) { + env->int_pgm_code |= PGM_PER; + lowcore->per_address = cpu_to_be64(env->per_address); + lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid); + env->per_perc_atmid = 0; + } + + lowcore->pgm_ilen = cpu_to_be16(ilen); + lowcore->pgm_code = cpu_to_be16(env->int_pgm_code); + lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env)); + lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr); + mask = be64_to_cpu(lowcore->program_new_psw.mask); + addr = be64_to_cpu(lowcore->program_new_psw.addr); + lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea); + + cpu_unmap_lowcore(lowcore); + + load_psw(env, mask, addr); +} + +static void do_svc_interrupt(CPUS390XState *env) +{ + uint64_t mask, addr; + LowCore *lowcore; + + lowcore = cpu_map_lowcore(env); + + lowcore->svc_code = cpu_to_be16(env->int_svc_code); + lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen); + lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env)); + lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen); + mask = be64_to_cpu(lowcore->svc_new_psw.mask); + addr = be64_to_cpu(lowcore->svc_new_psw.addr); + + cpu_unmap_lowcore(lowcore); + + load_psw(env, mask, addr); + + /* When a PER event is pending, the PER exception has to happen + immediately after the SERVICE CALL one. 
*/ + if (env->per_perc_atmid) { + env->int_pgm_code = PGM_PER; + env->int_pgm_ilen = env->int_svc_ilen; + do_program_interrupt(env); + } +} + +#define VIRTIO_SUBCODE_64 0x0D00 + +static void do_ext_interrupt(CPUS390XState *env) +{ + QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic()); + S390CPU *cpu = env_archcpu(env); + uint64_t mask, addr; + uint16_t cpu_addr; + LowCore *lowcore; + + if (!(env->psw.mask & PSW_MASK_EXT)) { + cpu_abort(CPU(cpu), "Ext int w/o ext mask\n"); + } + + lowcore = cpu_map_lowcore(env); + + if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) && + (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) { + MachineState *ms = MACHINE(qdev_get_machine()); + unsigned int max_cpus = ms->smp.max_cpus; + + lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY); + cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS); + g_assert(cpu_addr < S390_MAX_CPUS); + lowcore->cpu_addr = cpu_to_be16(cpu_addr); + clear_bit(cpu_addr, env->emergency_signals); + if (bitmap_empty(env->emergency_signals, max_cpus)) { + env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL; + } + } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) && + (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) { + lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL); + lowcore->cpu_addr = cpu_to_be16(env->external_call_addr); + env->pending_int &= ~INTERRUPT_EXTERNAL_CALL; + } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) && + (env->cregs[0] & CR0_CKC_SC)) { + lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP); + lowcore->cpu_addr = 0; + env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR; + } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) && + (env->cregs[0] & CR0_CPU_TIMER_SC)) { + lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER); + lowcore->cpu_addr = 0; + env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER; + } else if (qemu_s390_flic_has_service(flic) && + (env->cregs[0] & CR0_SERVICE_SC)) { + uint32_t param; + + param = qemu_s390_flic_dequeue_service(flic); + lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE); + lowcore->ext_params = cpu_to_be32(param); + lowcore->cpu_addr = 0; + } else { + g_assert_not_reached(); + } + + mask = be64_to_cpu(lowcore->external_new_psw.mask); + addr = be64_to_cpu(lowcore->external_new_psw.addr); + lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env)); + lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr); + + cpu_unmap_lowcore(lowcore); + + load_psw(env, mask, addr); +} + +static void do_io_interrupt(CPUS390XState *env) +{ + QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic()); + uint64_t mask, addr; + QEMUS390FlicIO *io; + LowCore *lowcore; + + g_assert(env->psw.mask & PSW_MASK_IO); + io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]); + g_assert(io); + + lowcore = cpu_map_lowcore(env); + + lowcore->subchannel_id = cpu_to_be16(io->id); + lowcore->subchannel_nr = cpu_to_be16(io->nr); + lowcore->io_int_parm = cpu_to_be32(io->parm); + lowcore->io_int_word = cpu_to_be32(io->word); + lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env)); + lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr); + mask = be64_to_cpu(lowcore->io_new_psw.mask); + addr = be64_to_cpu(lowcore->io_new_psw.addr); + + cpu_unmap_lowcore(lowcore); + g_free(io); + + load_psw(env, mask, addr); +} + +typedef struct MchkExtSaveArea { + uint64_t vregs[32][2]; /* 0x0000 */ + uint8_t pad_0x0200[0x0400 - 0x0200]; /* 0x0200 */ +} MchkExtSaveArea; +QEMU_BUILD_BUG_ON(sizeof(MchkExtSaveArea) != 1024); + +static int mchk_store_vregs(CPUS390XState *env, uint64_t mcesao) +{ + hwaddr len = 
sizeof(MchkExtSaveArea); + MchkExtSaveArea *sa; + int i; + + sa = cpu_physical_memory_map(env_cpu(env)->as, mcesao, &len, true); + if (!sa) { + return -EFAULT; + } + if (len != sizeof(MchkExtSaveArea)) { + cpu_physical_memory_unmap(env_cpu(env)->as, sa, len, 1, 0); + return -EFAULT; + } + + for (i = 0; i < 32; i++) { + sa->vregs[i][0] = cpu_to_be64(env->vregs[i][0]); + sa->vregs[i][1] = cpu_to_be64(env->vregs[i][1]); + } + + cpu_physical_memory_unmap(env_cpu(env)->as, sa, len, 1, len); + return 0; +} + +static void do_mchk_interrupt(CPUS390XState *env) +{ + QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic()); + uint64_t mcic = s390_build_validity_mcic() | MCIC_SC_CP; + uint64_t mask, addr, mcesao = 0; + LowCore *lowcore; + int i; + + /* for now we only support channel report machine checks (floating) */ + g_assert(env->psw.mask & PSW_MASK_MCHECK); + g_assert(env->cregs[14] & CR14_CHANNEL_REPORT_SC); + + qemu_s390_flic_dequeue_crw_mchk(flic); + + lowcore = cpu_map_lowcore(env); + + /* extended save area */ + if (mcic & MCIC_VB_VR) { + /* length and alignment is 1024 bytes */ + mcesao = be64_to_cpu(lowcore->mcesad) & ~0x3ffull; + } + + /* try to store vector registers */ + if (!mcesao || mchk_store_vregs(env, mcesao)) { + mcic &= ~MCIC_VB_VR; + } + + /* we are always in z/Architecture mode */ + lowcore->ar_access_id = 1; + + for (i = 0; i < 16; i++) { + lowcore->floating_pt_save_area[i] = cpu_to_be64(*get_freg(env, i)); + lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]); + lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]); + lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]); + } + lowcore->prefixreg_save_area = cpu_to_be32(env->psa); + lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc); + lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr); + lowcore->cpu_timer_save_area = cpu_to_be64(env->cputm); + lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8); + + lowcore->mcic = cpu_to_be64(mcic); + lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env)); + lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr); + mask = be64_to_cpu(lowcore->mcck_new_psw.mask); + addr = be64_to_cpu(lowcore->mcck_new_psw.addr); + + cpu_unmap_lowcore(lowcore); + + load_psw(env, mask, addr); +} + +void s390_cpu_do_interrupt(CPUState *cs) +{ + QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic()); + S390CPU *cpu = S390_CPU(cs); + CPUS390XState *env = &cpu->env; + bool stopped = false; + + //qemu_log_mask(CPU_LOG_INT, "%s: %d at psw=%" PRIx64 ":%" PRIx64 "\n", + // __func__, cs->exception_index, env->psw.mask, env->psw.addr); + +try_deliver: + /* handle machine checks */ + if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) { + cs->exception_index = EXCP_MCHK; + } + /* handle external interrupts */ + if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) { + cs->exception_index = EXCP_EXT; + } + /* handle I/O interrupts */ + if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) { + cs->exception_index = EXCP_IO; + } + /* RESTART interrupt */ + if (cs->exception_index == -1 && s390_cpu_has_restart_int(cpu)) { + cs->exception_index = EXCP_RESTART; + } + /* STOP interrupt has least priority */ + if (cs->exception_index == -1 && s390_cpu_has_stop_int(cpu)) { + cs->exception_index = EXCP_STOP; + } + + switch (cs->exception_index) { + case EXCP_PGM: + do_program_interrupt(env); + break; + case EXCP_SVC: + do_svc_interrupt(env); + break; + case EXCP_EXT: + do_ext_interrupt(env); + break; + case EXCP_IO: + do_io_interrupt(env); + break; + case EXCP_MCHK: 
+        do_mchk_interrupt(env);
+        break;
+    case EXCP_RESTART:
+        do_restart_interrupt(env);
+        break;
+    case EXCP_STOP:
+        do_stop_interrupt(env);
+        stopped = true;
+        break;
+    }
+
+    if (cs->exception_index != -1 && !stopped) {
+        /* check if there are more pending interrupts to deliver */
+        cs->exception_index = -1;
+        goto try_deliver;
+    }
+    cs->exception_index = -1;
+
+    /* we might still have pending interrupts, but not deliverable */
+    if (!env->pending_int && !qemu_s390_flic_has_any(flic)) {
+        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
+    }
+
+    /* WAIT PSW during interrupt injection or STOP interrupt */
+    if ((env->psw.mask & PSW_MASK_WAIT) || stopped) {
+        /* don't trigger a cpu_loop_exit(), use an interrupt instead */
+        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
+    } else if (cs->halted) {
+        /* unhalt if we had a WAIT PSW somewhere in our injection chain */
+        s390_cpu_unhalt(cpu);
+    }
+}
+#endif
+
+bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+    if (interrupt_request & CPU_INTERRUPT_HARD) {
+        S390CPU *cpu = S390_CPU(cs);
+        CPUS390XState *env = &cpu->env;
+
+        if (env->ex_value) {
+            /* Execution of the target insn is indivisible from
+               the parent EXECUTE insn. */
+            return false;
+        }
+        if (s390_cpu_has_int(cpu)) {
+            //s390_cpu_do_interrupt(cs);
+            return true;
+        }
+        if (env->psw.mask & PSW_MASK_WAIT) {
+            /* Woken up because of a floating interrupt but it has already
+             * been delivered. Go back to sleep. */
+            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
+        }
+    }
+    return false;
+}
+
+void s390x_cpu_debug_excp_handler(CPUState *cs)
+{
+    S390CPU *cpu = S390_CPU(cs);
+    CPUS390XState *env = &cpu->env;
+    CPUWatchpoint *wp_hit = cs->watchpoint_hit;
+
+    if (wp_hit && wp_hit->flags & BP_CPU) {
+        /* FIXME: When the storage-alteration-space control bit is set,
+           the exception should only be triggered if the memory access
+           is done using an address space with the storage-alteration-event
+           bit set. We have no way to detect that with the current
+           watchpoint code. */
+        cs->watchpoint_hit = NULL;
+
+        env->per_address = env->psw.addr;
+        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
+        /* FIXME: We currently have no way to detect the address space used
+           to trigger the watchpoint. For now just consider it is the
+           current default ASC. This turns out to be true except when the
+           MVCP and MVCS instructions are used. */
+        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;
+
+        /* Remove all watchpoints to re-execute the code. A PER exception
+           will be triggered, it will call load_psw which will recompute
+           the watchpoints. */
+        cpu_watchpoint_remove_all(cs, BP_CPU);
+        cpu_loop_exit_noexc(cs);
+    }
+}
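+
+/*
+ * A worked sketch of the ASC extraction above, assuming QEMU's cpu.h value
+ * PSW_MASK_ASC == 0x0000C00000000000ULL (PSW bits 16-17):
+ *
+ *     psw.mask = 0x0000800180000000ULL
+ *     (psw.mask & PSW_MASK_ASC) >> 46 == 2   (secondary-space ASC)
+ *
+ * i.e. the two ASC bits land in bits 0-1 of per_perc_atmid.
+ */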
+
+/* Unaligned accesses are only diagnosed with MO_ALIGN. At the moment,
+   this is only for the atomic operations, for which we want to raise a
+   specification exception. */
+void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+                                   MMUAccessType access_type,
+                                   int mmu_idx, uintptr_t retaddr)
+{
+    S390CPU *cpu = S390_CPU(cs);
+    CPUS390XState *env = &cpu->env;
+
+    tcg_s390_program_interrupt(env, PGM_SPECIFICATION, retaddr);
+}
diff --git a/qemu/target/s390x/fpu_helper.c b/qemu/target/s390x/fpu_helper.c
new file mode 100644
index 00000000..1325bc57
--- /dev/null
+++ b/qemu/target/s390x/fpu_helper.c
@@ -0,0 +1,888 @@
+/*
+ * S/390 FPU helper routines
+ *
+ * Copyright (c) 2009 Ulrich Hecht
+ * Copyright (c) 2009 Alexander Graf
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internal.h"
+#include "tcg_s390x.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "exec/helper-proto.h"
+#include "fpu/softfloat.h"
+
+/* #define DEBUG_HELPER */
+#ifdef DEBUG_HELPER
+#define HELPER_LOG(x...) qemu_log(x)
+#else
+#define HELPER_LOG(x...)
+#endif
+
+#define RET128(F) (env->retxl = F.low, F.high)
+
+uint8_t s390_softfloat_exc_to_ieee(unsigned int exc)
+{
+    uint8_t s390_exc = 0;
+
+    s390_exc |= (exc & float_flag_invalid) ? S390_IEEE_MASK_INVALID : 0;
+    s390_exc |= (exc & float_flag_divbyzero) ? S390_IEEE_MASK_DIVBYZERO : 0;
+    s390_exc |= (exc & float_flag_overflow) ? S390_IEEE_MASK_OVERFLOW : 0;
+    s390_exc |= (exc & float_flag_underflow) ? S390_IEEE_MASK_UNDERFLOW : 0;
+    s390_exc |= (exc & float_flag_inexact) ? S390_IEEE_MASK_INEXACT : 0;
+
+    return s390_exc;
+}
+
+/* Should be called after any operation that may raise IEEE exceptions. */
+static void handle_exceptions(CPUS390XState *env, bool XxC, uintptr_t retaddr)
+{
+    unsigned s390_exc, qemu_exc;
+
+    /* Get the exceptions raised by the current operation. Reset the
+       fpu_status contents so that the next operation has a clean slate. */
+    qemu_exc = env->fpu_status.float_exception_flags;
+    if (qemu_exc == 0) {
+        return;
+    }
+    env->fpu_status.float_exception_flags = 0;
+    s390_exc = s390_softfloat_exc_to_ieee(qemu_exc);
+
+    /*
+     * IEEE-Underflow exception recognition exists if a tininess condition
+     * (underflow) exists and
+     * - The mask bit in the FPC is zero and the result is inexact
+     * - The mask bit in the FPC is one
+     * So tininess conditions that are not inexact don't trigger any
+     * underflow action in case the mask bit is not one.
+     */
+    if (!(s390_exc & S390_IEEE_MASK_INEXACT) &&
+        !((env->fpc >> 24) & S390_IEEE_MASK_UNDERFLOW)) {
+        s390_exc &= ~S390_IEEE_MASK_UNDERFLOW;
+    }
+
+    /*
+     * FIXME:
+     * 1. Right now, all inexact conditions are indicated as
+     *    "truncated" (0) and never as "incremented" (1) in the DXC.
+     * 2. Only traps due to invalid/divbyzero are suppressing. Other traps
+     *    are completing, meaning the target register has to be written!
+     *    This, however, will mean that we have to write the register before
+     *    triggering the trap - impossible right now.
+     */
+
+    /*
+     * invalid/divbyzero cannot coexist with other conditions.
+     * overflow/underflow, however, can coexist with inexact; we have to
+     * handle it separately.
+     */
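+    /*
+     * A worked example of the masking below (FPC layout as used in this
+     * file: trap-mask byte at bits 24-31, flag byte at bits 16-23):
+     * suppose softfloat raised overflow + inexact, so
+     * s390_exc == S390_IEEE_MASK_OVERFLOW | S390_IEEE_MASK_INEXACT.
+     * If the overflow mask bit in env->fpc is set, we trap right away and
+     * report both conditions; otherwise the overflow flag is ORed into the
+     * FPC flag byte and only the inexact part is reconsidered, as a
+     * possible trap of its own, in the second block.
+     */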
+    if (s390_exc & ~S390_IEEE_MASK_INEXACT) {
+        if (s390_exc & ~S390_IEEE_MASK_INEXACT & env->fpc >> 24) {
+            /* trap condition - inexact reported along */
+            tcg_s390_data_exception(env, s390_exc, retaddr);
+        }
+        /* nontrap condition - inexact handled differently */
+        env->fpc |= (s390_exc & ~S390_IEEE_MASK_INEXACT) << 16;
+    }
+
+    /* inexact handling */
+    if (s390_exc & S390_IEEE_MASK_INEXACT && !XxC) {
+        /* trap condition - overflow/underflow _not_ reported along */
+        if (s390_exc & S390_IEEE_MASK_INEXACT & env->fpc >> 24) {
+            tcg_s390_data_exception(env, s390_exc & S390_IEEE_MASK_INEXACT,
+                                    retaddr);
+        }
+        /* nontrap condition */
+        env->fpc |= (s390_exc & S390_IEEE_MASK_INEXACT) << 16;
+    }
+}
+
+int float_comp_to_cc(CPUS390XState *env, int float_compare)
+{
+    switch (float_compare) {
+    case float_relation_equal:
+        return 0;
+    case float_relation_less:
+        return 1;
+    case float_relation_greater:
+        return 2;
+    case float_relation_unordered:
+        return 3;
+    default:
+        cpu_abort(env_cpu(env), "unknown return value for float compare\n");
+    }
+}
+
+/* condition codes for unary FP ops */
+uint32_t set_cc_nz_f32(float32 v)
+{
+    if (float32_is_any_nan(v)) {
+        return 3;
+    } else if (float32_is_zero(v)) {
+        return 0;
+    } else if (float32_is_neg(v)) {
+        return 1;
+    } else {
+        return 2;
+    }
+}
+
+uint32_t set_cc_nz_f64(float64 v)
+{
+    if (float64_is_any_nan(v)) {
+        return 3;
+    } else if (float64_is_zero(v)) {
+        return 0;
+    } else if (float64_is_neg(v)) {
+        return 1;
+    } else {
+        return 2;
+    }
+}
+
+uint32_t set_cc_nz_f128(float128 v)
+{
+    if (float128_is_any_nan(v)) {
+        return 3;
+    } else if (float128_is_zero(v)) {
+        return 0;
+    } else if (float128_is_neg(v)) {
+        return 1;
+    } else {
+        return 2;
+    }
+}
+
+static inline uint8_t round_from_m34(uint32_t m34)
+{
+    return extract32(m34, 0, 4);
+}
+
+static inline bool xxc_from_m34(uint32_t m34)
+{
+    /* XxC is bit 1 of m4 */
+    return extract32(m34, 4 + 3 - 1, 1);
+}
+
+/* 32-bit FP addition */
+uint64_t HELPER(aeb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+    float32 ret = float32_add(f1, f2, &env->fpu_status);
+    handle_exceptions(env, false, GETPC());
+    return ret;
+}
+
+/* 64-bit FP addition */
+uint64_t HELPER(adb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+    float64 ret = float64_add(f1, f2, &env->fpu_status);
+    handle_exceptions(env, false, GETPC());
+    return ret;
+}
+
+/* 128-bit FP addition */
+uint64_t HELPER(axb)(CPUS390XState *env, uint64_t ah, uint64_t al,
+                     uint64_t bh, uint64_t bl)
+{
+    float128 ret = float128_add(make_float128(ah, al),
+                                make_float128(bh, bl),
+                                &env->fpu_status);
+    handle_exceptions(env, false, GETPC());
+    return RET128(ret);
+}
+
+/* 32-bit FP subtraction */
+uint64_t HELPER(seb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+    float32 ret = float32_sub(f1, f2, &env->fpu_status);
+    handle_exceptions(env, false, GETPC());
+    return ret;
+}
+
+/* 64-bit FP subtraction */
+uint64_t HELPER(sdb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+    float64 ret = float64_sub(f1, f2, &env->fpu_status);
+    handle_exceptions(env, false, GETPC());
+    return ret;
+}
+
+/* 128-bit FP subtraction */
+uint64_t HELPER(sxb)(CPUS390XState *env, uint64_t ah, uint64_t al,
+                     uint64_t bh, uint64_t bl)
+{
+    float128 ret = float128_sub(make_float128(ah, al),
+                                make_float128(bh, bl),
+                                &env->fpu_status);
+    handle_exceptions(env, false, GETPC());
+    return RET128(ret);
+}
+
+/* 32-bit FP division */
+uint64_t HELPER(deb)(CPUS390XState *env, uint64_t
f1, uint64_t f2) +{ + float32 ret = float32_div(f1, f2, &env->fpu_status); + handle_exceptions(env, false, GETPC()); + return ret; +} + +/* 64-bit FP division */ +uint64_t HELPER(ddb)(CPUS390XState *env, uint64_t f1, uint64_t f2) +{ + float64 ret = float64_div(f1, f2, &env->fpu_status); + handle_exceptions(env, false, GETPC()); + return ret; +} + +/* 128-bit FP division */ +uint64_t HELPER(dxb)(CPUS390XState *env, uint64_t ah, uint64_t al, + uint64_t bh, uint64_t bl) +{ + float128 ret = float128_div(make_float128(ah, al), + make_float128(bh, bl), + &env->fpu_status); + handle_exceptions(env, false, GETPC()); + return RET128(ret); +} + +/* 32-bit FP multiplication */ +uint64_t HELPER(meeb)(CPUS390XState *env, uint64_t f1, uint64_t f2) +{ + float32 ret = float32_mul(f1, f2, &env->fpu_status); + handle_exceptions(env, false, GETPC()); + return ret; +} + +/* 64-bit FP multiplication */ +uint64_t HELPER(mdb)(CPUS390XState *env, uint64_t f1, uint64_t f2) +{ + float64 ret = float64_mul(f1, f2, &env->fpu_status); + handle_exceptions(env, false, GETPC()); + return ret; +} + +/* 64/32-bit FP multiplication */ +uint64_t HELPER(mdeb)(CPUS390XState *env, uint64_t f1, uint64_t f2) +{ + float64 ret = float32_to_float64(f2, &env->fpu_status); + ret = float64_mul(f1, ret, &env->fpu_status); + handle_exceptions(env, false, GETPC()); + return ret; +} + +/* 128-bit FP multiplication */ +uint64_t HELPER(mxb)(CPUS390XState *env, uint64_t ah, uint64_t al, + uint64_t bh, uint64_t bl) +{ + float128 ret = float128_mul(make_float128(ah, al), + make_float128(bh, bl), + &env->fpu_status); + handle_exceptions(env, false, GETPC()); + return RET128(ret); +} + +/* 128/64-bit FP multiplication */ +uint64_t HELPER(mxdb)(CPUS390XState *env, uint64_t ah, uint64_t al, + uint64_t f2) +{ + float128 ret = float64_to_float128(f2, &env->fpu_status); + ret = float128_mul(make_float128(ah, al), ret, &env->fpu_status); + handle_exceptions(env, false, GETPC()); + return RET128(ret); +} + +/* convert 32-bit float to 64-bit float */ +uint64_t HELPER(ldeb)(CPUS390XState *env, uint64_t f2) +{ + float64 ret = float32_to_float64(f2, &env->fpu_status); + handle_exceptions(env, false, GETPC()); + return ret; +} + +/* convert 128-bit float to 64-bit float */ +uint64_t HELPER(ldxb)(CPUS390XState *env, uint64_t ah, uint64_t al, + uint32_t m34) +{ + int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); + float64 ret = float128_to_float64(make_float128(ah, al), &env->fpu_status); + + s390_restore_bfp_rounding_mode(env, old_mode); + handle_exceptions(env, xxc_from_m34(m34), GETPC()); + return ret; +} + +/* convert 64-bit float to 128-bit float */ +uint64_t HELPER(lxdb)(CPUS390XState *env, uint64_t f2) +{ + float128 ret = float64_to_float128(f2, &env->fpu_status); + handle_exceptions(env, false, GETPC()); + return RET128(ret); +} + +/* convert 32-bit float to 128-bit float */ +uint64_t HELPER(lxeb)(CPUS390XState *env, uint64_t f2) +{ + float128 ret = float32_to_float128(f2, &env->fpu_status); + handle_exceptions(env, false, GETPC()); + return RET128(ret); +} + +/* convert 64-bit float to 32-bit float */ +uint64_t HELPER(ledb)(CPUS390XState *env, uint64_t f2, uint32_t m34) +{ + int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); + float32 ret = float64_to_float32(f2, &env->fpu_status); + + s390_restore_bfp_rounding_mode(env, old_mode); + handle_exceptions(env, xxc_from_m34(m34), GETPC()); + return ret; +} + +/* convert 128-bit float to 32-bit float */ +uint64_t HELPER(lexb)(CPUS390XState *env, uint64_t ah, uint64_t 
al, + uint32_t m34) +{ + int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); + float32 ret = float128_to_float32(make_float128(ah, al), &env->fpu_status); + + s390_restore_bfp_rounding_mode(env, old_mode); + handle_exceptions(env, xxc_from_m34(m34), GETPC()); + return ret; +} + +/* 32-bit FP compare */ +uint32_t HELPER(ceb)(CPUS390XState *env, uint64_t f1, uint64_t f2) +{ + int cmp = float32_compare_quiet(f1, f2, &env->fpu_status); + handle_exceptions(env, false, GETPC()); + return float_comp_to_cc(env, cmp); +} + +/* 64-bit FP compare */ +uint32_t HELPER(cdb)(CPUS390XState *env, uint64_t f1, uint64_t f2) +{ + int cmp = float64_compare_quiet(f1, f2, &env->fpu_status); + handle_exceptions(env, false, GETPC()); + return float_comp_to_cc(env, cmp); +} + +/* 128-bit FP compare */ +uint32_t HELPER(cxb)(CPUS390XState *env, uint64_t ah, uint64_t al, + uint64_t bh, uint64_t bl) +{ + int cmp = float128_compare_quiet(make_float128(ah, al), + make_float128(bh, bl), + &env->fpu_status); + handle_exceptions(env, false, GETPC()); + return float_comp_to_cc(env, cmp); +} + +int s390_swap_bfp_rounding_mode(CPUS390XState *env, int m3) +{ + int ret = env->fpu_status.float_rounding_mode; + + switch (m3) { + case 0: + /* current mode */ + break; + case 1: + /* round to nearest with ties away from 0 */ + set_float_rounding_mode(float_round_ties_away, &env->fpu_status); + break; + case 3: + /* round to prepare for shorter precision */ + set_float_rounding_mode(float_round_to_odd, &env->fpu_status); + break; + case 4: + /* round to nearest with ties to even */ + set_float_rounding_mode(float_round_nearest_even, &env->fpu_status); + break; + case 5: + /* round to zero */ + set_float_rounding_mode(float_round_to_zero, &env->fpu_status); + break; + case 6: + /* round to +inf */ + set_float_rounding_mode(float_round_up, &env->fpu_status); + break; + case 7: + /* round to -inf */ + set_float_rounding_mode(float_round_down, &env->fpu_status); + break; + default: + g_assert_not_reached(); + } + return ret; +} + +void s390_restore_bfp_rounding_mode(CPUS390XState *env, int old_mode) +{ + set_float_rounding_mode(old_mode, &env->fpu_status); +} + +/* convert 64-bit int to 32-bit float */ +uint64_t HELPER(cegb)(CPUS390XState *env, int64_t v2, uint32_t m34) +{ + int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); + float32 ret = int64_to_float32(v2, &env->fpu_status); + + s390_restore_bfp_rounding_mode(env, old_mode); + handle_exceptions(env, xxc_from_m34(m34), GETPC()); + return ret; +} + +/* convert 64-bit int to 64-bit float */ +uint64_t HELPER(cdgb)(CPUS390XState *env, int64_t v2, uint32_t m34) +{ + int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); + float64 ret = int64_to_float64(v2, &env->fpu_status); + + s390_restore_bfp_rounding_mode(env, old_mode); + handle_exceptions(env, xxc_from_m34(m34), GETPC()); + return ret; +} + +/* convert 64-bit int to 128-bit float */ +uint64_t HELPER(cxgb)(CPUS390XState *env, int64_t v2, uint32_t m34) +{ + int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); + float128 ret = int64_to_float128(v2, &env->fpu_status); + + s390_restore_bfp_rounding_mode(env, old_mode); + handle_exceptions(env, xxc_from_m34(m34), GETPC()); + return RET128(ret); +} + +/* convert 64-bit uint to 32-bit float */ +uint64_t HELPER(celgb)(CPUS390XState *env, uint64_t v2, uint32_t m34) +{ + int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); + float32 ret = uint64_to_float32(v2, &env->fpu_status); + + 
s390_restore_bfp_rounding_mode(env, old_mode); + handle_exceptions(env, xxc_from_m34(m34), GETPC()); + return ret; +} + +/* convert 64-bit uint to 64-bit float */ +uint64_t HELPER(cdlgb)(CPUS390XState *env, uint64_t v2, uint32_t m34) +{ + int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); + float64 ret = uint64_to_float64(v2, &env->fpu_status); + + s390_restore_bfp_rounding_mode(env, old_mode); + handle_exceptions(env, xxc_from_m34(m34), GETPC()); + return ret; +} + +/* convert 64-bit uint to 128-bit float */ +uint64_t HELPER(cxlgb)(CPUS390XState *env, uint64_t v2, uint32_t m34) +{ + int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); + float128 ret = uint64_to_float128(v2, &env->fpu_status); + + s390_restore_bfp_rounding_mode(env, old_mode); + handle_exceptions(env, xxc_from_m34(m34), GETPC()); + return RET128(ret); +} + +/* convert 32-bit float to 64-bit int */ +uint64_t HELPER(cgeb)(CPUS390XState *env, uint64_t v2, uint32_t m34) +{ + int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); + int64_t ret = float32_to_int64(v2, &env->fpu_status); + + s390_restore_bfp_rounding_mode(env, old_mode); + handle_exceptions(env, xxc_from_m34(m34), GETPC()); + return ret; +} + +/* convert 64-bit float to 64-bit int */ +uint64_t HELPER(cgdb)(CPUS390XState *env, uint64_t v2, uint32_t m34) +{ + int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); + int64_t ret = float64_to_int64(v2, &env->fpu_status); + + s390_restore_bfp_rounding_mode(env, old_mode); + handle_exceptions(env, xxc_from_m34(m34), GETPC()); + return ret; +} + +/* convert 128-bit float to 64-bit int */ +uint64_t HELPER(cgxb)(CPUS390XState *env, uint64_t h, uint64_t l, uint32_t m34) +{ + int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); + float128 v2 = make_float128(h, l); + int64_t ret = float128_to_int64(v2, &env->fpu_status); + + s390_restore_bfp_rounding_mode(env, old_mode); + handle_exceptions(env, xxc_from_m34(m34), GETPC()); + return ret; +} + +/* convert 32-bit float to 32-bit int */ +uint64_t HELPER(cfeb)(CPUS390XState *env, uint64_t v2, uint32_t m34) +{ + int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); + int32_t ret = float32_to_int32(v2, &env->fpu_status); + + s390_restore_bfp_rounding_mode(env, old_mode); + handle_exceptions(env, xxc_from_m34(m34), GETPC()); + return ret; +} + +/* convert 64-bit float to 32-bit int */ +uint64_t HELPER(cfdb)(CPUS390XState *env, uint64_t v2, uint32_t m34) +{ + int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); + int32_t ret = float64_to_int32(v2, &env->fpu_status); + + s390_restore_bfp_rounding_mode(env, old_mode); + handle_exceptions(env, xxc_from_m34(m34), GETPC()); + return ret; +} + +/* convert 128-bit float to 32-bit int */ +uint64_t HELPER(cfxb)(CPUS390XState *env, uint64_t h, uint64_t l, uint32_t m34) +{ + int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); + float128 v2 = make_float128(h, l); + int32_t ret = float128_to_int32(v2, &env->fpu_status); + + s390_restore_bfp_rounding_mode(env, old_mode); + handle_exceptions(env, xxc_from_m34(m34), GETPC()); + return ret; +} + +/* convert 32-bit float to 64-bit uint */ +uint64_t HELPER(clgeb)(CPUS390XState *env, uint64_t v2, uint32_t m34) +{ + int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); + uint64_t ret; + + v2 = float32_to_float64(v2, &env->fpu_status); + ret = float64_to_uint64(v2, &env->fpu_status); + s390_restore_bfp_rounding_mode(env, old_mode); + handle_exceptions(env, 
xxc_from_m34(m34), GETPC()); + return ret; +} + +/* convert 64-bit float to 64-bit uint */ +uint64_t HELPER(clgdb)(CPUS390XState *env, uint64_t v2, uint32_t m34) +{ + int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); + uint64_t ret = float64_to_uint64(v2, &env->fpu_status); + + s390_restore_bfp_rounding_mode(env, old_mode); + handle_exceptions(env, xxc_from_m34(m34), GETPC()); + return ret; +} + +/* convert 128-bit float to 64-bit uint */ +uint64_t HELPER(clgxb)(CPUS390XState *env, uint64_t h, uint64_t l, uint32_t m34) +{ + int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); + uint64_t ret = float128_to_uint64(make_float128(h, l), &env->fpu_status); + + s390_restore_bfp_rounding_mode(env, old_mode); + handle_exceptions(env, xxc_from_m34(m34), GETPC()); + return ret; +} + +/* convert 32-bit float to 32-bit uint */ +uint64_t HELPER(clfeb)(CPUS390XState *env, uint64_t v2, uint32_t m34) +{ + int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); + uint32_t ret = float32_to_uint32(v2, &env->fpu_status); + + s390_restore_bfp_rounding_mode(env, old_mode); + handle_exceptions(env, xxc_from_m34(m34), GETPC()); + return ret; +} + +/* convert 64-bit float to 32-bit uint */ +uint64_t HELPER(clfdb)(CPUS390XState *env, uint64_t v2, uint32_t m34) +{ + int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); + uint32_t ret = float64_to_uint32(v2, &env->fpu_status); + + s390_restore_bfp_rounding_mode(env, old_mode); + handle_exceptions(env, xxc_from_m34(m34), GETPC()); + return ret; +} + +/* convert 128-bit float to 32-bit uint */ +uint64_t HELPER(clfxb)(CPUS390XState *env, uint64_t h, uint64_t l, uint32_t m34) +{ + int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); + uint32_t ret = float128_to_uint32(make_float128(h, l), &env->fpu_status); + + s390_restore_bfp_rounding_mode(env, old_mode); + handle_exceptions(env, xxc_from_m34(m34), GETPC()); + return ret; +} + +/* round to integer 32-bit */ +uint64_t HELPER(fieb)(CPUS390XState *env, uint64_t f2, uint32_t m34) +{ + int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); + float32 ret = float32_round_to_int(f2, &env->fpu_status); + + s390_restore_bfp_rounding_mode(env, old_mode); + handle_exceptions(env, xxc_from_m34(m34), GETPC()); + return ret; +} + +/* round to integer 64-bit */ +uint64_t HELPER(fidb)(CPUS390XState *env, uint64_t f2, uint32_t m34) +{ + int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); + float64 ret = float64_round_to_int(f2, &env->fpu_status); + + s390_restore_bfp_rounding_mode(env, old_mode); + handle_exceptions(env, xxc_from_m34(m34), GETPC()); + return ret; +} + +/* round to integer 128-bit */ +uint64_t HELPER(fixb)(CPUS390XState *env, uint64_t ah, uint64_t al, + uint32_t m34) +{ + int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); + float128 ret = float128_round_to_int(make_float128(ah, al), + &env->fpu_status); + + s390_restore_bfp_rounding_mode(env, old_mode); + handle_exceptions(env, xxc_from_m34(m34), GETPC()); + return RET128(ret); +} + +/* 32-bit FP compare and signal */ +uint32_t HELPER(keb)(CPUS390XState *env, uint64_t f1, uint64_t f2) +{ + int cmp = float32_compare(f1, f2, &env->fpu_status); + handle_exceptions(env, false, GETPC()); + return float_comp_to_cc(env, cmp); +} + +/* 64-bit FP compare and signal */ +uint32_t HELPER(kdb)(CPUS390XState *env, uint64_t f1, uint64_t f2) +{ + int cmp = float64_compare(f1, f2, &env->fpu_status); + handle_exceptions(env, false, GETPC()); + return 
float_comp_to_cc(env, cmp); +} + +/* 128-bit FP compare and signal */ +uint32_t HELPER(kxb)(CPUS390XState *env, uint64_t ah, uint64_t al, + uint64_t bh, uint64_t bl) +{ + int cmp = float128_compare(make_float128(ah, al), + make_float128(bh, bl), + &env->fpu_status); + handle_exceptions(env, false, GETPC()); + return float_comp_to_cc(env, cmp); +} + +/* 32-bit FP multiply and add */ +uint64_t HELPER(maeb)(CPUS390XState *env, uint64_t f1, + uint64_t f2, uint64_t f3) +{ + float32 ret = float32_muladd(f2, f3, f1, 0, &env->fpu_status); + handle_exceptions(env, false, GETPC()); + return ret; +} + +/* 64-bit FP multiply and add */ +uint64_t HELPER(madb)(CPUS390XState *env, uint64_t f1, + uint64_t f2, uint64_t f3) +{ + float64 ret = float64_muladd(f2, f3, f1, 0, &env->fpu_status); + handle_exceptions(env, false, GETPC()); + return ret; +} + +/* 32-bit FP multiply and subtract */ +uint64_t HELPER(mseb)(CPUS390XState *env, uint64_t f1, + uint64_t f2, uint64_t f3) +{ + float32 ret = float32_muladd(f2, f3, f1, float_muladd_negate_c, + &env->fpu_status); + handle_exceptions(env, false, GETPC()); + return ret; +} + +/* 64-bit FP multiply and subtract */ +uint64_t HELPER(msdb)(CPUS390XState *env, uint64_t f1, + uint64_t f2, uint64_t f3) +{ + float64 ret = float64_muladd(f2, f3, f1, float_muladd_negate_c, + &env->fpu_status); + handle_exceptions(env, false, GETPC()); + return ret; +} + +/* The rightmost bit has the number 11. */ +static inline uint16_t dcmask(int bit, bool neg) +{ + return 1 << (11 - bit - neg); +} + +#define DEF_FLOAT_DCMASK(_TYPE) \ +uint16_t _TYPE##_dcmask(CPUS390XState *env, _TYPE f1) \ +{ \ + const bool neg = _TYPE##_is_neg(f1); \ + \ + /* Sorted by most common cases - only one class is possible */ \ + if (_TYPE##_is_normal(f1)) { \ + return dcmask(2, neg); \ + } else if (_TYPE##_is_zero(f1)) { \ + return dcmask(0, neg); \ + } else if (_TYPE##_is_denormal(f1)) { \ + return dcmask(4, neg); \ + } else if (_TYPE##_is_infinity(f1)) { \ + return dcmask(6, neg); \ + } else if (_TYPE##_is_quiet_nan(f1, &env->fpu_status)) { \ + return dcmask(8, neg); \ + } \ + /* signaling nan, as last remaining case */ \ + return dcmask(10, neg); \ +} +DEF_FLOAT_DCMASK(float32) +DEF_FLOAT_DCMASK(float64) +DEF_FLOAT_DCMASK(float128) + +/* test data class 32-bit */ +uint32_t HELPER(tceb)(CPUS390XState *env, uint64_t f1, uint64_t m2) +{ + return (m2 & float32_dcmask(env, f1)) != 0; +} + +/* test data class 64-bit */ +uint32_t HELPER(tcdb)(CPUS390XState *env, uint64_t v1, uint64_t m2) +{ + return (m2 & float64_dcmask(env, v1)) != 0; +} + +/* test data class 128-bit */ +uint32_t HELPER(tcxb)(CPUS390XState *env, uint64_t ah, uint64_t al, uint64_t m2) +{ + return (m2 & float128_dcmask(env, make_float128(ah, al))) != 0; +} + +/* square root 32-bit */ +uint64_t HELPER(sqeb)(CPUS390XState *env, uint64_t f2) +{ + float32 ret = float32_sqrt(f2, &env->fpu_status); + handle_exceptions(env, false, GETPC()); + return ret; +} + +/* square root 64-bit */ +uint64_t HELPER(sqdb)(CPUS390XState *env, uint64_t f2) +{ + float64 ret = float64_sqrt(f2, &env->fpu_status); + handle_exceptions(env, false, GETPC()); + return ret; +} + +/* square root 128-bit */ +uint64_t HELPER(sqxb)(CPUS390XState *env, uint64_t ah, uint64_t al) +{ + float128 ret = float128_sqrt(make_float128(ah, al), &env->fpu_status); + handle_exceptions(env, false, GETPC()); + return RET128(ret); +} + +static const int fpc_to_rnd[8] = { + float_round_nearest_even, + float_round_to_zero, + float_round_up, + float_round_down, + -1, + -1, + -1, + float_round_to_odd, +}; 
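+
+/*
+ * A minimal usage sketch for the table above (indices are the BFP
+ * rounding-mode field, i.e. the low three bits of the FPC):
+ *
+ *     fpc_to_rnd[0] == float_round_nearest_even
+ *     fpc_to_rnd[1] == float_round_to_zero
+ *     fpc_to_rnd[7] == float_round_to_odd
+ *     fpc_to_rnd[4] == -1  ->  the sfpc/sfas/srnm helpers below raise a
+ *                              specification exception
+ */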
+
+/* set fpc */
+void HELPER(sfpc)(CPUS390XState *env, uint64_t fpc)
+{
+    if (fpc_to_rnd[fpc & 0x7] == -1 || fpc & 0x03030088u ||
+        (!s390_has_feat(env->uc, S390_FEAT_FLOATING_POINT_EXT) && fpc & 0x4)) {
+        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
+    }
+
+    /* Install everything in the main FPC. */
+    env->fpc = fpc;
+
+    /* Install the rounding mode in the shadow fpu_status. */
+    set_float_rounding_mode(fpc_to_rnd[fpc & 0x7], &env->fpu_status);
+}
+
+/* set fpc and signal */
+void HELPER(sfas)(CPUS390XState *env, uint64_t fpc)
+{
+    uint32_t signalling = env->fpc;
+    uint32_t s390_exc;
+
+    if (fpc_to_rnd[fpc & 0x7] == -1 || fpc & 0x03030088u ||
+        (!s390_has_feat(env->uc, S390_FEAT_FLOATING_POINT_EXT) && fpc & 0x4)) {
+        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
+    }
+
+    /*
+     * FPC is set to the FPC operand with a bitwise OR of the signalling
+     * flags.
+     */
+    env->fpc = fpc | (signalling & 0x00ff0000);
+    set_float_rounding_mode(fpc_to_rnd[fpc & 0x7], &env->fpu_status);
+
+    /*
+     * If any signaling flag is enabled in the new FPC mask, a
+     * simulated IEEE exception occurs.
+     */
+    s390_exc = (signalling >> 16) & (fpc >> 24);
+    if (s390_exc) {
+        if (s390_exc & S390_IEEE_MASK_INVALID) {
+            s390_exc = S390_IEEE_MASK_INVALID;
+        } else if (s390_exc & S390_IEEE_MASK_DIVBYZERO) {
+            s390_exc = S390_IEEE_MASK_DIVBYZERO;
+        } else if (s390_exc & S390_IEEE_MASK_OVERFLOW) {
+            s390_exc &= (S390_IEEE_MASK_OVERFLOW | S390_IEEE_MASK_INEXACT);
+        } else if (s390_exc & S390_IEEE_MASK_UNDERFLOW) {
+            s390_exc &= (S390_IEEE_MASK_UNDERFLOW | S390_IEEE_MASK_INEXACT);
+        } else if (s390_exc & S390_IEEE_MASK_INEXACT) {
+            s390_exc = S390_IEEE_MASK_INEXACT;
+        } else if (s390_exc & S390_IEEE_MASK_QUANTUM) {
+            s390_exc = S390_IEEE_MASK_QUANTUM;
+        }
+        tcg_s390_data_exception(env, s390_exc | 3, GETPC());
+    }
+}
+
+/* set bfp rounding mode */
+void HELPER(srnm)(CPUS390XState *env, uint64_t rnd)
+{
+    if (rnd > 0x7 || fpc_to_rnd[rnd & 0x7] == -1) {
+        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
+    }
+
+    env->fpc = deposit32(env->fpc, 0, 3, rnd);
+    set_float_rounding_mode(fpc_to_rnd[rnd & 0x7], &env->fpu_status);
+}
diff --git a/qemu/target/s390x/gen-features.c b/qemu/target/s390x/gen-features.c
new file mode 100644
index 00000000..6278845b
--- /dev/null
+++ b/qemu/target/s390x/gen-features.c
@@ -0,0 +1,986 @@
+/*
+ * S390 feature list generator
+ *
+ * Copyright IBM Corp. 2016, 2018
+ *
+ * Author(s): Michael Mueller
+ *            David Hildenbrand
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
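+
+/*
+ * Note: this file is a stand-alone build-time helper, not part of the
+ * emulator binary; its main() (at the bottom of the file) prints the
+ * feature-list defines to stdout, which are captured into gen-features.h.
+ * A plausible manual invocation, include paths assumed:
+ *
+ *     cc -o gen-features gen-features.c && ./gen-features > gen-features.h
+ */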
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <string.h>
+#include "cpu_features_def.h"
+
+#define ARRAY_SIZE(array) (sizeof(array) / sizeof(array[0]))
+
+/***** BEGIN FEATURE DEFS *****/
+
+#define S390_FEAT_GROUP_PLO \
+    S390_FEAT_PLO_CL, \
+    S390_FEAT_PLO_CLG, \
+    S390_FEAT_PLO_CLGR, \
+    S390_FEAT_PLO_CLX, \
+    S390_FEAT_PLO_CS, \
+    S390_FEAT_PLO_CSG, \
+    S390_FEAT_PLO_CSGR, \
+    S390_FEAT_PLO_CSX, \
+    S390_FEAT_PLO_DCS, \
+    S390_FEAT_PLO_DCSG, \
+    S390_FEAT_PLO_DCSGR, \
+    S390_FEAT_PLO_DCSX, \
+    S390_FEAT_PLO_CSST, \
+    S390_FEAT_PLO_CSSTG, \
+    S390_FEAT_PLO_CSSTGR, \
+    S390_FEAT_PLO_CSSTX, \
+    S390_FEAT_PLO_CSDST, \
+    S390_FEAT_PLO_CSDSTG, \
+    S390_FEAT_PLO_CSDSTGR, \
+    S390_FEAT_PLO_CSDSTX, \
+    S390_FEAT_PLO_CSTST, \
+    S390_FEAT_PLO_CSTSTG, \
+    S390_FEAT_PLO_CSTSTGR, \
+    S390_FEAT_PLO_CSTSTX
+
+#define S390_FEAT_GROUP_TOD_CLOCK_STEERING \
+    S390_FEAT_TOD_CLOCK_STEERING, \
+    S390_FEAT_PTFF_QTO, \
+    S390_FEAT_PTFF_QSI, \
+    S390_FEAT_PTFF_QPT, \
+    S390_FEAT_PTFF_STO
+
+#define S390_FEAT_GROUP_GEN13_PTFF \
+    S390_FEAT_PTFF_QUI, \
+    S390_FEAT_PTFF_QTOU, \
+    S390_FEAT_PTFF_STOU
+
+#define S390_FEAT_GROUP_MULTIPLE_EPOCH_PTFF \
+    S390_FEAT_PTFF_QSIE, \
+    S390_FEAT_PTFF_QTOUE, \
+    S390_FEAT_PTFF_STOE, \
+    S390_FEAT_PTFF_STOUE
+
+#define S390_FEAT_GROUP_MSA \
+    S390_FEAT_MSA, \
+    S390_FEAT_KMAC_DEA, \
+    S390_FEAT_KMAC_TDEA_128, \
+    S390_FEAT_KMAC_TDEA_192, \
+    S390_FEAT_KMC_DEA, \
+    S390_FEAT_KMC_TDEA_128, \
+    S390_FEAT_KMC_TDEA_192, \
+    S390_FEAT_KM_DEA, \
+    S390_FEAT_KM_TDEA_128, \
+    S390_FEAT_KM_TDEA_192, \
+    S390_FEAT_KIMD_SHA_1, \
+    S390_FEAT_KLMD_SHA_1
+
+#define S390_FEAT_GROUP_MSA_EXT_1 \
+    S390_FEAT_KMC_AES_128, \
+    S390_FEAT_KM_AES_128, \
+    S390_FEAT_KIMD_SHA_256, \
+    S390_FEAT_KLMD_SHA_256
+
+#define S390_FEAT_GROUP_MSA_EXT_2 \
+    S390_FEAT_KMC_AES_192, \
+    S390_FEAT_KMC_AES_256, \
+    S390_FEAT_KMC_PRNG, \
+    S390_FEAT_KM_AES_192, \
+    S390_FEAT_KM_AES_256, \
+    S390_FEAT_KIMD_SHA_512, \
+    S390_FEAT_KLMD_SHA_512
+
+#define S390_FEAT_GROUP_MSA_EXT_3 \
+    S390_FEAT_MSA_EXT_3, \
+    S390_FEAT_KMAC_EDEA, \
+    S390_FEAT_KMAC_ETDEA_128, \
+    S390_FEAT_KMAC_ETDEA_192, \
+    S390_FEAT_KMC_EAES_128, \
+    S390_FEAT_KMC_EAES_192, \
+    S390_FEAT_KMC_EAES_256, \
+    S390_FEAT_KMC_EDEA, \
+    S390_FEAT_KMC_ETDEA_128, \
+    S390_FEAT_KMC_ETDEA_192, \
+    S390_FEAT_KM_EDEA, \
+    S390_FEAT_KM_ETDEA_128, \
+    S390_FEAT_KM_ETDEA_192, \
+    S390_FEAT_KM_EAES_128, \
+    S390_FEAT_KM_EAES_192, \
+    S390_FEAT_KM_EAES_256, \
+    S390_FEAT_PCKMO_EDEA, \
+    S390_FEAT_PCKMO_ETDEA_128, \
+    S390_FEAT_PCKMO_ETDEA_256, \
+    S390_FEAT_PCKMO_AES_128, \
+    S390_FEAT_PCKMO_AES_192, \
+    S390_FEAT_PCKMO_AES_256
+
+#define S390_FEAT_GROUP_MSA_EXT_4 \
+    S390_FEAT_MSA_EXT_4, \
+    S390_FEAT_KMAC_AES_128, \
+    S390_FEAT_KMAC_AES_192, \
+    S390_FEAT_KMAC_AES_256, \
+    S390_FEAT_KMAC_EAES_128, \
+    S390_FEAT_KMAC_EAES_192, \
+    S390_FEAT_KMAC_EAES_256, \
+    S390_FEAT_KM_XTS_AES_128, \
+    S390_FEAT_KM_XTS_AES_256, \
+    S390_FEAT_KM_XTS_EAES_128, \
+    S390_FEAT_KM_XTS_EAES_256, \
+    S390_FEAT_KIMD_GHASH, \
+    S390_FEAT_KMCTR_DEA, \
+    S390_FEAT_KMCTR_TDEA_128, \
+    S390_FEAT_KMCTR_TDEA_192, \
+    S390_FEAT_KMCTR_EDEA, \
+    S390_FEAT_KMCTR_ETDEA_128, \
+    S390_FEAT_KMCTR_ETDEA_192, \
+    S390_FEAT_KMCTR_AES_128, \
+    S390_FEAT_KMCTR_AES_192, \
+    S390_FEAT_KMCTR_AES_256, \
+    S390_FEAT_KMCTR_EAES_128, \
+    S390_FEAT_KMCTR_EAES_192, \
+    S390_FEAT_KMCTR_EAES_256, \
+    S390_FEAT_KMF_DEA, \
+    S390_FEAT_KMF_TDEA_128, \
+    S390_FEAT_KMF_TDEA_192, \
+    S390_FEAT_KMF_EDEA, \
+    S390_FEAT_KMF_ETDEA_128, \
+    S390_FEAT_KMF_ETDEA_192, \
+    S390_FEAT_KMF_AES_128, \
+    S390_FEAT_KMF_AES_192, \
+    S390_FEAT_KMF_AES_256, \
+
S390_FEAT_KMF_EAES_128, \ + S390_FEAT_KMF_EAES_192, \ + S390_FEAT_KMF_EAES_256, \ + S390_FEAT_KMO_DEA, \ + S390_FEAT_KMO_TDEA_128, \ + S390_FEAT_KMO_TDEA_192, \ + S390_FEAT_KMO_EDEA, \ + S390_FEAT_KMO_ETDEA_128, \ + S390_FEAT_KMO_ETDEA_192, \ + S390_FEAT_KMO_AES_128, \ + S390_FEAT_KMO_AES_192, \ + S390_FEAT_KMO_AES_256, \ + S390_FEAT_KMO_EAES_128, \ + S390_FEAT_KMO_EAES_192, \ + S390_FEAT_KMO_EAES_256, \ + S390_FEAT_PCC_CMAC_DEA, \ + S390_FEAT_PCC_CMAC_TDEA_128, \ + S390_FEAT_PCC_CMAC_TDEA_192, \ + S390_FEAT_PCC_CMAC_ETDEA_128, \ + S390_FEAT_PCC_CMAC_ETDEA_192, \ + S390_FEAT_PCC_CMAC_TDEA, \ + S390_FEAT_PCC_CMAC_AES_128, \ + S390_FEAT_PCC_CMAC_AES_192, \ + S390_FEAT_PCC_CMAC_AES_256, \ + S390_FEAT_PCC_CMAC_EAES_128, \ + S390_FEAT_PCC_CMAC_EAES_192, \ + S390_FEAT_PCC_CMAC_EAES_256, \ + S390_FEAT_PCC_XTS_AES_128, \ + S390_FEAT_PCC_XTS_AES_256, \ + S390_FEAT_PCC_XTS_EAES_128, \ + S390_FEAT_PCC_XTS_EAES_256 + +#define S390_FEAT_GROUP_MSA_EXT_5 \ + S390_FEAT_MSA_EXT_5, \ + S390_FEAT_PPNO_SHA_512_DRNG + +#define S390_FEAT_GROUP_MSA_EXT_6 \ + S390_FEAT_KIMD_SHA3_224, \ + S390_FEAT_KIMD_SHA3_256, \ + S390_FEAT_KIMD_SHA3_384, \ + S390_FEAT_KIMD_SHA3_512, \ + S390_FEAT_KIMD_SHAKE_128, \ + S390_FEAT_KIMD_SHAKE_256, \ + S390_FEAT_KLMD_SHA3_224, \ + S390_FEAT_KLMD_SHA3_256, \ + S390_FEAT_KLMD_SHA3_384, \ + S390_FEAT_KLMD_SHA3_512, \ + S390_FEAT_KLMD_SHAKE_128, \ + S390_FEAT_KLMD_SHAKE_256 + +#define S390_FEAT_GROUP_MSA_EXT_7 \ + S390_FEAT_PRNO_TRNG_QRTCR, \ + S390_FEAT_PRNO_TRNG + +#define S390_FEAT_GROUP_MSA_EXT_8 \ + S390_FEAT_MSA_EXT_8, \ + S390_FEAT_KMA_GCM_AES_128, \ + S390_FEAT_KMA_GCM_AES_192, \ + S390_FEAT_KMA_GCM_AES_256 , \ + S390_FEAT_KMA_GCM_EAES_128, \ + S390_FEAT_KMA_GCM_EAES_192, \ + S390_FEAT_KMA_GCM_EAES_256 + +#define S390_FEAT_GROUP_MSA_EXT_9 \ + S390_FEAT_MSA_EXT_9, \ + S390_FEAT_KDSA_ECDSA_VERIFY_P256, \ + S390_FEAT_KDSA_ECDSA_VERIFY_P384, \ + S390_FEAT_KDSA_ECDSA_VERIFY_P512, \ + S390_FEAT_KDSA_ECDSA_SIGN_P256, \ + S390_FEAT_KDSA_ECDSA_SIGN_P384, \ + S390_FEAT_KDSA_ECDSA_SIGN_P512, \ + S390_FEAT_KDSA_EECDSA_SIGN_P256, \ + S390_FEAT_KDSA_EECDSA_SIGN_P384, \ + S390_FEAT_KDSA_EECDSA_SIGN_P512, \ + S390_FEAT_KDSA_EDDSA_VERIFY_ED25519, \ + S390_FEAT_KDSA_EDDSA_VERIFY_ED448, \ + S390_FEAT_KDSA_EDDSA_SIGN_ED25519, \ + S390_FEAT_KDSA_EDDSA_SIGN_ED448, \ + S390_FEAT_KDSA_EEDDSA_SIGN_ED25519, \ + S390_FEAT_KDSA_EEDDSA_SIGN_ED448, \ + S390_FEAT_PCC_SCALAR_MULT_P256, \ + S390_FEAT_PCC_SCALAR_MULT_P384, \ + S390_FEAT_PCC_SCALAR_MULT_P512, \ + S390_FEAT_PCC_SCALAR_MULT_ED25519, \ + S390_FEAT_PCC_SCALAR_MULT_ED448, \ + S390_FEAT_PCC_SCALAR_MULT_X25519, \ + S390_FEAT_PCC_SCALAR_MULT_X448 + +#define S390_FEAT_GROUP_MSA_EXT_9_PCKMO \ + S390_FEAT_PCKMO_ECC_P256, \ + S390_FEAT_PCKMO_ECC_P384, \ + S390_FEAT_PCKMO_ECC_P521, \ + S390_FEAT_PCKMO_ECC_ED25519, \ + S390_FEAT_PCKMO_ECC_ED448 + +#define S390_FEAT_GROUP_ENH_SORT \ + S390_FEAT_ESORT_BASE, \ + S390_FEAT_SORTL_SFLR, \ + S390_FEAT_SORTL_SVLR, \ + S390_FEAT_SORTL_32, \ + S390_FEAT_SORTL_128, \ + S390_FEAT_SORTL_F0 + + +#define S390_FEAT_GROUP_DEFLATE_CONVERSION \ + S390_FEAT_DEFLATE_BASE, \ + S390_FEAT_DEFLATE_GHDT, \ + S390_FEAT_DEFLATE_CMPR, \ + S390_FEAT_DEFLATE_XPND, \ + S390_FEAT_DEFLATE_F0 + +/* cpu feature groups */ +static uint16_t group_PLO[] = { + S390_FEAT_GROUP_PLO, +}; +static uint16_t group_TOD_CLOCK_STEERING[] = { + S390_FEAT_GROUP_TOD_CLOCK_STEERING, +}; +static uint16_t group_GEN13_PTFF[] = { + S390_FEAT_GROUP_GEN13_PTFF, +}; +static uint16_t group_MULTIPLE_EPOCH_PTFF[] = { + S390_FEAT_GROUP_MULTIPLE_EPOCH_PTFF, +}; +static 
uint16_t group_MSA[] = { + S390_FEAT_GROUP_MSA, +}; +static uint16_t group_MSA_EXT_1[] = { + S390_FEAT_GROUP_MSA_EXT_1, +}; +static uint16_t group_MSA_EXT_2[] = { + S390_FEAT_GROUP_MSA_EXT_2, +}; +static uint16_t group_MSA_EXT_3[] = { + S390_FEAT_GROUP_MSA_EXT_3, +}; +static uint16_t group_MSA_EXT_4[] = { + S390_FEAT_GROUP_MSA_EXT_4, +}; +static uint16_t group_MSA_EXT_5[] = { + S390_FEAT_GROUP_MSA_EXT_5, +}; +static uint16_t group_MSA_EXT_6[] = { + S390_FEAT_GROUP_MSA_EXT_6, +}; +static uint16_t group_MSA_EXT_7[] = { + S390_FEAT_GROUP_MSA_EXT_7, +}; +static uint16_t group_MSA_EXT_8[] = { + S390_FEAT_GROUP_MSA_EXT_8, +}; + +static uint16_t group_MSA_EXT_9[] = { + S390_FEAT_GROUP_MSA_EXT_9, +}; + +static uint16_t group_MSA_EXT_9_PCKMO[] = { + S390_FEAT_GROUP_MSA_EXT_9_PCKMO, +}; + +static uint16_t group_ENH_SORT[] = { + S390_FEAT_GROUP_ENH_SORT, +}; + +static uint16_t group_DEFLATE_CONVERSION[] = { + S390_FEAT_GROUP_DEFLATE_CONVERSION, +}; + +/* Base features (in order of release) + * Only non-hypervisor managed features belong here. + * Base feature sets are static meaning they do not change in future QEMU + * releases. + */ +static uint16_t base_GEN7_GA1[] = { + S390_FEAT_GROUP_PLO, + S390_FEAT_ESAN3, + S390_FEAT_ZARCH, +}; + +#define base_GEN7_GA2 EmptyFeat +#define base_GEN7_GA3 EmptyFeat + +static uint16_t base_GEN8_GA1[] = { + S390_FEAT_DAT_ENH, + S390_FEAT_EXTENDED_TRANSLATION_2, + S390_FEAT_GROUP_MSA, + S390_FEAT_LONG_DISPLACEMENT, + S390_FEAT_LONG_DISPLACEMENT_FAST, + S390_FEAT_HFP_MADDSUB, +}; + +#define base_GEN8_GA2 EmptyFeat +#define base_GEN8_GA3 EmptyFeat +#define base_GEN8_GA4 EmptyFeat +#define base_GEN8_GA5 EmptyFeat + +static uint16_t base_GEN9_GA1[] = { + S390_FEAT_IDTE_SEGMENT, + S390_FEAT_ASN_LX_REUSE, + S390_FEAT_STFLE, + S390_FEAT_SENSE_RUNNING_STATUS, + S390_FEAT_EXTENDED_IMMEDIATE, + S390_FEAT_EXTENDED_TRANSLATION_3, + S390_FEAT_HFP_UNNORMALIZED_EXT, + S390_FEAT_ETF2_ENH, + S390_FEAT_STORE_CLOCK_FAST, + S390_FEAT_GROUP_TOD_CLOCK_STEERING, + S390_FEAT_ETF3_ENH, + S390_FEAT_DAT_ENH_2, +}; + +#define base_GEN9_GA2 EmptyFeat +#define base_GEN9_GA3 EmptyFeat + +static uint16_t base_GEN10_GA1[] = { + S390_FEAT_CONDITIONAL_SSKE, + S390_FEAT_PARSING_ENH, + S390_FEAT_MOVE_WITH_OPTIONAL_SPEC, + S390_FEAT_EXTRACT_CPU_TIME, + S390_FEAT_COMPARE_AND_SWAP_AND_STORE, + S390_FEAT_COMPARE_AND_SWAP_AND_STORE_2, + S390_FEAT_GENERAL_INSTRUCTIONS_EXT, + S390_FEAT_EXECUTE_EXT, + S390_FEAT_FLOATING_POINT_SUPPPORT_ENH, + S390_FEAT_DFP, + S390_FEAT_DFP_FAST, + S390_FEAT_PFPO, +}; +#define base_GEN10_GA2 EmptyFeat +#define base_GEN10_GA3 EmptyFeat + +static uint16_t base_GEN11_GA1[] = { + S390_FEAT_NONQ_KEY_SETTING, + S390_FEAT_ENHANCED_MONITOR, + S390_FEAT_FLOATING_POINT_EXT, + S390_FEAT_SET_PROGRAM_PARAMETERS, + S390_FEAT_STFLE_45, + S390_FEAT_CMPSC_ENH, + S390_FEAT_INTERLOCKED_ACCESS_2, +}; + +#define base_GEN11_GA2 EmptyFeat + +static uint16_t base_GEN12_GA1[] = { + S390_FEAT_DFP_ZONED_CONVERSION, + S390_FEAT_STFLE_49, + S390_FEAT_LOCAL_TLB_CLEARING, +}; + +#define base_GEN12_GA2 EmptyFeat + +static uint16_t base_GEN13_GA1[] = { + S390_FEAT_STFLE_53, + S390_FEAT_DFP_PACKED_CONVERSION, + S390_FEAT_GROUP_GEN13_PTFF, +}; + +#define base_GEN13_GA2 EmptyFeat + +static uint16_t base_GEN14_GA1[] = { + S390_FEAT_ENTROPY_ENC_COMP, + S390_FEAT_MISC_INSTRUCTION_EXT, + S390_FEAT_SEMAPHORE_ASSIST, + S390_FEAT_TIME_SLICE_INSTRUMENTATION, + S390_FEAT_ORDER_PRESERVING_COMPRESSION, +}; + +#define base_GEN14_GA2 EmptyFeat + +static uint16_t base_GEN15_GA1[] = { + S390_FEAT_MISC_INSTRUCTION_EXT3, +}; + +/* 
Full features (in order of release) + * Automatically includes corresponding base features. + * Full features are all features this hardware supports even if kvm/QEMU do not + * support these features yet. + */ +static uint16_t full_GEN7_GA1[] = { + S390_FEAT_PPA15, + S390_FEAT_BPB, + S390_FEAT_SIE_F2, + S390_FEAT_SIE_SKEY, + S390_FEAT_SIE_GPERE, + S390_FEAT_SIE_IB, + S390_FEAT_SIE_CEI, +}; + +static uint16_t full_GEN7_GA2[] = { + S390_FEAT_EXTENDED_TRANSLATION_2, +}; + +static uint16_t full_GEN7_GA3[] = { + S390_FEAT_LONG_DISPLACEMENT, + S390_FEAT_SIE_SIIF, +}; + +static uint16_t full_GEN8_GA1[] = { + S390_FEAT_SIE_GSLS, + S390_FEAT_SIE_64BSCAO, +}; + +#define full_GEN8_GA2 EmptyFeat + +static uint16_t full_GEN8_GA3[] = { + S390_FEAT_ASN_LX_REUSE, + S390_FEAT_EXTENDED_TRANSLATION_3, +}; + +#define full_GEN8_GA4 EmptyFeat +#define full_GEN8_GA5 EmptyFeat + +static uint16_t full_GEN9_GA1[] = { + S390_FEAT_STORE_HYPERVISOR_INFO, + S390_FEAT_GROUP_MSA_EXT_1, + S390_FEAT_CMM, + S390_FEAT_SIE_CMMA, +}; + +static uint16_t full_GEN9_GA2[] = { + S390_FEAT_MOVE_WITH_OPTIONAL_SPEC, + S390_FEAT_EXTRACT_CPU_TIME, + S390_FEAT_COMPARE_AND_SWAP_AND_STORE, + S390_FEAT_FLOATING_POINT_SUPPPORT_ENH, + S390_FEAT_DFP, +}; + +static uint16_t full_GEN9_GA3[] = { + S390_FEAT_CONDITIONAL_SSKE, + S390_FEAT_PFPO, +}; + +static uint16_t full_GEN10_GA1[] = { + S390_FEAT_EDAT, + S390_FEAT_CONFIGURATION_TOPOLOGY, + S390_FEAT_GROUP_MSA_EXT_2, + S390_FEAT_ESOP, + S390_FEAT_SIE_PFMFI, + S390_FEAT_SIE_SIGPIF, +}; + +static uint16_t full_GEN10_GA2[] = { + S390_FEAT_SET_PROGRAM_PARAMETERS, + S390_FEAT_SIE_IBS, +}; + +static uint16_t full_GEN10_GA3[] = { + S390_FEAT_GROUP_MSA_EXT_3, +}; + +static uint16_t full_GEN11_GA1[] = { + S390_FEAT_IPTE_RANGE, + S390_FEAT_ACCESS_EXCEPTION_FS_INDICATION, + S390_FEAT_GROUP_MSA_EXT_4, +}; + +#define full_GEN11_GA2 EmptyFeat + +static uint16_t full_GEN12_GA1[] = { + S390_FEAT_CONSTRAINT_TRANSACTIONAL_EXE, + S390_FEAT_TRANSACTIONAL_EXE, + S390_FEAT_RUNTIME_INSTRUMENTATION, + S390_FEAT_ZPCI, + S390_FEAT_ADAPTER_EVENT_NOTIFICATION, + S390_FEAT_ADAPTER_INT_SUPPRESSION, + S390_FEAT_EDAT_2, + S390_FEAT_SIDE_EFFECT_ACCESS_ESOP2, + S390_FEAT_AP_QUERY_CONFIG_INFO, + S390_FEAT_AP_QUEUE_INTERRUPT_CONTROL, + S390_FEAT_AP_FACILITIES_TEST, + S390_FEAT_AP, +}; + +static uint16_t full_GEN12_GA2[] = { + S390_FEAT_GROUP_MSA_EXT_5, +}; + +static uint16_t full_GEN13_GA1[] = { + S390_FEAT_VECTOR, +}; + +#define full_GEN13_GA2 EmptyFeat + +static uint16_t full_GEN14_GA1[] = { + S390_FEAT_INSTRUCTION_EXEC_PROT, + S390_FEAT_GUARDED_STORAGE, + S390_FEAT_VECTOR_PACKED_DECIMAL, + S390_FEAT_VECTOR_ENH, + S390_FEAT_MULTIPLE_EPOCH, + S390_FEAT_TEST_PENDING_EXT_INTERRUPTION, + S390_FEAT_INSERT_REFERENCE_BITS_MULT, + S390_FEAT_GROUP_MSA_EXT_6, + S390_FEAT_GROUP_MSA_EXT_7, + S390_FEAT_GROUP_MSA_EXT_8, + S390_FEAT_CMM_NT, + S390_FEAT_ETOKEN, + S390_FEAT_HPMA2, + S390_FEAT_SIE_KSS, + S390_FEAT_GROUP_MULTIPLE_EPOCH_PTFF, +}; + +#define full_GEN14_GA2 EmptyFeat + +static uint16_t full_GEN15_GA1[] = { + S390_FEAT_VECTOR_ENH2, + S390_FEAT_GROUP_ENH_SORT, + S390_FEAT_GROUP_DEFLATE_CONVERSION, + S390_FEAT_VECTOR_PACKED_DECIMAL_ENH, + S390_FEAT_GROUP_MSA_EXT_9, + S390_FEAT_GROUP_MSA_EXT_9_PCKMO, + S390_FEAT_ETOKEN, +}; + +/* Default features (in order of release) + * Automatically includes corresponding base features. + * Default features are all features this version of QEMU supports for this + * hardware model. Default feature sets can grow with new QEMU releases. 
+ */ +#define default_GEN7_GA1 EmptyFeat +#define default_GEN7_GA2 EmptyFeat +#define default_GEN7_GA3 EmptyFeat +#define default_GEN8_GA1 EmptyFeat +#define default_GEN8_GA2 EmptyFeat +#define default_GEN8_GA3 EmptyFeat +#define default_GEN8_GA4 EmptyFeat +#define default_GEN8_GA5 EmptyFeat + +static uint16_t default_GEN9_GA1[] = { + S390_FEAT_STORE_HYPERVISOR_INFO, + S390_FEAT_GROUP_MSA_EXT_1, + S390_FEAT_CMM, +}; + +#define default_GEN9_GA2 EmptyFeat +#define default_GEN9_GA3 EmptyFeat + +static uint16_t default_GEN10_GA1[] = { + S390_FEAT_EDAT, + S390_FEAT_GROUP_MSA_EXT_2, +}; + +#define default_GEN10_GA2 EmptyFeat +#define default_GEN10_GA3 EmptyFeat + +static uint16_t default_GEN11_GA1[] = { + S390_FEAT_GROUP_MSA_EXT_3, + S390_FEAT_IPTE_RANGE, + S390_FEAT_ACCESS_EXCEPTION_FS_INDICATION, + S390_FEAT_GROUP_MSA_EXT_4, + S390_FEAT_PPA15, + S390_FEAT_BPB, +}; + +#define default_GEN11_GA2 EmptyFeat + +static uint16_t default_GEN12_GA1[] = { + S390_FEAT_CONSTRAINT_TRANSACTIONAL_EXE, + S390_FEAT_TRANSACTIONAL_EXE, + S390_FEAT_RUNTIME_INSTRUMENTATION, + S390_FEAT_ZPCI, + S390_FEAT_ADAPTER_EVENT_NOTIFICATION, + S390_FEAT_EDAT_2, + S390_FEAT_ESOP, + S390_FEAT_SIDE_EFFECT_ACCESS_ESOP2, +}; + +#define default_GEN12_GA2 EmptyFeat + +static uint16_t default_GEN13_GA1[] = { + S390_FEAT_GROUP_MSA_EXT_5, + S390_FEAT_VECTOR, +}; + +#define default_GEN13_GA2 EmptyFeat + +static uint16_t default_GEN14_GA1[] = { + S390_FEAT_INSTRUCTION_EXEC_PROT, + S390_FEAT_GUARDED_STORAGE, + S390_FEAT_VECTOR_PACKED_DECIMAL, + S390_FEAT_VECTOR_ENH, + S390_FEAT_GROUP_MSA_EXT_6, + S390_FEAT_GROUP_MSA_EXT_7, + S390_FEAT_GROUP_MSA_EXT_8, + S390_FEAT_MULTIPLE_EPOCH, + S390_FEAT_GROUP_MULTIPLE_EPOCH_PTFF, +}; + +#define default_GEN14_GA2 EmptyFeat + +static uint16_t default_GEN15_GA1[] = { + S390_FEAT_VECTOR_ENH2, + S390_FEAT_GROUP_DEFLATE_CONVERSION, + S390_FEAT_VECTOR_PACKED_DECIMAL_ENH, + S390_FEAT_GROUP_MSA_EXT_9, + S390_FEAT_GROUP_MSA_EXT_9_PCKMO, + S390_FEAT_ETOKEN, +}; + +/* QEMU (CPU model) features */ + +static uint16_t qemu_V2_11[] = { + S390_FEAT_GROUP_PLO, + S390_FEAT_ESAN3, + S390_FEAT_ZARCH, +}; + +static uint16_t qemu_V3_1[] = { + S390_FEAT_DAT_ENH, + S390_FEAT_IDTE_SEGMENT, + S390_FEAT_STFLE, + S390_FEAT_SENSE_RUNNING_STATUS, + S390_FEAT_EXTENDED_TRANSLATION_2, + S390_FEAT_MSA, + S390_FEAT_LONG_DISPLACEMENT, + S390_FEAT_LONG_DISPLACEMENT_FAST, + S390_FEAT_EXTENDED_IMMEDIATE, + S390_FEAT_EXTENDED_TRANSLATION_3, + S390_FEAT_ETF2_ENH, + S390_FEAT_STORE_CLOCK_FAST, + S390_FEAT_MOVE_WITH_OPTIONAL_SPEC, + S390_FEAT_ETF3_ENH, + S390_FEAT_EXTRACT_CPU_TIME, + S390_FEAT_COMPARE_AND_SWAP_AND_STORE, + S390_FEAT_COMPARE_AND_SWAP_AND_STORE_2, + S390_FEAT_GENERAL_INSTRUCTIONS_EXT, + S390_FEAT_EXECUTE_EXT, + S390_FEAT_SET_PROGRAM_PARAMETERS, + S390_FEAT_FLOATING_POINT_SUPPPORT_ENH, + S390_FEAT_STFLE_45, + S390_FEAT_STFLE_49, + S390_FEAT_LOCAL_TLB_CLEARING, + S390_FEAT_INTERLOCKED_ACCESS_2, + S390_FEAT_ADAPTER_EVENT_NOTIFICATION, + S390_FEAT_ADAPTER_INT_SUPPRESSION, + S390_FEAT_MSA_EXT_3, + S390_FEAT_MSA_EXT_4, +}; + +static uint16_t qemu_V4_0[] = { + /* + * Only BFP bits are implemented (HFP, DFP, PFPO and DIVIDE TO INTEGER not + * implemented yet). 
+ */ + S390_FEAT_FLOATING_POINT_EXT, + S390_FEAT_ZPCI, +}; + +static uint16_t qemu_V4_1[] = { + S390_FEAT_STFLE_53, + S390_FEAT_VECTOR, +}; + +static uint16_t qemu_LATEST[] = { + S390_FEAT_ACCESS_EXCEPTION_FS_INDICATION, + S390_FEAT_SIDE_EFFECT_ACCESS_ESOP2, + S390_FEAT_ESOP, +}; + +/* add all new definitions before this point */ +static uint16_t qemu_MAX[] = { + /* generates a dependency warning, leave it out for now */ + S390_FEAT_MSA_EXT_5, + /* features introduced after the z13 */ + S390_FEAT_INSTRUCTION_EXEC_PROT, +}; + +/****** END FEATURE DEFS ******/ + +#define _YEARS "2016" +#define _NAME_H "TARGET_S390X_GEN_FEATURES_H" + +#define CPU_FEAT_INITIALIZER(_name) \ + { \ + .name = "S390_FEAT_LIST_" #_name, \ + .base_bits = \ + { .data = base_##_name, \ + .len = ARRAY_SIZE(base_##_name) }, \ + .default_bits = \ + { .data = default_##_name, \ + .len = ARRAY_SIZE(default_##_name) }, \ + .full_bits = \ + { .data = full_##_name, \ + .len = ARRAY_SIZE(full_##_name) }, \ + } + +typedef struct BitSpec { + uint16_t *data; + uint32_t len; +} BitSpec; + +typedef struct { + const char *name; + BitSpec base_bits; + BitSpec default_bits; + BitSpec full_bits; +} CpuFeatDefSpec; + +static uint16_t EmptyFeat[] = {}; + +/******************************* + * processor GA series + *******************************/ +static CpuFeatDefSpec CpuFeatDef[] = { + CPU_FEAT_INITIALIZER(GEN7_GA1), + CPU_FEAT_INITIALIZER(GEN7_GA2), + CPU_FEAT_INITIALIZER(GEN7_GA3), + CPU_FEAT_INITIALIZER(GEN8_GA1), + CPU_FEAT_INITIALIZER(GEN8_GA2), + CPU_FEAT_INITIALIZER(GEN8_GA3), + CPU_FEAT_INITIALIZER(GEN8_GA4), + CPU_FEAT_INITIALIZER(GEN8_GA5), + CPU_FEAT_INITIALIZER(GEN9_GA1), + CPU_FEAT_INITIALIZER(GEN9_GA2), + CPU_FEAT_INITIALIZER(GEN9_GA3), + CPU_FEAT_INITIALIZER(GEN10_GA1), + CPU_FEAT_INITIALIZER(GEN10_GA2), + CPU_FEAT_INITIALIZER(GEN10_GA3), + CPU_FEAT_INITIALIZER(GEN11_GA1), + CPU_FEAT_INITIALIZER(GEN11_GA2), + CPU_FEAT_INITIALIZER(GEN12_GA1), + CPU_FEAT_INITIALIZER(GEN12_GA2), + CPU_FEAT_INITIALIZER(GEN13_GA1), + CPU_FEAT_INITIALIZER(GEN13_GA2), + CPU_FEAT_INITIALIZER(GEN14_GA1), + CPU_FEAT_INITIALIZER(GEN14_GA2), + CPU_FEAT_INITIALIZER(GEN15_GA1), +}; + +#define FEAT_GROUP_INITIALIZER(_name) \ + { \ + .name = "S390_FEAT_GROUP_LIST_" #_name, \ + .enum_name = "S390_FEAT_GROUP_" #_name, \ + .bits = \ + { .data = group_##_name, \ + .len = ARRAY_SIZE(group_##_name) }, \ + } + +typedef struct { + const char *name; + const char *enum_name; + BitSpec bits; +} FeatGroupDefSpec; + +/******************************* + * feature groups + *******************************/ +static FeatGroupDefSpec FeatGroupDef[] = { + FEAT_GROUP_INITIALIZER(PLO), + FEAT_GROUP_INITIALIZER(TOD_CLOCK_STEERING), + FEAT_GROUP_INITIALIZER(GEN13_PTFF), + FEAT_GROUP_INITIALIZER(MSA), + FEAT_GROUP_INITIALIZER(MSA_EXT_1), + FEAT_GROUP_INITIALIZER(MSA_EXT_2), + FEAT_GROUP_INITIALIZER(MSA_EXT_3), + FEAT_GROUP_INITIALIZER(MSA_EXT_4), + FEAT_GROUP_INITIALIZER(MSA_EXT_5), + FEAT_GROUP_INITIALIZER(MSA_EXT_6), + FEAT_GROUP_INITIALIZER(MSA_EXT_7), + FEAT_GROUP_INITIALIZER(MSA_EXT_8), + FEAT_GROUP_INITIALIZER(MSA_EXT_9), + FEAT_GROUP_INITIALIZER(MSA_EXT_9_PCKMO), + FEAT_GROUP_INITIALIZER(MULTIPLE_EPOCH_PTFF), + FEAT_GROUP_INITIALIZER(ENH_SORT), + FEAT_GROUP_INITIALIZER(DEFLATE_CONVERSION), +}; + +#define QEMU_FEAT_INITIALIZER(_name) \ + { \ + .name = "S390_FEAT_LIST_QEMU_" #_name, \ + .bits = \ + { .data = qemu_##_name, \ + .len = ARRAY_SIZE(qemu_##_name) }, \ + } + +/******************************* + * QEMU (CPU model) features + *******************************/ +static 
FeatGroupDefSpec QemuFeatDef[] = { + QEMU_FEAT_INITIALIZER(V2_11), + QEMU_FEAT_INITIALIZER(V3_1), + QEMU_FEAT_INITIALIZER(V4_0), + QEMU_FEAT_INITIALIZER(V4_1), + QEMU_FEAT_INITIALIZER(LATEST), + QEMU_FEAT_INITIALIZER(MAX), +}; + + +static void set_bits(uint64_t list[], BitSpec bits) +{ + uint32_t i; + + for (i = 0; i < bits.len; i++) { + list[bits.data[i] / 64] |= 1ULL << (bits.data[i] % 64); + } +} + +static inline void clear_bit(uint64_t list[], unsigned long nr) +{ + list[nr / 64] &= ~(1ULL << (nr % 64)); +} + +static void print_feature_defs(void) +{ + uint64_t base_feat[S390_FEAT_MAX / 64 + 1] = {}; + uint64_t default_feat[S390_FEAT_MAX / 64 + 1] = {}; + uint64_t full_feat[S390_FEAT_MAX / 64 + 1] = {}; + int i, j; + + printf("\n/* CPU model feature list data */\n"); + + for (i = 0; i < ARRAY_SIZE(CpuFeatDef); i++) { + /* With gen15 CSSKE and BPB are deprecated */ + if (strcmp(CpuFeatDef[i].name, "S390_FEAT_LIST_GEN15_GA1") == 0) { + clear_bit(base_feat, S390_FEAT_CONDITIONAL_SSKE); + clear_bit(default_feat, S390_FEAT_CONDITIONAL_SSKE); + clear_bit(default_feat, S390_FEAT_BPB); + } + set_bits(base_feat, CpuFeatDef[i].base_bits); + /* add the base to the default features */ + set_bits(default_feat, CpuFeatDef[i].base_bits); + set_bits(default_feat, CpuFeatDef[i].default_bits); + /* add the base to the full features */ + set_bits(full_feat, CpuFeatDef[i].base_bits); + set_bits(full_feat, CpuFeatDef[i].full_bits); + + printf("#define %s_BASE\t", CpuFeatDef[i].name); + for (j = 0; j < ARRAY_SIZE(base_feat); j++) { + printf("0x%016"PRIx64"ULL", base_feat[j]); + if (j < ARRAY_SIZE(base_feat) - 1) { + printf(","); + } else { + printf("\n"); + } + } + printf("#define %s_DEFAULT\t", CpuFeatDef[i].name); + for (j = 0; j < ARRAY_SIZE(default_feat); j++) { + printf("0x%016"PRIx64"ULL", default_feat[j]); + if (j < ARRAY_SIZE(default_feat) - 1) { + printf(","); + } else { + printf("\n"); + } + } + printf("#define %s_FULL\t\t", CpuFeatDef[i].name); + for (j = 0; j < ARRAY_SIZE(full_feat); j++) { + printf("0x%016"PRIx64"ULL", full_feat[j]); + if (j < ARRAY_SIZE(full_feat) - 1) { + printf(","); + } else { + printf("\n"); + } + } + } +} + +static void print_qemu_feature_defs(void) +{ + uint64_t feat[S390_FEAT_MAX / 64 + 1] = {}; + int i, j; + + printf("\n/* QEMU (CPU model) feature list data */\n"); + + /* for now we assume that we only add new features */ + for (i = 0; i < ARRAY_SIZE(QemuFeatDef); i++) { + set_bits(feat, QemuFeatDef[i].bits); + + printf("#define %s\t", QemuFeatDef[i].name); + for (j = 0; j < ARRAY_SIZE(feat); j++) { + printf("0x%016"PRIx64"ULL", feat[j]); + if (j < ARRAY_SIZE(feat) - 1) { + printf(","); + } else { + printf("\n"); + } + } + } +} + +static void print_feature_group_defs(void) +{ + int i, j; + + printf("\n/* CPU feature group list data */\n"); + + for (i = 0; i < ARRAY_SIZE(FeatGroupDef); i++) { + uint64_t feat[S390_FEAT_MAX / 64 + 1] = {}; + + set_bits(feat, FeatGroupDef[i].bits); + printf("#define %s\t", FeatGroupDef[i].name); + for (j = 0; j < ARRAY_SIZE(feat); j++) { + printf("0x%016"PRIx64"ULL", feat[j]); + if (j < ARRAY_SIZE(feat) - 1) { + printf(","); + } else { + printf("\n"); + } + } + } +} + +static void print_feature_group_enum_type(void) +{ + int i; + + printf("\n/* CPU feature group enum type */\n" + "typedef enum {\n"); + for (i = 0; i < ARRAY_SIZE(FeatGroupDef); i++) { + printf("\t%s,\n", FeatGroupDef[i].enum_name); + } + printf("\tS390_FEAT_GROUP_MAX,\n" + "} S390FeatGroup;\n"); +} + +int main(int argc, char *argv[]) +{ + printf("/*\n" + " * AUTOMATICALLY 
GENERATED, DO NOT MODIFY HERE, EDIT\n" + " * SOURCE FILE \"%s\" INSTEAD.\n" + " *\n" + " * Copyright %s IBM Corp.\n" + " *\n" + " * This work is licensed under the terms of the GNU GPL, " + "version 2 or (at\n * your option) any later version. See " + "the COPYING file in the top-level\n * directory.\n" + " */\n\n" + "#ifndef %s\n#define %s\n", __FILE__, _YEARS, _NAME_H, _NAME_H); + print_feature_defs(); + print_feature_group_defs(); + print_qemu_feature_defs(); + print_feature_group_enum_type(); + printf("\n#endif\n"); + return 0; +} diff --git a/qemu/target/s390x/gen-features.h b/qemu/target/s390x/gen-features.h new file mode 100644 index 00000000..2bd117f8 --- /dev/null +++ b/qemu/target/s390x/gen-features.h @@ -0,0 +1,135 @@ +/* + * AUTOMATICALLY GENERATED, DO NOT MODIFY HERE, EDIT + * SOURCE FILE "/home/me/projects/qemu/qemu-5.0.1/target/s390x/gen-features.c" INSTEAD. + * + * Copyright 2016 IBM Corp. + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. + */ + +#ifndef TARGET_S390X_GEN_FEATURES_H +#define TARGET_S390X_GEN_FEATURES_H + +/* CPU model feature list data */ +#define S390_FEAT_LIST_GEN7_GA1_BASE 0x0000000000000003ULL,0xfffffe0000000000ULL,0x0000000000000001ULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN7_GA1_DEFAULT 0x0000000000000003ULL,0xfffffe0000000000ULL,0x0000000000000001ULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN7_GA1_FULL 0x0000000000000003ULL,0xfffffe3380000030ULL,0x0000000000000001ULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN7_GA2_BASE 0x0000000000000003ULL,0xfffffe0000000000ULL,0x0000000000000001ULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN7_GA2_DEFAULT 0x0000000000000003ULL,0xfffffe0000000000ULL,0x0000000000000001ULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN7_GA2_FULL 0x0000000000008003ULL,0xfffffe3380000030ULL,0x0000000000000001ULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN7_GA3_BASE 0x0000000000000003ULL,0xfffffe0000000000ULL,0x0000000000000001ULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN7_GA3_DEFAULT 0x0000000000000003ULL,0xfffffe0000000000ULL,0x0000000000000001ULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN7_GA3_FULL 0x0000000000028003ULL,0xfffffe3780000030ULL,0x0000000000000001ULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN8_GA1_BASE 0x00000000000f8007ULL,0xfffffe0000000000ULL,0x802000e007007001ULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN8_GA1_DEFAULT 0x00000000000f8007ULL,0xfffffe0000000000ULL,0x802000e007007001ULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN8_GA1_FULL 0x00000000000f8007ULL,0xfffffe3788800030ULL,0x802000e007007001ULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN8_GA2_BASE 0x00000000000f8007ULL,0xfffffe0000000000ULL,0x802000e007007001ULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN8_GA2_DEFAULT 0x00000000000f8007ULL,0xfffffe0000000000ULL,0x802000e007007001ULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN8_GA2_FULL 0x00000000000f8007ULL,0xfffffe3788800030ULL,0x802000e007007001ULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN8_GA3_BASE 
0x00000000000f8007ULL,0xfffffe0000000000ULL,0x802000e007007001ULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN8_GA3_DEFAULT 0x00000000000f8007ULL,0xfffffe0000000000ULL,0x802000e007007001ULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN8_GA3_FULL 0x00000000002f8027ULL,0xfffffe3788800030ULL,0x802000e007007001ULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN8_GA4_BASE 0x00000000000f8007ULL,0xfffffe0000000000ULL,0x802000e007007001ULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN8_GA4_DEFAULT 0x00000000000f8007ULL,0xfffffe0000000000ULL,0x802000e007007001ULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN8_GA4_FULL 0x00000000002f8027ULL,0xfffffe3788800030ULL,0x802000e007007001ULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN8_GA5_BASE 0x00000000000f8007ULL,0xfffffe0000000000ULL,0x802000e007007001ULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN8_GA5_DEFAULT 0x00000000000f8007ULL,0xfffffe0000000000ULL,0x802000e007007001ULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN8_GA5_FULL 0x00000000002f8027ULL,0xfffffe3788800030ULL,0x802000e007007001ULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN9_GA1_BASE 0x0000000019ff816fULL,0xfffffe4000000000ULL,0x802000e00700710fULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN9_GA1_DEFAULT 0x4000000019ff816fULL,0xfffffec000000000ULL,0x806008e04700710fULL,0x0000000000000001ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN9_GA1_FULL 0x4000000019ff816fULL,0xfffffef798800030ULL,0x806008e04700710fULL,0x0000000000000001ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN9_GA2_BASE 0x0000000019ff816fULL,0xfffffe4000000000ULL,0x802000e00700710fULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN9_GA2_DEFAULT 0x4000000019ff816fULL,0xfffffec000000000ULL,0x806008e04700710fULL,0x0000000000000001ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN9_GA2_FULL 0x400000c07dff816fULL,0xfffffef798800030ULL,0x806008e04700710fULL,0x0000000000000001ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN9_GA3_BASE 0x0000000019ff816fULL,0xfffffe4000000000ULL,0x802000e00700710fULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN9_GA3_DEFAULT 0x4000000019ff816fULL,0xfffffec000000000ULL,0x806008e04700710fULL,0x0000000000000001ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN9_GA3_FULL 0x400002c07dff836fULL,0xfffffef798800030ULL,0x806008e04700710fULL,0x0000000000000001ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN10_GA1_BASE 0x000003c3ffff836fULL,0xfffffe4000000000ULL,0x802000e00700710fULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN10_GA1_DEFAULT 0x400003c3ffff83efULL,0xfffffec000000000ULL,0x80e038f1c700710fULL,0x0000000000000003ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN10_GA1_FULL 0x400003c3ffff87efULL,0xfffffeffb9800030ULL,0x80e038f1c700710fULL,0x0000000000000003ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN10_GA2_BASE 0x000003c3ffff836fULL,0xfffffe4000000000ULL,0x802000e00700710fULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN10_GA2_DEFAULT 0x400003c3ffff83efULL,0xfffffec000000000ULL,0x80e038f1c700710fULL,0x0000000000000003ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN10_GA2_FULL 0x400003e3ffff87efULL,0xfffffefff9800030ULL,0x80e038f1c700710fULL,0x0000000000000003ULL,0x0000000000000000ULL +#define 
S390_FEAT_LIST_GEN10_GA3_BASE 0x000003c3ffff836fULL,0xfffffe4000000000ULL,0x802000e00700710fULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN10_GA3_DEFAULT 0x400003c3ffff83efULL,0xfffffec000000000ULL,0x80e038f1c700710fULL,0x0000000000000003ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN10_GA3_FULL 0x400003e3ffff87efULL,0xfffffefff9800031ULL,0x80e1ffffff03f10fULL,0x0000000000003f03ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN11_GA1_BASE 0x00010fefffffa36fULL,0xfffffe4000000000ULL,0x802000e00700710fULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN11_GA1_DEFAULT 0xc0010fefffffb3efULL,0xfffffec000000033ULL,0xc0fffffffffff10fULL,0xfffffffffff83f03ULL,0x000000000000007fULL +#define S390_FEAT_LIST_GEN11_GA1_FULL 0xc0010fefffffb7efULL,0xfffffefff9800033ULL,0xc0fffffffffff10fULL,0xfffffffffff83f03ULL,0x000000000000007fULL +#define S390_FEAT_LIST_GEN11_GA2_BASE 0x00010fefffffa36fULL,0xfffffe4000000000ULL,0x802000e00700710fULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN11_GA2_DEFAULT 0xc0010fefffffb3efULL,0xfffffec000000033ULL,0xc0fffffffffff10fULL,0xfffffffffff83f03ULL,0x000000000000007fULL +#define S390_FEAT_LIST_GEN11_GA2_FULL 0xc0010fefffffb7efULL,0xfffffefff9800033ULL,0xc0fffffffffff10fULL,0xfffffffffff83f03ULL,0x000000000000007fULL +#define S390_FEAT_LIST_GEN12_GA1_BASE 0x0001bfefffffa36fULL,0xfffffe4000000000ULL,0x802000e00700710fULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN12_GA1_DEFAULT 0xed01ffefffffb3efULL,0xfffffec001000137ULL,0xc0fffffffffff10fULL,0xfffffffffff83f03ULL,0x000000000000007fULL +#define S390_FEAT_LIST_GEN12_GA1_FULL 0xff01ffefffffffefULL,0xfffffffff9800137ULL,0xc0fffffffffff10fULL,0xfffffffffff83f03ULL,0x000000000000007fULL +#define S390_FEAT_LIST_GEN12_GA2_BASE 0x0001bfefffffa36fULL,0xfffffe4000000000ULL,0x802000e00700710fULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN12_GA2_DEFAULT 0xed01ffefffffb3efULL,0xfffffec001000137ULL,0xc0fffffffffff10fULL,0xfffffffffff83f03ULL,0x000000000000007fULL +#define S390_FEAT_LIST_GEN12_GA2_FULL 0xff09ffefffffffefULL,0xfffffffff9800137ULL,0xc0fffffffffff10fULL,0xfffffffffff83f03ULL,0x000000000000407fULL +#define S390_FEAT_LIST_GEN13_GA1_BASE 0x0003bfefffffa36fULL,0xfffffe4000000008ULL,0x802000e00700733fULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN13_GA1_DEFAULT 0xed0bffefffffb3efULL,0xfffffec00100017fULL,0xc0fffffffffff33fULL,0xfffffffffff83f03ULL,0x000000000000407fULL +#define S390_FEAT_LIST_GEN13_GA1_FULL 0xff0bffefffffffefULL,0xfffffffff980017fULL,0xc0fffffffffff33fULL,0xfffffffffff83f03ULL,0x000000000000407fULL +#define S390_FEAT_LIST_GEN13_GA2_BASE 0x0003bfefffffa36fULL,0xfffffe4000000008ULL,0x802000e00700733fULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN13_GA2_DEFAULT 0xed0bffefffffb3efULL,0xfffffec00100017fULL,0xc0fffffffffff33fULL,0xfffffffffff83f03ULL,0x000000000000407fULL +#define S390_FEAT_LIST_GEN13_GA2_FULL 0xff0bffefffffffefULL,0xfffffffff980017fULL,0xc0fffffffffff33fULL,0xfffffffffff83f03ULL,0x000000000000407fULL +#define S390_FEAT_LIST_GEN14_GA1_BASE 0x0077bfffffffa36fULL,0xfffffe4000000008ULL,0x802000e00700733fULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN14_GA1_DEFAULT 0xed7fffffffffb3efULL,0xfffffec001009fffULL,0xffffffffffffffffULL,0xfffffffffff83fffULL,0x00000000007fc07fULL +#define S390_FEAT_LIST_GEN14_GA1_FULL 
0xff7fffffffffffefULL,0xffffffffffc1ffffULL,0xffffffffffffffffULL,0xfffffffffff83fffULL,0x00000000007fc07fULL +#define S390_FEAT_LIST_GEN14_GA2_BASE 0x0077bfffffffa36fULL,0xfffffe4000000008ULL,0x802000e00700733fULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN14_GA2_DEFAULT 0xed7fffffffffb3efULL,0xfffffec001009fffULL,0xffffffffffffffffULL,0xfffffffffff83fffULL,0x00000000007fc07fULL +#define S390_FEAT_LIST_GEN14_GA2_FULL 0xff7fffffffffffefULL,0xffffffffffc1ffffULL,0xffffffffffffffffULL,0xfffffffffff83fffULL,0x00000000007fc07fULL +#define S390_FEAT_LIST_GEN15_GA1_BASE 0x00f7bfffffffa16fULL,0xfffffe4000000008ULL,0x802000e00700733fULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_LIST_GEN15_GA1_DEFAULT 0xedffffffffffb1efULL,0xfffffec0017a9fdfULL,0xffffffffffffffffULL,0xffffffffffffffffULL,0x0000783fffffffffULL +#define S390_FEAT_LIST_GEN15_GA1_FULL 0xffffffffffffffefULL,0xffffffffffffffffULL,0xffffffffffffffffULL,0xffffffffffffffffULL,0x00007fffffffffffULL + +/* CPU feature group list data */ +#define S390_FEAT_GROUP_LIST_PLO 0x0000000000000000ULL,0xfffffe0000000000ULL,0x0000000000000001ULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_GROUP_LIST_TOD_CLOCK_STEERING 0x0000000008000000ULL,0x0000000000000000ULL,0x000000000000010eULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_GROUP_LIST_GEN13_PTFF 0x0000000000000000ULL,0x0000000000000000ULL,0x0000000000000230ULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_GROUP_LIST_MSA 0x0000000000010000ULL,0x0000000000000000ULL,0x802000e007007000ULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_GROUP_LIST_MSA_EXT_1 0x0000000000000000ULL,0x0000000000000000ULL,0x0040080040000000ULL,0x0000000000000001ULL,0x0000000000000000ULL +#define S390_FEAT_GROUP_LIST_MSA_EXT_2 0x0000000000000000ULL,0x0000000000000000ULL,0x0080301180000000ULL,0x0000000000000002ULL,0x0000000000000000ULL +#define S390_FEAT_GROUP_LIST_MSA_EXT_3 0x0000000000000000ULL,0x0000000000000001ULL,0x0001c70e38038000ULL,0x0000000000003f00ULL,0x0000000000000000ULL +#define S390_FEAT_GROUP_LIST_MSA_EXT_4 0x0000000000000000ULL,0x0000000000000002ULL,0x401e000000fc0000ULL,0xfffffffffff80000ULL,0x000000000000007fULL +#define S390_FEAT_GROUP_LIST_MSA_EXT_5 0x0008000000000000ULL,0x0000000000000000ULL,0x0000000000000000ULL,0x0000000000000000ULL,0x0000000000004000ULL +#define S390_FEAT_GROUP_LIST_MSA_EXT_6 0x0000000000000000ULL,0x0000000000000000ULL,0x3f00000000000000ULL,0x00000000000000fcULL,0x0000000000000000ULL +#define S390_FEAT_GROUP_LIST_MSA_EXT_7 0x0000000000000000ULL,0x0000000000000000ULL,0x0000000000000000ULL,0x0000000000000000ULL,0x0000000000018000ULL +#define S390_FEAT_GROUP_LIST_MSA_EXT_8 0x0000000000000000ULL,0x0000000000008000ULL,0x0000000000000000ULL,0x0000000000000000ULL,0x00000000007e0000ULL +#define S390_FEAT_GROUP_LIST_MSA_EXT_9 0x0000000000000000ULL,0x0000000000200000ULL,0x0000000000000000ULL,0x0000000000000000ULL,0x0000003fff803f80ULL +#define S390_FEAT_GROUP_LIST_MSA_EXT_9_PCKMO 0x0000000000000000ULL,0x0000000000000000ULL,0x0000000000000000ULL,0x000000000007c000ULL,0x0000000000000000ULL +#define S390_FEAT_GROUP_LIST_MULTIPLE_EPOCH_PTFF 0x0000000000000000ULL,0x0000000000000000ULL,0x0000000000000cc0ULL,0x0000000000000000ULL,0x0000000000000000ULL +#define S390_FEAT_GROUP_LIST_ENH_SORT 0x0000000000000000ULL,0x0000000000040000ULL,0x0000000000000000ULL,0x0000000000000000ULL,0x000007c000000000ULL +#define S390_FEAT_GROUP_LIST_DEFLATE_CONVERSION 
0x0000000000000000ULL,0x0000000000080000ULL,0x0000000000000000ULL,0x0000000000000000ULL,0x0000780000000000ULL
+
+/* QEMU (CPU model) feature list data */
+#define S390_FEAT_LIST_QEMU_V2_11 0x0000000000000003ULL,0xfffffe0000000000ULL,0x0000000000000001ULL,0x0000000000000000ULL,0x0000000000000000ULL
+#define S390_FEAT_LIST_QEMU_V3_1 0x1801a463f5b7814fULL,0xfffffe0000000003ULL,0x0000000000000001ULL,0x0000000000000000ULL,0x0000000000000000ULL
+#define S390_FEAT_LIST_QEMU_V4_0 0x1c01a46bf5b7814fULL,0xfffffe0000000003ULL,0x0000000000000001ULL,0x0000000000000000ULL,0x0000000000000000ULL
+#define S390_FEAT_LIST_QEMU_V4_1 0x1c03a46bf5b7814fULL,0xfffffe0000000043ULL,0x0000000000000001ULL,0x0000000000000000ULL,0x0000000000000000ULL
+#define S390_FEAT_LIST_QEMU_LATEST 0x9c03a46bf5b7814fULL,0xfffffe0001000143ULL,0x0000000000000001ULL,0x0000000000000000ULL,0x0000000000000000ULL
+#define S390_FEAT_LIST_QEMU_MAX 0x9c0ba46bf5b7814fULL,0xfffffe00010001c3ULL,0x0000000000000001ULL,0x0000000000000000ULL,0x0000000000000000ULL
+
+/* CPU feature group enum type */
+typedef enum {
+    S390_FEAT_GROUP_PLO,
+    S390_FEAT_GROUP_TOD_CLOCK_STEERING,
+    S390_FEAT_GROUP_GEN13_PTFF,
+    S390_FEAT_GROUP_MSA,
+    S390_FEAT_GROUP_MSA_EXT_1,
+    S390_FEAT_GROUP_MSA_EXT_2,
+    S390_FEAT_GROUP_MSA_EXT_3,
+    S390_FEAT_GROUP_MSA_EXT_4,
+    S390_FEAT_GROUP_MSA_EXT_5,
+    S390_FEAT_GROUP_MSA_EXT_6,
+    S390_FEAT_GROUP_MSA_EXT_7,
+    S390_FEAT_GROUP_MSA_EXT_8,
+    S390_FEAT_GROUP_MSA_EXT_9,
+    S390_FEAT_GROUP_MSA_EXT_9_PCKMO,
+    S390_FEAT_GROUP_MULTIPLE_EPOCH_PTFF,
+    S390_FEAT_GROUP_ENH_SORT,
+    S390_FEAT_GROUP_DEFLATE_CONVERSION,
+    S390_FEAT_GROUP_MAX,
+} S390FeatGroup;
+
+#endif
diff --git a/qemu/target/s390x/helper.c b/qemu/target/s390x/helper.c
new file mode 100644
index 00000000..da06c624
--- /dev/null
+++ b/qemu/target/s390x/helper.c
@@ -0,0 +1,358 @@
+/*
+ * S/390 helpers
+ *
+ * Copyright (c) 2009 Ulrich Hecht
+ * Copyright (c) 2011 Alexander Graf
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internal.h"
+#include "qemu/timer.h"
+#include "hw/s390x/ioinst.h"
+#include "sysemu/tcg.h"
+
+void s390x_tod_timer(void *opaque)
+{
+    cpu_inject_clock_comparator((S390CPU *) opaque);
+}
+
+void s390x_cpu_timer(void *opaque)
+{
+    cpu_inject_cpu_timer((S390CPU *) opaque);
+}
+
+hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
+{
+    S390CPU *cpu = S390_CPU(cs);
+    CPUS390XState *env = &cpu->env;
+    target_ulong raddr;
+    int prot;
+    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
+    uint64_t tec;
+
+    /* 31-Bit mode */
+    if (!(env->psw.mask & PSW_MASK_64)) {
+        vaddr &= 0x7fffffff;
+    }
+
+    /* We want to read the code (e.g., see what we are single-stepping).*/
+    if (asc != PSW_ASC_HOME) {
+        asc = PSW_ASC_PRIMARY;
+    }
+
+    /*
+     * We want to read code even if IEP is active. Use MMU_DATA_LOAD instead
+     * of MMU_INST_FETCH.
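+     * (IEP, the instruction-execution-protection facility, can mark
+     * pages as no-execute; an MMU_INST_FETCH translation of such a page
+     * would fail, and this debug accessor must not.)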
+     */
+    if (mmu_translate(env, vaddr, MMU_DATA_LOAD, asc, &raddr, &prot, &tec)) {
+        return -1;
+    }
+    return raddr;
+}
+
+hwaddr s390_cpu_get_phys_addr_debug(CPUState *cs, vaddr vaddr)
+{
+    hwaddr phys_addr;
+    target_ulong page;
+
+    page = vaddr & TARGET_PAGE_MASK;
+    phys_addr = cpu_get_phys_page_debug(cs, page);
+    phys_addr += (vaddr & ~TARGET_PAGE_MASK);
+
+    return phys_addr;
+}
+
+static inline bool is_special_wait_psw(uint64_t psw_addr)
+{
+    /* signal quiesce */
+    return (psw_addr & 0xfffUL) == 0xfffUL;
+}
+
+void s390_handle_wait(S390CPU *cpu)
+{
+#if 0
+    CPUState *cs = CPU(cpu);
+
+    if (s390_cpu_halt(cpu) == 0) {
+        if (is_special_wait_psw(cpu->env.psw.addr)) {
+            // qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
+        } else {
+            cpu->env.crash_reason = S390_CRASH_REASON_DISABLED_WAIT;
+            qemu_system_guest_panicked(cpu_get_crash_info(cs));
+        }
+    }
+#endif
+}
+
+void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
+{
+    uint64_t old_mask = env->psw.mask;
+
+    env->psw.addr = addr;
+    env->psw.mask = mask;
+
+    env->cc_op = (mask >> 44) & 3;
+
+    if ((old_mask ^ mask) & PSW_MASK_PER) {
+        s390_cpu_recompute_watchpoints(env_cpu(env));
+    }
+
+    if (mask & PSW_MASK_WAIT) {
+        s390_handle_wait(env_archcpu(env));
+    }
+}
+
+uint64_t get_psw_mask(CPUS390XState *env)
+{
+    uint64_t r = env->psw.mask;
+
+    env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
+                         env->cc_vr);
+
+    r &= ~PSW_MASK_CC;
+    assert(!(env->cc_op & ~3));
+    r |= (uint64_t)env->cc_op << 44;
+
+    return r;
+}
+
+LowCore *cpu_map_lowcore(CPUS390XState *env)
+{
+    LowCore *lowcore;
+    hwaddr len = sizeof(LowCore);
+
+    lowcore = cpu_physical_memory_map(env_cpu(env)->as, env->psa, &len, true);
+
+    if (len < sizeof(LowCore)) {
+        cpu_abort(env_cpu(env), "Could not map lowcore\n");
+    }
+
+    return lowcore;
+}
+
+void cpu_unmap_lowcore(CPUS390XState *env, LowCore *lowcore)
+{
+    cpu_physical_memory_unmap(env_cpu(env)->as, lowcore, sizeof(LowCore), 1, sizeof(LowCore));
+}
+
+void do_restart_interrupt(CPUS390XState *env)
+{
+    uint64_t mask, addr;
+    LowCore *lowcore;
+
+    lowcore = cpu_map_lowcore(env);
+
+    lowcore->restart_old_psw.mask = cpu_to_be64(get_psw_mask(env));
+    lowcore->restart_old_psw.addr = cpu_to_be64(env->psw.addr);
+    mask = be64_to_cpu(lowcore->restart_new_psw.mask);
+    addr = be64_to_cpu(lowcore->restart_new_psw.addr);
+
+    cpu_unmap_lowcore(env, lowcore);
+    env->pending_int &= ~INTERRUPT_RESTART;
+
+    load_psw(env, mask, addr);
+}
+
+void s390_cpu_recompute_watchpoints(CPUState *cs)
+{
+    const int wp_flags = BP_CPU | BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS;
+    S390CPU *cpu = S390_CPU(cs);
+    CPUS390XState *env = &cpu->env;
+
+    /* We are called when the watchpoints have changed. First
+       remove them all. */
+    cpu_watchpoint_remove_all(cs, BP_CPU);
+
+    /* Return if PER is not enabled */
+    if (!(env->psw.mask & PSW_MASK_PER)) {
+        return;
+    }
+
+    /* Return if storage-alteration event is not enabled. */
+    if (!(env->cregs[9] & PER_CR9_EVENT_STORE)) {
+        return;
+    }
+
+    if (env->cregs[10] == 0 && env->cregs[11] == -1LL) {
+        /* We can't create a watchpoint spanning the whole memory range, so
+           split it in two parts. */
+        cpu_watchpoint_insert(cs, 0, 1ULL << 63, wp_flags, NULL);
+        cpu_watchpoint_insert(cs, 1ULL << 63, 1ULL << 63, wp_flags, NULL);
+    } else if (env->cregs[10] > env->cregs[11]) {
+        /* The address range loops, create two watchpoints.
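+           E.g. CR10 = 0xffff0000 > CR11 = 0x0000ffff watches the two
+           wrapped ranges [0xffff0000, 2^64) and [0, 0x10000).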
+           */
+        cpu_watchpoint_insert(cs, env->cregs[10], -env->cregs[10],
+                              wp_flags, NULL);
+        cpu_watchpoint_insert(cs, 0, env->cregs[11] + 1, wp_flags, NULL);
+
+    } else {
+        /* Default case, create a single watchpoint. */
+        cpu_watchpoint_insert(cs, env->cregs[10],
+                              env->cregs[11] - env->cregs[10] + 1,
+                              wp_flags, NULL);
+    }
+}
+
+typedef struct SigpSaveArea {
+    uint64_t    fprs[16];                       /* 0x0000 */
+    uint64_t    grs[16];                        /* 0x0080 */
+    PSW         psw;                            /* 0x0100 */
+    uint8_t     pad_0x0110[0x0118 - 0x0110];    /* 0x0110 */
+    uint32_t    prefix;                         /* 0x0118 */
+    uint32_t    fpc;                            /* 0x011c */
+    uint8_t     pad_0x0120[0x0124 - 0x0120];    /* 0x0120 */
+    uint32_t    todpr;                          /* 0x0124 */
+    uint64_t    cputm;                          /* 0x0128 */
+    uint64_t    ckc;                            /* 0x0130 */
+    uint8_t     pad_0x0138[0x0140 - 0x0138];    /* 0x0138 */
+    uint32_t    ars[16];                        /* 0x0140 */
+    uint64_t    crs[16];                        /* 0x0180 */
+} SigpSaveArea;
+QEMU_BUILD_BUG_ON(sizeof(SigpSaveArea) != 512);
+
+int s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch)
+{
+    static const uint8_t ar_id = 1;
+    SigpSaveArea *sa;
+    hwaddr len = sizeof(*sa);
+    int i;
+
+    sa = cpu_physical_memory_map(CPU(cpu)->as, addr, &len, true);
+    if (!sa) {
+        return -EFAULT;
+    }
+    if (len != sizeof(*sa)) {
+        cpu_physical_memory_unmap(CPU(cpu)->as, sa, len, 1, 0);
+        return -EFAULT;
+    }
+
+    if (store_arch) {
+        cpu_physical_memory_write(CPU(cpu)->as, offsetof(LowCore, ar_access_id), &ar_id, 1);
+    }
+    for (i = 0; i < 16; ++i) {
+        sa->fprs[i] = cpu_to_be64(*get_freg(&cpu->env, i));
+    }
+    for (i = 0; i < 16; ++i) {
+        sa->grs[i] = cpu_to_be64(cpu->env.regs[i]);
+    }
+    sa->psw.addr = cpu_to_be64(cpu->env.psw.addr);
+    sa->psw.mask = cpu_to_be64(get_psw_mask(&cpu->env));
+    sa->prefix = cpu_to_be32(cpu->env.psa);
+    sa->fpc = cpu_to_be32(cpu->env.fpc);
+    sa->todpr = cpu_to_be32(cpu->env.todpr);
+    sa->cputm = cpu_to_be64(cpu->env.cputm);
+    sa->ckc = cpu_to_be64(cpu->env.ckc >> 8);
+    for (i = 0; i < 16; ++i) {
+        sa->ars[i] = cpu_to_be32(cpu->env.aregs[i]);
+    }
+    for (i = 0; i < 16; ++i) {
+        sa->crs[i] = cpu_to_be64(cpu->env.cregs[i]);
+    }
+
+    cpu_physical_memory_unmap(CPU(cpu)->as, sa, len, 1, len);
+
+    return 0;
+}
+
+typedef struct SigpAdtlSaveArea {
+    uint64_t    vregs[32][2];                       /* 0x0000 */
+    uint8_t     pad_0x0200[0x0400 - 0x0200];        /* 0x0200 */
+    uint64_t    gscb[4];                            /* 0x0400 */
+    uint8_t     pad_0x0420[0x1000 - 0x0420];        /* 0x0420 */
+} SigpAdtlSaveArea;
+QEMU_BUILD_BUG_ON(sizeof(SigpAdtlSaveArea) != 4096);
+
+#define ADTL_GS_MIN_SIZE 2048 /* minimal size of adtl save area for GS */
+int s390_store_adtl_status(S390CPU *cpu, hwaddr addr, hwaddr len)
+{
+    SigpAdtlSaveArea *sa;
+    hwaddr save = len;
+    int i;
+
+    sa = cpu_physical_memory_map(CPU(cpu)->as, addr, &save, true);
+    if (!sa) {
+        return -EFAULT;
+    }
+    if (save != len) {
+        cpu_physical_memory_unmap(CPU(cpu)->as, sa, len, 1, 0);
+        return -EFAULT;
+    }
+
+    if (s390_has_feat(cpu->env.uc, S390_FEAT_VECTOR)) {
+        for (i = 0; i < 32; i++) {
+            sa->vregs[i][0] = cpu_to_be64(cpu->env.vregs[i][0]);
+            sa->vregs[i][1] = cpu_to_be64(cpu->env.vregs[i][1]);
+        }
+    }
+    if (s390_has_feat(cpu->env.uc, S390_FEAT_GUARDED_STORAGE) && len >= ADTL_GS_MIN_SIZE) {
+        for (i = 0; i < 4; i++) {
+            sa->gscb[i] = cpu_to_be64(cpu->env.gscb[i]);
+        }
+    }
+
+    cpu_physical_memory_unmap(CPU(cpu)->as, sa, len, 1, len);
+    return 0;
+}
+
+const char *cc_name(enum cc_op cc_op)
+{
+    static const char * const cc_names[] = {
+        [CC_OP_CONST0]    = "CC_OP_CONST0",
+        [CC_OP_CONST1]    = "CC_OP_CONST1",
+        [CC_OP_CONST2]    = "CC_OP_CONST2",
+        [CC_OP_CONST3]    = "CC_OP_CONST3",
+        [CC_OP_DYNAMIC]   = "CC_OP_DYNAMIC",
+        [CC_OP_STATIC]    = "CC_OP_STATIC",
+        [CC_OP_NZ]        = 
"CC_OP_NZ", + [CC_OP_LTGT_32] = "CC_OP_LTGT_32", + [CC_OP_LTGT_64] = "CC_OP_LTGT_64", + [CC_OP_LTUGTU_32] = "CC_OP_LTUGTU_32", + [CC_OP_LTUGTU_64] = "CC_OP_LTUGTU_64", + [CC_OP_LTGT0_32] = "CC_OP_LTGT0_32", + [CC_OP_LTGT0_64] = "CC_OP_LTGT0_64", + [CC_OP_ADD_64] = "CC_OP_ADD_64", + [CC_OP_ADDU_64] = "CC_OP_ADDU_64", + [CC_OP_ADDC_64] = "CC_OP_ADDC_64", + [CC_OP_SUB_64] = "CC_OP_SUB_64", + [CC_OP_SUBU_64] = "CC_OP_SUBU_64", + [CC_OP_SUBB_64] = "CC_OP_SUBB_64", + [CC_OP_ABS_64] = "CC_OP_ABS_64", + [CC_OP_NABS_64] = "CC_OP_NABS_64", + [CC_OP_ADD_32] = "CC_OP_ADD_32", + [CC_OP_ADDU_32] = "CC_OP_ADDU_32", + [CC_OP_ADDC_32] = "CC_OP_ADDC_32", + [CC_OP_SUB_32] = "CC_OP_SUB_32", + [CC_OP_SUBU_32] = "CC_OP_SUBU_32", + [CC_OP_SUBB_32] = "CC_OP_SUBB_32", + [CC_OP_ABS_32] = "CC_OP_ABS_32", + [CC_OP_NABS_32] = "CC_OP_NABS_32", + [CC_OP_COMP_32] = "CC_OP_COMP_32", + [CC_OP_COMP_64] = "CC_OP_COMP_64", + [CC_OP_TM_32] = "CC_OP_TM_32", + [CC_OP_TM_64] = "CC_OP_TM_64", + [CC_OP_NZ_F32] = "CC_OP_NZ_F32", + [CC_OP_NZ_F64] = "CC_OP_NZ_F64", + [CC_OP_NZ_F128] = "CC_OP_NZ_F128", + [CC_OP_ICM] = "CC_OP_ICM", + [CC_OP_SLA_32] = "CC_OP_SLA_32", + [CC_OP_SLA_64] = "CC_OP_SLA_64", + [CC_OP_FLOGR] = "CC_OP_FLOGR", + [CC_OP_LCBB] = "CC_OP_LCBB", + [CC_OP_VC] = "CC_OP_VC", + }; + + return cc_names[cc_op]; +} diff --git a/qemu/target/s390x/helper.h b/qemu/target/s390x/helper.h new file mode 100644 index 00000000..43eabb81 --- /dev/null +++ b/qemu/target/s390x/helper.h @@ -0,0 +1,358 @@ +DEF_HELPER_4(uc_tracecode, void, i32, i32, ptr, i64) + +DEF_HELPER_2(exception, noreturn, env, i32) +DEF_HELPER_2(data_exception, noreturn, env, i32) +DEF_HELPER_FLAGS_4(nc, TCG_CALL_NO_WG, i32, env, i32, i64, i64) +DEF_HELPER_FLAGS_4(oc, TCG_CALL_NO_WG, i32, env, i32, i64, i64) +DEF_HELPER_FLAGS_4(xc, TCG_CALL_NO_WG, i32, env, i32, i64, i64) +DEF_HELPER_FLAGS_4(mvc, TCG_CALL_NO_WG, void, env, i32, i64, i64) +DEF_HELPER_FLAGS_4(mvcin, TCG_CALL_NO_WG, void, env, i32, i64, i64) +DEF_HELPER_FLAGS_4(clc, TCG_CALL_NO_WG, i32, env, i32, i64, i64) +DEF_HELPER_3(mvcl, i32, env, i32, i32) +DEF_HELPER_3(clcl, i32, env, i32, i32) +DEF_HELPER_FLAGS_4(clm, TCG_CALL_NO_WG, i32, env, i32, i32, i64) +DEF_HELPER_FLAGS_3(divs32, TCG_CALL_NO_WG, s64, env, s64, s64) +DEF_HELPER_FLAGS_3(divu32, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(divs64, TCG_CALL_NO_WG, s64, env, s64, s64) +DEF_HELPER_FLAGS_4(divu64, TCG_CALL_NO_WG, i64, env, i64, i64, i64) +DEF_HELPER_3(srst, void, env, i32, i32) +DEF_HELPER_3(srstu, void, env, i32, i32) +DEF_HELPER_4(clst, i64, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(mvn, TCG_CALL_NO_WG, void, env, i32, i64, i64) +DEF_HELPER_FLAGS_4(mvo, TCG_CALL_NO_WG, void, env, i32, i64, i64) +DEF_HELPER_FLAGS_4(mvpg, TCG_CALL_NO_WG, i32, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(mvz, TCG_CALL_NO_WG, void, env, i32, i64, i64) +DEF_HELPER_3(mvst, i32, env, i32, i32) +DEF_HELPER_4(ex, void, env, i32, i64, i64) +DEF_HELPER_FLAGS_4(stam, TCG_CALL_NO_WG, void, env, i32, i64, i32) +DEF_HELPER_FLAGS_4(lam, TCG_CALL_NO_WG, void, env, i32, i64, i32) +DEF_HELPER_4(mvcle, i32, env, i32, i64, i32) +DEF_HELPER_4(mvclu, i32, env, i32, i64, i32) +DEF_HELPER_4(clcle, i32, env, i32, i64, i32) +DEF_HELPER_4(clclu, i32, env, i32, i64, i32) +DEF_HELPER_3(cegb, i64, env, s64, i32) +DEF_HELPER_3(cdgb, i64, env, s64, i32) +DEF_HELPER_3(cxgb, i64, env, s64, i32) +DEF_HELPER_3(celgb, i64, env, i64, i32) +DEF_HELPER_3(cdlgb, i64, env, i64, i32) +DEF_HELPER_3(cxlgb, i64, env, i64, i32) +DEF_HELPER_4(cdsg, void, env, i64, i32, i32) +DEF_HELPER_4(cdsg_parallel, 
void, env, i64, i32, i32) +DEF_HELPER_4(csst, i32, env, i32, i64, i64) +DEF_HELPER_4(csst_parallel, i32, env, i32, i64, i64) +DEF_HELPER_FLAGS_3(aeb, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(adb, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_5(axb, TCG_CALL_NO_WG, i64, env, i64, i64, i64, i64) +DEF_HELPER_FLAGS_3(seb, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(sdb, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_5(sxb, TCG_CALL_NO_WG, i64, env, i64, i64, i64, i64) +DEF_HELPER_FLAGS_3(deb, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(ddb, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_5(dxb, TCG_CALL_NO_WG, i64, env, i64, i64, i64, i64) +DEF_HELPER_FLAGS_3(meeb, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(mdeb, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(mdb, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_5(mxb, TCG_CALL_NO_WG, i64, env, i64, i64, i64, i64) +DEF_HELPER_FLAGS_4(mxdb, TCG_CALL_NO_WG, i64, env, i64, i64, i64) +DEF_HELPER_FLAGS_2(ldeb, TCG_CALL_NO_WG, i64, env, i64) +DEF_HELPER_FLAGS_4(ldxb, TCG_CALL_NO_WG, i64, env, i64, i64, i32) +DEF_HELPER_FLAGS_2(lxdb, TCG_CALL_NO_WG, i64, env, i64) +DEF_HELPER_FLAGS_2(lxeb, TCG_CALL_NO_WG, i64, env, i64) +DEF_HELPER_FLAGS_3(ledb, TCG_CALL_NO_WG, i64, env, i64, i32) +DEF_HELPER_FLAGS_4(lexb, TCG_CALL_NO_WG, i64, env, i64, i64, i32) +DEF_HELPER_FLAGS_3(ceb, TCG_CALL_NO_WG_SE, i32, env, i64, i64) +DEF_HELPER_FLAGS_3(cdb, TCG_CALL_NO_WG_SE, i32, env, i64, i64) +DEF_HELPER_FLAGS_5(cxb, TCG_CALL_NO_WG_SE, i32, env, i64, i64, i64, i64) +DEF_HELPER_FLAGS_3(keb, TCG_CALL_NO_WG, i32, env, i64, i64) +DEF_HELPER_FLAGS_3(kdb, TCG_CALL_NO_WG, i32, env, i64, i64) +DEF_HELPER_FLAGS_5(kxb, TCG_CALL_NO_WG, i32, env, i64, i64, i64, i64) +DEF_HELPER_FLAGS_3(cgeb, TCG_CALL_NO_WG, i64, env, i64, i32) +DEF_HELPER_FLAGS_3(cgdb, TCG_CALL_NO_WG, i64, env, i64, i32) +DEF_HELPER_FLAGS_4(cgxb, TCG_CALL_NO_WG, i64, env, i64, i64, i32) +DEF_HELPER_FLAGS_3(cfeb, TCG_CALL_NO_WG, i64, env, i64, i32) +DEF_HELPER_FLAGS_3(cfdb, TCG_CALL_NO_WG, i64, env, i64, i32) +DEF_HELPER_FLAGS_4(cfxb, TCG_CALL_NO_WG, i64, env, i64, i64, i32) +DEF_HELPER_FLAGS_3(clgeb, TCG_CALL_NO_WG, i64, env, i64, i32) +DEF_HELPER_FLAGS_3(clgdb, TCG_CALL_NO_WG, i64, env, i64, i32) +DEF_HELPER_FLAGS_4(clgxb, TCG_CALL_NO_WG, i64, env, i64, i64, i32) +DEF_HELPER_FLAGS_3(clfeb, TCG_CALL_NO_WG, i64, env, i64, i32) +DEF_HELPER_FLAGS_3(clfdb, TCG_CALL_NO_WG, i64, env, i64, i32) +DEF_HELPER_FLAGS_4(clfxb, TCG_CALL_NO_WG, i64, env, i64, i64, i32) +DEF_HELPER_FLAGS_3(fieb, TCG_CALL_NO_WG, i64, env, i64, i32) +DEF_HELPER_FLAGS_3(fidb, TCG_CALL_NO_WG, i64, env, i64, i32) +DEF_HELPER_FLAGS_4(fixb, TCG_CALL_NO_WG, i64, env, i64, i64, i32) +DEF_HELPER_FLAGS_4(maeb, TCG_CALL_NO_WG, i64, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(madb, TCG_CALL_NO_WG, i64, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(mseb, TCG_CALL_NO_WG, i64, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(msdb, TCG_CALL_NO_WG, i64, env, i64, i64, i64) +DEF_HELPER_FLAGS_3(tceb, TCG_CALL_NO_RWG_SE, i32, env, i64, i64) +DEF_HELPER_FLAGS_3(tcdb, TCG_CALL_NO_RWG_SE, i32, env, i64, i64) +DEF_HELPER_FLAGS_4(tcxb, TCG_CALL_NO_RWG_SE, i32, env, i64, i64, i64) +DEF_HELPER_FLAGS_2(sqeb, TCG_CALL_NO_WG, i64, env, i64) +DEF_HELPER_FLAGS_2(sqdb, TCG_CALL_NO_WG, i64, env, i64) +DEF_HELPER_FLAGS_3(sqxb, TCG_CALL_NO_WG, i64, env, i64, i64) +DEF_HELPER_FLAGS_1(cvd, TCG_CALL_NO_RWG_SE, i64, s32) +DEF_HELPER_FLAGS_4(pack, TCG_CALL_NO_WG, void, env, i32, i64, i64) +DEF_HELPER_FLAGS_4(pka, 
TCG_CALL_NO_WG, void, env, i64, i64, i32) +DEF_HELPER_FLAGS_4(pku, TCG_CALL_NO_WG, void, env, i64, i64, i32) +DEF_HELPER_FLAGS_4(unpk, TCG_CALL_NO_WG, void, env, i32, i64, i64) +DEF_HELPER_FLAGS_4(unpka, TCG_CALL_NO_WG, i32, env, i64, i32, i64) +DEF_HELPER_FLAGS_4(unpku, TCG_CALL_NO_WG, i32, env, i64, i32, i64) +DEF_HELPER_FLAGS_3(tp, TCG_CALL_NO_WG, i32, env, i64, i32) +DEF_HELPER_FLAGS_4(tr, TCG_CALL_NO_WG, void, env, i32, i64, i64) +DEF_HELPER_4(tre, i64, env, i64, i64, i64) +DEF_HELPER_4(trt, i32, env, i32, i64, i64) +DEF_HELPER_4(trtr, i32, env, i32, i64, i64) +DEF_HELPER_5(trXX, i32, env, i32, i32, i32, i32) +DEF_HELPER_4(cksm, i64, env, i64, i64, i64) +DEF_HELPER_FLAGS_5(calc_cc, TCG_CALL_NO_RWG_SE, i32, env, i32, i64, i64, i64) +DEF_HELPER_FLAGS_2(sfpc, TCG_CALL_NO_WG, void, env, i64) +DEF_HELPER_FLAGS_2(sfas, TCG_CALL_NO_WG, void, env, i64) +DEF_HELPER_FLAGS_2(srnm, TCG_CALL_NO_WG, void, env, i64) +DEF_HELPER_FLAGS_1(popcnt, TCG_CALL_NO_RWG_SE, i64, i64) +DEF_HELPER_2(stfle, i32, env, i64) +DEF_HELPER_FLAGS_2(lpq, TCG_CALL_NO_WG, i64, env, i64) +DEF_HELPER_FLAGS_2(lpq_parallel, TCG_CALL_NO_WG, i64, env, i64) +DEF_HELPER_FLAGS_4(stpq, TCG_CALL_NO_WG, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(stpq_parallel, TCG_CALL_NO_WG, void, env, i64, i64, i64) +DEF_HELPER_4(mvcos, i32, env, i64, i64, i64) +DEF_HELPER_4(cu12, i32, env, i32, i32, i32) +DEF_HELPER_4(cu14, i32, env, i32, i32, i32) +DEF_HELPER_4(cu21, i32, env, i32, i32, i32) +DEF_HELPER_4(cu24, i32, env, i32, i32, i32) +DEF_HELPER_4(cu41, i32, env, i32, i32, i32) +DEF_HELPER_4(cu42, i32, env, i32, i32, i32) +DEF_HELPER_5(msa, i32, env, i32, i32, i32, i32) +DEF_HELPER_FLAGS_1(stpt, TCG_CALL_NO_RWG, i64, env) +DEF_HELPER_FLAGS_1(stck, TCG_CALL_NO_RWG_SE, i64, env) +DEF_HELPER_FLAGS_3(probe_write_access, TCG_CALL_NO_WG, void, env, i64, i64) + +/* === Vector Support Instructions === */ +DEF_HELPER_FLAGS_4(vll, TCG_CALL_NO_WG, void, env, ptr, i64, i64) +DEF_HELPER_FLAGS_4(gvec_vpk16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vpk32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vpk64, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vpks16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vpks32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vpks64, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_5(gvec_vpks_cc16, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_5(gvec_vpks_cc32, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_5(gvec_vpks_cc64, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_FLAGS_4(gvec_vpkls16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vpkls32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vpkls64, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_5(gvec_vpkls_cc16, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_5(gvec_vpkls_cc32, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_5(gvec_vpkls_cc64, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_FLAGS_5(gvec_vperm, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(vstl, TCG_CALL_NO_WG, void, env, cptr, i64, i64) + +/* === Vector Integer Instructions === */ +DEF_HELPER_FLAGS_4(gvec_vavg8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vavg16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vavgl8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vavgl16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) 
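+
+/*
+ * A note on conventions in this file: the numeric suffix of a gvec
+ * helper name is the element size in bits, and the "_cc" variants take
+ * env because they also set the condition code. TCG_CALL_NO_RWG marks
+ * a helper that neither reads nor writes TCG globals, TCG_CALL_NO_WG
+ * one that does not write them; both let the translator keep globals
+ * cached across the call.
+ */
+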
+DEF_HELPER_FLAGS_3(gvec_vclz8, TCG_CALL_NO_RWG, void, ptr, cptr, i32) +DEF_HELPER_FLAGS_3(gvec_vclz16, TCG_CALL_NO_RWG, void, ptr, cptr, i32) +DEF_HELPER_FLAGS_3(gvec_vctz8, TCG_CALL_NO_RWG, void, ptr, cptr, i32) +DEF_HELPER_FLAGS_3(gvec_vctz16, TCG_CALL_NO_RWG, void, ptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vgfm8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vgfm16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vgfm32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vgfm64, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_5(gvec_vgfma8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_5(gvec_vgfma16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_5(gvec_vgfma32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_5(gvec_vgfma64, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_5(gvec_vmal8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_5(gvec_vmal16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_5(gvec_vmah8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_5(gvec_vmah16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_5(gvec_vmalh8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_5(gvec_vmalh16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_5(gvec_vmae8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_5(gvec_vmae16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_5(gvec_vmae32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_5(gvec_vmale8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_5(gvec_vmale16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_5(gvec_vmale32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_5(gvec_vmao8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_5(gvec_vmao16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_5(gvec_vmao32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_5(gvec_vmalo8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_5(gvec_vmalo16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_5(gvec_vmalo32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vmh8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vmh16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vmlh8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vmlh16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vme8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vme16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vme32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vmle8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vmle16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vmle32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vmo8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vmo16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vmo32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vmlo8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) 
+DEF_HELPER_FLAGS_4(gvec_vmlo16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vmlo32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_3(gvec_vpopct8, TCG_CALL_NO_RWG, void, ptr, cptr, i32) +DEF_HELPER_FLAGS_3(gvec_vpopct16, TCG_CALL_NO_RWG, void, ptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_verllv8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_verllv16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_verll8, TCG_CALL_NO_RWG, void, ptr, cptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_verll16, TCG_CALL_NO_RWG, void, ptr, cptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_verim8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_verim16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vsl, TCG_CALL_NO_RWG, void, ptr, cptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_vsra, TCG_CALL_NO_RWG, void, ptr, cptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_vsrl, TCG_CALL_NO_RWG, void, ptr, cptr, i64, i32) +DEF_HELPER_FLAGS_4(gvec_vscbi8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vscbi16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_4(gvec_vtm, void, ptr, cptr, env, i32) + +/* === Vector String Instructions === */ +DEF_HELPER_FLAGS_4(gvec_vfae8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vfae16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vfae32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_5(gvec_vfae_cc8, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_5(gvec_vfae_cc16, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_5(gvec_vfae_cc32, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_FLAGS_4(gvec_vfee8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vfee16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vfee32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_5(gvec_vfee_cc8, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_5(gvec_vfee_cc16, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_5(gvec_vfee_cc32, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_FLAGS_4(gvec_vfene8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vfene16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_4(gvec_vfene32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, i32) +DEF_HELPER_5(gvec_vfene_cc8, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_5(gvec_vfene_cc16, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_5(gvec_vfene_cc32, void, ptr, cptr, cptr, env, i32) +DEF_HELPER_FLAGS_3(gvec_vistr8, TCG_CALL_NO_RWG, void, ptr, cptr, i32) +DEF_HELPER_FLAGS_3(gvec_vistr16, TCG_CALL_NO_RWG, void, ptr, cptr, i32) +DEF_HELPER_FLAGS_3(gvec_vistr32, TCG_CALL_NO_RWG, void, ptr, cptr, i32) +DEF_HELPER_4(gvec_vistr_cc8, void, ptr, cptr, env, i32) +DEF_HELPER_4(gvec_vistr_cc16, void, ptr, cptr, env, i32) +DEF_HELPER_4(gvec_vistr_cc32, void, ptr, cptr, env, i32) +DEF_HELPER_FLAGS_5(gvec_vstrc8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_5(gvec_vstrc16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_5(gvec_vstrc32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_5(gvec_vstrc_rt8, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_5(gvec_vstrc_rt16, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) +DEF_HELPER_FLAGS_5(gvec_vstrc_rt32, TCG_CALL_NO_RWG, void, ptr, cptr, cptr, cptr, i32) +DEF_HELPER_6(gvec_vstrc_cc8, void, ptr, cptr, cptr, cptr, env, i32) +DEF_HELPER_6(gvec_vstrc_cc16, void, ptr, cptr, cptr, 
cptr, env, i32)
+DEF_HELPER_6(gvec_vstrc_cc32, void, ptr, cptr, cptr, cptr, env, i32)
+DEF_HELPER_6(gvec_vstrc_cc_rt8, void, ptr, cptr, cptr, cptr, env, i32)
+DEF_HELPER_6(gvec_vstrc_cc_rt16, void, ptr, cptr, cptr, cptr, env, i32)
+DEF_HELPER_6(gvec_vstrc_cc_rt32, void, ptr, cptr, cptr, cptr, env, i32)
+
+/* === Vector Floating-Point Instructions === */
+DEF_HELPER_FLAGS_5(gvec_vfa64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfa64s, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_4(gvec_wfc64, void, cptr, cptr, env, i32)
+DEF_HELPER_4(gvec_wfk64, void, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfce64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfce64s, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_5(gvec_vfce64_cc, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_5(gvec_vfce64s_cc, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfch64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfch64s, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_5(gvec_vfch64_cc, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_5(gvec_vfch64s_cc, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfche64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfche64s, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_5(gvec_vfche64_cc, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_5(gvec_vfche64s_cc, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_4(gvec_vcdg64, TCG_CALL_NO_WG, void, ptr, cptr, env, i32)
+DEF_HELPER_FLAGS_4(gvec_vcdg64s, TCG_CALL_NO_WG, void, ptr, cptr, env, i32)
+DEF_HELPER_FLAGS_4(gvec_vcdlg64, TCG_CALL_NO_WG, void, ptr, cptr, env, i32)
+DEF_HELPER_FLAGS_4(gvec_vcdlg64s, TCG_CALL_NO_WG, void, ptr, cptr, env, i32)
+DEF_HELPER_FLAGS_4(gvec_vcgd64, TCG_CALL_NO_WG, void, ptr, cptr, env, i32)
+DEF_HELPER_FLAGS_4(gvec_vcgd64s, TCG_CALL_NO_WG, void, ptr, cptr, env, i32)
+DEF_HELPER_FLAGS_4(gvec_vclgd64, TCG_CALL_NO_WG, void, ptr, cptr, env, i32)
+DEF_HELPER_FLAGS_4(gvec_vclgd64s, TCG_CALL_NO_WG, void, ptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfd64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfd64s, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_4(gvec_vfi64, TCG_CALL_NO_WG, void, ptr, cptr, env, i32)
+DEF_HELPER_FLAGS_4(gvec_vfi64s, TCG_CALL_NO_WG, void, ptr, cptr, env, i32)
+DEF_HELPER_FLAGS_4(gvec_vfll32, TCG_CALL_NO_WG, void, ptr, cptr, env, i32)
+DEF_HELPER_FLAGS_4(gvec_vfll32s, TCG_CALL_NO_WG, void, ptr, cptr, env, i32)
+DEF_HELPER_FLAGS_4(gvec_vflr64, TCG_CALL_NO_WG, void, ptr, cptr, env, i32)
+DEF_HELPER_FLAGS_4(gvec_vflr64s, TCG_CALL_NO_WG, void, ptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfm64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfm64s, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_6(gvec_vfma64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_6(gvec_vfma64s, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_6(gvec_vfms64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_6(gvec_vfms64s, TCG_CALL_NO_WG, void, ptr, cptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_4(gvec_vfsq64, TCG_CALL_NO_WG, void, ptr, cptr, env, i32)
+DEF_HELPER_FLAGS_4(gvec_vfsq64s, TCG_CALL_NO_WG, void, ptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfs64, TCG_CALL_NO_WG, void, ptr, cptr, cptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_vfs64s, TCG_CALL_NO_WG, void, ptr, 
cptr, cptr, env, i32) +DEF_HELPER_4(gvec_vftci64, void, ptr, cptr, env, i32) +DEF_HELPER_4(gvec_vftci64s, void, ptr, cptr, env, i32) + +#ifndef CONFIG_USER_ONLY +DEF_HELPER_3(servc, i32, env, i64, i64) +DEF_HELPER_4(diag, void, env, i32, i32, i32) +DEF_HELPER_3(load_psw, noreturn, env, i64, i64) +DEF_HELPER_FLAGS_2(spx, TCG_CALL_NO_RWG, void, env, i64) +DEF_HELPER_FLAGS_2(sck, TCG_CALL_NO_RWG, i32, env, i64) +DEF_HELPER_FLAGS_2(sckc, TCG_CALL_NO_RWG, void, env, i64) +DEF_HELPER_FLAGS_2(sckpf, TCG_CALL_NO_RWG, void, env, i64) +DEF_HELPER_FLAGS_1(stckc, TCG_CALL_NO_RWG, i64, env) +DEF_HELPER_FLAGS_2(spt, TCG_CALL_NO_RWG, void, env, i64) +DEF_HELPER_4(stsi, i32, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(lctl, TCG_CALL_NO_WG, void, env, i32, i64, i32) +DEF_HELPER_FLAGS_4(lctlg, TCG_CALL_NO_WG, void, env, i32, i64, i32) +DEF_HELPER_FLAGS_4(stctl, TCG_CALL_NO_WG, void, env, i32, i64, i32) +DEF_HELPER_FLAGS_4(stctg, TCG_CALL_NO_WG, void, env, i32, i64, i32) +DEF_HELPER_FLAGS_2(testblock, TCG_CALL_NO_WG, i32, env, i64) +DEF_HELPER_FLAGS_3(tprot, TCG_CALL_NO_WG, i32, env, i64, i64) +DEF_HELPER_FLAGS_2(iske, TCG_CALL_NO_RWG_SE, i64, env, i64) +DEF_HELPER_FLAGS_3(sske, TCG_CALL_NO_RWG, void, env, i64, i64) +DEF_HELPER_FLAGS_2(rrbe, TCG_CALL_NO_RWG, i32, env, i64) +DEF_HELPER_4(mvcs, i32, env, i64, i64, i64) +DEF_HELPER_4(mvcp, i32, env, i64, i64, i64) +DEF_HELPER_4(sigp, i32, env, i64, i32, i32) +DEF_HELPER_FLAGS_2(sacf, TCG_CALL_NO_WG, void, env, i64) +DEF_HELPER_FLAGS_4(idte, TCG_CALL_NO_RWG, void, env, i64, i64, i32) +DEF_HELPER_FLAGS_4(ipte, TCG_CALL_NO_RWG, void, env, i64, i64, i32) +DEF_HELPER_FLAGS_1(ptlb, TCG_CALL_NO_RWG, void, env) +DEF_HELPER_FLAGS_1(purge, TCG_CALL_NO_RWG, void, env) +DEF_HELPER_2(lra, i64, env, i64) +DEF_HELPER_1(per_check_exception, void, env) +DEF_HELPER_FLAGS_3(per_branch, TCG_CALL_NO_RWG, void, env, i64, i64) +DEF_HELPER_FLAGS_2(per_ifetch, TCG_CALL_NO_RWG, void, env, i64) +DEF_HELPER_FLAGS_1(per_store_real, TCG_CALL_NO_RWG, void, env) +DEF_HELPER_FLAGS_1(stfl, TCG_CALL_NO_RWG, void, env) + +DEF_HELPER_2(xsch, void, env, i64) +DEF_HELPER_2(csch, void, env, i64) +DEF_HELPER_2(hsch, void, env, i64) +DEF_HELPER_3(msch, void, env, i64, i64) +DEF_HELPER_2(rchp, void, env, i64) +DEF_HELPER_2(rsch, void, env, i64) +DEF_HELPER_2(sal, void, env, i64) +DEF_HELPER_4(schm, void, env, i64, i64, i64) +DEF_HELPER_3(ssch, void, env, i64, i64) +DEF_HELPER_2(stcrw, void, env, i64) +DEF_HELPER_3(stsch, void, env, i64, i64) +DEF_HELPER_2(tpi, i32, env, i64) +DEF_HELPER_3(tsch, void, env, i64, i64) +DEF_HELPER_2(chsc, void, env, i64) + +DEF_HELPER_2(clp, void, env, i32) +DEF_HELPER_3(pcilg, void, env, i32, i32) +DEF_HELPER_3(pcistg, void, env, i32, i32) +DEF_HELPER_4(stpcifc, void, env, i32, i64, i32) +DEF_HELPER_3(sic, void, env, i64, i64) +DEF_HELPER_3(rpcit, void, env, i32, i32) +DEF_HELPER_5(pcistb, void, env, i32, i32, i64, i32) +DEF_HELPER_4(mpcifc, void, env, i32, i64, i32) +#endif diff --git a/qemu/target/s390x/insn-data.def b/qemu/target/s390x/insn-data.def new file mode 100644 index 00000000..2bc77f08 --- /dev/null +++ b/qemu/target/s390x/insn-data.def @@ -0,0 +1,1371 @@ +/* + * Arguments to the opcode prototypes + * + * C(OPC, NAME, FMT, FAC, I1, I2, P, W, OP, CC) + * D(OPC, NAME, FMT, FAC, I1, I2, P, W, OP, CC, DATA) + * E(OPC, NAME, FMT, FAC, I1, I2, P, W, OP, CC, DATA, FLAGS) + * F(OPC, NAME, FMT, FAC, I1, I2, P, W, OP, CC, FLAGS) + * + * OPC = (op << 8) | op2 where op is the major, op2 the minor opcode + * NAME = name of the opcode, used internally + * FMT = format of the 
opcode (defined in insn-format.def) + * FAC = facility the opcode is available in (defined in DisasFacility) + * I1 = func in1_xx fills o->in1 + * I2 = func in2_xx fills o->in2 + * P = func prep_xx initializes o->*out* + * W = func wout_xx writes o->*out* somewhere + * OP = func op_xx does the bulk of the operation + * CC = func cout_xx defines how cc should get set + * DATA = immediate argument to op_xx function + * FLAGS = categorize the type of instruction (e.g. for advanced checks) + * + * The helpers get called in order: I1, I2, P, OP, W, CC + */ + +/* ADD */ + C(0x1a00, AR, RR_a, Z, r1, r2, new, r1_32, add, adds32) + C(0xb9f8, ARK, RRF_a, DO, r2, r3, new, r1_32, add, adds32) + C(0x5a00, A, RX_a, Z, r1, m2_32s, new, r1_32, add, adds32) + C(0xe35a, AY, RXY_a, LD, r1, m2_32s, new, r1_32, add, adds32) + C(0xb908, AGR, RRE, Z, r1, r2, r1, 0, add, adds64) + C(0xb918, AGFR, RRE, Z, r1, r2_32s, r1, 0, add, adds64) + C(0xb9e8, AGRK, RRF_a, DO, r2, r3, r1, 0, add, adds64) + C(0xe308, AG, RXY_a, Z, r1, m2_64, r1, 0, add, adds64) + C(0xe318, AGF, RXY_a, Z, r1, m2_32s, r1, 0, add, adds64) + F(0xb30a, AEBR, RRE, Z, e1, e2, new, e1, aeb, f32, IF_BFP) + F(0xb31a, ADBR, RRE, Z, f1, f2, new, f1, adb, f64, IF_BFP) + F(0xb34a, AXBR, RRE, Z, x2h, x2l, x1, x1, axb, f128, IF_BFP) + F(0xed0a, AEB, RXE, Z, e1, m2_32u, new, e1, aeb, f32, IF_BFP) + F(0xed1a, ADB, RXE, Z, f1, m2_64, new, f1, adb, f64, IF_BFP) +/* ADD HIGH */ + C(0xb9c8, AHHHR, RRF_a, HW, r2_sr32, r3_sr32, new, r1_32h, add, adds32) + C(0xb9d8, AHHLR, RRF_a, HW, r2_sr32, r3, new, r1_32h, add, adds32) +/* ADD IMMEDIATE */ + C(0xc209, AFI, RIL_a, EI, r1, i2, new, r1_32, add, adds32) + D(0xeb6a, ASI, SIY, GIE, la1, i2, new, 0, asi, adds32, MO_TESL) + C(0xecd8, AHIK, RIE_d, DO, r3, i2, new, r1_32, add, adds32) + C(0xc208, AGFI, RIL_a, EI, r1, i2, r1, 0, add, adds64) + D(0xeb7a, AGSI, SIY, GIE, la1, i2, new, 0, asi, adds64, MO_TEQ) + C(0xecd9, AGHIK, RIE_d, DO, r3, i2, r1, 0, add, adds64) +/* ADD IMMEDIATE HIGH */ + C(0xcc08, AIH, RIL_a, HW, r1_sr32, i2, new, r1_32h, add, adds32) +/* ADD HALFWORD */ + C(0x4a00, AH, RX_a, Z, r1, m2_16s, new, r1_32, add, adds32) + C(0xe37a, AHY, RXY_a, LD, r1, m2_16s, new, r1_32, add, adds32) +/* ADD HALFWORD IMMEDIATE */ + C(0xa70a, AHI, RI_a, Z, r1, i2, new, r1_32, add, adds32) + C(0xa70b, AGHI, RI_a, Z, r1, i2, r1, 0, add, adds64) + +/* ADD LOGICAL */ + C(0x1e00, ALR, RR_a, Z, r1, r2, new, r1_32, add, addu32) + C(0xb9fa, ALRK, RRF_a, DO, r2, r3, new, r1_32, add, addu32) + C(0x5e00, AL, RX_a, Z, r1, m2_32u, new, r1_32, add, addu32) + C(0xe35e, ALY, RXY_a, LD, r1, m2_32u, new, r1_32, add, addu32) + C(0xb90a, ALGR, RRE, Z, r1, r2, r1, 0, add, addu64) + C(0xb91a, ALGFR, RRE, Z, r1, r2_32u, r1, 0, add, addu64) + C(0xb9ea, ALGRK, RRF_a, DO, r2, r3, r1, 0, add, addu64) + C(0xe30a, ALG, RXY_a, Z, r1, m2_64, r1, 0, add, addu64) + C(0xe31a, ALGF, RXY_a, Z, r1, m2_32u, r1, 0, add, addu64) +/* ADD LOGICAL HIGH */ + C(0xb9ca, ALHHHR, RRF_a, HW, r2_sr32, r3_sr32, new, r1_32h, add, addu32) + C(0xb9da, ALHHLR, RRF_a, HW, r2_sr32, r3, new, r1_32h, add, addu32) +/* ADD LOGICAL IMMEDIATE */ + C(0xc20b, ALFI, RIL_a, EI, r1, i2_32u, new, r1_32, add, addu32) + C(0xc20a, ALGFI, RIL_a, EI, r1, i2_32u, r1, 0, add, addu64) +/* ADD LOGICAL WITH SIGNED IMMEDIATE */ + D(0xeb6e, ALSI, SIY, GIE, la1, i2, new, 0, asi, addu32, MO_TEUL) + C(0xecda, ALHSIK, RIE_d, DO, r3, i2, new, r1_32, add, addu32) + D(0xeb7e, ALGSI, SIY, GIE, la1, i2, new, 0, asi, addu64, MO_TEQ) + C(0xecdb, ALGHSIK, RIE_d, DO, r3, i2, r1, 0, add, addu64) +/* ADD LOGICAL WITH 
SIGNED IMMEDIATE HIGH */ + C(0xcc0a, ALSIH, RIL_a, HW, r1_sr32, i2, new, r1_32h, add, addu32) + C(0xcc0b, ALSIHN, RIL_a, HW, r1_sr32, i2, new, r1_32h, add, 0) +/* ADD LOGICAL WITH CARRY */ + C(0xb998, ALCR, RRE, Z, r1, r2, new, r1_32, addc, addc32) + C(0xb988, ALCGR, RRE, Z, r1, r2, r1, 0, addc, addc64) + C(0xe398, ALC, RXY_a, Z, r1, m2_32u, new, r1_32, addc, addc32) + C(0xe388, ALCG, RXY_a, Z, r1, m2_64, r1, 0, addc, addc64) + +/* AND */ + C(0x1400, NR, RR_a, Z, r1, r2, new, r1_32, and, nz32) + C(0xb9f4, NRK, RRF_a, DO, r2, r3, new, r1_32, and, nz32) + C(0x5400, N, RX_a, Z, r1, m2_32s, new, r1_32, and, nz32) + C(0xe354, NY, RXY_a, LD, r1, m2_32s, new, r1_32, and, nz32) + C(0xb980, NGR, RRE, Z, r1, r2, r1, 0, and, nz64) + C(0xb9e4, NGRK, RRF_a, DO, r2, r3, r1, 0, and, nz64) + C(0xe380, NG, RXY_a, Z, r1, m2_64, r1, 0, and, nz64) + C(0xd400, NC, SS_a, Z, la1, a2, 0, 0, nc, 0) +/* AND IMMEDIATE */ + D(0xc00a, NIHF, RIL_a, EI, r1_o, i2_32u, r1, 0, andi, 0, 0x2020) + D(0xc00b, NILF, RIL_a, EI, r1_o, i2_32u, r1, 0, andi, 0, 0x2000) + D(0xa504, NIHH, RI_a, Z, r1_o, i2_16u, r1, 0, andi, 0, 0x1030) + D(0xa505, NIHL, RI_a, Z, r1_o, i2_16u, r1, 0, andi, 0, 0x1020) + D(0xa506, NILH, RI_a, Z, r1_o, i2_16u, r1, 0, andi, 0, 0x1010) + D(0xa507, NILL, RI_a, Z, r1_o, i2_16u, r1, 0, andi, 0, 0x1000) + D(0x9400, NI, SI, Z, la1, i2_8u, new, 0, ni, nz64, MO_UB) + D(0xeb54, NIY, SIY, LD, la1, i2_8u, new, 0, ni, nz64, MO_UB) + +/* BRANCH AND LINK */ + C(0x0500, BALR, RR_a, Z, 0, r2_nz, r1, 0, bal, 0) + C(0x4500, BAL, RX_a, Z, 0, a2, r1, 0, bal, 0) +/* BRANCH AND SAVE */ + C(0x0d00, BASR, RR_a, Z, 0, r2_nz, r1, 0, bas, 0) + C(0x4d00, BAS, RX_a, Z, 0, a2, r1, 0, bas, 0) +/* BRANCH RELATIVE AND SAVE */ + C(0xa705, BRAS, RI_b, Z, 0, 0, r1, 0, basi, 0) + C(0xc005, BRASL, RIL_b, Z, 0, 0, r1, 0, basi, 0) +/* BRANCH ON CONDITION */ + C(0x0700, BCR, RR_b, Z, 0, r2_nz, 0, 0, bc, 0) + C(0x4700, BC, RX_b, Z, 0, a2, 0, 0, bc, 0) +/* BRANCH RELATIVE ON CONDITION */ + C(0xa704, BRC, RI_c, Z, 0, 0, 0, 0, bc, 0) + C(0xc004, BRCL, RIL_c, Z, 0, 0, 0, 0, bc, 0) +/* BRANCH ON COUNT */ + C(0x0600, BCTR, RR_a, Z, 0, r2_nz, 0, 0, bct32, 0) + C(0xb946, BCTGR, RRE, Z, 0, r2_nz, 0, 0, bct64, 0) + C(0x4600, BCT, RX_a, Z, 0, a2, 0, 0, bct32, 0) + C(0xe346, BCTG, RXY_a, Z, 0, a2, 0, 0, bct64, 0) +/* BRANCH RELATIVE ON COUNT */ + C(0xa706, BRCT, RI_b, Z, 0, 0, 0, 0, bct32, 0) + C(0xa707, BRCTG, RI_b, Z, 0, 0, 0, 0, bct64, 0) +/* BRANCH RELATIVE ON COUNT HIGH */ + C(0xcc06, BRCTH, RIL_b, HW, 0, 0, 0, 0, bcth, 0) +/* BRANCH ON INDEX */ + D(0x8600, BXH, RS_a, Z, 0, a2, 0, 0, bx32, 0, 0) + D(0x8700, BXLE, RS_a, Z, 0, a2, 0, 0, bx32, 0, 1) + D(0xeb44, BXHG, RSY_a, Z, 0, a2, 0, 0, bx64, 0, 0) + D(0xeb45, BXLEG, RSY_a, Z, 0, a2, 0, 0, bx64, 0, 1) +/* BRANCH RELATIVE ON INDEX */ + D(0x8400, BRXH, RSI, Z, 0, 0, 0, 0, bx32, 0, 0) + D(0x8500, BRXLE, RSI, Z, 0, 0, 0, 0, bx32, 0, 1) + D(0xec44, BRXHG, RIE_e, Z, 0, 0, 0, 0, bx64, 0, 0) + D(0xec45, BRXHLE, RIE_e, Z, 0, 0, 0, 0, bx64, 0, 1) +/* BRANCH PREDICTION PRELOAD */ + /* ??? Format is SMI, but implemented as NOP, so we need no fields. */ + C(0xc700, BPP, E, EH, 0, 0, 0, 0, 0, 0) +/* BRANCH PREDICTION RELATIVE PRELOAD */ + /* ??? Format is MII, but implemented as NOP, so we need no fields. */ + C(0xc500, BPRP, E, EH, 0, 0, 0, 0, 0, 0) +/* NEXT INSTRUCTION ACCESS INTENT */ + /* ??? Format is IE, but implemented as NOP, so we need no fields. 
*/ + C(0xb2fa, NIAI, E, EH, 0, 0, 0, 0, 0, 0) + +/* CHECKSUM */ + C(0xb241, CKSM, RRE, Z, r1_o, ra2, new, r1_32, cksm, 0) + +/* COPY SIGN */ + F(0xb372, CPSDR, RRF_b, FPSSH, f3, f2, new, f1, cps, 0, IF_AFP1 | IF_AFP2 | IF_AFP3) + +/* COMPARE */ + C(0x1900, CR, RR_a, Z, r1_o, r2_o, 0, 0, 0, cmps32) + C(0x5900, C, RX_a, Z, r1_o, m2_32s, 0, 0, 0, cmps32) + C(0xe359, CY, RXY_a, LD, r1_o, m2_32s, 0, 0, 0, cmps32) + C(0xb920, CGR, RRE, Z, r1_o, r2_o, 0, 0, 0, cmps64) + C(0xb930, CGFR, RRE, Z, r1_o, r2_32s, 0, 0, 0, cmps64) + C(0xe320, CG, RXY_a, Z, r1_o, m2_64, 0, 0, 0, cmps64) + C(0xe330, CGF, RXY_a, Z, r1_o, m2_32s, 0, 0, 0, cmps64) + F(0xb309, CEBR, RRE, Z, e1, e2, 0, 0, ceb, 0, IF_BFP) + F(0xb319, CDBR, RRE, Z, f1, f2, 0, 0, cdb, 0, IF_BFP) + F(0xb349, CXBR, RRE, Z, x2h, x2l, x1, 0, cxb, 0, IF_BFP) + F(0xed09, CEB, RXE, Z, e1, m2_32u, 0, 0, ceb, 0, IF_BFP) + F(0xed19, CDB, RXE, Z, f1, m2_64, 0, 0, cdb, 0, IF_BFP) +/* COMPARE AND SIGNAL */ + F(0xb308, KEBR, RRE, Z, e1, e2, 0, 0, keb, 0, IF_BFP) + F(0xb318, KDBR, RRE, Z, f1, f2, 0, 0, kdb, 0, IF_BFP) + F(0xb348, KXBR, RRE, Z, x2h, x2l, x1, 0, kxb, 0, IF_BFP) + F(0xed08, KEB, RXE, Z, e1, m2_32u, 0, 0, keb, 0, IF_BFP) + F(0xed18, KDB, RXE, Z, f1, m2_64, 0, 0, kdb, 0, IF_BFP) +/* COMPARE IMMEDIATE */ + C(0xc20d, CFI, RIL_a, EI, r1, i2, 0, 0, 0, cmps32) + C(0xc20c, CGFI, RIL_a, EI, r1, i2, 0, 0, 0, cmps64) +/* COMPARE RELATIVE LONG */ + C(0xc60d, CRL, RIL_b, GIE, r1, mri2_32s, 0, 0, 0, cmps32) + C(0xc608, CGRL, RIL_b, GIE, r1, mri2_64, 0, 0, 0, cmps64) + C(0xc60c, CGFRL, RIL_b, GIE, r1, mri2_32s, 0, 0, 0, cmps64) +/* COMPARE HALFWORD */ + C(0x4900, CH, RX_a, Z, r1_o, m2_16s, 0, 0, 0, cmps32) + C(0xe379, CHY, RXY_a, LD, r1_o, m2_16s, 0, 0, 0, cmps32) + C(0xe334, CGH, RXY_a, GIE, r1_o, m2_16s, 0, 0, 0, cmps64) +/* COMPARE HALFWORD IMMEDIATE */ + C(0xa70e, CHI, RI_a, Z, r1_o, i2, 0, 0, 0, cmps32) + C(0xa70f, CGHI, RI_a, Z, r1_o, i2, 0, 0, 0, cmps64) + C(0xe554, CHHSI, SIL, GIE, m1_16s, i2, 0, 0, 0, cmps64) + C(0xe55c, CHSI, SIL, GIE, m1_32s, i2, 0, 0, 0, cmps64) + C(0xe558, CGHSI, SIL, GIE, m1_64, i2, 0, 0, 0, cmps64) +/* COMPARE HALFWORD RELATIVE LONG */ + C(0xc605, CHRL, RIL_b, GIE, r1_o, mri2_32s, 0, 0, 0, cmps32) + C(0xc604, CGHRL, RIL_b, GIE, r1_o, mri2_64, 0, 0, 0, cmps64) +/* COMPARE HIGH */ + C(0xb9cd, CHHR, RRE, HW, r1_sr32, r2_sr32, 0, 0, 0, cmps32) + C(0xb9dd, CHLR, RRE, HW, r1_sr32, r2_o, 0, 0, 0, cmps32) + C(0xe3cd, CHF, RXY_a, HW, r1_sr32, m2_32s, 0, 0, 0, cmps32) +/* COMPARE IMMEDIATE HIGH */ + C(0xcc0d, CIH, RIL_a, HW, r1_sr32, i2, 0, 0, 0, cmps32) + +/* COMPARE LOGICAL */ + C(0x1500, CLR, RR_a, Z, r1, r2, 0, 0, 0, cmpu32) + C(0x5500, CL, RX_a, Z, r1, m2_32s, 0, 0, 0, cmpu32) + C(0xe355, CLY, RXY_a, LD, r1, m2_32s, 0, 0, 0, cmpu32) + C(0xb921, CLGR, RRE, Z, r1, r2, 0, 0, 0, cmpu64) + C(0xb931, CLGFR, RRE, Z, r1, r2_32u, 0, 0, 0, cmpu64) + C(0xe321, CLG, RXY_a, Z, r1, m2_64, 0, 0, 0, cmpu64) + C(0xe331, CLGF, RXY_a, Z, r1, m2_32u, 0, 0, 0, cmpu64) + C(0xd500, CLC, SS_a, Z, la1, a2, 0, 0, clc, 0) +/* COMPARE LOGICAL HIGH */ + C(0xb9cf, CLHHR, RRE, HW, r1_sr32, r2_sr32, 0, 0, 0, cmpu32) + C(0xb9df, CLHLR, RRE, HW, r1_sr32, r2_o, 0, 0, 0, cmpu32) + C(0xe3cf, CLHF, RXY_a, HW, r1_sr32, m2_32s, 0, 0, 0, cmpu32) +/* COMPARE LOGICAL IMMEDIATE */ + C(0xc20f, CLFI, RIL_a, EI, r1, i2, 0, 0, 0, cmpu32) + C(0xc20e, CLGFI, RIL_a, EI, r1, i2_32u, 0, 0, 0, cmpu64) + C(0x9500, CLI, SI, Z, m1_8u, i2_8u, 0, 0, 0, cmpu64) + C(0xeb55, CLIY, SIY, LD, m1_8u, i2_8u, 0, 0, 0, cmpu64) + C(0xe555, CLHHSI, SIL, GIE, m1_16u, i2_16u, 0, 0, 0, cmpu64) + C(0xe55d, 
CLFHSI, SIL, GIE, m1_32u, i2_16u, 0, 0, 0, cmpu64) + C(0xe559, CLGHSI, SIL, GIE, m1_64, i2_16u, 0, 0, 0, cmpu64) +/* COMPARE LOGICAL IMMEDIATE HIGH */ + C(0xcc0f, CLIH, RIL_a, HW, r1_sr32, i2, 0, 0, 0, cmpu32) +/* COMPARE LOGICAL RELATIVE LONG */ + C(0xc60f, CLRL, RIL_b, GIE, r1_o, mri2_32u, 0, 0, 0, cmpu32) + C(0xc60a, CLGRL, RIL_b, GIE, r1_o, mri2_64, 0, 0, 0, cmpu64) + C(0xc60e, CLGFRL, RIL_b, GIE, r1_o, mri2_32u, 0, 0, 0, cmpu64) + C(0xc607, CLHRL, RIL_b, GIE, r1_o, mri2_16u, 0, 0, 0, cmpu32) + C(0xc606, CLGHRL, RIL_b, GIE, r1_o, mri2_16u, 0, 0, 0, cmpu64) +/* COMPARE LOGICAL LONG */ + C(0x0f00, CLCL, RR_a, Z, 0, 0, 0, 0, clcl, 0) +/* COMPARE LOGICAL LONG EXTENDED */ + C(0xa900, CLCLE, RS_a, Z, 0, a2, 0, 0, clcle, 0) +/* COMPARE LOGICAL LONG UNICODE */ + C(0xeb8f, CLCLU, RSY_a, E2, 0, a2, 0, 0, clclu, 0) +/* COMPARE LOGICAL CHARACTERS UNDER MASK */ + C(0xbd00, CLM, RS_b, Z, r1_o, a2, 0, 0, clm, 0) + C(0xeb21, CLMY, RSY_b, LD, r1_o, a2, 0, 0, clm, 0) + C(0xeb20, CLMH, RSY_b, Z, r1_sr32, a2, 0, 0, clm, 0) +/* COMPARE LOGICAL STRING */ + C(0xb25d, CLST, RRE, Z, r1_o, r2_o, 0, 0, clst, 0) + +/* COMPARE AND BRANCH */ + D(0xecf6, CRB, RRS, GIE, r1_32s, r2_32s, 0, 0, cj, 0, 0) + D(0xece4, CGRB, RRS, GIE, r1_o, r2_o, 0, 0, cj, 0, 0) + D(0xec76, CRJ, RIE_b, GIE, r1_32s, r2_32s, 0, 0, cj, 0, 0) + D(0xec64, CGRJ, RIE_b, GIE, r1_o, r2_o, 0, 0, cj, 0, 0) + D(0xecfe, CIB, RIS, GIE, r1_32s, i2, 0, 0, cj, 0, 0) + D(0xecfc, CGIB, RIS, GIE, r1_o, i2, 0, 0, cj, 0, 0) + D(0xec7e, CIJ, RIE_c, GIE, r1_32s, i2, 0, 0, cj, 0, 0) + D(0xec7c, CGIJ, RIE_c, GIE, r1_o, i2, 0, 0, cj, 0, 0) +/* COMPARE LOGICAL AND BRANCH */ + D(0xecf7, CLRB, RRS, GIE, r1_32u, r2_32u, 0, 0, cj, 0, 1) + D(0xece5, CLGRB, RRS, GIE, r1_o, r2_o, 0, 0, cj, 0, 1) + D(0xec77, CLRJ, RIE_b, GIE, r1_32u, r2_32u, 0, 0, cj, 0, 1) + D(0xec65, CLGRJ, RIE_b, GIE, r1_o, r2_o, 0, 0, cj, 0, 1) + D(0xecff, CLIB, RIS, GIE, r1_32u, i2_8u, 0, 0, cj, 0, 1) + D(0xecfd, CLGIB, RIS, GIE, r1_o, i2_8u, 0, 0, cj, 0, 1) + D(0xec7f, CLIJ, RIE_c, GIE, r1_32u, i2_8u, 0, 0, cj, 0, 1) + D(0xec7d, CLGIJ, RIE_c, GIE, r1_o, i2_8u, 0, 0, cj, 0, 1) + +/* COMPARE AND SWAP */ + D(0xba00, CS, RS_a, Z, r3_32u, r1_32u, new, r1_32, cs, 0, MO_TEUL) + D(0xeb14, CSY, RSY_a, LD, r3_32u, r1_32u, new, r1_32, cs, 0, MO_TEUL) + D(0xeb30, CSG, RSY_a, Z, r3_o, r1_o, new, r1, cs, 0, MO_TEQ) +/* COMPARE DOUBLE AND SWAP */ + D(0xbb00, CDS, RS_a, Z, r3_D32, r1_D32, new, r1_D32, cs, 0, MO_TEQ) + D(0xeb31, CDSY, RSY_a, LD, r3_D32, r1_D32, new, r1_D32, cs, 0, MO_TEQ) + C(0xeb3e, CDSG, RSY_a, Z, 0, 0, 0, 0, cdsg, 0) +/* COMPARE AND SWAP AND STORE */ + C(0xc802, CSST, SSF, CASS, la1, a2, 0, 0, csst, 0) + +/* COMPARE AND TRAP */ + D(0xb972, CRT, RRF_c, GIE, r1_32s, r2_32s, 0, 0, ct, 0, 0) + D(0xb960, CGRT, RRF_c, GIE, r1_o, r2_o, 0, 0, ct, 0, 0) + D(0xec72, CIT, RIE_a, GIE, r1_32s, i2, 0, 0, ct, 0, 0) + D(0xec70, CGIT, RIE_a, GIE, r1_o, i2, 0, 0, ct, 0, 0) +/* COMPARE LOGICAL AND TRAP */ + D(0xb973, CLRT, RRF_c, GIE, r1_32u, r2_32u, 0, 0, ct, 0, 1) + D(0xb961, CLGRT, RRF_c, GIE, r1_o, r2_o, 0, 0, ct, 0, 1) + D(0xeb23, CLT, RSY_b, MIE, r1_32u, m2_32u, 0, 0, ct, 0, 1) + D(0xeb2b, CLGT, RSY_b, MIE, r1_o, m2_64, 0, 0, ct, 0, 1) + D(0xec73, CLFIT, RIE_a, GIE, r1_32u, i2_32u, 0, 0, ct, 0, 1) + D(0xec71, CLGIT, RIE_a, GIE, r1_o, i2_32u, 0, 0, ct, 0, 1) + +/* CONVERT TO DECIMAL */ + C(0x4e00, CVD, RX_a, Z, r1_o, a2, 0, 0, cvd, 0) + C(0xe326, CVDY, RXY_a, LD, r1_o, a2, 0, 0, cvd, 0) +/* CONVERT TO FIXED */ + F(0xb398, CFEBR, RRF_e, Z, 0, e2, new, r1_32, cfeb, 0, IF_BFP) + F(0xb399, CFDBR, RRF_e, Z, 0, f2, 
new, r1_32, cfdb, 0, IF_BFP) + F(0xb39a, CFXBR, RRF_e, Z, x2h, x2l, new, r1_32, cfxb, 0, IF_BFP) + F(0xb3a8, CGEBR, RRF_e, Z, 0, e2, r1, 0, cgeb, 0, IF_BFP) + F(0xb3a9, CGDBR, RRF_e, Z, 0, f2, r1, 0, cgdb, 0, IF_BFP) + F(0xb3aa, CGXBR, RRF_e, Z, x2h, x2l, r1, 0, cgxb, 0, IF_BFP) +/* CONVERT FROM FIXED */ + F(0xb394, CEFBR, RRF_e, Z, 0, r2_32s, new, e1, cegb, 0, IF_BFP) + F(0xb395, CDFBR, RRF_e, Z, 0, r2_32s, new, f1, cdgb, 0, IF_BFP) + F(0xb396, CXFBR, RRF_e, Z, 0, r2_32s, new_P, x1, cxgb, 0, IF_BFP) + F(0xb3a4, CEGBR, RRF_e, Z, 0, r2_o, new, e1, cegb, 0, IF_BFP) + F(0xb3a5, CDGBR, RRF_e, Z, 0, r2_o, new, f1, cdgb, 0, IF_BFP) + F(0xb3a6, CXGBR, RRF_e, Z, 0, r2_o, new_P, x1, cxgb, 0, IF_BFP) +/* CONVERT TO LOGICAL */ + F(0xb39c, CLFEBR, RRF_e, FPE, 0, e2, new, r1_32, clfeb, 0, IF_BFP) + F(0xb39d, CLFDBR, RRF_e, FPE, 0, f2, new, r1_32, clfdb, 0, IF_BFP) + F(0xb39e, CLFXBR, RRF_e, FPE, x2h, x2l, new, r1_32, clfxb, 0, IF_BFP) + F(0xb3ac, CLGEBR, RRF_e, FPE, 0, e2, r1, 0, clgeb, 0, IF_BFP) + F(0xb3ad, CLGDBR, RRF_e, FPE, 0, f2, r1, 0, clgdb, 0, IF_BFP) + F(0xb3ae, CLGXBR, RRF_e, FPE, x2h, x2l, r1, 0, clgxb, 0, IF_BFP) +/* CONVERT FROM LOGICAL */ + F(0xb390, CELFBR, RRF_e, FPE, 0, r2_32u, new, e1, celgb, 0, IF_BFP) + F(0xb391, CDLFBR, RRF_e, FPE, 0, r2_32u, new, f1, cdlgb, 0, IF_BFP) + F(0xb392, CXLFBR, RRF_e, FPE, 0, r2_32u, new_P, x1, cxlgb, 0, IF_BFP) + F(0xb3a0, CELGBR, RRF_e, FPE, 0, r2_o, new, e1, celgb, 0, IF_BFP) + F(0xb3a1, CDLGBR, RRF_e, FPE, 0, r2_o, new, f1, cdlgb, 0, IF_BFP) + F(0xb3a2, CXLGBR, RRF_e, FPE, 0, r2_o, new_P, x1, cxlgb, 0, IF_BFP) + +/* CONVERT UTF-8 TO UTF-16 */ + D(0xb2a7, CU12, RRF_c, Z, 0, 0, 0, 0, cuXX, 0, 12) +/* CONVERT UTF-8 TO UTF-32 */ + D(0xb9b0, CU14, RRF_c, ETF3, 0, 0, 0, 0, cuXX, 0, 14) +/* CONVERT UTF-16 to UTF-8 */ + D(0xb2a6, CU21, RRF_c, Z, 0, 0, 0, 0, cuXX, 0, 21) +/* CONVERT UTF-16 to UTF-32 */ + D(0xb9b1, CU24, RRF_c, ETF3, 0, 0, 0, 0, cuXX, 0, 24) +/* CONVERT UTF-32 to UTF-8 */ + D(0xb9b2, CU41, RRF_c, ETF3, 0, 0, 0, 0, cuXX, 0, 41) +/* CONVERT UTF-32 to UTF-16 */ + D(0xb9b3, CU42, RRF_c, ETF3, 0, 0, 0, 0, cuXX, 0, 42) + +/* DIVIDE */ + C(0x1d00, DR, RR_a, Z, r1_D32, r2_32s, new_P, r1_P32, divs32, 0) + C(0x5d00, D, RX_a, Z, r1_D32, m2_32s, new_P, r1_P32, divs32, 0) + F(0xb30d, DEBR, RRE, Z, e1, e2, new, e1, deb, 0, IF_BFP) + F(0xb31d, DDBR, RRE, Z, f1, f2, new, f1, ddb, 0, IF_BFP) + F(0xb34d, DXBR, RRE, Z, x2h, x2l, x1, x1, dxb, 0, IF_BFP) + F(0xed0d, DEB, RXE, Z, e1, m2_32u, new, e1, deb, 0, IF_BFP) + F(0xed1d, DDB, RXE, Z, f1, m2_64, new, f1, ddb, 0, IF_BFP) +/* DIVIDE LOGICAL */ + C(0xb997, DLR, RRE, Z, r1_D32, r2_32u, new_P, r1_P32, divu32, 0) + C(0xe397, DL, RXY_a, Z, r1_D32, m2_32u, new_P, r1_P32, divu32, 0) + C(0xb987, DLGR, RRE, Z, 0, r2_o, r1_P, 0, divu64, 0) + C(0xe387, DLG, RXY_a, Z, 0, m2_64, r1_P, 0, divu64, 0) +/* DIVIDE SINGLE */ + C(0xb90d, DSGR, RRE, Z, r1p1, r2, r1_P, 0, divs64, 0) + C(0xb91d, DSGFR, RRE, Z, r1p1, r2_32s, r1_P, 0, divs64, 0) + C(0xe30d, DSG, RXY_a, Z, r1p1, m2_64, r1_P, 0, divs64, 0) + C(0xe31d, DSGF, RXY_a, Z, r1p1, m2_32s, r1_P, 0, divs64, 0) + +/* EXCLUSIVE OR */ + C(0x1700, XR, RR_a, Z, r1, r2, new, r1_32, xor, nz32) + C(0xb9f7, XRK, RRF_a, DO, r2, r3, new, r1_32, xor, nz32) + C(0x5700, X, RX_a, Z, r1, m2_32s, new, r1_32, xor, nz32) + C(0xe357, XY, RXY_a, LD, r1, m2_32s, new, r1_32, xor, nz32) + C(0xb982, XGR, RRE, Z, r1, r2, r1, 0, xor, nz64) + C(0xb9e7, XGRK, RRF_a, DO, r2, r3, r1, 0, xor, nz64) + C(0xe382, XG, RXY_a, Z, r1, m2_64, r1, 0, xor, nz64) + C(0xd700, XC, SS_a, Z, 0, 0, 0, 0, xc, 0) +/* EXCLUSIVE 
OR IMMEDIATE */ + D(0xc006, XIHF, RIL_a, EI, r1_o, i2_32u, r1, 0, xori, 0, 0x2020) + D(0xc007, XILF, RIL_a, EI, r1_o, i2_32u, r1, 0, xori, 0, 0x2000) + D(0x9700, XI, SI, Z, la1, i2_8u, new, 0, xi, nz64, MO_UB) + D(0xeb57, XIY, SIY, LD, la1, i2_8u, new, 0, xi, nz64, MO_UB) + +/* EXECUTE */ + C(0x4400, EX, RX_a, Z, 0, a2, 0, 0, ex, 0) +/* EXECUTE RELATIVE LONG */ + C(0xc600, EXRL, RIL_b, EE, 0, ri2, 0, 0, ex, 0) + +/* EXTRACT ACCESS */ + C(0xb24f, EAR, RRE, Z, 0, 0, new, r1_32, ear, 0) +/* EXTRACT CPU ATTRIBUTE */ + C(0xeb4c, ECAG, RSY_a, GIE, 0, a2, r1, 0, ecag, 0) +/* EXTRACT CPU TIME */ + C(0xc801, ECTG, SSF, ECT, 0, 0, 0, 0, ectg, 0) +/* EXTRACT FPC */ + F(0xb38c, EFPC, RRE, Z, 0, 0, new, r1_32, efpc, 0, IF_BFP) +/* EXTRACT PSW */ + C(0xb98d, EPSW, RRE, Z, 0, 0, 0, 0, epsw, 0) + +/* FIND LEFTMOST ONE */ + C(0xb983, FLOGR, RRE, EI, 0, r2_o, r1_P, 0, flogr, 0) + +/* INSERT CHARACTER */ + C(0x4300, IC, RX_a, Z, 0, m2_8u, 0, r1_8, mov2, 0) + C(0xe373, ICY, RXY_a, LD, 0, m2_8u, 0, r1_8, mov2, 0) +/* INSERT CHARACTERS UNDER MASK */ + D(0xbf00, ICM, RS_b, Z, 0, a2, r1, 0, icm, 0, 0) + D(0xeb81, ICMY, RSY_b, LD, 0, a2, r1, 0, icm, 0, 0) + D(0xeb80, ICMH, RSY_b, Z, 0, a2, r1, 0, icm, 0, 32) +/* INSERT IMMEDIATE */ + D(0xc008, IIHF, RIL_a, EI, r1_o, i2_32u, r1, 0, insi, 0, 0x2020) + D(0xc009, IILF, RIL_a, EI, r1_o, i2_32u, r1, 0, insi, 0, 0x2000) + D(0xa500, IIHH, RI_a, Z, r1_o, i2_16u, r1, 0, insi, 0, 0x1030) + D(0xa501, IIHL, RI_a, Z, r1_o, i2_16u, r1, 0, insi, 0, 0x1020) + D(0xa502, IILH, RI_a, Z, r1_o, i2_16u, r1, 0, insi, 0, 0x1010) + D(0xa503, IILL, RI_a, Z, r1_o, i2_16u, r1, 0, insi, 0, 0x1000) +/* INSERT PROGRAM MASK */ + C(0xb222, IPM, RRE, Z, 0, 0, r1, 0, ipm, 0) + +/* LOAD */ + C(0x1800, LR, RR_a, Z, 0, r2_o, 0, cond_r1r2_32, mov2, 0) + C(0x5800, L, RX_a, Z, 0, a2, new, r1_32, ld32s, 0) + C(0xe358, LY, RXY_a, LD, 0, a2, new, r1_32, ld32s, 0) + C(0xb904, LGR, RRE, Z, 0, r2_o, 0, r1, mov2, 0) + C(0xb914, LGFR, RRE, Z, 0, r2_32s, 0, r1, mov2, 0) + C(0xe304, LG, RXY_a, Z, 0, a2, r1, 0, ld64, 0) + C(0xe314, LGF, RXY_a, Z, 0, a2, r1, 0, ld32s, 0) + F(0x2800, LDR, RR_a, Z, 0, f2, 0, f1, mov2, 0, IF_AFP1 | IF_AFP2) + F(0x6800, LD, RX_a, Z, 0, m2_64, 0, f1, mov2, 0, IF_AFP1) + F(0xed65, LDY, RXY_a, LD, 0, m2_64, 0, f1, mov2, 0, IF_AFP1) + F(0x3800, LER, RR_a, Z, 0, e2, 0, cond_e1e2, mov2, 0, IF_AFP1 | IF_AFP2) + F(0x7800, LE, RX_a, Z, 0, m2_32u, 0, e1, mov2, 0, IF_AFP1) + F(0xed64, LEY, RXY_a, LD, 0, m2_32u, 0, e1, mov2, 0, IF_AFP1) + F(0xb365, LXR, RRE, Z, x2h, x2l, 0, x1, movx, 0, IF_AFP1) +/* LOAD IMMEDIATE */ + C(0xc001, LGFI, RIL_a, EI, 0, i2, 0, r1, mov2, 0) +/* LOAD RELATIVE LONG */ + C(0xc40d, LRL, RIL_b, GIE, 0, ri2, new, r1_32, ld32s, 0) + C(0xc408, LGRL, RIL_b, GIE, 0, ri2, r1, 0, ld64, 0) + C(0xc40c, LGFRL, RIL_b, GIE, 0, ri2, r1, 0, ld32s, 0) +/* LOAD ADDRESS */ + C(0x4100, LA, RX_a, Z, 0, a2, 0, r1, mov2, 0) + C(0xe371, LAY, RXY_a, LD, 0, a2, 0, r1, mov2, 0) +/* LOAD ADDRESS EXTENDED */ + C(0x5100, LAE, RX_a, Z, 0, a2, 0, r1, mov2e, 0) + C(0xe375, LAEY, RXY_a, GIE, 0, a2, 0, r1, mov2e, 0) +/* LOAD ADDRESS RELATIVE LONG */ + C(0xc000, LARL, RIL_b, Z, 0, ri2, 0, r1, mov2, 0) +/* LOAD AND ADD */ + D(0xebf8, LAA, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, laa, adds32, MO_TESL) + D(0xebe8, LAAG, RSY_a, ILA, r3, a2, new, in2_r1, laa, adds64, MO_TEQ) +/* LOAD AND ADD LOGICAL */ + D(0xebfa, LAAL, RSY_a, ILA, r3_32u, a2, new, in2_r1_32, laa, addu32, MO_TEUL) + D(0xebea, LAALG, RSY_a, ILA, r3, a2, new, in2_r1, laa, addu64, MO_TEQ) +/* LOAD AND AND */ + D(0xebf4, LAN, RSY_a, ILA, r3_32s, a2, 
new, in2_r1_32, lan, nz32, MO_TESL) + D(0xebe4, LANG, RSY_a, ILA, r3, a2, new, in2_r1, lan, nz64, MO_TEQ) +/* LOAD AND EXCLUSIVE OR */ + D(0xebf7, LAX, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, lax, nz32, MO_TESL) + D(0xebe7, LAXG, RSY_a, ILA, r3, a2, new, in2_r1, lax, nz64, MO_TEQ) +/* LOAD AND OR */ + D(0xebf6, LAO, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, lao, nz32, MO_TESL) + D(0xebe6, LAOG, RSY_a, ILA, r3, a2, new, in2_r1, lao, nz64, MO_TEQ) +/* LOAD AND TEST */ + C(0x1200, LTR, RR_a, Z, 0, r2_o, 0, cond_r1r2_32, mov2, s32) + C(0xb902, LTGR, RRE, Z, 0, r2_o, 0, r1, mov2, s64) + C(0xb912, LTGFR, RRE, Z, 0, r2_32s, 0, r1, mov2, s64) + C(0xe312, LT, RXY_a, EI, 0, a2, new, r1_32, ld32s, s64) + C(0xe302, LTG, RXY_a, EI, 0, a2, r1, 0, ld64, s64) + C(0xe332, LTGF, RXY_a, GIE, 0, a2, r1, 0, ld32s, s64) + F(0xb302, LTEBR, RRE, Z, 0, e2, 0, cond_e1e2, mov2, f32, IF_BFP) + F(0xb312, LTDBR, RRE, Z, 0, f2, 0, f1, mov2, f64, IF_BFP) + F(0xb342, LTXBR, RRE, Z, x2h, x2l, 0, x1, movx, f128, IF_BFP) +/* LOAD AND TRAP */ + C(0xe39f, LAT, RXY_a, LAT, 0, m2_32u, r1, 0, lat, 0) + C(0xe385, LGAT, RXY_a, LAT, 0, a2, r1, 0, lgat, 0) +/* LOAD AND ZERO RIGHTMOST BYTE */ + C(0xe3eb, LZRF, RXY_a, LZRB, 0, m2_32u, new, r1_32, lzrb, 0) + C(0xe32a, LZRG, RXY_a, LZRB, 0, m2_64, r1, 0, lzrb, 0) +/* LOAD LOGICAL AND ZERO RIGHTMOST BYTE */ + C(0xe33a, LLZRGF, RXY_a, LZRB, 0, m2_32u, r1, 0, lzrb, 0) +/* LOAD BYTE */ + C(0xb926, LBR, RRE, EI, 0, r2_8s, 0, r1_32, mov2, 0) + C(0xb906, LGBR, RRE, EI, 0, r2_8s, 0, r1, mov2, 0) + C(0xe376, LB, RXY_a, LD, 0, a2, new, r1_32, ld8s, 0) + C(0xe377, LGB, RXY_a, LD, 0, a2, r1, 0, ld8s, 0) +/* LOAD BYTE HIGH */ + C(0xe3c0, LBH, RXY_a, HW, 0, a2, new, r1_32h, ld8s, 0) +/* LOAD COMPLEMENT */ + C(0x1300, LCR, RR_a, Z, 0, r2, new, r1_32, neg, neg32) + C(0xb903, LCGR, RRE, Z, 0, r2, r1, 0, neg, neg64) + C(0xb913, LCGFR, RRE, Z, 0, r2_32s, r1, 0, neg, neg64) + F(0xb303, LCEBR, RRE, Z, 0, e2, new, e1, negf32, f32, IF_BFP) + F(0xb313, LCDBR, RRE, Z, 0, f2, new, f1, negf64, f64, IF_BFP) + F(0xb343, LCXBR, RRE, Z, x2h, x2l, new_P, x1, negf128, f128, IF_BFP) + F(0xb373, LCDFR, RRE, FPSSH, 0, f2, new, f1, negf64, 0, IF_AFP1 | IF_AFP2) +/* LOAD COUNT TO BLOCK BOUNDARY */ + C(0xe727, LCBB, RXE, V, la2, 0, r1, 0, lcbb, 0) +/* LOAD HALFWORD */ + C(0xb927, LHR, RRE, EI, 0, r2_16s, 0, r1_32, mov2, 0) + C(0xb907, LGHR, RRE, EI, 0, r2_16s, 0, r1, mov2, 0) + C(0x4800, LH, RX_a, Z, 0, a2, new, r1_32, ld16s, 0) + C(0xe378, LHY, RXY_a, LD, 0, a2, new, r1_32, ld16s, 0) + C(0xe315, LGH, RXY_a, Z, 0, a2, r1, 0, ld16s, 0) +/* LOAD HALFWORD HIGH */ + C(0xe3c4, LHH, RXY_a, HW, 0, a2, new, r1_32h, ld16s, 0) +/* LOAD HALFWORD IMMEDIATE */ + C(0xa708, LHI, RI_a, Z, 0, i2, 0, r1_32, mov2, 0) + C(0xa709, LGHI, RI_a, Z, 0, i2, 0, r1, mov2, 0) +/* LOAD HALFWORD RELATIVE LONG */ + C(0xc405, LHRL, RIL_b, GIE, 0, ri2, new, r1_32, ld16s, 0) + C(0xc404, LGHRL, RIL_b, GIE, 0, ri2, r1, 0, ld16s, 0) +/* LOAD HIGH */ + C(0xe3ca, LFH, RXY_a, HW, 0, a2, new, r1_32h, ld32u, 0) +/* LOAD HIGH AND TRAP */ + C(0xe3c8, LFHAT, RXY_a, LAT, 0, m2_32u, r1, 0, lfhat, 0) +/* LOAD LOGICAL */ + C(0xb916, LLGFR, RRE, Z, 0, r2_32u, 0, r1, mov2, 0) + C(0xe316, LLGF, RXY_a, Z, 0, a2, r1, 0, ld32u, 0) +/* LOAD LOGICAL AND TRAP */ + C(0xe39d, LLGFAT, RXY_a, LAT, 0, a2, r1, 0, llgfat, 0) +/* LOAD LOGICAL RELATIVE LONG */ + C(0xc40e, LLGFRL, RIL_b, GIE, 0, ri2, r1, 0, ld32u, 0) +/* LOAD LOGICAL CHARACTER */ + C(0xb994, LLCR, RRE, EI, 0, r2_8u, 0, r1_32, mov2, 0) + C(0xb984, LLGCR, RRE, EI, 0, r2_8u, 0, r1, mov2, 0) + C(0xe394, LLC, RXY_a, EI, 0, a2, new,
r1_32, ld8u, 0) + C(0xe390, LLGC, RXY_a, Z, 0, a2, r1, 0, ld8u, 0) +/* LOAD LOGICAL CHARACTER HIGH */ + C(0xe3c2, LLCH, RXY_a, HW, 0, a2, new, r1_32h, ld8u, 0) +/* LOAD LOGICAL HALFWORD */ + C(0xb995, LLHR, RRE, EI, 0, r2_16u, 0, r1_32, mov2, 0) + C(0xb985, LLGHR, RRE, EI, 0, r2_16u, 0, r1, mov2, 0) + C(0xe395, LLH, RXY_a, EI, 0, a2, new, r1_32, ld16u, 0) + C(0xe391, LLGH, RXY_a, Z, 0, a2, r1, 0, ld16u, 0) +/* LOAD LOGICAL HALFWORD HIGH */ + C(0xe3c6, LLHH, RXY_a, HW, 0, a2, new, r1_32h, ld16u, 0) +/* LOAD LOGICAL HALFWORD RELATIVE LONG */ + C(0xc402, LLHRL, RIL_b, GIE, 0, ri2, new, r1_32, ld16u, 0) + C(0xc406, LLGHRL, RIL_b, GIE, 0, ri2, r1, 0, ld16u, 0) +/* LOAD LOGICAL IMMEDIATE */ + D(0xc00e, LLIHF, RIL_a, EI, 0, i2_32u_shl, 0, r1, mov2, 0, 32) + D(0xc00f, LLILF, RIL_a, EI, 0, i2_32u_shl, 0, r1, mov2, 0, 0) + D(0xa50c, LLIHH, RI_a, Z, 0, i2_16u_shl, 0, r1, mov2, 0, 48) + D(0xa50d, LLIHL, RI_a, Z, 0, i2_16u_shl, 0, r1, mov2, 0, 32) + D(0xa50e, LLILH, RI_a, Z, 0, i2_16u_shl, 0, r1, mov2, 0, 16) + D(0xa50f, LLILL, RI_a, Z, 0, i2_16u_shl, 0, r1, mov2, 0, 0) +/* LOAD LOGICAL THIRTY ONE BITS */ + C(0xb917, LLGTR, RRE, Z, 0, r2_o, r1, 0, llgt, 0) + C(0xe317, LLGT, RXY_a, Z, 0, m2_32u, r1, 0, llgt, 0) +/* LOAD LOGICAL THIRTY ONE BITS AND TRAP */ + C(0xe39c, LLGTAT, RXY_a, LAT, 0, m2_32u, r1, 0, llgtat, 0) + +/* LOAD FPR FROM GR */ + F(0xb3c1, LDGR, RRE, FPRGR, 0, r2_o, 0, f1, mov2, 0, IF_AFP1) +/* LOAD GR FROM FPR */ + F(0xb3cd, LGDR, RRE, FPRGR, 0, f2, 0, r1, mov2, 0, IF_AFP2) +/* LOAD NEGATIVE */ + C(0x1100, LNR, RR_a, Z, 0, r2_32s, new, r1_32, nabs, nabs32) + C(0xb901, LNGR, RRE, Z, 0, r2, r1, 0, nabs, nabs64) + C(0xb911, LNGFR, RRE, Z, 0, r2_32s, r1, 0, nabs, nabs64) + F(0xb301, LNEBR, RRE, Z, 0, e2, new, e1, nabsf32, f32, IF_BFP) + F(0xb311, LNDBR, RRE, Z, 0, f2, new, f1, nabsf64, f64, IF_BFP) + F(0xb341, LNXBR, RRE, Z, x2h, x2l, new_P, x1, nabsf128, f128, IF_BFP) + F(0xb371, LNDFR, RRE, FPSSH, 0, f2, new, f1, nabsf64, 0, IF_AFP1 | IF_AFP2) +/* LOAD ON CONDITION */ + C(0xb9f2, LOCR, RRF_c, LOC, r1, r2, new, r1_32, loc, 0) + C(0xb9e2, LOCGR, RRF_c, LOC, r1, r2, r1, 0, loc, 0) + C(0xebf2, LOC, RSY_b, LOC, r1, m2_32u, new, r1_32, loc, 0) + C(0xebe2, LOCG, RSY_b, LOC, r1, m2_64, r1, 0, loc, 0) +/* LOAD HALFWORD IMMEDIATE ON CONDITION */ + C(0xec42, LOCHI, RIE_g, LOC2, r1, i2, new, r1_32, loc, 0) + C(0xec46, LOCGHI, RIE_g, LOC2, r1, i2, r1, 0, loc, 0) + C(0xec4e, LOCHHI, RIE_g, LOC2, r1_sr32, i2, new, r1_32h, loc, 0) +/* LOAD HIGH ON CONDITION */ + C(0xb9e0, LOCFHR, RRF_c, LOC2, r1_sr32, r2, new, r1_32h, loc, 0) + C(0xebe0, LOCFH, RSY_b, LOC2, r1_sr32, m2_32u, new, r1_32h, loc, 0) +/* LOAD PAIR DISJOINT */ + D(0xc804, LPD, SSF, ILA, 0, 0, new_P, r3_P32, lpd, 0, MO_TEUL) + D(0xc805, LPDG, SSF, ILA, 0, 0, new_P, r3_P64, lpd, 0, MO_TEQ) +/* LOAD PAIR FROM QUADWORD */ + C(0xe38f, LPQ, RXY_a, Z, 0, a2, r1_P, 0, lpq, 0) +/* LOAD POSITIVE */ + C(0x1000, LPR, RR_a, Z, 0, r2_32s, new, r1_32, abs, abs32) + C(0xb900, LPGR, RRE, Z, 0, r2, r1, 0, abs, abs64) + C(0xb910, LPGFR, RRE, Z, 0, r2_32s, r1, 0, abs, abs64) + F(0xb300, LPEBR, RRE, Z, 0, e2, new, e1, absf32, f32, IF_BFP) + F(0xb310, LPDBR, RRE, Z, 0, f2, new, f1, absf64, f64, IF_BFP) + F(0xb340, LPXBR, RRE, Z, x2h, x2l, new_P, x1, absf128, f128, IF_BFP) + F(0xb370, LPDFR, RRE, FPSSH, 0, f2, new, f1, absf64, 0, IF_AFP1 | IF_AFP2) +/* LOAD REVERSED */ + C(0xb91f, LRVR, RRE, Z, 0, r2_32u, new, r1_32, rev32, 0) + C(0xb90f, LRVGR, RRE, Z, 0, r2_o, r1, 0, rev64, 0) + C(0xe31f, LRVH, RXY_a, Z, 0, m2_16u, new, r1_16, rev16, 0) + C(0xe31e, LRV, RXY_a, Z, 0,
m2_32u, new, r1_32, rev32, 0) + C(0xe30f, LRVG, RXY_a, Z, 0, m2_64, r1, 0, rev64, 0) +/* LOAD ZERO */ + F(0xb374, LZER, RRE, Z, 0, 0, 0, e1, zero, 0, IF_AFP1) + F(0xb375, LZDR, RRE, Z, 0, 0, 0, f1, zero, 0, IF_AFP1) + F(0xb376, LZXR, RRE, Z, 0, 0, 0, x1, zero2, 0, IF_AFP1) + +/* LOAD FPC */ + F(0xb29d, LFPC, S, Z, 0, m2_32u, 0, 0, sfpc, 0, IF_BFP) +/* LOAD FPC AND SIGNAL */ + F(0xb2bd, LFAS, S, IEEEE_SIM, 0, m2_32u, 0, 0, sfas, 0, IF_DFP) +/* LOAD FP INTEGER */ + F(0xb357, FIEBR, RRF_e, Z, 0, e2, new, e1, fieb, 0, IF_BFP) + F(0xb35f, FIDBR, RRF_e, Z, 0, f2, new, f1, fidb, 0, IF_BFP) + F(0xb347, FIXBR, RRF_e, Z, x2h, x2l, new_P, x1, fixb, 0, IF_BFP) + +/* LOAD LENGTHENED */ + F(0xb304, LDEBR, RRE, Z, 0, e2, new, f1, ldeb, 0, IF_BFP) + F(0xb305, LXDBR, RRE, Z, 0, f2, new_P, x1, lxdb, 0, IF_BFP) + F(0xb306, LXEBR, RRE, Z, 0, e2, new_P, x1, lxeb, 0, IF_BFP) + F(0xed04, LDEB, RXE, Z, 0, m2_32u, new, f1, ldeb, 0, IF_BFP) + F(0xed05, LXDB, RXE, Z, 0, m2_64, new_P, x1, lxdb, 0, IF_BFP) + F(0xed06, LXEB, RXE, Z, 0, m2_32u, new_P, x1, lxeb, 0, IF_BFP) + F(0xb324, LDER, RXE, Z, 0, e2, new, f1, lde, 0, IF_AFP1) + F(0xed24, LDE, RXE, Z, 0, m2_32u, new, f1, lde, 0, IF_AFP1) +/* LOAD ROUNDED */ + F(0xb344, LEDBR, RRF_e, Z, 0, f2, new, e1, ledb, 0, IF_BFP) + F(0xb345, LDXBR, RRF_e, Z, x2h, x2l, new, f1, ldxb, 0, IF_BFP) + F(0xb346, LEXBR, RRF_e, Z, x2h, x2l, new, e1, lexb, 0, IF_BFP) + +/* LOAD MULTIPLE */ + C(0x9800, LM, RS_a, Z, 0, a2, 0, 0, lm32, 0) + C(0xeb98, LMY, RSY_a, LD, 0, a2, 0, 0, lm32, 0) + C(0xeb04, LMG, RSY_a, Z, 0, a2, 0, 0, lm64, 0) +/* LOAD MULTIPLE HIGH */ + C(0xeb96, LMH, RSY_a, Z, 0, a2, 0, 0, lmh, 0) +/* LOAD ACCESS MULTIPLE */ + C(0x9a00, LAM, RS_a, Z, 0, a2, 0, 0, lam, 0) + C(0xeb9a, LAMY, RSY_a, LD, 0, a2, 0, 0, lam, 0) + +/* MOVE */ + C(0xd200, MVC, SS_a, Z, la1, a2, 0, 0, mvc, 0) + C(0xe544, MVHHI, SIL, GIE, la1, i2, 0, m1_16, mov2, 0) + C(0xe54c, MVHI, SIL, GIE, la1, i2, 0, m1_32, mov2, 0) + C(0xe548, MVGHI, SIL, GIE, la1, i2, 0, m1_64, mov2, 0) + C(0x9200, MVI, SI, Z, la1, i2, 0, m1_8, mov2, 0) + C(0xeb52, MVIY, SIY, LD, la1, i2, 0, m1_8, mov2, 0) +/* MOVE INVERSE */ + C(0xe800, MVCIN, SS_a, Z, la1, a2, 0, 0, mvcin, 0) +/* MOVE LONG */ + C(0x0e00, MVCL, RR_a, Z, 0, 0, 0, 0, mvcl, 0) +/* MOVE LONG EXTENDED */ + C(0xa800, MVCLE, RS_a, Z, 0, a2, 0, 0, mvcle, 0) +/* MOVE LONG UNICODE */ + C(0xeb8e, MVCLU, RSY_a, E2, 0, a2, 0, 0, mvclu, 0) +/* MOVE NUMERICS */ + C(0xd100, MVN, SS_a, Z, la1, a2, 0, 0, mvn, 0) +/* MOVE PAGE */ + C(0xb254, MVPG, RRE, Z, r1_o, r2_o, 0, 0, mvpg, 0) +/* MOVE STRING */ + C(0xb255, MVST, RRE, Z, 0, 0, 0, 0, mvst, 0) +/* MOVE WITH OPTIONAL SPECIFICATION */ + C(0xc800, MVCOS, SSF, MVCOS, la1, a2, 0, 0, mvcos, 0) +/* MOVE WITH OFFSET */ + /* Really format SS_b, but we pack both lengths into one argument + for the helper call, so we might as well leave one 8-bit field. 
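+    (A minimal sketch of that unpacking on the helper side; the variable
+    names are illustrative assumptions, not necessarily what this patch
+    uses:
+        int l1 = (l >> 4) & 0xf;   // first-operand length code
+        int l2 = l & 0xf;          // second-operand length code
+    Each 4-bit length code is one less than the operand length in bytes,
+    per the usual SS-format convention.)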
*/ + C(0xf100, MVO, SS_a, Z, la1, a2, 0, 0, mvo, 0) +/* MOVE ZONES */ + C(0xd300, MVZ, SS_a, Z, la1, a2, 0, 0, mvz, 0) + +/* MULTIPLY */ + C(0x1c00, MR, RR_a, Z, r1p1_32s, r2_32s, new, r1_D32, mul, 0) + C(0x5c00, M, RX_a, Z, r1p1_32s, m2_32s, new, r1_D32, mul, 0) + C(0xe35c, MFY, RXY_a, GIE, r1p1_32s, m2_32s, new, r1_D32, mul, 0) + F(0xb317, MEEBR, RRE, Z, e1, e2, new, e1, meeb, 0, IF_BFP) + F(0xb31c, MDBR, RRE, Z, f1, f2, new, f1, mdb, 0, IF_BFP) + F(0xb34c, MXBR, RRE, Z, x2h, x2l, x1, x1, mxb, 0, IF_BFP) + F(0xb30c, MDEBR, RRE, Z, f1, e2, new, f1, mdeb, 0, IF_BFP) + F(0xb307, MXDBR, RRE, Z, 0, f2, x1, x1, mxdb, 0, IF_BFP) + F(0xed17, MEEB, RXE, Z, e1, m2_32u, new, e1, meeb, 0, IF_BFP) + F(0xed1c, MDB, RXE, Z, f1, m2_64, new, f1, mdb, 0, IF_BFP) + F(0xed0c, MDEB, RXE, Z, f1, m2_32u, new, f1, mdeb, 0, IF_BFP) + F(0xed07, MXDB, RXE, Z, 0, m2_64, x1, x1, mxdb, 0, IF_BFP) +/* MULTIPLY HALFWORD */ + C(0x4c00, MH, RX_a, Z, r1_o, m2_16s, new, r1_32, mul, 0) + C(0xe37c, MHY, RXY_a, GIE, r1_o, m2_16s, new, r1_32, mul, 0) +/* MULTIPLY HALFWORD IMMEDIATE */ + C(0xa70c, MHI, RI_a, Z, r1_o, i2, new, r1_32, mul, 0) + C(0xa70d, MGHI, RI_a, Z, r1_o, i2, r1, 0, mul, 0) +/* MULTIPLY LOGICAL */ + C(0xb996, MLR, RRE, Z, r1p1_32u, r2_32u, new, r1_D32, mul, 0) + C(0xe396, ML, RXY_a, Z, r1p1_32u, m2_32u, new, r1_D32, mul, 0) + C(0xb986, MLGR, RRE, Z, r1p1, r2_o, r1_P, 0, mul128, 0) + C(0xe386, MLG, RXY_a, Z, r1p1, m2_64, r1_P, 0, mul128, 0) +/* MULTIPLY SINGLE */ + C(0xb252, MSR, RRE, Z, r1_o, r2_o, new, r1_32, mul, 0) + C(0x7100, MS, RX_a, Z, r1_o, m2_32s, new, r1_32, mul, 0) + C(0xe351, MSY, RXY_a, LD, r1_o, m2_32s, new, r1_32, mul, 0) + C(0xb90c, MSGR, RRE, Z, r1_o, r2_o, r1, 0, mul, 0) + C(0xb91c, MSGFR, RRE, Z, r1_o, r2_32s, r1, 0, mul, 0) + C(0xe30c, MSG, RXY_a, Z, r1_o, m2_64, r1, 0, mul, 0) + C(0xe31c, MSGF, RXY_a, Z, r1_o, m2_32s, r1, 0, mul, 0) +/* MULTIPLY SINGLE IMMEDIATE */ + C(0xc201, MSFI, RIL_a, GIE, r1_o, i2, new, r1_32, mul, 0) + C(0xc200, MSGFI, RIL_a, GIE, r1_o, i2, r1, 0, mul, 0) + +/* MULTIPLY AND ADD */ + F(0xb30e, MAEBR, RRD, Z, e1, e2, new, e1, maeb, 0, IF_BFP) + F(0xb31e, MADBR, RRD, Z, f1, f2, new, f1, madb, 0, IF_BFP) + F(0xed0e, MAEB, RXF, Z, e1, m2_32u, new, e1, maeb, 0, IF_BFP) + F(0xed1e, MADB, RXF, Z, f1, m2_64, new, f1, madb, 0, IF_BFP) +/* MULTIPLY AND SUBTRACT */ + F(0xb30f, MSEBR, RRD, Z, e1, e2, new, e1, mseb, 0, IF_BFP) + F(0xb31f, MSDBR, RRD, Z, f1, f2, new, f1, msdb, 0, IF_BFP) + F(0xed0f, MSEB, RXF, Z, e1, m2_32u, new, e1, mseb, 0, IF_BFP) + F(0xed1f, MSDB, RXF, Z, f1, m2_64, new, f1, msdb, 0, IF_BFP) + +/* OR */ + C(0x1600, OR, RR_a, Z, r1, r2, new, r1_32, or, nz32) + C(0xb9f6, ORK, RRF_a, DO, r2, r3, new, r1_32, or, nz32) + C(0x5600, O, RX_a, Z, r1, m2_32s, new, r1_32, or, nz32) + C(0xe356, OY, RXY_a, LD, r1, m2_32s, new, r1_32, or, nz32) + C(0xb981, OGR, RRE, Z, r1, r2, r1, 0, or, nz64) + C(0xb9e6, OGRK, RRF_a, DO, r2, r3, r1, 0, or, nz64) + C(0xe381, OG, RXY_a, Z, r1, m2_64, r1, 0, or, nz64) + C(0xd600, OC, SS_a, Z, la1, a2, 0, 0, oc, 0) +/* OR IMMEDIATE */ + D(0xc00c, OIHF, RIL_a, EI, r1_o, i2_32u, r1, 0, ori, 0, 0x2020) + D(0xc00d, OILF, RIL_a, EI, r1_o, i2_32u, r1, 0, ori, 0, 0x2000) + D(0xa508, OIHH, RI_a, Z, r1_o, i2_16u, r1, 0, ori, 0, 0x1030) + D(0xa509, OIHL, RI_a, Z, r1_o, i2_16u, r1, 0, ori, 0, 0x1020) + D(0xa50a, OILH, RI_a, Z, r1_o, i2_16u, r1, 0, ori, 0, 0x1010) + D(0xa50b, OILL, RI_a, Z, r1_o, i2_16u, r1, 0, ori, 0, 0x1000) + D(0x9600, OI, SI, Z, la1, i2_8u, new, 0, oi, nz64, MO_UB) + D(0xeb56, OIY, SIY, LD, la1, i2_8u, new, 0, oi, nz64, MO_UB) + +/* 
PACK */ + /* Really format SS_b, but we pack both lengths into one argument + for the helper call, so we might as well leave one 8-bit field. */ + C(0xf200, PACK, SS_a, Z, la1, a2, 0, 0, pack, 0) +/* PACK ASCII */ + C(0xe900, PKA, SS_f, E2, la1, a2, 0, 0, pka, 0) +/* PACK UNICODE */ + C(0xe100, PKU, SS_f, E2, la1, a2, 0, 0, pku, 0) + +/* PREFETCH */ + /* Implemented as nops of course. */ + C(0xe336, PFD, RXY_b, GIE, 0, 0, 0, 0, 0, 0) + C(0xc602, PFDRL, RIL_c, GIE, 0, 0, 0, 0, 0, 0) +/* PERFORM PROCESSOR ASSIST */ + /* Implemented as nop of course. */ + C(0xb2e8, PPA, RRF_c, PPA, 0, 0, 0, 0, 0, 0) + +/* POPULATION COUNT */ + C(0xb9e1, POPCNT, RRE, PC, 0, r2_o, r1, 0, popcnt, nz64) + +/* ROTATE LEFT SINGLE LOGICAL */ + C(0xeb1d, RLL, RSY_a, Z, r3_o, sh32, new, r1_32, rll32, 0) + C(0xeb1c, RLLG, RSY_a, Z, r3_o, sh64, r1, 0, rll64, 0) + +/* ROTATE THEN INSERT SELECTED BITS */ + C(0xec55, RISBG, RIE_f, GIE, 0, r2, r1, 0, risbg, s64) + C(0xec59, RISBGN, RIE_f, MIE, 0, r2, r1, 0, risbg, 0) + C(0xec5d, RISBHG, RIE_f, HW, 0, r2, r1, 0, risbg, 0) + C(0xec51, RISBLG, RIE_f, HW, 0, r2, r1, 0, risbg, 0) +/* ROTATE THEN AND/OR/XOR SELECTED BITS */ + C(0xec54, RNSBG, RIE_f, GIE, 0, r2, r1, 0, rosbg, 0) + C(0xec56, ROSBG, RIE_f, GIE, 0, r2, r1, 0, rosbg, 0) + C(0xec57, RXSBG, RIE_f, GIE, 0, r2, r1, 0, rosbg, 0) + +/* SEARCH STRING */ + C(0xb25e, SRST, RRE, Z, 0, 0, 0, 0, srst, 0) +/* SEARCH STRING UNICODE */ + C(0xb9be, SRSTU, RRE, ETF3, 0, 0, 0, 0, srstu, 0) + +/* SET ACCESS */ + C(0xb24e, SAR, RRE, Z, 0, r2_o, 0, 0, sar, 0) +/* SET ADDRESSING MODE */ + D(0x010c, SAM24, E, Z, 0, 0, 0, 0, sam, 0, 0) + D(0x010d, SAM31, E, Z, 0, 0, 0, 0, sam, 0, 1) + D(0x010e, SAM64, E, Z, 0, 0, 0, 0, sam, 0, 3) +/* SET FPC */ + F(0xb384, SFPC, RRE, Z, 0, r1_o, 0, 0, sfpc, 0, IF_BFP) +/* SET FPC AND SIGNAL */ + F(0xb385, SFASR, RRE, IEEEE_SIM, 0, r1_o, 0, 0, sfas, 0, IF_DFP) +/* SET BFP ROUNDING MODE */ + F(0xb299, SRNM, S, Z, la2, 0, 0, 0, srnm, 0, IF_BFP) + F(0xb2b8, SRNMB, S, FPE, la2, 0, 0, 0, srnmb, 0, IF_BFP) +/* SET DFP ROUNDING MODE */ + F(0xb2b9, SRNMT, S, DFPR, la2, 0, 0, 0, srnmt, 0, IF_DFP) +/* SET PROGRAM MASK */ + C(0x0400, SPM, RR_a, Z, r1, 0, 0, 0, spm, 0) + +/* SHIFT LEFT SINGLE */ + D(0x8b00, SLA, RS_a, Z, r1, sh32, new, r1_32, sla, 0, 31) + D(0xebdd, SLAK, RSY_a, DO, r3, sh32, new, r1_32, sla, 0, 31) + D(0xeb0b, SLAG, RSY_a, Z, r3, sh64, r1, 0, sla, 0, 63) +/* SHIFT LEFT SINGLE LOGICAL */ + C(0x8900, SLL, RS_a, Z, r1_o, sh32, new, r1_32, sll, 0) + C(0xebdf, SLLK, RSY_a, DO, r3_o, sh32, new, r1_32, sll, 0) + C(0xeb0d, SLLG, RSY_a, Z, r3_o, sh64, r1, 0, sll, 0) +/* SHIFT RIGHT SINGLE */ + C(0x8a00, SRA, RS_a, Z, r1_32s, sh32, new, r1_32, sra, s32) + C(0xebdc, SRAK, RSY_a, DO, r3_32s, sh32, new, r1_32, sra, s32) + C(0xeb0a, SRAG, RSY_a, Z, r3_o, sh64, r1, 0, sra, s64) +/* SHIFT RIGHT SINGLE LOGICAL */ + C(0x8800, SRL, RS_a, Z, r1_32u, sh32, new, r1_32, srl, 0) + C(0xebde, SRLK, RSY_a, DO, r3_32u, sh32, new, r1_32, srl, 0) + C(0xeb0c, SRLG, RSY_a, Z, r3_o, sh64, r1, 0, srl, 0) +/* SHIFT LEFT DOUBLE */ + D(0x8f00, SLDA, RS_a, Z, r1_D32, sh64, new, r1_D32, sla, 0, 31) +/* SHIFT LEFT DOUBLE LOGICAL */ + C(0x8d00, SLDL, RS_a, Z, r1_D32, sh64, new, r1_D32, sll, 0) +/* SHIFT RIGHT DOUBLE */ + C(0x8e00, SRDA, RS_a, Z, r1_D32, sh64, new, r1_D32, sra, s64) +/* SHIFT RIGHT DOUBLE LOGICAL */ + C(0x8c00, SRDL, RS_a, Z, r1_D32, sh64, new, r1_D32, srl, 0) + +/* SQUARE ROOT */ + F(0xb314, SQEBR, RRE, Z, 0, e2, new, e1, sqeb, 0, IF_BFP) + F(0xb315, SQDBR, RRE, Z, 0, f2, new, f1, sqdb, 0, IF_BFP) + F(0xb316, SQXBR, RRE, Z, x2h, x2l,
new, x1, sqxb, 0, IF_BFP) + F(0xed14, SQEB, RXE, Z, 0, m2_32u, new, e1, sqeb, 0, IF_BFP) + F(0xed15, SQDB, RXE, Z, 0, m2_64, new, f1, sqdb, 0, IF_BFP) + +/* STORE */ + C(0x5000, ST, RX_a, Z, r1_o, a2, 0, 0, st32, 0) + C(0xe350, STY, RXY_a, LD, r1_o, a2, 0, 0, st32, 0) + C(0xe324, STG, RXY_a, Z, r1_o, a2, 0, 0, st64, 0) + F(0x6000, STD, RX_a, Z, f1, a2, 0, 0, st64, 0, IF_AFP1) + F(0xed67, STDY, RXY_a, LD, f1, a2, 0, 0, st64, 0, IF_AFP1) + F(0x7000, STE, RX_a, Z, e1, a2, 0, 0, st32, 0, IF_AFP1) + F(0xed66, STEY, RXY_a, LD, e1, a2, 0, 0, st32, 0, IF_AFP1) +/* STORE RELATIVE LONG */ + C(0xc40f, STRL, RIL_b, GIE, r1_o, ri2, 0, 0, st32, 0) + C(0xc40b, STGRL, RIL_b, GIE, r1_o, ri2, 0, 0, st64, 0) +/* STORE CHARACTER */ + C(0x4200, STC, RX_a, Z, r1_o, a2, 0, 0, st8, 0) + C(0xe372, STCY, RXY_a, LD, r1_o, a2, 0, 0, st8, 0) +/* STORE CHARACTER HIGH */ + C(0xe3c3, STCH, RXY_a, HW, r1_sr32, a2, 0, 0, st8, 0) +/* STORE CHARACTERS UNDER MASK */ + D(0xbe00, STCM, RS_b, Z, r1_o, a2, 0, 0, stcm, 0, 0) + D(0xeb2d, STCMY, RSY_b, LD, r1_o, a2, 0, 0, stcm, 0, 0) + D(0xeb2c, STCMH, RSY_b, Z, r1_o, a2, 0, 0, stcm, 0, 32) +/* STORE HALFWORD */ + C(0x4000, STH, RX_a, Z, r1_o, a2, 0, 0, st16, 0) + C(0xe370, STHY, RXY_a, LD, r1_o, a2, 0, 0, st16, 0) +/* STORE HALFWORD HIGH */ + C(0xe3c7, STHH, RXY_a, HW, r1_sr32, a2, 0, 0, st16, 0) +/* STORE HALFWORD RELATIVE LONG */ + C(0xc407, STHRL, RIL_b, GIE, r1_o, ri2, 0, 0, st16, 0) +/* STORE HIGH */ + C(0xe3cb, STFH, RXY_a, HW, r1_sr32, a2, 0, 0, st32, 0) +/* STORE ON CONDITION */ + D(0xebf3, STOC, RSY_b, LOC, 0, 0, 0, 0, soc, 0, 0) + D(0xebe3, STOCG, RSY_b, LOC, 0, 0, 0, 0, soc, 0, 1) +/* STORE HIGH ON CONDITION */ + D(0xebe1, STOCFH, RSY_b, LOC2, 0, 0, 0, 0, soc, 0, 2) +/* STORE REVERSED */ + C(0xe33f, STRVH, RXY_a, Z, la2, r1_16u, new, m1_16, rev16, 0) + C(0xe33e, STRV, RXY_a, Z, la2, r1_32u, new, m1_32, rev32, 0) + C(0xe32f, STRVG, RXY_a, Z, la2, r1_o, new, m1_64, rev64, 0) + +/* STORE CLOCK */ + C(0xb205, STCK, S, Z, la2, 0, new, m1_64, stck, 0) + C(0xb27c, STCKF, S, SCF, la2, 0, new, m1_64, stck, 0) +/* STORE CLOCK EXTENDED */ + C(0xb278, STCKE, S, Z, 0, a2, 0, 0, stcke, 0) + +/* STORE FACILITY LIST EXTENDED */ + C(0xb2b0, STFLE, S, SFLE, 0, a2, 0, 0, stfle, 0) +/* STORE FPC */ + F(0xb29c, STFPC, S, Z, 0, a2, new, m2_32, efpc, 0, IF_BFP) + +/* STORE MULTIPLE */ + D(0x9000, STM, RS_a, Z, 0, a2, 0, 0, stm, 0, 4) + D(0xeb90, STMY, RSY_a, LD, 0, a2, 0, 0, stm, 0, 4) + D(0xeb24, STMG, RSY_a, Z, 0, a2, 0, 0, stm, 0, 8) +/* STORE MULTIPLE HIGH */ + C(0xeb26, STMH, RSY_a, Z, 0, a2, 0, 0, stmh, 0) +/* STORE ACCESS MULTIPLE */ + C(0x9b00, STAM, RS_a, Z, 0, a2, 0, 0, stam, 0) + C(0xeb9b, STAMY, RSY_a, LD, 0, a2, 0, 0, stam, 0) +/* STORE PAIR TO QUADWORD */ + C(0xe38e, STPQ, RXY_a, Z, 0, a2, r1_P, 0, stpq, 0) + +/* SUBTRACT */ + C(0x1b00, SR, RR_a, Z, r1, r2, new, r1_32, sub, subs32) + C(0xb9f9, SRK, RRF_a, DO, r2, r3, new, r1_32, sub, subs32) + C(0x5b00, S, RX_a, Z, r1, m2_32s, new, r1_32, sub, subs32) + C(0xe35b, SY, RXY_a, LD, r1, m2_32s, new, r1_32, sub, subs32) + C(0xb909, SGR, RRE, Z, r1, r2, r1, 0, sub, subs64) + C(0xb919, SGFR, RRE, Z, r1, r2_32s, r1, 0, sub, subs64) + C(0xb9e9, SGRK, RRF_a, DO, r2, r3, r1, 0, sub, subs64) + C(0xe309, SG, RXY_a, Z, r1, m2_64, r1, 0, sub, subs64) + C(0xe319, SGF, RXY_a, Z, r1, m2_32s, r1, 0, sub, subs64) + F(0xb30b, SEBR, RRE, Z, e1, e2, new, e1, seb, f32, IF_BFP) + F(0xb31b, SDBR, RRE, Z, f1, f2, new, f1, sdb, f64, IF_BFP) + F(0xb34b, SXBR, RRE, Z, x2h, x2l, x1, x1, sxb, f128, IF_BFP) + F(0xed0b, SEB, RXE, Z, e1, m2_32u, new, e1, seb, f32, 
IF_BFP) + F(0xed1b, SDB, RXE, Z, f1, m2_64, new, f1, sdb, f64, IF_BFP) +/* SUBTRACT HALFWORD */ + C(0x4b00, SH, RX_a, Z, r1, m2_16s, new, r1_32, sub, subs32) + C(0xe37b, SHY, RXY_a, LD, r1, m2_16s, new, r1_32, sub, subs32) +/* SUBTRACT HIGH */ + C(0xb9c9, SHHHR, RRF_a, HW, r2_sr32, r3_sr32, new, r1_32h, sub, subs32) + C(0xb9d9, SHHLR, RRF_a, HW, r2_sr32, r3, new, r1_32h, sub, subs32) +/* SUBTRACT LOGICAL */ + C(0x1f00, SLR, RR_a, Z, r1, r2, new, r1_32, sub, subu32) + C(0xb9fb, SLRK, RRF_a, DO, r2, r3, new, r1_32, sub, subu32) + C(0x5f00, SL, RX_a, Z, r1, m2_32u, new, r1_32, sub, subu32) + C(0xe35f, SLY, RXY_a, LD, r1, m2_32u, new, r1_32, sub, subu32) + C(0xb90b, SLGR, RRE, Z, r1, r2, r1, 0, sub, subu64) + C(0xb91b, SLGFR, RRE, Z, r1, r2_32u, r1, 0, sub, subu64) + C(0xb9eb, SLGRK, RRF_a, DO, r2, r3, r1, 0, sub, subu64) + C(0xe30b, SLG, RXY_a, Z, r1, m2_64, r1, 0, sub, subu64) + C(0xe31b, SLGF, RXY_a, Z, r1, m2_32u, r1, 0, sub, subu64) +/* SUBTRACT LOGICAL HIGH */ + C(0xb9cb, SLHHHR, RRF_a, HW, r2_sr32, r3_sr32, new, r1_32h, sub, subu32) + C(0xb9db, SLHHLR, RRF_a, HW, r2_sr32, r3, new, r1_32h, sub, subu32) +/* SUBTRACT LOGICAL IMMEDIATE */ + C(0xc205, SLFI, RIL_a, EI, r1, i2_32u, new, r1_32, sub, subu32) + C(0xc204, SLGFI, RIL_a, EI, r1, i2_32u, r1, 0, sub, subu64) +/* SUBTRACT LOGICAL WITH BORROW */ + C(0xb999, SLBR, RRE, Z, r1, r2, new, r1_32, subb, subb32) + C(0xb989, SLBGR, RRE, Z, r1, r2, r1, 0, subb, subb64) + C(0xe399, SLB, RXY_a, Z, r1, m2_32u, new, r1_32, subb, subb32) + C(0xe389, SLBG, RXY_a, Z, r1, m2_64, r1, 0, subb, subb64) + +/* SUPERVISOR CALL */ + C(0x0a00, SVC, I, Z, 0, 0, 0, 0, svc, 0) + +/* TEST ADDRESSING MODE */ + C(0x010b, TAM, E, Z, 0, 0, 0, 0, tam, 0) + +/* TEST AND SET */ + C(0x9300, TS, S, Z, 0, a2, 0, 0, ts, 0) + +/* TEST DATA CLASS */ + F(0xed10, TCEB, RXE, Z, e1, a2, 0, 0, tceb, 0, IF_BFP) + F(0xed11, TCDB, RXE, Z, f1, a2, 0, 0, tcdb, 0, IF_BFP) + F(0xed12, TCXB, RXE, Z, 0, a2, x1, 0, tcxb, 0, IF_BFP) + +/* TEST DECIMAL */ + C(0xebc0, TP, RSL, E2, la1, 0, 0, 0, tp, 0) + +/* TEST UNDER MASK */ + C(0x9100, TM, SI, Z, m1_8u, i2_8u, 0, 0, 0, tm32) + C(0xeb51, TMY, SIY, LD, m1_8u, i2_8u, 0, 0, 0, tm32) + D(0xa702, TMHH, RI_a, Z, r1_o, i2_16u_shl, 0, 0, 0, tm64, 48) + D(0xa703, TMHL, RI_a, Z, r1_o, i2_16u_shl, 0, 0, 0, tm64, 32) + D(0xa700, TMLH, RI_a, Z, r1_o, i2_16u_shl, 0, 0, 0, tm64, 16) + D(0xa701, TMLL, RI_a, Z, r1_o, i2_16u_shl, 0, 0, 0, tm64, 0) + +/* TRANSLATE */ + C(0xdc00, TR, SS_a, Z, la1, a2, 0, 0, tr, 0) +/* TRANSLATE AND TEST */ + C(0xdd00, TRT, SS_a, Z, la1, a2, 0, 0, trt, 0) +/* TRANSLATE AND TEST REVERSE */ + C(0xd000, TRTR, SS_a, ETF3, la1, a2, 0, 0, trtr, 0) +/* TRANSLATE EXTENDED */ + C(0xb2a5, TRE, RRE, Z, 0, r2, r1_P, 0, tre, 0) + +/* TRANSLATE ONE TO ONE */ + C(0xb993, TROO, RRF_c, E2, 0, 0, 0, 0, trXX, 0) +/* TRANSLATE ONE TO TWO */ + C(0xb992, TROT, RRF_c, E2, 0, 0, 0, 0, trXX, 0) +/* TRANSLATE TWO TO ONE */ + C(0xb991, TRTO, RRF_c, E2, 0, 0, 0, 0, trXX, 0) +/* TRANSLATE TWO TO TWO */ + C(0xb990, TRTT, RRF_c, E2, 0, 0, 0, 0, trXX, 0) + +/* UNPACK */ + /* Really format SS_b, but we pack both lengths into one argument + for the helper call, so we might as well leave one 8-bit field.
*/ + C(0xf300, UNPK, SS_a, Z, la1, a2, 0, 0, unpk, 0) +/* UNPACK ASCII */ + C(0xea00, UNPKA, SS_a, E2, la1, a2, 0, 0, unpka, 0) +/* UNPACK UNICODE */ + C(0xe200, UNPKU, SS_a, E2, la1, a2, 0, 0, unpku, 0) + +/* MSA Instructions */ + D(0xb91e, KMAC, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMAC) + D(0xb928, PCKMO, RRE, MSA3, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_PCKMO) + D(0xb92a, KMF, RRE, MSA4, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMF) + D(0xb92b, KMO, RRE, MSA4, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMO) + D(0xb92c, PCC, RRE, MSA4, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_PCC) + D(0xb92d, KMCTR, RRF_b, MSA4, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMCTR) + D(0xb92e, KM, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KM) + D(0xb92f, KMC, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMC) + D(0xb93c, PPNO, RRE, MSA5, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_PPNO) + D(0xb93e, KIMD, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KIMD) + D(0xb93f, KLMD, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KLMD) + +/* === Vector Support Instructions === */ + +/* VECTOR GATHER ELEMENT */ + E(0xe713, VGEF, VRV, V, la2, 0, 0, 0, vge, 0, ES_32, IF_VEC) + E(0xe712, VGEG, VRV, V, la2, 0, 0, 0, vge, 0, ES_64, IF_VEC) +/* VECTOR GENERATE BYTE MASK */ + F(0xe744, VGBM, VRI_a, V, 0, 0, 0, 0, vgbm, 0, IF_VEC) +/* VECTOR GENERATE MASK */ + F(0xe746, VGM, VRI_b, V, 0, 0, 0, 0, vgm, 0, IF_VEC) +/* VECTOR LOAD */ + F(0xe706, VL, VRX, V, la2, 0, 0, 0, vl, 0, IF_VEC) + F(0xe756, VLR, VRR_a, V, 0, 0, 0, 0, vlr, 0, IF_VEC) +/* VECTOR LOAD AND REPLICATE */ + F(0xe705, VLREP, VRX, V, la2, 0, 0, 0, vlrep, 0, IF_VEC) +/* VECTOR LOAD ELEMENT */ + E(0xe700, VLEB, VRX, V, la2, 0, 0, 0, vle, 0, ES_8, IF_VEC) + E(0xe701, VLEH, VRX, V, la2, 0, 0, 0, vle, 0, ES_16, IF_VEC) + E(0xe703, VLEF, VRX, V, la2, 0, 0, 0, vle, 0, ES_32, IF_VEC) + E(0xe702, VLEG, VRX, V, la2, 0, 0, 0, vle, 0, ES_64, IF_VEC) +/* VECTOR LOAD ELEMENT IMMEDIATE */ + E(0xe740, VLEIB, VRI_a, V, 0, 0, 0, 0, vlei, 0, ES_8, IF_VEC) + E(0xe741, VLEIH, VRI_a, V, 0, 0, 0, 0, vlei, 0, ES_16, IF_VEC) + E(0xe743, VLEIF, VRI_a, V, 0, 0, 0, 0, vlei, 0, ES_32, IF_VEC) + E(0xe742, VLEIG, VRI_a, V, 0, 0, 0, 0, vlei, 0, ES_64, IF_VEC) +/* VECTOR LOAD GR FROM VR ELEMENT */ + F(0xe721, VLGV, VRS_c, V, la2, 0, r1, 0, vlgv, 0, IF_VEC) +/* VECTOR LOAD LOGICAL ELEMENT AND ZERO */ + F(0xe704, VLLEZ, VRX, V, la2, 0, 0, 0, vllez, 0, IF_VEC) +/* VECTOR LOAD MULTIPLE */ + F(0xe736, VLM, VRS_a, V, la2, 0, 0, 0, vlm, 0, IF_VEC) +/* VECTOR LOAD TO BLOCK BOUNDARY */ + F(0xe707, VLBB, VRX, V, la2, 0, 0, 0, vlbb, 0, IF_VEC) +/* VECTOR LOAD VR ELEMENT FROM GR */ + F(0xe722, VLVG, VRS_b, V, la2, r3, 0, 0, vlvg, 0, IF_VEC) +/* VECTOR LOAD VR FROM GRS DISJOINT */ + F(0xe762, VLVGP, VRR_f, V, r2, r3, 0, 0, vlvgp, 0, IF_VEC) +/* VECTOR LOAD WITH LENGTH */ + F(0xe737, VLL, VRS_b, V, la2, r3_32u, 0, 0, vll, 0, IF_VEC) +/* VECTOR MERGE HIGH */ + F(0xe761, VMRH, VRR_c, V, 0, 0, 0, 0, vmr, 0, IF_VEC) +/* VECTOR MERGE LOW */ + F(0xe760, VMRL, VRR_c, V, 0, 0, 0, 0, vmr, 0, IF_VEC) +/* VECTOR PACK */ + F(0xe794, VPK, VRR_c, V, 0, 0, 0, 0, vpk, 0, IF_VEC) +/* VECTOR PACK SATURATE */ + F(0xe797, VPKS, VRR_b, V, 0, 0, 0, 0, vpk, 0, IF_VEC) +/* VECTOR PACK LOGICAL SATURATE */ + F(0xe795, VPKLS, VRR_b, V, 0, 0, 0, 0, vpk, 0, IF_VEC) + F(0xe78c, VPERM, VRR_e, V, 0, 0, 0, 0, vperm, 0, IF_VEC) +/* VECTOR PERMUTE DOUBLEWORD IMMEDIATE */ + F(0xe784, VPDI, VRR_c, V, 0, 0, 0, 0, vpdi, 0, IF_VEC) +/* VECTOR REPLICATE */ + F(0xe74d, VREP, VRI_c, V, 0, 0, 0, 0, vrep, 0, IF_VEC) +/* VECTOR REPLICATE IMMEDIATE */ + F(0xe745, VREPI, VRI_a, V, 0, 0, 0, 
0, vrepi, 0, IF_VEC) +/* VECTOR SCATTER ELEMENT */ + E(0xe71b, VSCEF, VRV, V, la2, 0, 0, 0, vsce, 0, ES_32, IF_VEC) + E(0xe71a, VSCEG, VRV, V, la2, 0, 0, 0, vsce, 0, ES_64, IF_VEC) +/* VECTOR SELECT */ + F(0xe78d, VSEL, VRR_e, V, 0, 0, 0, 0, vsel, 0, IF_VEC) +/* VECTOR SIGN EXTEND TO DOUBLEWORD */ + F(0xe75f, VSEG, VRR_a, V, 0, 0, 0, 0, vseg, 0, IF_VEC) +/* VECTOR STORE */ + F(0xe70e, VST, VRX, V, la2, 0, 0, 0, vst, 0, IF_VEC) +/* VECTOR STORE ELEMENT */ + E(0xe708, VSTEB, VRX, V, la2, 0, 0, 0, vste, 0, ES_8, IF_VEC) + E(0xe709, VSTEH, VRX, V, la2, 0, 0, 0, vste, 0, ES_16, IF_VEC) + E(0xe70b, VSTEF, VRX, V, la2, 0, 0, 0, vste, 0, ES_32, IF_VEC) + E(0xe70a, VSTEG, VRX, V, la2, 0, 0, 0, vste, 0, ES_64, IF_VEC) +/* VECTOR STORE MULTIPLE */ + F(0xe73e, VSTM, VRS_a, V, la2, 0, 0, 0, vstm, 0, IF_VEC) +/* VECTOR STORE WITH LENGTH */ + F(0xe73f, VSTL, VRS_b, V, la2, r3_32u, 0, 0, vstl, 0, IF_VEC) +/* VECTOR UNPACK HIGH */ + F(0xe7d7, VUPH, VRR_a, V, 0, 0, 0, 0, vup, 0, IF_VEC) +/* VECTOR UNPACK LOGICAL HIGH */ + F(0xe7d5, VUPLH, VRR_a, V, 0, 0, 0, 0, vup, 0, IF_VEC) +/* VECTOR UNPACK LOW */ + F(0xe7d6, VUPL, VRR_a, V, 0, 0, 0, 0, vup, 0, IF_VEC) +/* VECTOR UNPACK LOGICAL LOW */ + F(0xe7d4, VUPLL, VRR_a, V, 0, 0, 0, 0, vup, 0, IF_VEC) + +/* === Vector Integer Instructions === */ + +/* VECTOR ADD */ + F(0xe7f3, VA, VRR_c, V, 0, 0, 0, 0, va, 0, IF_VEC) +/* VECTOR ADD COMPUTE CARRY */ + F(0xe7f1, VACC, VRR_c, V, 0, 0, 0, 0, vacc, 0, IF_VEC) +/* VECTOR ADD WITH CARRY */ + F(0xe7bb, VAC, VRR_d, V, 0, 0, 0, 0, vac, 0, IF_VEC) +/* VECTOR ADD WITH CARRY COMPUTE CARRY */ + F(0xe7b9, VACCC, VRR_d, V, 0, 0, 0, 0, vaccc, 0, IF_VEC) +/* VECTOR AND */ + F(0xe768, VN, VRR_c, V, 0, 0, 0, 0, vn, 0, IF_VEC) +/* VECTOR AND WITH COMPLEMENT */ + F(0xe769, VNC, VRR_c, V, 0, 0, 0, 0, vnc, 0, IF_VEC) +/* VECTOR AVERAGE */ + F(0xe7f2, VAVG, VRR_c, V, 0, 0, 0, 0, vavg, 0, IF_VEC) +/* VECTOR AVERAGE LOGICAL */ + F(0xe7f0, VAVGL, VRR_c, V, 0, 0, 0, 0, vavgl, 0, IF_VEC) +/* VECTOR CHECKSUM */ + F(0xe766, VCKSM, VRR_c, V, 0, 0, 0, 0, vcksm, 0, IF_VEC) +/* VECTOR ELEMENT COMPARE */ + F(0xe7db, VEC, VRR_a, V, 0, 0, 0, 0, vec, cmps64, IF_VEC) +/* VECTOR ELEMENT COMPARE LOGICAL */ + F(0xe7d9, VECL, VRR_a, V, 0, 0, 0, 0, vec, cmpu64, IF_VEC) +/* VECTOR COMPARE EQUAL */ + E(0xe7f8, VCEQ, VRR_b, V, 0, 0, 0, 0, vc, 0, TCG_COND_EQ, IF_VEC) +/* VECTOR COMPARE HIGH */ + E(0xe7fb, VCH, VRR_b, V, 0, 0, 0, 0, vc, 0, TCG_COND_GT, IF_VEC) +/* VECTOR COMPARE HIGH LOGICAL */ + E(0xe7f9, VCHL, VRR_b, V, 0, 0, 0, 0, vc, 0, TCG_COND_GTU, IF_VEC) +/* VECTOR COUNT LEADING ZEROS */ + F(0xe753, VCLZ, VRR_a, V, 0, 0, 0, 0, vclz, 0, IF_VEC) +/* VECTOR COUNT TRAILING ZEROS */ + F(0xe752, VCTZ, VRR_a, V, 0, 0, 0, 0, vctz, 0, IF_VEC) +/* VECTOR EXCLUSIVE OR */ + F(0xe76d, VX, VRR_c, V, 0, 0, 0, 0, vx, 0, IF_VEC) +/* VECTOR GALOIS FIELD MULTIPLY SUM */ + F(0xe7b4, VGFM, VRR_c, V, 0, 0, 0, 0, vgfm, 0, IF_VEC) +/* VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE */ + F(0xe7bc, VGFMA, VRR_d, V, 0, 0, 0, 0, vgfma, 0, IF_VEC) +/* VECTOR LOAD COMPLEMENT */ + F(0xe7de, VLC, VRR_a, V, 0, 0, 0, 0, vlc, 0, IF_VEC) +/* VECTOR LOAD POSITIVE */ + F(0xe7df, VLP, VRR_a, V, 0, 0, 0, 0, vlp, 0, IF_VEC) +/* VECTOR MAXIMUM */ + F(0xe7ff, VMX, VRR_c, V, 0, 0, 0, 0, vmx, 0, IF_VEC) +/* VECTOR MAXIMUM LOGICAL */ + F(0xe7fd, VMXL, VRR_c, V, 0, 0, 0, 0, vmx, 0, IF_VEC) +/* VECTOR MINIMUM */ + F(0xe7fe, VMN, VRR_c, V, 0, 0, 0, 0, vmx, 0, IF_VEC) +/* VECTOR MINIMUM LOGICAL */ + F(0xe7fc, VMNL, VRR_c, V, 0, 0, 0, 0, vmx, 0, IF_VEC) +/* VECTOR MULTIPLY AND ADD LOW */ + F(0xe7aa, VMAL, 
VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC) +/* VECTOR MULTIPLY AND ADD HIGH */ + F(0xe7ab, VMAH, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC) +/* VECTOR MULTIPLY AND ADD LOGICAL HIGH */ + F(0xe7a9, VMALH, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC) +/* VECTOR MULTIPLY AND ADD EVEN */ + F(0xe7ae, VMAE, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC) +/* VECTOR MULTIPLY AND ADD LOGICAL EVEN */ + F(0xe7ac, VMALE, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC) +/* VECTOR MULTIPLY AND ADD ODD */ + F(0xe7af, VMAO, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC) +/* VECTOR MULTIPLY AND ADD LOGICAL ODD */ + F(0xe7ad, VMALO, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC) +/* VECTOR MULTIPLY HIGH */ + F(0xe7a3, VMH, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC) +/* VECTOR MULTIPLY LOGICAL HIGH */ + F(0xe7a1, VMLH, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC) +/* VECTOR MULTIPLY LOW */ + F(0xe7a2, VML, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC) +/* VECTOR MULTIPLY EVEN */ + F(0xe7a6, VME, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC) +/* VECTOR MULTIPLY LOGICAL EVEN */ + F(0xe7a4, VMLE, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC) +/* VECTOR MULTIPLY ODD */ + F(0xe7a7, VMO, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC) +/* VECTOR MULTIPLY LOGICAL ODD */ + F(0xe7a5, VMLO, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC) +/* VECTOR NAND */ + F(0xe76e, VNN, VRR_c, VE, 0, 0, 0, 0, vnn, 0, IF_VEC) +/* VECTOR NOR */ + F(0xe76b, VNO, VRR_c, V, 0, 0, 0, 0, vno, 0, IF_VEC) +/* VECTOR NOT EXCLUSIVE OR */ + F(0xe76c, VNX, VRR_c, VE, 0, 0, 0, 0, vnx, 0, IF_VEC) +/* VECTOR OR */ + F(0xe76a, VO, VRR_c, V, 0, 0, 0, 0, vo, 0, IF_VEC) +/* VECTOR OR WITH COMPLEMENT */ + F(0xe76f, VOC, VRR_c, VE, 0, 0, 0, 0, voc, 0, IF_VEC) +/* VECTOR POPULATION COUNT */ + F(0xe750, VPOPCT, VRR_a, V, 0, 0, 0, 0, vpopct, 0, IF_VEC) +/* VECTOR ELEMENT ROTATE LEFT LOGICAL */ + F(0xe773, VERLLV, VRR_c, V, 0, 0, 0, 0, verllv, 0, IF_VEC) + F(0xe733, VERLL, VRS_a, V, la2, 0, 0, 0, verll, 0, IF_VEC) +/* VECTOR ELEMENT ROTATE AND INSERT UNDER MASK */ + F(0xe772, VERIM, VRI_d, V, 0, 0, 0, 0, verim, 0, IF_VEC) +/* VECTOR ELEMENT SHIFT LEFT */ + F(0xe770, VESLV, VRR_c, V, 0, 0, 0, 0, vesv, 0, IF_VEC) + F(0xe730, VESL, VRS_a, V, la2, 0, 0, 0, ves, 0, IF_VEC) +/* VECTOR ELEMENT SHIFT RIGHT ARITHMETIC */ + F(0xe77a, VESRAV, VRR_c, V, 0, 0, 0, 0, vesv, 0, IF_VEC) + F(0xe73a, VESRA, VRS_a, V, la2, 0, 0, 0, ves, 0, IF_VEC) +/* VECTOR ELEMENT SHIFT RIGHT LOGICAL */ + F(0xe778, VESRLV, VRR_c, V, 0, 0, 0, 0, vesv, 0, IF_VEC) + F(0xe738, VESRL, VRS_a, V, la2, 0, 0, 0, ves, 0, IF_VEC) +/* VECTOR SHIFT LEFT */ + F(0xe774, VSL, VRR_c, V, 0, 0, 0, 0, vsl, 0, IF_VEC) +/* VECTOR SHIFT LEFT BY BYTE */ + F(0xe775, VSLB, VRR_c, V, 0, 0, 0, 0, vsl, 0, IF_VEC) +/* VECTOR SHIFT LEFT DOUBLE BY BYTE */ + F(0xe777, VSLDB, VRI_d, V, 0, 0, 0, 0, vsldb, 0, IF_VEC) +/* VECTOR SHIFT RIGHT ARITHMETIC */ + F(0xe77e, VSRA, VRR_c, V, 0, 0, 0, 0, vsra, 0, IF_VEC) +/* VECTOR SHIFT RIGHT ARITHMETIC BY BYTE */ + F(0xe77f, VSRAB, VRR_c, V, 0, 0, 0, 0, vsra, 0, IF_VEC) +/* VECTOR SHIFT RIGHT LOGICAL */ + F(0xe77c, VSRL, VRR_c, V, 0, 0, 0, 0, vsrl, 0, IF_VEC) +/* VECTOR SHIFT RIGHT LOGICAL BY BYTE */ + F(0xe77d, VSRLB, VRR_c, V, 0, 0, 0, 0, vsrl, 0, IF_VEC) +/* VECTOR SUBTRACT */ + F(0xe7f7, VS, VRR_c, V, 0, 0, 0, 0, vs, 0, IF_VEC) +/* VECTOR SUBTRACT COMPUTE BORROW INDICATION */ + F(0xe7f5, VSCBI, VRR_c, V, 0, 0, 0, 0, vscbi, 0, IF_VEC) +/* VECTOR SUBTRACT WITH BORROW INDICATION */ + F(0xe7bf, VSBI, VRR_d, V, 0, 0, 0, 0, vsbi, 0, IF_VEC) +/* VECTOR SUBTRACT WITH BORROW COMPUTE BORROW INDICATION */ + F(0xe7bd, VSBCBI, VRR_d, V, 0, 0, 0, 0, vsbcbi, 0, IF_VEC) +/* VECTOR SUM ACROSS DOUBLEWORD */ + 
F(0xe765, VSUMG, VRR_c, V, 0, 0, 0, 0, vsumg, 0, IF_VEC) +/* VECTOR SUM ACROSS QUADWORD */ + F(0xe767, VSUMQ, VRR_c, V, 0, 0, 0, 0, vsumq, 0, IF_VEC) +/* VECTOR SUM ACROSS WORD */ + F(0xe764, VSUM, VRR_c, V, 0, 0, 0, 0, vsum, 0, IF_VEC) +/* VECTOR TEST UNDER MASK */ + F(0xe7d8, VTM, VRR_a, V, 0, 0, 0, 0, vtm, 0, IF_VEC) + +/* === Vector String Instructions === */ + +/* VECTOR FIND ANY ELEMENT EQUAL */ + F(0xe782, VFAE, VRR_b, V, 0, 0, 0, 0, vfae, 0, IF_VEC) +/* VECTOR FIND ELEMENT EQUAL */ + F(0xe780, VFEE, VRR_b, V, 0, 0, 0, 0, vfee, 0, IF_VEC) +/* VECTOR FIND ELEMENT NOT EQUAL */ + F(0xe781, VFENE, VRR_b, V, 0, 0, 0, 0, vfene, 0, IF_VEC) +/* VECTOR ISOLATE STRING */ + F(0xe75c, VISTR, VRR_a, V, 0, 0, 0, 0, vistr, 0, IF_VEC) +/* VECTOR STRING RANGE COMPARE */ + F(0xe78a, VSTRC, VRR_d, V, 0, 0, 0, 0, vstrc, 0, IF_VEC) + +/* === Vector Floating-Point Instructions === */ + +/* VECTOR FP ADD */ + F(0xe7e3, VFA, VRR_c, V, 0, 0, 0, 0, vfa, 0, IF_VEC) +/* VECTOR FP COMPARE SCALAR */ + F(0xe7cb, WFC, VRR_a, V, 0, 0, 0, 0, wfc, 0, IF_VEC) +/* VECTOR FP COMPARE AND SIGNAL SCALAR */ + F(0xe7ca, WFK, VRR_a, V, 0, 0, 0, 0, wfc, 0, IF_VEC) +/* VECTOR FP COMPARE EQUAL */ + F(0xe7e8, VFCE, VRR_c, V, 0, 0, 0, 0, vfc, 0, IF_VEC) +/* VECTOR FP COMPARE HIGH */ + F(0xe7eb, VFCH, VRR_c, V, 0, 0, 0, 0, vfc, 0, IF_VEC) +/* VECTOR FP COMPARE HIGH OR EQUAL */ + F(0xe7ea, VFCHE, VRR_c, V, 0, 0, 0, 0, vfc, 0, IF_VEC) +/* VECTOR FP CONVERT FROM FIXED 64-BIT */ + F(0xe7c3, VCDG, VRR_a, V, 0, 0, 0, 0, vcdg, 0, IF_VEC) +/* VECTOR FP CONVERT FROM LOGICAL 64-BIT */ + F(0xe7c1, VCDLG, VRR_a, V, 0, 0, 0, 0, vcdg, 0, IF_VEC) +/* VECTOR FP CONVERT TO FIXED 64-BIT */ + F(0xe7c2, VCGD, VRR_a, V, 0, 0, 0, 0, vcdg, 0, IF_VEC) +/* VECTOR FP CONVERT TO LOGICAL 64-BIT */ + F(0xe7c0, VCLGD, VRR_a, V, 0, 0, 0, 0, vcdg, 0, IF_VEC) +/* VECTOR FP DIVIDE */ + F(0xe7e5, VFD, VRR_c, V, 0, 0, 0, 0, vfa, 0, IF_VEC) +/* VECTOR LOAD FP INTEGER */ + F(0xe7c7, VFI, VRR_a, V, 0, 0, 0, 0, vcdg, 0, IF_VEC) +/* VECTOR LOAD LENGTHENED */ + F(0xe7c4, VFLL, VRR_a, V, 0, 0, 0, 0, vfll, 0, IF_VEC) +/* VECTOR LOAD ROUNDED */ + F(0xe7c5, VFLR, VRR_a, V, 0, 0, 0, 0, vcdg, 0, IF_VEC) +/* VECTOR FP MULTIPLY */ + F(0xe7e7, VFM, VRR_c, V, 0, 0, 0, 0, vfa, 0, IF_VEC) +/* VECTOR FP MULTIPLY AND ADD */ + F(0xe78f, VFMA, VRR_e, V, 0, 0, 0, 0, vfma, 0, IF_VEC) +/* VECTOR FP MULTIPLY AND SUBTRACT */ + F(0xe78e, VFMS, VRR_e, V, 0, 0, 0, 0, vfma, 0, IF_VEC) +/* VECTOR FP PERFORM SIGN OPERATION */ + F(0xe7cc, VFPSO, VRR_a, V, 0, 0, 0, 0, vfpso, 0, IF_VEC) +/* VECTOR FP SQUARE ROOT */ + F(0xe7ce, VFSQ, VRR_a, V, 0, 0, 0, 0, vfsq, 0, IF_VEC) +/* VECTOR FP SUBTRACT */ + F(0xe7e2, VFS, VRR_c, V, 0, 0, 0, 0, vfa, 0, IF_VEC) +/* VECTOR FP TEST DATA CLASS IMMEDIATE */ + F(0xe74a, VFTCI, VRI_e, V, 0, 0, 0, 0, vftci, 0, IF_VEC) + +#ifndef CONFIG_USER_ONLY +/* COMPARE AND SWAP AND PURGE */ + E(0xb250, CSP, RRE, Z, r1_32u, ra2, r1_P, 0, csp, 0, MO_TEUL, IF_PRIV) + E(0xb98a, CSPG, RRE, DAT_ENH, r1_o, ra2, r1_P, 0, csp, 0, MO_TEQ, IF_PRIV) +/* DIAGNOSE (KVM hypercall) */ + F(0x8300, DIAG, RSI, Z, 0, 0, 0, 0, diag, 0, IF_PRIV) +/* INSERT STORAGE KEY EXTENDED */ + F(0xb229, ISKE, RRE, Z, 0, r2_o, new, r1_8, iske, 0, IF_PRIV) +/* INVALIDATE DAT TABLE ENTRY */ + F(0xb98e, IDTE, RRF_b, Z, r1_o, r2_o, 0, 0, idte, 0, IF_PRIV) +/* INVALIDATE PAGE TABLE ENTRY */ + F(0xb221, IPTE, RRF_a, Z, r1_o, r2_o, 0, 0, ipte, 0, IF_PRIV) +/* LOAD CONTROL */ + F(0xb700, LCTL, RS_a, Z, 0, a2, 0, 0, lctl, 0, IF_PRIV) + F(0xeb2f, LCTLG, RSY_a, Z, 0, a2, 0, 0, lctlg, 0, IF_PRIV) +/* LOAD PROGRAM PARAMETER */ + 
F(0xb280, LPP, S, LPP, 0, m2_64, 0, 0, lpp, 0, IF_PRIV) +/* LOAD PSW */ + F(0x8200, LPSW, S, Z, 0, a2, 0, 0, lpsw, 0, IF_PRIV) +/* LOAD PSW EXTENDED */ + F(0xb2b2, LPSWE, S, Z, 0, a2, 0, 0, lpswe, 0, IF_PRIV) +/* LOAD REAL ADDRESS */ + F(0xb100, LRA, RX_a, Z, 0, a2, r1, 0, lra, 0, IF_PRIV) + F(0xe313, LRAY, RXY_a, LD, 0, a2, r1, 0, lra, 0, IF_PRIV) + F(0xe303, LRAG, RXY_a, Z, 0, a2, r1, 0, lra, 0, IF_PRIV) +/* LOAD USING REAL ADDRESS */ + E(0xb24b, LURA, RRE, Z, 0, 0, new, r1_32, lura, 0, MO_TEUL, IF_PRIV) + E(0xb905, LURAG, RRE, Z, 0, 0, r1, 0, lura, 0, MO_TEQ, IF_PRIV) +/* MOVE TO PRIMARY */ + F(0xda00, MVCP, SS_d, Z, la1, a2, 0, 0, mvcp, 0, IF_PRIV) +/* MOVE TO SECONDARY */ + F(0xdb00, MVCS, SS_d, Z, la1, a2, 0, 0, mvcs, 0, IF_PRIV) +/* PURGE TLB */ + F(0xb20d, PTLB, S, Z, 0, 0, 0, 0, ptlb, 0, IF_PRIV) +/* RESET REFERENCE BIT EXTENDED */ + F(0xb22a, RRBE, RRE, Z, 0, r2_o, 0, 0, rrbe, 0, IF_PRIV) +/* SERVICE CALL LOGICAL PROCESSOR (PV hypercall) */ + F(0xb220, SERVC, RRE, Z, r1_o, r2_o, 0, 0, servc, 0, IF_PRIV) +/* SET ADDRESS SPACE CONTROL FAST */ + F(0xb279, SACF, S, Z, 0, a2, 0, 0, sacf, 0, IF_PRIV) +/* SET CLOCK */ + F(0xb204, SCK, S, Z, la2, 0, 0, 0, sck, 0, IF_PRIV) +/* SET CLOCK COMPARATOR */ + F(0xb206, SCKC, S, Z, 0, m2_64a, 0, 0, sckc, 0, IF_PRIV) +/* SET CLOCK PROGRAMMABLE FIELD */ + F(0x0107, SCKPF, E, Z, 0, 0, 0, 0, sckpf, 0, IF_PRIV) +/* SET CPU TIMER */ + F(0xb208, SPT, S, Z, 0, m2_64a, 0, 0, spt, 0, IF_PRIV) +/* SET PREFIX */ + F(0xb210, SPX, S, Z, 0, m2_32ua, 0, 0, spx, 0, IF_PRIV) +/* SET PSW KEY FROM ADDRESS */ + F(0xb20a, SPKA, S, Z, 0, a2, 0, 0, spka, 0, IF_PRIV) +/* SET STORAGE KEY EXTENDED */ + F(0xb22b, SSKE, RRF_c, Z, r1_o, r2_o, 0, 0, sske, 0, IF_PRIV) +/* SET SYSTEM MASK */ + F(0x8000, SSM, S, Z, 0, m2_8u, 0, 0, ssm, 0, IF_PRIV) +/* SIGNAL PROCESSOR */ + F(0xae00, SIGP, RS_a, Z, 0, a2, 0, 0, sigp, 0, IF_PRIV) +/* STORE CLOCK COMPARATOR */ + F(0xb207, STCKC, S, Z, la2, 0, new, m1_64a, stckc, 0, IF_PRIV) +/* STORE CONTROL */ + F(0xb600, STCTL, RS_a, Z, 0, a2, 0, 0, stctl, 0, IF_PRIV) + F(0xeb25, STCTG, RSY_a, Z, 0, a2, 0, 0, stctg, 0, IF_PRIV) +/* STORE CPU ADDRESS */ + F(0xb212, STAP, S, Z, la2, 0, new, m1_16a, stap, 0, IF_PRIV) +/* STORE CPU ID */ + F(0xb202, STIDP, S, Z, la2, 0, new, m1_64a, stidp, 0, IF_PRIV) +/* STORE CPU TIMER */ + F(0xb209, STPT, S, Z, la2, 0, new, m1_64a, stpt, 0, IF_PRIV) +/* STORE FACILITY LIST */ + F(0xb2b1, STFL, S, Z, 0, 0, 0, 0, stfl, 0, IF_PRIV) +/* STORE PREFIX */ + F(0xb211, STPX, S, Z, la2, 0, new, m1_32a, stpx, 0, IF_PRIV) +/* STORE SYSTEM INFORMATION */ + F(0xb27d, STSI, S, Z, 0, a2, 0, 0, stsi, 0, IF_PRIV) +/* STORE THEN AND SYSTEM MASK */ + F(0xac00, STNSM, SI, Z, la1, 0, 0, 0, stnosm, 0, IF_PRIV) +/* STORE THEN OR SYSTEM MASK */ + F(0xad00, STOSM, SI, Z, la1, 0, 0, 0, stnosm, 0, IF_PRIV) +/* STORE USING REAL ADDRESS */ + E(0xb246, STURA, RRE, Z, r1_o, 0, 0, 0, stura, 0, MO_TEUL, IF_PRIV) + E(0xb925, STURG, RRE, Z, r1_o, 0, 0, 0, stura, 0, MO_TEQ, IF_PRIV) +/* TEST BLOCK */ + F(0xb22c, TB, RRE, Z, 0, r2_o, 0, 0, testblock, 0, IF_PRIV) +/* TEST PROTECTION */ + C(0xe501, TPROT, SSE, Z, la1, a2, 0, 0, tprot, 0) + +/* CCW I/O Instructions */ + F(0xb276, XSCH, S, Z, 0, 0, 0, 0, xsch, 0, IF_PRIV) + F(0xb230, CSCH, S, Z, 0, 0, 0, 0, csch, 0, IF_PRIV) + F(0xb231, HSCH, S, Z, 0, 0, 0, 0, hsch, 0, IF_PRIV) + F(0xb232, MSCH, S, Z, 0, insn, 0, 0, msch, 0, IF_PRIV) + F(0xb23b, RCHP, S, Z, 0, 0, 0, 0, rchp, 0, IF_PRIV) + F(0xb238, RSCH, S, Z, 0, 0, 0, 0, rsch, 0, IF_PRIV) + F(0xb237, SAL, S, Z, 0, 0, 0, 0, sal, 0, IF_PRIV) + F(0xb23c, 
SCHM, S, Z, 0, insn, 0, 0, schm, 0, IF_PRIV) + F(0xb274, SIGA, S, Z, 0, 0, 0, 0, siga, 0, IF_PRIV) + F(0xb23a, STCPS, S, Z, 0, 0, 0, 0, stcps, 0, IF_PRIV) + F(0xb233, SSCH, S, Z, 0, insn, 0, 0, ssch, 0, IF_PRIV) + F(0xb239, STCRW, S, Z, 0, insn, 0, 0, stcrw, 0, IF_PRIV) + F(0xb234, STSCH, S, Z, 0, insn, 0, 0, stsch, 0, IF_PRIV) + F(0xb236, TPI , S, Z, la2, 0, 0, 0, tpi, 0, IF_PRIV) + F(0xb235, TSCH, S, Z, 0, insn, 0, 0, tsch, 0, IF_PRIV) + /* ??? Not listed in PoO ninth edition, but there's a linux driver that + uses it: "A CHSC subchannel is usually present on LPAR only." */ + F(0xb25f, CHSC, RRE, Z, 0, insn, 0, 0, chsc, 0, IF_PRIV) + +/* zPCI Instructions */ + /* None of these instructions are documented in the PoP, so this is all + based upon target/s390x/kvm.c and Linux code and likely incomplete */ + F(0xebd0, PCISTB, RSY_a, PCI, la2, 0, 0, 0, pcistb, 0, IF_PRIV) + F(0xebd1, SIC, RSY_a, AIS, r1, r3, 0, 0, sic, 0, IF_PRIV) + F(0xb9a0, CLP, RRF_c, PCI, 0, 0, 0, 0, clp, 0, IF_PRIV) + F(0xb9d0, PCISTG, RRE, PCI, 0, 0, 0, 0, pcistg, 0, IF_PRIV) + F(0xb9d2, PCILG, RRE, PCI, 0, 0, 0, 0, pcilg, 0, IF_PRIV) + F(0xb9d3, RPCIT, RRE, PCI, 0, 0, 0, 0, rpcit, 0, IF_PRIV) + F(0xe3d0, MPCIFC, RXY_a, PCI, la2, 0, 0, 0, mpcifc, 0, IF_PRIV) + F(0xe3d4, STPCIFC, RXY_a, PCI, la2, 0, 0, 0, stpcifc, 0, IF_PRIV) + +#endif /* CONFIG_USER_ONLY */ diff --git a/qemu/target/s390x/insn-format.def b/qemu/target/s390x/insn-format.def new file mode 100644 index 00000000..6253edbd --- /dev/null +++ b/qemu/target/s390x/insn-format.def @@ -0,0 +1,81 @@ +/* Description of s390 insn formats. */ +/* NAME F1, F2... */ +F0(E) +F1(I, I(1, 8, 8)) +F2(RI_a, R(1, 8), I(2,16,16)) +F2(RI_b, R(1, 8), I(2,16,16)) +F2(RI_c, M(1, 8), I(2,16,16)) +F3(RIE_a, R(1, 8), I(2,16,16), M(3,32)) +F4(RIE_b, R(1, 8), R(2,12), M(3,32), I(4,16,16)) +F4(RIE_c, R(1, 8), I(2,32, 8), M(3,12), I(4,16,16)) +F3(RIE_d, R(1, 8), I(2,16,16), R(3,12)) +F3(RIE_e, R(1, 8), I(2,16,16), R(3,12)) +F5(RIE_f, R(1, 8), R(2,12), I(3,16,8), I(4,24,8), I(5,32,8)) +F3(RIE_g, R(1, 8), I(2,16,16), M(3,12)) +F2(RIL_a, R(1, 8), I(2,16,32)) +F2(RIL_b, R(1, 8), I(2,16,32)) +F2(RIL_c, M(1, 8), I(2,16,32)) +F4(RIS, R(1, 8), I(2,32, 8), M(3,12), BD(4,16,20)) +/* ??? The PoO does not call out subtypes _a and _b for RR, as it does + for e.g. RX. Our checking requires this for e.g. BCR. */ +F2(RR_a, R(1, 8), R(2,12)) +F2(RR_b, M(1, 8), R(2,12)) +F2(RRE, R(1,24), R(2,28)) +F3(RRD, R(1,16), R(2,28), R(3,24)) +F4(RRF_a, R(1,24), R(2,28), R(3,16), M(4,20)) +F4(RRF_b, R(1,24), R(2,28), R(3,16), M(4,20)) +F4(RRF_c, R(1,24), R(2,28), M(3,16), M(4,20)) +F4(RRF_d, R(1,24), R(2,28), M(3,16), M(4,20)) +F4(RRF_e, R(1,24), R(2,28), M(3,16), M(4,20)) +F4(RRS, R(1, 8), R(2,12), M(3,32), BD(4,16,20)) +F3(RS_a, R(1, 8), BD(2,16,20), R(3,12)) +F3(RS_b, R(1, 8), BD(2,16,20), M(3,12)) +F3(RSI, R(1, 8), I(2,16,16), R(3,12)) +F2(RSL, L(1, 8, 4), BD(1,16,20)) +F3(RSY_a, R(1, 8), BDL(2), R(3,12)) +F3(RSY_b, R(1, 8), BDL(2), M(3,12)) +F2(RX_a, R(1, 8), BXD(2)) +F2(RX_b, M(1, 8), BXD(2)) +F3(RXE, R(1, 8), BXD(2), M(3,32)) +F3(RXF, R(1,32), BXD(2), R(3, 8)) +F2(RXY_a, R(1, 8), BXDL(2)) +F2(RXY_b, M(1, 8), BXDL(2)) +F1(S, BD(2,16,20)) +F2(SI, BD(1,16,20), I(2,8,8)) +F2(SIL, BD(1,16,20), I(2,32,16)) +F2(SIY, BDL(1), I(2, 8, 8)) +F3(SS_a, L(1, 8, 8), BD(1,16,20), BD(2,32,36)) +F4(SS_b, L(1, 8, 4), BD(1,16,20), L(2,12,4), BD(2,32,36)) +F4(SS_c, L(1, 8, 4), BD(1,16,20), BD(2,32,36), I(3,12, 4)) +/* ??? Odd man out. 
The L1 field here is really a register, but the + easy way to compress the fields has R1 and B1 overlap. */ +F4(SS_d, L(1, 8, 4), BD(1,16,20), BD(2,32,36), R(3,12)) +F4(SS_e, R(1, 8), BD(2,16,20), R(3,12), BD(4,32,36)) +F3(SS_f, BD(1,16,20), L(2,8,8), BD(2,32,36)) +F2(SSE, BD(1,16,20), BD(2,32,36)) +F3(SSF, BD(1,16,20), BD(2,32,36), R(3,8)) +F3(VRI_a, V(1,8), I(2,16,16), M(3,32)) +F4(VRI_b, V(1,8), I(2,16,8), I(3,24,8), M(4,32)) +F4(VRI_c, V(1,8), V(3,12), I(2,16,16), M(4,32)) +F5(VRI_d, V(1,8), V(2,12), V(3,16), I(4,24,8), M(5,32)) +F5(VRI_e, V(1,8), V(2,12), I(3,16,12), M(5,28), M(4,32)) +F5(VRI_f, V(1,8), V(2,12), V(3,16), M(5,24), I(4,28,8)) +F5(VRI_g, V(1,8), V(2,12), I(4,16,8), M(5,24), I(3,28,8)) +F3(VRI_h, V(1,8), I(2,16,16), I(3,32,4)) +F4(VRI_i, V(1,8), R(2,12), M(4,24), I(3,28,8)) +F5(VRR_a, V(1,8), V(2,12), M(5,24), M(4,28), M(3,32)) +F5(VRR_b, V(1,8), V(2,12), V(3,16), M(5,24), M(4,32)) +F6(VRR_c, V(1,8), V(2,12), V(3,16), M(6,24), M(5,28), M(4,32)) +F6(VRR_d, V(1,8), V(2,12), V(3,16), M(5,20), M(6,24), V(4,32)) +F6(VRR_e, V(1,8), V(2,12), V(3,16), M(6,20), M(5,28), V(4,32)) +F3(VRR_f, V(1,8), R(2,12), R(3,16)) +F1(VRR_g, V(1,12)) +F3(VRR_h, V(1,12), V(2,16), M(3,24)) +F3(VRR_i, R(1,8), V(2,12), M(3,24)) +F4(VRS_a, V(1,8), V(3,12), BD(2,16,20), M(4,32)) +F4(VRS_b, V(1,8), R(3,12), BD(2,16,20), M(4,32)) +F4(VRS_c, R(1,8), V(3,12), BD(2,16,20), M(4,32)) +F3(VRS_d, R(3,12), BD(2,16,20), V(1,32)) +F4(VRV, V(1,8), V(2,12), BD(2,16,20), M(3,32)) +F3(VRX, V(1,8), BXD(2), M(3,32)) +F3(VSI, I(3,8,8), BD(2,16,20), V(1,32)) diff --git a/qemu/target/s390x/int_helper.c b/qemu/target/s390x/int_helper.c new file mode 100644 index 00000000..658507dd --- /dev/null +++ b/qemu/target/s390x/int_helper.c @@ -0,0 +1,148 @@ +/* + * S/390 integer helper routines + * + * Copyright (c) 2009 Ulrich Hecht + * Copyright (c) 2009 Alexander Graf + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "internal.h" +#include "tcg_s390x.h" +#include "exec/exec-all.h" +#include "qemu/host-utils.h" +#include "exec/helper-proto.h" + +/* #define DEBUG_HELPER */ +#ifdef DEBUG_HELPER +#define HELPER_LOG(x...) qemu_log(x) +#else +#define HELPER_LOG(x...) +#endif + +/* 64/32 -> 32 signed division */ +int64_t HELPER(divs32)(CPUS390XState *env, int64_t a, int64_t b64) +{ + int32_t ret, b = b64; + int64_t q; + + if (b == 0) { + tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC()); + } + + ret = q = a / b; + env->retxl = a % b; + + /* Catch non-representable quotient. 
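+      For example, a = 0x100000000 with b = 2 gives q = 0x80000000, which
+      does not fit in int32_t, so the check below raises the
+      fixed-point-divide exception instead of silently truncating.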
*/ + if (ret != q) { + tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC()); + } + + return ret; +} + +/* 64/32 -> 32 unsigned division */ +uint64_t HELPER(divu32)(CPUS390XState *env, uint64_t a, uint64_t b64) +{ + uint32_t ret, b = b64; + uint64_t q; + + if (b == 0) { + tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC()); + } + + ret = q = a / b; + env->retxl = a % b; + + /* Catch non-representable quotient. */ + if (ret != q) { + tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC()); + } + + return ret; +} + +/* 64/64 -> 64 signed division */ +int64_t HELPER(divs64)(CPUS390XState *env, int64_t a, int64_t b) +{ + /* Catch divide by zero, and non-representable quotient (MIN / -1). */ + if (b == 0 || (b == -1 && a == (1ll << 63))) { + tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC()); + } + env->retxl = a % b; + return a / b; +} + +/* 128 -> 64/64 unsigned division */ +uint64_t HELPER(divu64)(CPUS390XState *env, uint64_t ah, uint64_t al, + uint64_t b) +{ + uint64_t ret; + /* Signal divide by zero. */ + if (b == 0) { + tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC()); + } + if (ah == 0) { + /* 64 -> 64/64 case */ + env->retxl = al % b; + ret = al / b; + } else { + /* ??? Move i386 idivq helper to host-utils. */ +#ifdef CONFIG_INT128 + __uint128_t a = ((__uint128_t)ah << 64) | al; + __uint128_t q = a / b; + env->retxl = a % b; + ret = q; + if (ret != q) { + tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC()); + } +#else + /* 32-bit hosts would need special wrapper functionality - just abort if + we encounter such a case; it's very unlikely anyways. */ + cpu_abort(env_cpu(env), "128 -> 64/64 division not implemented\n"); +#endif + } + return ret; +} + +uint64_t HELPER(cvd)(int32_t reg) +{ + /* positive 0 */ + uint64_t dec = 0x0c; + int64_t bin = reg; + int shift; + + if (bin < 0) { + bin = -bin; + dec = 0x0d; + } + + for (shift = 4; (shift < 64) && bin; shift += 4) { + dec |= (bin % 10) << shift; + bin /= 10; + } + + return dec; +} + +uint64_t HELPER(popcnt)(uint64_t val) +{ + /* Note that we don't fold past bytes. */ + val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL); + val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL); + val = (val + (val >> 4)) & 0x0f0f0f0f0f0f0f0fULL; + return val; +} diff --git a/qemu/target/s390x/internal.h b/qemu/target/s390x/internal.h new file mode 100644 index 00000000..fc497589 --- /dev/null +++ b/qemu/target/s390x/internal.h @@ -0,0 +1,366 @@ +/* + * s390x internal definitions and helpers + * + * Copyright (c) 2009 Ulrich Hecht + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#ifndef S390X_INTERNAL_H +#define S390X_INTERNAL_H + +#include "cpu.h" + +#ifndef CONFIG_USER_ONLY +typedef struct LowCore { + /* prefix area: defined by architecture */ + uint32_t ccw1[2]; /* 0x000 */ + uint32_t ccw2[4]; /* 0x008 */ + uint8_t pad1[0x80 - 0x18]; /* 0x018 */ + uint32_t ext_params; /* 0x080 */ + uint16_t cpu_addr; /* 0x084 */ + uint16_t ext_int_code; /* 0x086 */ + uint16_t svc_ilen; /* 0x088 */ + uint16_t svc_code; /* 0x08a */ + uint16_t pgm_ilen; /* 0x08c */ + uint16_t pgm_code; /* 0x08e */ + uint32_t data_exc_code; /* 0x090 */ + uint16_t mon_class_num; /* 0x094 */ + uint16_t per_perc_atmid; /* 0x096 */ + uint64_t per_address; /* 0x098 */ + uint8_t exc_access_id; /* 0x0a0 */ + uint8_t per_access_id; /* 0x0a1 */ + uint8_t op_access_id; /* 0x0a2 */ + uint8_t ar_access_id; /* 0x0a3 */ + uint8_t pad2[0xA8 - 0xA4]; /* 0x0a4 */ + uint64_t trans_exc_code; /* 0x0a8 */ + uint64_t monitor_code; /* 0x0b0 */ + uint16_t subchannel_id; /* 0x0b8 */ + uint16_t subchannel_nr; /* 0x0ba */ + uint32_t io_int_parm; /* 0x0bc */ + uint32_t io_int_word; /* 0x0c0 */ + uint8_t pad3[0xc8 - 0xc4]; /* 0x0c4 */ + uint32_t stfl_fac_list; /* 0x0c8 */ + uint8_t pad4[0xe8 - 0xcc]; /* 0x0cc */ + uint64_t mcic; /* 0x0e8 */ + uint8_t pad5[0xf4 - 0xf0]; /* 0x0f0 */ + uint32_t external_damage_code; /* 0x0f4 */ + uint64_t failing_storage_address; /* 0x0f8 */ + uint8_t pad6[0x110 - 0x100]; /* 0x100 */ + uint64_t per_breaking_event_addr; /* 0x110 */ + uint8_t pad7[0x120 - 0x118]; /* 0x118 */ + PSW restart_old_psw; /* 0x120 */ + PSW external_old_psw; /* 0x130 */ + PSW svc_old_psw; /* 0x140 */ + PSW program_old_psw; /* 0x150 */ + PSW mcck_old_psw; /* 0x160 */ + PSW io_old_psw; /* 0x170 */ + uint8_t pad8[0x1a0 - 0x180]; /* 0x180 */ + PSW restart_new_psw; /* 0x1a0 */ + PSW external_new_psw; /* 0x1b0 */ + PSW svc_new_psw; /* 0x1c0 */ + PSW program_new_psw; /* 0x1d0 */ + PSW mcck_new_psw; /* 0x1e0 */ + PSW io_new_psw; /* 0x1f0 */ + uint8_t pad13[0x11b0 - 0x200]; /* 0x200 */ + + uint64_t mcesad; /* 0x11B0 */ + + /* 64 bit extparam used for pfault, diag 250 etc */ + uint64_t ext_params2; /* 0x11B8 */ + + uint8_t pad14[0x1200 - 0x11C0]; /* 0x11C0 */ + + /* System info area */ + + uint64_t floating_pt_save_area[16]; /* 0x1200 */ + uint64_t gpregs_save_area[16]; /* 0x1280 */ + uint32_t st_status_fixed_logout[4]; /* 0x1300 */ + uint8_t pad15[0x1318 - 0x1310]; /* 0x1310 */ + uint32_t prefixreg_save_area; /* 0x1318 */ + uint32_t fpt_creg_save_area; /* 0x131c */ + uint8_t pad16[0x1324 - 0x1320]; /* 0x1320 */ + uint32_t tod_progreg_save_area; /* 0x1324 */ + uint64_t cpu_timer_save_area; /* 0x1328 */ + uint64_t clock_comp_save_area; /* 0x1330 */ + uint8_t pad17[0x1340 - 0x1338]; /* 0x1338 */ + uint32_t access_regs_save_area[16]; /* 0x1340 */ + uint64_t cregs_save_area[16]; /* 0x1380 */ + + /* align to the top of the prefix area */ + + uint8_t pad18[0x2000 - 0x1400]; /* 0x1400 */ +} QEMU_PACKED LowCore; +QEMU_BUILD_BUG_ON(sizeof(LowCore) != 8192); +#endif /* CONFIG_USER_ONLY */ + +#define MAX_ILEN 6 + +/* While the PoO talks about ILC (a number between 1-3) what is actually + stored in LowCore is shifted left one bit (an even between 2-6). As + this is the actual length of the insn and therefore more useful, that + is what we want to pass around and manipulate. To make sure that we + have applied this distinction universally, rename the "ILC" to "ILEN". 
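For + example, TPI (opcode 0xb236) begins with byte 0xb2, whose top two bits + are 10, so get_ilen() returns 4; every 0xc0-0xff opcode is 6 bytes.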
*/ +static inline int get_ilen(uint8_t opc) +{ + switch (opc >> 6) { + case 0: + return 2; + case 1: + case 2: + return 4; + default: + return 6; + } +} + +/* Compute the ATMID field that is stored in the per_perc_atmid lowcore + entry when a PER exception is triggered. */ +static inline uint8_t get_per_atmid(CPUS390XState *env) +{ + return ((env->psw.mask & PSW_MASK_64) ? (1 << 7) : 0) | + (1 << 6) | + ((env->psw.mask & PSW_MASK_32) ? (1 << 5) : 0) | + ((env->psw.mask & PSW_MASK_DAT) ? (1 << 4) : 0) | + ((env->psw.mask & PSW_ASC_SECONDARY) ? (1 << 3) : 0) | + ((env->psw.mask & PSW_ASC_ACCREG) ? (1 << 2) : 0); +} + +static inline uint64_t wrap_address(CPUS390XState *env, uint64_t a) +{ + if (!(env->psw.mask & PSW_MASK_64)) { + if (!(env->psw.mask & PSW_MASK_32)) { + /* 24-Bit mode */ + a &= 0x00ffffff; + } else { + /* 31-Bit mode */ + a &= 0x7fffffff; + } + } + return a; +} + +/* CC optimization */ + +/* Instead of computing the condition codes after each x86 instruction, + * QEMU just stores the result (called CC_DST), the type of operation + * (called CC_OP) and whatever operands are needed (CC_SRC and possibly + * CC_VR). When the condition codes are needed, the condition codes can + * be calculated using this information. Condition codes are not generated + * if they are only needed for conditional branches. + */ +enum cc_op { + CC_OP_CONST0 = 0, /* CC is 0 */ + CC_OP_CONST1, /* CC is 1 */ + CC_OP_CONST2, /* CC is 2 */ + CC_OP_CONST3, /* CC is 3 */ + + CC_OP_DYNAMIC, /* CC calculation defined by env->cc_op */ + CC_OP_STATIC, /* CC value is env->cc_op */ + + CC_OP_NZ, /* env->cc_dst != 0 */ + CC_OP_LTGT_32, /* signed less/greater than (32bit) */ + CC_OP_LTGT_64, /* signed less/greater than (64bit) */ + CC_OP_LTUGTU_32, /* unsigned less/greater than (32bit) */ + CC_OP_LTUGTU_64, /* unsigned less/greater than (64bit) */ + CC_OP_LTGT0_32, /* signed less/greater than 0 (32bit) */ + CC_OP_LTGT0_64, /* signed less/greater than 0 (64bit) */ + + CC_OP_ADD_64, /* overflow on add (64bit) */ + CC_OP_ADDU_64, /* overflow on unsigned add (64bit) */ + CC_OP_ADDC_64, /* overflow on unsigned add-carry (64bit) */ + CC_OP_SUB_64, /* overflow on subtraction (64bit) */ + CC_OP_SUBU_64, /* overflow on unsigned subtraction (64bit) */ + CC_OP_SUBB_64, /* overflow on unsigned sub-borrow (64bit) */ + CC_OP_ABS_64, /* sign eval on abs (64bit) */ + CC_OP_NABS_64, /* sign eval on nabs (64bit) */ + + CC_OP_ADD_32, /* overflow on add (32bit) */ + CC_OP_ADDU_32, /* overflow on unsigned add (32bit) */ + CC_OP_ADDC_32, /* overflow on unsigned add-carry (32bit) */ + CC_OP_SUB_32, /* overflow on subtraction (32bit) */ + CC_OP_SUBU_32, /* overflow on unsigned subtraction (32bit) */ + CC_OP_SUBB_32, /* overflow on unsigned sub-borrow (32bit) */ + CC_OP_ABS_32, /* sign eval on abs (64bit) */ + CC_OP_NABS_32, /* sign eval on nabs (64bit) */ + + CC_OP_COMP_32, /* complement */ + CC_OP_COMP_64, /* complement */ + + CC_OP_TM_32, /* test under mask (32bit) */ + CC_OP_TM_64, /* test under mask (64bit) */ + + CC_OP_NZ_F32, /* FP dst != 0 (32bit) */ + CC_OP_NZ_F64, /* FP dst != 0 (64bit) */ + CC_OP_NZ_F128, /* FP dst != 0 (128bit) */ + + CC_OP_ICM, /* insert characters under mask */ + CC_OP_SLA_32, /* Calculate shift left signed (32bit) */ + CC_OP_SLA_64, /* Calculate shift left signed (64bit) */ + CC_OP_FLOGR, /* find leftmost one */ + CC_OP_LCBB, /* load count to block boundary */ + CC_OP_VC, /* vector compare result */ + CC_OP_MAX +}; + +static inline hwaddr decode_basedisp_s(CPUS390XState *env, uint32_t ipb, + uint8_t *ar) +{ + 
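/* The base register number sits in the top nibble of the IPB image, with + the 12-bit displacement just below it; base register 0 yields a zero + base, not the contents of r0. */ +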
hwaddr addr = 0; + uint8_t reg; + + reg = ipb >> 28; + if (reg > 0) { + addr = env->regs[reg]; + } + addr += (ipb >> 16) & 0xfff; + if (ar) { + *ar = reg; + } + + return addr; +} + +/* Base/displacement are at the same locations. */ +#define decode_basedisp_rs decode_basedisp_s + +/* cc_helper.c */ +const char *cc_name(enum cc_op cc_op); +void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr); +uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst, + uint64_t vr); + + +/* cpu.c */ +unsigned int s390_cpu_halt(S390CPU *cpu); +void s390_cpu_unhalt(S390CPU *cpu); + + +/* cpu_models.c */ +void s390_cpu_model_register_props(CPUState *obj); +void s390_cpu_model_class_register_props(CPUClass *oc); +void s390_realize_cpu_model(CPUState *cs); +CPUClass *s390_cpu_class_by_name(const char *name); + + +/* excp_helper.c */ +void s390x_cpu_debug_excp_handler(CPUState *cs); +void s390_cpu_do_interrupt(CPUState *cpu); +bool s390_cpu_exec_interrupt(CPUState *cpu, int int_req); +bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr); +void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr, + MMUAccessType access_type, + int mmu_idx, uintptr_t retaddr); + + +/* fpu_helper.c */ +uint32_t set_cc_nz_f32(float32 v); +uint32_t set_cc_nz_f64(float64 v); +uint32_t set_cc_nz_f128(float128 v); +#define S390_IEEE_MASK_INVALID 0x80 +#define S390_IEEE_MASK_DIVBYZERO 0x40 +#define S390_IEEE_MASK_OVERFLOW 0x20 +#define S390_IEEE_MASK_UNDERFLOW 0x10 +#define S390_IEEE_MASK_INEXACT 0x08 +#define S390_IEEE_MASK_QUANTUM 0x04 +uint8_t s390_softfloat_exc_to_ieee(unsigned int exc); +int s390_swap_bfp_rounding_mode(CPUS390XState *env, int m3); +void s390_restore_bfp_rounding_mode(CPUS390XState *env, int old_mode); +int float_comp_to_cc(CPUS390XState *env, int float_compare); +uint16_t float32_dcmask(CPUS390XState *env, float32 f1); +uint16_t float64_dcmask(CPUS390XState *env, float64 f1); +uint16_t float128_dcmask(CPUS390XState *env, float128 f1); + + +/* gdbstub.c */ +int s390_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); +int s390_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); +void s390_cpu_gdb_init(CPUState *cs); + + +/* helper.c */ +void s390_cpu_dump_state(CPUState *cpu, FILE *f, int flags); +hwaddr s390_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); +hwaddr s390_cpu_get_phys_addr_debug(CPUState *cpu, vaddr addr); +uint64_t get_psw_mask(CPUS390XState *env); +void s390_cpu_recompute_watchpoints(CPUState *cs); +void s390x_tod_timer(void *opaque); +void s390x_cpu_timer(void *opaque); +void do_restart_interrupt(CPUS390XState *env); +void s390_handle_wait(S390CPU *cpu); +#define S390_STORE_STATUS_DEF_ADDR offsetof(LowCore, floating_pt_save_area) +int s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch); +int s390_store_adtl_status(S390CPU *cpu, hwaddr addr, hwaddr len); +#ifndef CONFIG_USER_ONLY +LowCore *cpu_map_lowcore(CPUS390XState *env); +void cpu_unmap_lowcore(CPUS390XState *env, LowCore *lowcore); +#endif /* CONFIG_USER_ONLY */ + + +/* interrupt.c */ +void trigger_pgm_exception(CPUS390XState *env, uint32_t code); +void cpu_inject_clock_comparator(S390CPU *cpu); +void cpu_inject_cpu_timer(S390CPU *cpu); +void cpu_inject_emergency_signal(S390CPU *cpu, uint16_t src_cpu_addr); +int cpu_inject_external_call(S390CPU *cpu, uint16_t src_cpu_addr); +bool s390_cpu_has_io_int(S390CPU *cpu); +bool s390_cpu_has_ext_int(S390CPU *cpu); +bool s390_cpu_has_mcck_int(S390CPU 
*cpu); +bool s390_cpu_has_int(S390CPU *cpu); +bool s390_cpu_has_restart_int(S390CPU *cpu); +bool s390_cpu_has_stop_int(S390CPU *cpu); +void cpu_inject_restart(S390CPU *cpu); +void cpu_inject_stop(S390CPU *cpu); + + +/* ioinst.c */ +void ioinst_handle_xsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra); +void ioinst_handle_csch(S390CPU *cpu, uint64_t reg1, uintptr_t ra); +void ioinst_handle_hsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra); +void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, + uintptr_t ra); +void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, + uintptr_t ra); +void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb, uintptr_t ra); +void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, + uintptr_t ra); +int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra); +void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb, uintptr_t ra); +void ioinst_handle_schm(S390CPU *cpu, uint64_t reg1, uint64_t reg2, + uint32_t ipb, uintptr_t ra); +void ioinst_handle_rsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra); +void ioinst_handle_rchp(S390CPU *cpu, uint64_t reg1, uintptr_t ra); +void ioinst_handle_sal(S390CPU *cpu, uint64_t reg1, uintptr_t ra); + + +/* mem_helper.c */ +target_ulong mmu_real2abs(CPUS390XState *env, target_ulong raddr); +void probe_write_access(CPUS390XState *env, uint64_t addr, uint64_t len, + uintptr_t ra); + + +/* mmu_helper.c */ +int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc, + target_ulong *raddr, int *flags, uint64_t *tec); +int mmu_translate_real(CPUS390XState *env, target_ulong raddr, int rw, + target_ulong *addr, int *flags, uint64_t *tec); + + +/* misc_helper.c */ +int handle_diag_288(CPUS390XState *env, uint64_t r1, uint64_t r3); +void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3, + uintptr_t ra); + + +/* translate.c */ +void s390x_translate_init(struct uc_struct *uc); + + +/* sigp.c */ +int handle_sigp(CPUS390XState *env, uint8_t order, uint64_t r1, uint64_t r3); +void do_stop_interrupt(CPUS390XState *env); + +#endif /* S390X_INTERNAL_H */ diff --git a/qemu/target/s390x/interrupt.c b/qemu/target/s390x/interrupt.c new file mode 100644 index 00000000..877e5d88 --- /dev/null +++ b/qemu/target/s390x/interrupt.c @@ -0,0 +1,233 @@ +/* + * QEMU S/390 Interrupt support + * + * Copyright IBM Corp. 2012, 2014 + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at your + * option) any later version. See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "cpu.h" +#include "internal.h" +#include "exec/exec-all.h" +#include "sysemu/tcg.h" +#include "hw/s390x/ioinst.h" +#include "tcg_s390x.h" +//#include "hw/s390x/s390_flic.h" + +/* Ensure to exit the TB after this call! 
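The pending program interrupt is + only delivered once control returns to the outer cpu execution loop, + so a translation block that kept running would defer it.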
*/ +void trigger_pgm_exception(CPUS390XState *env, uint32_t code) +{ + CPUState *cs = env_cpu(env); + + cs->exception_index = EXCP_PGM; + env->int_pgm_code = code; + /* env->int_pgm_ilen is already set, or will be set during unwinding */ +} + +void s390_program_interrupt(CPUS390XState *env, uint32_t code, uintptr_t ra) +{ + tcg_s390_program_interrupt(env, code, ra); +} + +void cpu_inject_clock_comparator(S390CPU *cpu) +{ + CPUS390XState *env = &cpu->env; + + env->pending_int |= INTERRUPT_EXT_CLOCK_COMPARATOR; + cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD); +} + +void cpu_inject_cpu_timer(S390CPU *cpu) +{ + CPUS390XState *env = &cpu->env; + + env->pending_int |= INTERRUPT_EXT_CPU_TIMER; + cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD); +} + +void cpu_inject_emergency_signal(S390CPU *cpu, uint16_t src_cpu_addr) +{ + CPUS390XState *env = &cpu->env; + + g_assert(src_cpu_addr < S390_MAX_CPUS); + set_bit(src_cpu_addr, env->emergency_signals); + + env->pending_int |= INTERRUPT_EMERGENCY_SIGNAL; + cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD); +} + +int cpu_inject_external_call(S390CPU *cpu, uint16_t src_cpu_addr) +{ + CPUS390XState *env = &cpu->env; + + g_assert(src_cpu_addr < S390_MAX_CPUS); + if (env->pending_int & INTERRUPT_EXTERNAL_CALL) { + return -EBUSY; + } + env->external_call_addr = src_cpu_addr; + + env->pending_int |= INTERRUPT_EXTERNAL_CALL; + cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD); + return 0; +} + +void cpu_inject_restart(S390CPU *cpu) +{ + CPUS390XState *env = &cpu->env; + + env->pending_int |= INTERRUPT_RESTART; + cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD); +} + +void cpu_inject_stop(S390CPU *cpu) +{ + CPUS390XState *env = &cpu->env; + + env->pending_int |= INTERRUPT_STOP; + cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD); +} + +/* + * All of the following interrupts are floating, i.e. not per-vcpu. + * We just need a dummy cpustate in order to be able to inject in the + * non-kvm case. 
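In this port the + * FLIC is not wired up, so the injection helpers below are stubbed + * out and floating interrupts are effectively dropped.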
+ */ +void s390_sclp_extint(uint32_t parm) +{ +#if 0 + S390FLICState *fs = s390_get_flic(); + S390FLICStateClass *fsc = s390_get_flic_class(fs); + + fsc->inject_service(fs, parm); +#endif +} + +void s390_io_interrupt(uint16_t subchannel_id, uint16_t subchannel_nr, + uint32_t io_int_parm, uint32_t io_int_word) +{ +#if 0 + S390FLICState *fs = s390_get_flic(); + S390FLICStateClass *fsc = s390_get_flic_class(fs); + + fsc->inject_io(fs, subchannel_id, subchannel_nr, io_int_parm, io_int_word); +#endif +} + +void s390_crw_mchk(void) +{ +#if 0 + S390FLICState *fs = s390_get_flic(); + S390FLICStateClass *fsc = s390_get_flic_class(fs); + + fsc->inject_crw_mchk(fs); +#endif +} + +bool s390_cpu_has_mcck_int(S390CPU *cpu) +{ +#if 0 + QEMUS390FLICState *flic = s390_get_qemu_flic(s390_get_flic()); + CPUS390XState *env = &cpu->env; + + if (!(env->psw.mask & PSW_MASK_MCHECK)) { + return false; + } + + /* for now we only support channel report machine checks (floating) */ + if (qemu_s390_flic_has_crw_mchk(flic) && + (env->cregs[14] & CR14_CHANNEL_REPORT_SC)) { + return true; + } +#endif + + return false; +} + +bool s390_cpu_has_ext_int(S390CPU *cpu) +{ +#if 0 + QEMUS390FLICState *flic = s390_get_qemu_flic(s390_get_flic()); + CPUS390XState *env = &cpu->env; + + if (!(env->psw.mask & PSW_MASK_EXT)) { + return false; + } + + if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) && + (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) { + return true; + } + + if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) && + (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) { + return true; + } + + if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) && + (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) { + return true; + } + + if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) && + (env->cregs[0] & CR0_CKC_SC)) { + return true; + } + + if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) && + (env->cregs[0] & CR0_CPU_TIMER_SC)) { + return true; + } + + if (qemu_s390_flic_has_service(flic) && + (env->cregs[0] & CR0_SERVICE_SC)) { + return true; + } +#endif + + return false; +} + +bool s390_cpu_has_io_int(S390CPU *cpu) +{ +#if 0 + QEMUS390FLICState *flic = s390_get_qemu_flic(s390_get_flic()); + CPUS390XState *env = &cpu->env; + + if (!(env->psw.mask & PSW_MASK_IO)) { + return false; + } + + return qemu_s390_flic_has_io(flic, env->cregs[6]); +#endif + + return false; +} + +bool s390_cpu_has_restart_int(S390CPU *cpu) +{ + return false; +#if 0 + CPUS390XState *env = &cpu->env; + + return env->pending_int & INTERRUPT_RESTART; +#endif +} + +bool s390_cpu_has_stop_int(S390CPU *cpu) +{ + CPUS390XState *env = &cpu->env; + + return env->pending_int & INTERRUPT_STOP; +} + +bool s390_cpu_has_int(S390CPU *cpu) +{ + return s390_cpu_has_mcck_int(cpu) || + s390_cpu_has_ext_int(cpu) || + s390_cpu_has_io_int(cpu) || + s390_cpu_has_restart_int(cpu) || + s390_cpu_has_stop_int(cpu); +} diff --git a/qemu/target/s390x/ioinst.c b/qemu/target/s390x/ioinst.c new file mode 100644 index 00000000..9466411a --- /dev/null +++ b/qemu/target/s390x/ioinst.c @@ -0,0 +1,788 @@ +/* + * I/O instructions for S/390 + * + * Copyright 2012, 2015 IBM Corp. + * Author(s): Cornelia Huck + * + * This work is licensed under the terms of the GNU GPL, version 2 or (at + * your option) any later version. See the COPYING file in the top-level + * directory. 
+ */ + +#include "qemu/osdep.h" + +#include "cpu.h" +#include "internal.h" +#include "hw/s390x/ioinst.h" +//#include "hw/s390x/s390-pci-bus.h" + +int ioinst_disassemble_sch_ident(uint32_t value, int *m, int *cssid, int *ssid, + int *schid) +{ + if (!IOINST_SCHID_ONE(value)) { + return -EINVAL; + } + if (!IOINST_SCHID_M(value)) { + if (IOINST_SCHID_CSSID(value)) { + return -EINVAL; + } + *cssid = 0; + *m = 0; + } else { + *cssid = IOINST_SCHID_CSSID(value); + *m = 1; + } + *ssid = IOINST_SCHID_SSID(value); + *schid = IOINST_SCHID_NR(value); + return 0; +} + +void ioinst_handle_xsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra) +{ +#if 0 + int cssid, ssid, schid, m; + SubchDev *sch; + + if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) { + s390_program_interrupt(&cpu->env, PGM_OPERAND, ra); + return; + } + trace_ioinst_sch_id("xsch", cssid, ssid, schid); + sch = css_find_subch(m, cssid, ssid, schid); + if (!sch || !css_subch_visible(sch)) { + setcc(cpu, 3); + return; + } + setcc(cpu, css_do_xsch(sch)); +#endif +} + +void ioinst_handle_csch(S390CPU *cpu, uint64_t reg1, uintptr_t ra) +{ +#if 0 + int cssid, ssid, schid, m; + SubchDev *sch; + + if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) { + s390_program_interrupt(&cpu->env, PGM_OPERAND, ra); + return; + } + trace_ioinst_sch_id("csch", cssid, ssid, schid); + sch = css_find_subch(m, cssid, ssid, schid); + if (!sch || !css_subch_visible(sch)) { + setcc(cpu, 3); + return; + } + setcc(cpu, css_do_csch(sch)); +#endif +} + +void ioinst_handle_hsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra) +{ +#if 0 + int cssid, ssid, schid, m; + SubchDev *sch; + + if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) { + s390_program_interrupt(&cpu->env, PGM_OPERAND, ra); + return; + } + trace_ioinst_sch_id("hsch", cssid, ssid, schid); + sch = css_find_subch(m, cssid, ssid, schid); + if (!sch || !css_subch_visible(sch)) { + setcc(cpu, 3); + return; + } + setcc(cpu, css_do_hsch(sch)); +#endif +} + +static int ioinst_schib_valid(SCHIB *schib) +{ + if ((be16_to_cpu(schib->pmcw.flags) & PMCW_FLAGS_MASK_INVALID) || + (be32_to_cpu(schib->pmcw.chars) & PMCW_CHARS_MASK_INVALID)) { + return 0; + } + /* Disallow extended measurements for now. 
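The XMWME bit in the PMCW would enable the + extended-measurement-word facility, which we do not emulate, so such + a SCHIB is rejected as invalid.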
*/ + if (be32_to_cpu(schib->pmcw.chars) & PMCW_CHARS_MASK_XMWME) { + return 0; + } + return 1; +} + +void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra) +{ +#if 0 + int cssid, ssid, schid, m; + SubchDev *sch; + SCHIB schib; + uint64_t addr; + CPUS390XState *env = &cpu->env; + uint8_t ar; + + addr = decode_basedisp_s(env, ipb, &ar); + if (addr & 3) { + s390_program_interrupt(env, PGM_SPECIFICATION, ra); + return; + } + if (s390_cpu_virt_mem_read(cpu, addr, ar, &schib, sizeof(schib))) { + s390_cpu_virt_mem_handle_exc(cpu, ra); + return; + } + if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid) || + !ioinst_schib_valid(&schib)) { + s390_program_interrupt(env, PGM_OPERAND, ra); + return; + } + trace_ioinst_sch_id("msch", cssid, ssid, schid); + sch = css_find_subch(m, cssid, ssid, schid); + if (!sch || !css_subch_visible(sch)) { + setcc(cpu, 3); + return; + } + setcc(cpu, css_do_msch(sch, &schib)); +#endif +} + +static void copy_orb_from_guest(ORB *dest, const ORB *src) +{ + dest->intparm = be32_to_cpu(src->intparm); + dest->ctrl0 = be16_to_cpu(src->ctrl0); + dest->lpm = src->lpm; + dest->ctrl1 = src->ctrl1; + dest->cpa = be32_to_cpu(src->cpa); +} + +static int ioinst_orb_valid(ORB *orb) +{ + if ((orb->ctrl0 & ORB_CTRL0_MASK_INVALID) || + (orb->ctrl1 & ORB_CTRL1_MASK_INVALID)) { + return 0; + } + /* We don't support MIDA. */ + if (orb->ctrl1 & ORB_CTRL1_MASK_MIDAW) { + return 0; + } + if ((orb->cpa & HIGH_ORDER_BIT) != 0) { + return 0; + } + return 1; +} + +void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra) +{ +#if 0 + int cssid, ssid, schid, m; + SubchDev *sch; + ORB orig_orb, orb; + uint64_t addr; + CPUS390XState *env = &cpu->env; + uint8_t ar; + + addr = decode_basedisp_s(env, ipb, &ar); + if (addr & 3) { + s390_program_interrupt(env, PGM_SPECIFICATION, ra); + return; + } + if (s390_cpu_virt_mem_read(cpu, addr, ar, &orig_orb, sizeof(orb))) { + s390_cpu_virt_mem_handle_exc(cpu, ra); + return; + } + copy_orb_from_guest(&orb, &orig_orb); + if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid) || + !ioinst_orb_valid(&orb)) { + s390_program_interrupt(env, PGM_OPERAND, ra); + return; + } + trace_ioinst_sch_id("ssch", cssid, ssid, schid); + sch = css_find_subch(m, cssid, ssid, schid); + if (!sch || !css_subch_visible(sch)) { + setcc(cpu, 3); + return; + } + setcc(cpu, css_do_ssch(sch, &orb)); +#endif +} + +void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb, uintptr_t ra) +{ +#if 0 + CRW crw; + uint64_t addr; + int cc; + CPUS390XState *env = &cpu->env; + uint8_t ar; + + addr = decode_basedisp_s(env, ipb, &ar); + if (addr & 3) { + s390_program_interrupt(env, PGM_SPECIFICATION, ra); + return; + } + + cc = css_do_stcrw(&crw); + /* 0 - crw stored, 1 - zeroes stored */ + + if (s390_cpu_virt_mem_write(cpu, addr, ar, &crw, sizeof(crw)) == 0) { + setcc(cpu, cc); + } else { + if (cc == 0) { + /* Write failed: requeue CRW since STCRW is suppressing */ + css_undo_stcrw(&crw); + } + s390_cpu_virt_mem_handle_exc(cpu, ra); + } +#endif +} + +void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, + uintptr_t ra) +{ +#if 0 + int cssid, ssid, schid, m; + SubchDev *sch; + uint64_t addr; + int cc; + SCHIB schib; + CPUS390XState *env = &cpu->env; + uint8_t ar; + + addr = decode_basedisp_s(env, ipb, &ar); + if (addr & 3) { + s390_program_interrupt(env, PGM_SPECIFICATION, ra); + return; + } + + if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) { + /* + * As operand exceptions have a lower priority than 
access exceptions, + * we check whether the memory area is writeable (injecting the + * access execption if it is not) first. + */ + if (!s390_cpu_virt_mem_check_write(cpu, addr, ar, sizeof(schib))) { + s390_program_interrupt(env, PGM_OPERAND, ra); + } else { + s390_cpu_virt_mem_handle_exc(cpu, ra); + } + return; + } + trace_ioinst_sch_id("stsch", cssid, ssid, schid); + sch = css_find_subch(m, cssid, ssid, schid); + if (sch) { + if (css_subch_visible(sch)) { + css_do_stsch(sch, &schib); + cc = 0; + } else { + /* Indicate no more subchannels in this css/ss */ + cc = 3; + } + } else { + if (css_schid_final(m, cssid, ssid, schid)) { + cc = 3; /* No more subchannels in this css/ss */ + } else { + /* Store an empty schib. */ + memset(&schib, 0, sizeof(schib)); + cc = 0; + } + } + if (cc != 3) { + if (s390_cpu_virt_mem_write(cpu, addr, ar, &schib, + sizeof(schib)) != 0) { + s390_cpu_virt_mem_handle_exc(cpu, ra); + return; + } + } else { + /* Access exceptions have a higher priority than cc3 */ + if (s390_cpu_virt_mem_check_write(cpu, addr, ar, sizeof(schib)) != 0) { + s390_cpu_virt_mem_handle_exc(cpu, ra); + return; + } + } + setcc(cpu, cc); +#endif +} + +int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra) +{ +#if 0 + CPUS390XState *env = &cpu->env; + int cssid, ssid, schid, m; + SubchDev *sch; + IRB irb; + uint64_t addr; + int cc, irb_len; + uint8_t ar; + + if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) { + s390_program_interrupt(env, PGM_OPERAND, ra); + return -EIO; + } + trace_ioinst_sch_id("tsch", cssid, ssid, schid); + addr = decode_basedisp_s(env, ipb, &ar); + if (addr & 3) { + s390_program_interrupt(env, PGM_SPECIFICATION, ra); + return -EIO; + } + + sch = css_find_subch(m, cssid, ssid, schid); + if (sch && css_subch_visible(sch)) { + cc = css_do_tsch_get_irb(sch, &irb, &irb_len); + } else { + cc = 3; + } + /* 0 - status pending, 1 - not status pending, 3 - not operational */ + if (cc != 3) { + if (s390_cpu_virt_mem_write(cpu, addr, ar, &irb, irb_len) != 0) { + s390_cpu_virt_mem_handle_exc(cpu, ra); + return -EFAULT; + } + css_do_tsch_update_subch(sch); + } else { + irb_len = sizeof(irb) - sizeof(irb.emw); + /* Access exceptions have a higher priority than cc3 */ + if (s390_cpu_virt_mem_check_write(cpu, addr, ar, irb_len) != 0) { + s390_cpu_virt_mem_handle_exc(cpu, ra); + return -EFAULT; + } + } + + setcc(cpu, cc); +#endif + return 0; +} + +typedef struct ChscReq { + uint16_t len; + uint16_t command; + uint32_t param0; + uint32_t param1; + uint32_t param2; +} QEMU_PACKED ChscReq; + +typedef struct ChscResp { + uint16_t len; + uint16_t code; + uint32_t param; + char data[]; +} QEMU_PACKED ChscResp; + +#define CHSC_MIN_RESP_LEN 0x0008 + +#define CHSC_SCPD 0x0002 +#define CHSC_SCSC 0x0010 +#define CHSC_SDA 0x0031 +#define CHSC_SEI 0x000e + +#define CHSC_SCPD_0_M 0x20000000 +#define CHSC_SCPD_0_C 0x10000000 +#define CHSC_SCPD_0_FMT 0x0f000000 +#define CHSC_SCPD_0_CSSID 0x00ff0000 +#define CHSC_SCPD_0_RFMT 0x00000f00 +#define CHSC_SCPD_0_RES 0xc000f000 +#define CHSC_SCPD_1_RES 0xffffff00 +#define CHSC_SCPD_01_CHPID 0x000000ff +static void ioinst_handle_chsc_scpd(ChscReq *req, ChscResp *res) +{ +#if 0 + uint16_t len = be16_to_cpu(req->len); + uint32_t param0 = be32_to_cpu(req->param0); + uint32_t param1 = be32_to_cpu(req->param1); + uint16_t resp_code; + int rfmt; + uint16_t cssid; + uint8_t f_chpid, l_chpid; + int desc_size; + int m; + + rfmt = (param0 & CHSC_SCPD_0_RFMT) >> 8; + if ((rfmt == 0) || (rfmt == 1)) { + rfmt = !!(param0 & 
CHSC_SCPD_0_C); + } + if ((len != 0x0010) || (param0 & CHSC_SCPD_0_RES) || + (param1 & CHSC_SCPD_1_RES) || req->param2) { + resp_code = 0x0003; + goto out_err; + } + if (param0 & CHSC_SCPD_0_FMT) { + resp_code = 0x0007; + goto out_err; + } + cssid = (param0 & CHSC_SCPD_0_CSSID) >> 16; + m = param0 & CHSC_SCPD_0_M; + if (cssid != 0) { + if (!m || !css_present(cssid)) { + resp_code = 0x0008; + goto out_err; + } + } + f_chpid = param0 & CHSC_SCPD_01_CHPID; + l_chpid = param1 & CHSC_SCPD_01_CHPID; + if (l_chpid < f_chpid) { + resp_code = 0x0003; + goto out_err; + } + /* css_collect_chp_desc() is endian-aware */ + desc_size = css_collect_chp_desc(m, cssid, f_chpid, l_chpid, rfmt, + &res->data); + res->code = cpu_to_be16(0x0001); + res->len = cpu_to_be16(8 + desc_size); + res->param = cpu_to_be32(rfmt); + return; + + out_err: + res->code = cpu_to_be16(resp_code); + res->len = cpu_to_be16(CHSC_MIN_RESP_LEN); + res->param = cpu_to_be32(rfmt); +#endif +} + +#define CHSC_SCSC_0_M 0x20000000 +#define CHSC_SCSC_0_FMT 0x000f0000 +#define CHSC_SCSC_0_CSSID 0x0000ff00 +#define CHSC_SCSC_0_RES 0xdff000ff +static void ioinst_handle_chsc_scsc(ChscReq *req, ChscResp *res) +{ +#if 0 + uint16_t len = be16_to_cpu(req->len); + uint32_t param0 = be32_to_cpu(req->param0); + uint8_t cssid; + uint16_t resp_code; + uint32_t general_chars[510]; + uint32_t chsc_chars[508]; + + if (len != 0x0010) { + resp_code = 0x0003; + goto out_err; + } + + if (param0 & CHSC_SCSC_0_FMT) { + resp_code = 0x0007; + goto out_err; + } + cssid = (param0 & CHSC_SCSC_0_CSSID) >> 8; + if (cssid != 0) { + if (!(param0 & CHSC_SCSC_0_M) || !css_present(cssid)) { + resp_code = 0x0008; + goto out_err; + } + } + if ((param0 & CHSC_SCSC_0_RES) || req->param1 || req->param2) { + resp_code = 0x0003; + goto out_err; + } + res->code = cpu_to_be16(0x0001); + res->len = cpu_to_be16(4080); + res->param = 0; + + memset(general_chars, 0, sizeof(general_chars)); + memset(chsc_chars, 0, sizeof(chsc_chars)); + + general_chars[0] = cpu_to_be32(0x03000000); + general_chars[1] = cpu_to_be32(0x00079000); + general_chars[3] = cpu_to_be32(0x00080000); + + chsc_chars[0] = cpu_to_be32(0x40000000); + chsc_chars[3] = cpu_to_be32(0x00040000); + + memcpy(res->data, general_chars, sizeof(general_chars)); + memcpy(res->data + sizeof(general_chars), chsc_chars, sizeof(chsc_chars)); + return; + + out_err: + res->code = cpu_to_be16(resp_code); + res->len = cpu_to_be16(CHSC_MIN_RESP_LEN); + res->param = 0; +#endif +} + +#define CHSC_SDA_0_FMT 0x0f000000 +#define CHSC_SDA_0_OC 0x0000ffff +#define CHSC_SDA_0_RES 0xf0ff0000 +#define CHSC_SDA_OC_MCSSE 0x0 +#define CHSC_SDA_OC_MSS 0x2 +static void ioinst_handle_chsc_sda(ChscReq *req, ChscResp *res) +{ +#if 0 + uint16_t resp_code = 0x0001; + uint16_t len = be16_to_cpu(req->len); + uint32_t param0 = be32_to_cpu(req->param0); + uint16_t oc; + int ret; + + if ((len != 0x0400) || (param0 & CHSC_SDA_0_RES)) { + resp_code = 0x0003; + goto out; + } + + if (param0 & CHSC_SDA_0_FMT) { + resp_code = 0x0007; + goto out; + } + + oc = param0 & CHSC_SDA_0_OC; + switch (oc) { + case CHSC_SDA_OC_MCSSE: + ret = css_enable_mcsse(); + if (ret == -EINVAL) { + resp_code = 0x0101; + goto out; + } + break; + case CHSC_SDA_OC_MSS: + ret = css_enable_mss(); + if (ret == -EINVAL) { + resp_code = 0x0101; + goto out; + } + break; + default: + resp_code = 0x0003; + goto out; + } + +out: + res->code = cpu_to_be16(resp_code); + res->len = cpu_to_be16(CHSC_MIN_RESP_LEN); + res->param = 0; +#endif +} + +static int chsc_sei_nt0_get_event(void *res) +{ + /* no events 
yet */ + return 1; +} + +static int chsc_sei_nt0_have_event(void) +{ + /* no events yet */ + return 0; +} + +#if 0 +static int chsc_sei_nt2_get_event(void *res) +{ + if (s390_has_feat(uc, S390_FEAT_ZPCI)) { + // return pci_chsc_sei_nt2_get_event(res); + } + return 1; +} + +static int chsc_sei_nt2_have_event(void) +{ + if (s390_has_feat(uc, S390_FEAT_ZPCI)) { + // return pci_chsc_sei_nt2_have_event(); + } + return 0; +} +#endif + +#define CHSC_SEI_NT0 (1ULL << 63) +#define CHSC_SEI_NT2 (1ULL << 61) +static void ioinst_handle_chsc_sei(ChscReq *req, ChscResp *res) +{ +#if 0 + uint64_t selection_mask = ldq_p(&req->param1); + uint8_t *res_flags = (uint8_t *)res->data; + int have_event = 0; + int have_more = 0; + + /* regarding architecture nt0 can not be masked */ + have_event = !chsc_sei_nt0_get_event(res); + have_more = chsc_sei_nt0_have_event(); + + if (selection_mask & CHSC_SEI_NT2) { + if (!have_event) { + have_event = !chsc_sei_nt2_get_event(res); + } + + if (!have_more) { + have_more = chsc_sei_nt2_have_event(); + } + } + + if (have_event) { + res->code = cpu_to_be16(0x0001); + if (have_more) { + (*res_flags) |= 0x80; + } else { + (*res_flags) &= ~0x80; + css_clear_sei_pending(); + } + } else { + res->code = cpu_to_be16(0x0005); + res->len = cpu_to_be16(CHSC_MIN_RESP_LEN); + } +#endif +} + +static void ioinst_handle_chsc_unimplemented(ChscResp *res) +{ + res->len = cpu_to_be16(CHSC_MIN_RESP_LEN); + res->code = cpu_to_be16(0x0004); + res->param = 0; +} + +void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb, uintptr_t ra) +{ + ChscReq *req; + ChscResp *res; + uint64_t addr; + int reg; + uint16_t len; + uint16_t command; + CPUS390XState *env = &cpu->env; + uint8_t buf[TARGET_PAGE_SIZE]; + + reg = (ipb >> 20) & 0x00f; + addr = env->regs[reg]; + /* Page boundary? */ + if (addr & 0xfff) { + s390_program_interrupt(env, PGM_SPECIFICATION, ra); + return; + } + /* + * Reading sizeof(ChscReq) bytes is currently enough for all of our + * present CHSC sub-handlers ... if we ever need more, we should take + * care of req->len here first. + */ + if (s390_cpu_virt_mem_read(cpu, addr, reg, buf, sizeof(ChscReq))) { + s390_cpu_virt_mem_handle_exc(cpu, ra); + return; + } + req = (ChscReq *)buf; + len = be16_to_cpu(req->len); + /* Length field valid? 
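The request must be at least 16 bytes long, a multiple of 8, and + no larger than 4088 bytes, so that a minimal 8-byte response still + fits within the 4K page.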
*/ + if ((len < 16) || (len > 4088) || (len & 7)) { + s390_program_interrupt(env, PGM_OPERAND, ra); + return; + } + memset((char *)req + len, 0, TARGET_PAGE_SIZE - len); + res = (void *)((char *)req + len); + command = be16_to_cpu(req->command); + switch (command) { + case CHSC_SCSC: + ioinst_handle_chsc_scsc(req, res); + break; + case CHSC_SCPD: + ioinst_handle_chsc_scpd(req, res); + break; + case CHSC_SDA: + ioinst_handle_chsc_sda(req, res); + break; + case CHSC_SEI: + ioinst_handle_chsc_sei(req, res); + break; + default: + ioinst_handle_chsc_unimplemented(res); + break; + } + + if (!s390_cpu_virt_mem_write(cpu, addr + len, reg, res, + be16_to_cpu(res->len))) { + setcc(cpu, 0); /* Command execution complete */ + } else { + s390_cpu_virt_mem_handle_exc(cpu, ra); + } +} + +#define SCHM_REG1_RES(_reg) (_reg & 0x000000000ffffffc) +#define SCHM_REG1_MBK(_reg) ((_reg & 0x00000000f0000000) >> 28) +#define SCHM_REG1_UPD(_reg) ((_reg & 0x0000000000000002) >> 1) +#define SCHM_REG1_DCT(_reg) (_reg & 0x0000000000000001) + +void ioinst_handle_schm(S390CPU *cpu, uint64_t reg1, uint64_t reg2, + uint32_t ipb, uintptr_t ra) +{ +#if 0 + uint8_t mbk; + int update; + int dct; + CPUS390XState *env = &cpu->env; + + if (SCHM_REG1_RES(reg1)) { + s390_program_interrupt(env, PGM_OPERAND, ra); + return; + } + + mbk = SCHM_REG1_MBK(reg1); + update = SCHM_REG1_UPD(reg1); + dct = SCHM_REG1_DCT(reg1); + + if (update && (reg2 & 0x000000000000001f)) { + s390_program_interrupt(env, PGM_OPERAND, ra); + return; + } + + css_do_schm(mbk, update, dct, update ? reg2 : 0); +#endif +} + +void ioinst_handle_rsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra) +{ +#if 0 + int cssid, ssid, schid, m; + SubchDev *sch; + + if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) { + s390_program_interrupt(&cpu->env, PGM_OPERAND, ra); + return; + } + trace_ioinst_sch_id("rsch", cssid, ssid, schid); + sch = css_find_subch(m, cssid, ssid, schid); + if (!sch || !css_subch_visible(sch)) { + setcc(cpu, 3); + return; + } + setcc(cpu, css_do_rsch(sch)); +#endif +} + +#define RCHP_REG1_RES(_reg) (_reg & 0x00000000ff00ff00) +#define RCHP_REG1_CSSID(_reg) ((_reg & 0x0000000000ff0000) >> 16) +#define RCHP_REG1_CHPID(_reg) (_reg & 0x00000000000000ff) +void ioinst_handle_rchp(S390CPU *cpu, uint64_t reg1, uintptr_t ra) +{ +#if 0 + int cc; + uint8_t cssid; + uint8_t chpid; + int ret; + CPUS390XState *env = &cpu->env; + + if (RCHP_REG1_RES(reg1)) { + s390_program_interrupt(env, PGM_OPERAND, ra); + return; + } + + cssid = RCHP_REG1_CSSID(reg1); + chpid = RCHP_REG1_CHPID(reg1); + + ret = css_do_rchp(cssid, chpid); + + switch (ret) { + case -ENODEV: + cc = 3; + break; + case -EBUSY: + cc = 2; + break; + case 0: + cc = 0; + break; + default: + /* Invalid channel subsystem. */ + s390_program_interrupt(env, PGM_OPERAND, ra); + return; + } + setcc(cpu, cc); +#endif +} + +#define SAL_REG1_INVALID(_reg) (_reg & 0x0000000080000000) +void ioinst_handle_sal(S390CPU *cpu, uint64_t reg1, uintptr_t ra) +{ + /* We do not provide address limit checking, so let's suppress it. 
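We still validate the operand: bit 32 and bits 48-63 of r1 must be + zero, otherwise an operand exception is raised.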
*/ + if (SAL_REG1_INVALID(reg1) || reg1 & 0x000000000000ffff) { + s390_program_interrupt(&cpu->env, PGM_OPERAND, ra); + } +} diff --git a/qemu/target/s390x/mem_helper.c b/qemu/target/s390x/mem_helper.c new file mode 100644 index 00000000..3a313d51 --- /dev/null +++ b/qemu/target/s390x/mem_helper.c @@ -0,0 +1,2892 @@ +/* + * S/390 memory access helper routines + * + * Copyright (c) 2009 Ulrich Hecht + * Copyright (c) 2009 Alexander Graf + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "internal.h" +#include "tcg_s390x.h" +#include "exec/helper-proto.h" +#include "exec/exec-all.h" +#include "exec/cpu_ldst.h" +#include "qemu/int128.h" +#include "qemu/atomic128.h" +#include "tcg/tcg.h" + +#include "hw/s390x/storage-keys.h" + +/*****************************************************************************/ +/* Softmmu support */ + +/* #define DEBUG_HELPER */ +#ifdef DEBUG_HELPER +#define HELPER_LOG(x...) qemu_log(x) +#else +#define HELPER_LOG(x...) +#endif + +static inline bool psw_key_valid(CPUS390XState *env, uint8_t psw_key) +{ + uint16_t pkm = env->cregs[3] >> 16; + + if (env->psw.mask & PSW_MASK_PSTATE) { + /* PSW key has range 0..15, it is valid if the bit is 1 in the PKM */ + return pkm & (0x80 >> psw_key); + } + return true; +} + +static bool is_destructive_overlap(CPUS390XState *env, uint64_t dest, + uint64_t src, uint32_t len) +{ + if (!len || src == dest) { + return false; + } + /* Take care of wrapping at the end of address space. */ + if (unlikely(wrap_address(env, src + len - 1) < src)) { + return dest > src || dest <= wrap_address(env, src + len - 1); + } + return dest > src && dest <= src + len - 1; +} + +/* Trigger a SPECIFICATION exception if an address or a length is not + naturally aligned. */ +static inline void check_alignment(CPUS390XState *env, uint64_t v, + int wordsize, uintptr_t ra) +{ + if (v % wordsize) { + tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); + } +} + +/* Load a value from memory according to its size. */ +static inline uint64_t cpu_ldusize_data_ra(CPUS390XState *env, uint64_t addr, + int wordsize, uintptr_t ra) +{ + switch (wordsize) { + case 1: + return cpu_ldub_data_ra(env, addr, ra); + case 2: + return cpu_lduw_data_ra(env, addr, ra); + default: + abort(); + } +} + +/* Store a to memory according to its size. */ +static inline void cpu_stsize_data_ra(CPUS390XState *env, uint64_t addr, + uint64_t value, int wordsize, + uintptr_t ra) +{ + switch (wordsize) { + case 1: + cpu_stb_data_ra(env, addr, value, ra); + break; + case 2: + cpu_stw_data_ra(env, addr, value, ra); + break; + default: + abort(); + } +} + +/* An access covers at most 4096 bytes and therefore at most two pages. 
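The two fragments are described by (vaddr1, size1) and + (vaddr2, size2); size2 stays 0 when the access fits in one page.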
*/ +typedef struct S390Access { + target_ulong vaddr1; + target_ulong vaddr2; + char *haddr1; + char *haddr2; + uint16_t size1; + uint16_t size2; + /* + * If we can't access the host page directly, we'll have to do I/O access + * via ld/st helpers. These are internal details, so we store the + * mmu idx to do the access here instead of passing it around in the + * helpers. Maybe, one day we can get rid of ld/st access - once we can + * handle TLB_NOTDIRTY differently. We don't expect these special accesses + * to trigger exceptions - only if we would have TLB_NOTDIRTY on LAP + * pages, we might trigger a new MMU translation - very unlikely that + * the mapping changes in between and we would trigger a fault. + */ + int mmu_idx; +} S390Access; + +static S390Access access_prepare(CPUS390XState *env, vaddr vaddr, int size, + MMUAccessType access_type, int mmu_idx, + uintptr_t ra) +{ + S390Access access = { + .vaddr1 = vaddr, + .size1 = MIN(size, -(vaddr | TARGET_PAGE_MASK)), + .mmu_idx = mmu_idx, + }; + + g_assert(size > 0 && size <= 4096); + access.haddr1 = probe_access(env, access.vaddr1, access.size1, access_type, + mmu_idx, ra); + + if (unlikely(access.size1 != size)) { + /* The access crosses page boundaries. */ + access.vaddr2 = wrap_address(env, vaddr + access.size1); + access.size2 = size - access.size1; + access.haddr2 = probe_access(env, access.vaddr2, access.size2, + access_type, mmu_idx, ra); + } + return access; +} + +/* Helper to handle memset on a single page. */ +static void do_access_memset(CPUS390XState *env, vaddr vaddr, char *haddr, + uint8_t byte, uint16_t size, int mmu_idx, + uintptr_t ra) +{ + TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); + int i; + + if (likely(haddr)) { + memset(haddr, byte, size); + } else { + /* + * Do a single access and test if we can then get access to the + * page. This is especially relevant to speed up TLB_NOTDIRTY. + */ + g_assert(size > 0); + helper_ret_stb_mmu(env, vaddr, byte, oi, ra); + haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx); + if (likely(haddr)) { + memset(haddr + 1, byte, size - 1); + } else { + for (i = 1; i < size; i++) { + helper_ret_stb_mmu(env, vaddr + i, byte, oi, ra); + } + } + } +} + +static void access_memset(CPUS390XState *env, S390Access *desta, + uint8_t byte, uintptr_t ra) +{ + + do_access_memset(env, desta->vaddr1, desta->haddr1, byte, desta->size1, + desta->mmu_idx, ra); + if (likely(!desta->size2)) { + return; + } + do_access_memset(env, desta->vaddr2, desta->haddr2, byte, desta->size2, + desta->mmu_idx, ra); +} + +static uint8_t do_access_get_byte(CPUS390XState *env, vaddr vaddr, char **haddr, + int offset, int mmu_idx, uintptr_t ra) +{ + TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); + uint8_t byte; + + if (likely(*haddr)) { + return ldub_p(*haddr + offset); + } + /* + * Do a single access and test if we can then get access to the + * page. This is especially relevant to speed up TLB_NOTDIRTY. 
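Once the first byte access + * has marked the page dirty, tlb_vaddr_to_host() usually returns a + * direct host pointer for the remaining bytes.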
+ */ + byte = helper_ret_ldub_mmu(env, vaddr + offset, oi, ra); + *haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_LOAD, mmu_idx); + return byte; +} + +static uint8_t access_get_byte(CPUS390XState *env, S390Access *access, + int offset, uintptr_t ra) +{ + if (offset < access->size1) { + return do_access_get_byte(env, access->vaddr1, &access->haddr1, + offset, access->mmu_idx, ra); + } + return do_access_get_byte(env, access->vaddr2, &access->haddr2, + offset - access->size1, access->mmu_idx, ra); +} + +static void do_access_set_byte(CPUS390XState *env, vaddr vaddr, char **haddr, + int offset, uint8_t byte, int mmu_idx, + uintptr_t ra) +{ + TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); + + if (likely(*haddr)) { + stb_p(*haddr + offset, byte); + return; + } + /* + * Do a single access and test if we can then get access to the + * page. This is especially relevant to speed up TLB_NOTDIRTY. + */ + helper_ret_stb_mmu(env, vaddr + offset, byte, oi, ra); + *haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx); +} + +static void access_set_byte(CPUS390XState *env, S390Access *access, + int offset, uint8_t byte, uintptr_t ra) +{ + if (offset < access->size1) { + do_access_set_byte(env, access->vaddr1, &access->haddr1, offset, byte, + access->mmu_idx, ra); + } else { + do_access_set_byte(env, access->vaddr2, &access->haddr2, + offset - access->size1, byte, access->mmu_idx, ra); + } +} + +/* + * Move data with the same semantics as memmove() in case ranges don't overlap + * or src > dest. Undefined behavior on destructive overlaps. + */ +static void access_memmove(CPUS390XState *env, S390Access *desta, + S390Access *srca, uintptr_t ra) +{ + int diff; + + g_assert(desta->size1 + desta->size2 == srca->size1 + srca->size2); + + /* Fallback to slow access in case we don't have access to all host pages */ + if (unlikely(!desta->haddr1 || (desta->size2 && !desta->haddr2) || + !srca->haddr1 || (srca->size2 && !srca->haddr2))) { + int i; + + for (i = 0; i < desta->size1 + desta->size2; i++) { + uint8_t byte = access_get_byte(env, srca, i, ra); + + access_set_byte(env, desta, i, byte, ra); + } + return; + } + + if (srca->size1 == desta->size1) { + memmove(desta->haddr1, srca->haddr1, srca->size1); + if (unlikely(srca->size2)) { + memmove(desta->haddr2, srca->haddr2, srca->size2); + } + } else if (srca->size1 < desta->size1) { + diff = desta->size1 - srca->size1; + memmove(desta->haddr1, srca->haddr1, srca->size1); + memmove(desta->haddr1 + srca->size1, srca->haddr2, diff); + if (likely(desta->size2)) { + memmove(desta->haddr2, srca->haddr2 + diff, desta->size2); + } + } else { + diff = srca->size1 - desta->size1; + memmove(desta->haddr1, srca->haddr1, desta->size1); + memmove(desta->haddr2, srca->haddr1 + desta->size1, diff); + if (likely(srca->size2)) { + memmove(desta->haddr2 + diff, srca->haddr2, srca->size2); + } + } +} + +static int mmu_idx_from_as(uint8_t as) +{ + switch (as) { + case AS_PRIMARY: + return MMU_PRIMARY_IDX; + case AS_SECONDARY: + return MMU_SECONDARY_IDX; + case AS_HOME: + return MMU_HOME_IDX; + default: + /* FIXME AS_ACCREG */ + g_assert_not_reached(); + } +} + +/* and on array */ +static uint32_t do_helper_nc(CPUS390XState *env, uint32_t l, uint64_t dest, + uint64_t src, uintptr_t ra) +{ + const int mmu_idx = cpu_mmu_index(env, false); + S390Access srca1, srca2, desta; + uint32_t i; + uint8_t c = 0; + + HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n", + __func__, l, dest, src); + + /* NC always processes one more byte than specified - maximum is 256 */ + l++; + + 
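/* NC treats dest as a second source operand: each result byte is + dest[i] & src[i], processed left to right one byte at a time. */ +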
srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra); + srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra); + desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra); + for (i = 0; i < l; i++) { + const uint8_t x = access_get_byte(env, &srca1, i, ra) & + access_get_byte(env, &srca2, i, ra); + + c |= x; + access_set_byte(env, &desta, i, x, ra); + } + return c != 0; +} + +uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest, + uint64_t src) +{ + return do_helper_nc(env, l, dest, src, GETPC()); +} + +/* xor on array */ +static uint32_t do_helper_xc(CPUS390XState *env, uint32_t l, uint64_t dest, + uint64_t src, uintptr_t ra) +{ + const int mmu_idx = cpu_mmu_index(env, false); + S390Access srca1, srca2, desta; + uint32_t i; + uint8_t c = 0; + + HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n", + __func__, l, dest, src); + + /* XC always processes one more byte than specified - maximum is 256 */ + l++; + + srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra); + srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra); + desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra); + + /* xor with itself is the same as memset(0) */ + if (src == dest) { + access_memset(env, &desta, 0, ra); + return 0; + } + + for (i = 0; i < l; i++) { + const uint8_t x = access_get_byte(env, &srca1, i, ra) ^ + access_get_byte(env, &srca2, i, ra); + + c |= x; + access_set_byte(env, &desta, i, x, ra); + } + return c != 0; +} + +uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest, + uint64_t src) +{ + return do_helper_xc(env, l, dest, src, GETPC()); +} + +/* or on array */ +static uint32_t do_helper_oc(CPUS390XState *env, uint32_t l, uint64_t dest, + uint64_t src, uintptr_t ra) +{ + const int mmu_idx = cpu_mmu_index(env, false); + S390Access srca1, srca2, desta; + uint32_t i; + uint8_t c = 0; + + HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n", + __func__, l, dest, src); + + /* OC always processes one more byte than specified - maximum is 256 */ + l++; + + srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra); + srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra); + desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra); + for (i = 0; i < l; i++) { + const uint8_t x = access_get_byte(env, &srca1, i, ra) | + access_get_byte(env, &srca2, i, ra); + + c |= x; + access_set_byte(env, &desta, i, x, ra); + } + return c != 0; +} + +uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest, + uint64_t src) +{ + return do_helper_oc(env, l, dest, src, GETPC()); +} + +/* memmove */ +static uint32_t do_helper_mvc(CPUS390XState *env, uint32_t l, uint64_t dest, + uint64_t src, uintptr_t ra) +{ + const int mmu_idx = cpu_mmu_index(env, false); + S390Access srca, desta; + uint32_t i; + + HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n", + __func__, l, dest, src); + + /* MVC always copies one more byte than specified - maximum is 256 */ + l++; + + srca = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra); + desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra); + + /* + * "When the operands overlap, the result is obtained as if the operands + * were processed one byte at a time". Only non-destructive overlaps + * behave like memmove(). 
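The dest == src + 1 case below + * is the classic MVC idiom for propagating one byte through a buffer, + * so it reduces to a memset of the byte at src.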
+ */ + if (dest == src + 1) { + access_memset(env, &desta, access_get_byte(env, &srca, 0, ra), ra); + } else if (!is_destructive_overlap(env, dest, src, l)) { + access_memmove(env, &desta, &srca, ra); + } else { + for (i = 0; i < l; i++) { + uint8_t byte = access_get_byte(env, &srca, i, ra); + + access_set_byte(env, &desta, i, byte, ra); + } + } + + return env->cc_op; +} + +void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) +{ + do_helper_mvc(env, l, dest, src, GETPC()); +} + +/* move inverse */ +void HELPER(mvcin)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) +{ + const int mmu_idx = cpu_mmu_index(env, false); + S390Access srca, desta; + uintptr_t ra = GETPC(); + int i; + + /* MVCIN always copies one more byte than specified - maximum is 256 */ + l++; + + src = wrap_address(env, src - l + 1); + srca = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra); + desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra); + for (i = 0; i < l; i++) { + const uint8_t x = access_get_byte(env, &srca, l - i - 1, ra); + + access_set_byte(env, &desta, i, x, ra); + } +} + +/* move numerics */ +void HELPER(mvn)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) +{ + const int mmu_idx = cpu_mmu_index(env, false); + S390Access srca1, srca2, desta; + uintptr_t ra = GETPC(); + int i; + + /* MVN always copies one more byte than specified - maximum is 256 */ + l++; + + srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra); + srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra); + desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra); + for (i = 0; i < l; i++) { + const uint8_t x = (access_get_byte(env, &srca1, i, ra) & 0x0f) | + (access_get_byte(env, &srca2, i, ra) & 0xf0); + + access_set_byte(env, &desta, i, x, ra); + } +} + +/* move with offset */ +void HELPER(mvo)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) +{ + const int mmu_idx = cpu_mmu_index(env, false); + /* MVO always processes one more byte than specified - maximum is 16 */ + const int len_dest = (l >> 4) + 1; + const int len_src = (l & 0xf) + 1; + uintptr_t ra = GETPC(); + uint8_t byte_dest, byte_src; + S390Access srca, desta; + int i, j; + + srca = access_prepare(env, src, len_src, MMU_DATA_LOAD, mmu_idx, ra); + desta = access_prepare(env, dest, len_dest, MMU_DATA_STORE, mmu_idx, ra); + + /* Handle rightmost byte */ + byte_dest = cpu_ldub_data_ra(env, dest + len_dest - 1, ra); + byte_src = access_get_byte(env, &srca, len_src - 1, ra); + byte_dest = (byte_dest & 0x0f) | (byte_src << 4); + access_set_byte(env, &desta, len_dest - 1, byte_dest, ra); + + /* Process remaining bytes from right to left */ + for (i = len_dest - 2, j = len_src - 2; i >= 0; i--, j--) { + byte_dest = byte_src >> 4; + if (j >= 0) { + byte_src = access_get_byte(env, &srca, j, ra); + } else { + byte_src = 0; + } + byte_dest |= byte_src << 4; + access_set_byte(env, &desta, i, byte_dest, ra); + } +} + +/* move zones */ +void HELPER(mvz)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) +{ + const int mmu_idx = cpu_mmu_index(env, false); + S390Access srca1, srca2, desta; + uintptr_t ra = GETPC(); + int i; + + /* MVZ always copies one more byte than specified - maximum is 256 */ + l++; + + srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra); + srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra); + desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra); + for (i = 0; i < l; i++) { + const uint8_t x = (access_get_byte(env, 
&srca1, i, ra) & 0xf0) | + (access_get_byte(env, &srca2, i, ra) & 0x0f); + + access_set_byte(env, &desta, i, x, ra); + } +} + +/* compare unsigned byte arrays */ +static uint32_t do_helper_clc(CPUS390XState *env, uint32_t l, uint64_t s1, + uint64_t s2, uintptr_t ra) +{ + uint32_t i; + uint32_t cc = 0; + + HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n", + __func__, l, s1, s2); + + for (i = 0; i <= l; i++) { + uint8_t x = cpu_ldub_data_ra(env, s1 + i, ra); + uint8_t y = cpu_ldub_data_ra(env, s2 + i, ra); + HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y); + if (x < y) { + cc = 1; + break; + } else if (x > y) { + cc = 2; + break; + } + } + + HELPER_LOG("\n"); + return cc; +} + +uint32_t HELPER(clc)(CPUS390XState *env, uint32_t l, uint64_t s1, uint64_t s2) +{ + return do_helper_clc(env, l, s1, s2, GETPC()); +} + +/* compare logical under mask */ +uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t mask, + uint64_t addr) +{ + uintptr_t ra = GETPC(); + uint32_t cc = 0; + + HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __func__, r1, + mask, addr); + + while (mask) { + if (mask & 8) { + uint8_t d = cpu_ldub_data_ra(env, addr, ra); + uint8_t r = extract32(r1, 24, 8); + HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d, + addr); + if (r < d) { + cc = 1; + break; + } else if (r > d) { + cc = 2; + break; + } + addr++; + } + mask = (mask << 1) & 0xf; + r1 <<= 8; + } + + HELPER_LOG("\n"); + return cc; +} + +static inline uint64_t get_address(CPUS390XState *env, int reg) +{ + return wrap_address(env, env->regs[reg]); +} + +/* + * Store the address to the given register, zeroing out unused leftmost + * bits in bit positions 32-63 (24-bit and 31-bit mode only). + */ +static inline void set_address_zero(CPUS390XState *env, int reg, + uint64_t address) +{ + if (env->psw.mask & PSW_MASK_64) { + env->regs[reg] = address; + } else { + if (!(env->psw.mask & PSW_MASK_32)) { + address &= 0x00ffffff; + } else { + address &= 0x7fffffff; + } + env->regs[reg] = deposit64(env->regs[reg], 0, 32, address); + } +} + +static inline void set_address(CPUS390XState *env, int reg, uint64_t address) +{ + if (env->psw.mask & PSW_MASK_64) { + /* 64-Bit mode */ + env->regs[reg] = address; + } else { + if (!(env->psw.mask & PSW_MASK_32)) { + /* 24-Bit mode. According to the PoO it is implementation + dependent if bits 32-39 remain unchanged or are set to + zeros. Choose the former so that the function can also be + used for TRT. */ + env->regs[reg] = deposit64(env->regs[reg], 0, 24, address); + } else { + /* 31-Bit mode. According to the PoO it is implementation + dependent if bit 32 remains unchanged or is set to zero. + Choose the latter so that the function can also be used for + TRT. 
*/ + address &= 0x7fffffff; + env->regs[reg] = deposit64(env->regs[reg], 0, 32, address); + } + } +} + +static inline uint64_t wrap_length32(CPUS390XState *env, uint64_t length) +{ + if (!(env->psw.mask & PSW_MASK_64)) { + return (uint32_t)length; + } + return length; +} + +static inline uint64_t wrap_length31(CPUS390XState *env, uint64_t length) +{ + if (!(env->psw.mask & PSW_MASK_64)) { + /* 24-Bit and 31-Bit mode */ + length &= 0x7fffffff; + } + return length; +} + +static inline uint64_t get_length(CPUS390XState *env, int reg) +{ + return wrap_length31(env, env->regs[reg]); +} + +static inline void set_length(CPUS390XState *env, int reg, uint64_t length) +{ + if (env->psw.mask & PSW_MASK_64) { + /* 64-Bit mode */ + env->regs[reg] = length; + } else { + /* 24-Bit and 31-Bit mode */ + env->regs[reg] = deposit64(env->regs[reg], 0, 32, length); + } +} + +/* search string (c is byte to search, r2 is string, r1 end of string) */ +void HELPER(srst)(CPUS390XState *env, uint32_t r1, uint32_t r2) +{ + uintptr_t ra = GETPC(); + uint64_t end, str; + uint32_t len; + uint8_t v, c = env->regs[0]; + + /* Bits 32-55 must contain all 0. */ + if (env->regs[0] & 0xffffff00u) { + tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); + } + + str = get_address(env, r2); + end = get_address(env, r1); + + /* Lest we fail to service interrupts in a timely manner, limit the + amount of work we're willing to do. For now, let's cap at 8k. */ + for (len = 0; len < 0x2000; ++len) { + if (str + len == end) { + /* Character not found. R1 & R2 are unmodified. */ + env->cc_op = 2; + return; + } + v = cpu_ldub_data_ra(env, str + len, ra); + if (v == c) { + /* Character found. Set R1 to the location; R2 is unmodified. */ + env->cc_op = 1; + set_address(env, r1, str + len); + return; + } + } + + /* CPU-determined bytes processed. Advance R2 to next byte to process. */ + env->cc_op = 3; + set_address(env, r2, str + len); +} + +void HELPER(srstu)(CPUS390XState *env, uint32_t r1, uint32_t r2) +{ + uintptr_t ra = GETPC(); + uint32_t len; + uint16_t v, c = env->regs[0]; + uint64_t end, str, adj_end; + + /* Bits 32-47 of R0 must be zero. */ + if (env->regs[0] & 0xffff0000u) { + tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); + } + + str = get_address(env, r2); + end = get_address(env, r1); + + /* If the LSB of the two addresses differ, use one extra byte. */ + adj_end = end + ((str ^ end) & 1); + + /* Lest we fail to service interrupts in a timely manner, limit the + amount of work we're willing to do. For now, let's cap at 8k. */ + for (len = 0; len < 0x2000; len += 2) { + if (str + len == adj_end) { + /* End of input found. */ + env->cc_op = 2; + return; + } + v = cpu_lduw_data_ra(env, str + len, ra); + if (v == c) { + /* Character found. Set R1 to the location; R2 is unmodified. */ + env->cc_op = 1; + set_address(env, r1, str + len); + return; + } + } + + /* CPU-determined bytes processed. Advance R2 to next byte to process. */ + env->cc_op = 3; + set_address(env, r2, str + len); +} + +/* unsigned string compare (c is string terminator) */ +uint64_t HELPER(clst)(CPUS390XState *env, uint64_t c, uint64_t s1, uint64_t s2) +{ + uintptr_t ra = GETPC(); + uint32_t len; + + c = c & 0xff; + s1 = wrap_address(env, s1); + s2 = wrap_address(env, s2); + + /* Lest we fail to service interrupts in a timely manner, limit the + amount of work we're willing to do. For now, let's cap at 8k. 
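+       Returning CC=3 with the registers advanced makes the guest
+       re-drive CLST on the remainder, which is the architected model
+       for interruptible string instructions.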
*/
+    for (len = 0; len < 0x2000; ++len) {
+        uint8_t v1 = cpu_ldub_data_ra(env, s1 + len, ra);
+        uint8_t v2 = cpu_ldub_data_ra(env, s2 + len, ra);
+        if (v1 == v2) {
+            if (v1 == c) {
+                /* Equal. CC=0, and don't advance the registers. */
+                env->cc_op = 0;
+                env->retxl = s2;
+                return s1;
+            }
+        } else {
+            /* Unequal. CC={1,2}, and advance the registers. Note that
+               the terminator need not be zero, but the string that contains
+               the terminator is by definition "low". */
+            env->cc_op = (v1 == c ? 1 : v2 == c ? 2 : v1 < v2 ? 1 : 2);
+            env->retxl = s2 + len;
+            return s1 + len;
+        }
+    }
+
+    /* CPU-determined bytes equal; advance the registers. */
+    env->cc_op = 3;
+    env->retxl = s2 + len;
+    return s1 + len;
+}
+
+/* move page */
+uint32_t HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
+{
+    const int mmu_idx = cpu_mmu_index(env, false);
+    const bool f = extract64(r0, 11, 1);
+    const bool s = extract64(r0, 10, 1);
+    uintptr_t ra = GETPC();
+    S390Access srca, desta;
+
+    if ((f && s) || extract64(r0, 12, 4)) {
+        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
+    }
+
+    r1 = wrap_address(env, r1 & TARGET_PAGE_MASK);
+    r2 = wrap_address(env, r2 & TARGET_PAGE_MASK);
+
+    /*
+     * TODO:
+     * - Access key handling
+     * - CC-option with suppression of page-translation exceptions
+     * - Store r1/r2 register identifiers at real location 162
+     */
+    srca = access_prepare(env, r2, TARGET_PAGE_SIZE, MMU_DATA_LOAD, mmu_idx,
+                          ra);
+    desta = access_prepare(env, r1, TARGET_PAGE_SIZE, MMU_DATA_STORE, mmu_idx,
+                          ra);
+    access_memmove(env, &desta, &srca, ra);
+    return 0; /* data moved */
+}
+
+/* string copy */
+uint32_t HELPER(mvst)(CPUS390XState *env, uint32_t r1, uint32_t r2)
+{
+    const int mmu_idx = cpu_mmu_index(env, false);
+    const uint64_t d = get_address(env, r1);
+    const uint64_t s = get_address(env, r2);
+    const uint8_t c = env->regs[0];
+    const int len = MIN(-(d | TARGET_PAGE_MASK), -(s | TARGET_PAGE_MASK));
+    S390Access srca, desta;
+    uintptr_t ra = GETPC();
+    int i;
+
+    if (env->regs[0] & 0xffffff00ull) {
+        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+    }
+
+    /*
+     * Our access should not exceed single pages, as we must not report access
+     * exceptions exceeding the actually copied range (which we don't know at
+     * this point). We might over-indicate watchpoints within the pages
+     * (if we ever care, we have to limit processing to a single byte).
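+     * (Worked example, assuming 4 KiB pages: for d = 0x1234,
+     * -(d | TARGET_PAGE_MASK) = 0x2000 - 0x1234 = 0xdcc, the bytes left
+     * on d's page; len is the smaller of that and the same for s.)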
+     */
+    srca = access_prepare(env, s, len, MMU_DATA_LOAD, mmu_idx, ra);
+    desta = access_prepare(env, d, len, MMU_DATA_STORE, mmu_idx, ra);
+    for (i = 0; i < len; i++) {
+        const uint8_t v = access_get_byte(env, &srca, i, ra);
+
+        access_set_byte(env, &desta, i, v, ra);
+        if (v == c) {
+            set_address_zero(env, r1, d + i);
+            return 1;
+        }
+    }
+    set_address_zero(env, r1, d + len);
+    set_address_zero(env, r2, s + len);
+    return 3;
+}
+
+/* load access registers r1 to r3 from memory at a2 */
+void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
+{
+    uintptr_t ra = GETPC();
+    int i;
+
+    if (a2 & 0x3) {
+        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+    }
+
+    for (i = r1;; i = (i + 1) % 16) {
+        env->aregs[i] = cpu_ldl_data_ra(env, a2, ra);
+        a2 += 4;
+
+        if (i == r3) {
+            break;
+        }
+    }
+}
+
+/* store access registers r1 to r3 in memory at a2 */
+void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
+{
+    uintptr_t ra = GETPC();
+    int i;
+
+    if (a2 & 0x3) {
+        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+    }
+
+    for (i = r1;; i = (i + 1) % 16) {
+        cpu_stl_data_ra(env, a2, env->aregs[i], ra);
+        a2 += 4;
+
+        if (i == r3) {
+            break;
+        }
+    }
+}
+
+/* move long helper */
+static inline uint32_t do_mvcl(CPUS390XState *env,
+                               uint64_t *dest, uint64_t *destlen,
+                               uint64_t *src, uint64_t *srclen,
+                               uint16_t pad, int wordsize, uintptr_t ra)
+{
+    const int mmu_idx = cpu_mmu_index(env, false);
+    int len = MIN(*destlen, -(*dest | TARGET_PAGE_MASK));
+    S390Access srca, desta;
+    int i, cc;
+
+    if (*destlen == *srclen) {
+        cc = 0;
+    } else if (*destlen < *srclen) {
+        cc = 1;
+    } else {
+        cc = 2;
+    }
+
+    if (!*destlen) {
+        return cc;
+    }
+
+    /*
+     * Only perform one type of operation (move/pad) at a time.
+     * Stay within single pages.
+     */
+    if (*srclen) {
+        /* Copy the src array */
+        len = MIN(MIN(*srclen, -(*src | TARGET_PAGE_MASK)), len);
+        *destlen -= len;
+        *srclen -= len;
+        srca = access_prepare(env, *src, len, MMU_DATA_LOAD, mmu_idx, ra);
+        desta = access_prepare(env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
+        access_memmove(env, &desta, &srca, ra);
+        *src = wrap_address(env, *src + len);
+        *dest = wrap_address(env, *dest + len);
+    } else if (wordsize == 1) {
+        /* Pad the remaining area */
+        *destlen -= len;
+        desta = access_prepare(env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
+        access_memset(env, &desta, pad, ra);
+        *dest = wrap_address(env, *dest + len);
+    } else {
+        desta = access_prepare(env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
+
+        /* The remaining length selects the padding byte. */
+        for (i = 0; i < len; (*destlen)--, i++) {
+            if (*destlen & 1) {
+                access_set_byte(env, &desta, i, pad, ra);
+            } else {
+                access_set_byte(env, &desta, i, pad >> 8, ra);
+            }
+        }
+        *dest = wrap_address(env, *dest + len);
+    }
+
+    return *destlen ? 3 : cc;
+}
+
+/* move long */
+uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
+{
+    const int mmu_idx = cpu_mmu_index(env, false);
+    uintptr_t ra = GETPC();
+    uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
+    uint64_t dest = get_address(env, r1);
+    uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
+    uint64_t src = get_address(env, r2);
+    uint8_t pad = env->regs[r2 + 1] >> 24;
+    CPUState *cs = env_cpu(env);
+    S390Access srca, desta;
+    uint32_t cc, cur_len;
+
+    if (is_destructive_overlap(env, dest, src, MIN(srclen, destlen))) {
+        cc = 3;
+    } else if (srclen == destlen) {
+        cc = 0;
+    } else if (destlen < srclen) {
+        cc = 1;
+    } else {
+        cc = 2;
+    }
+
+    /* We might have to zero-out some bits even if there was no action. */
+    if (unlikely(!destlen || cc == 3)) {
+        set_address_zero(env, r2, src);
+        set_address_zero(env, r1, dest);
+        return cc;
+    } else if (!srclen) {
+        set_address_zero(env, r2, src);
+    }
+
+    /*
+     * Only perform one type of operation (move/pad) in one step.
+     * Stay within single pages.
+     */
+    while (destlen) {
+        cur_len = MIN(destlen, -(dest | TARGET_PAGE_MASK));
+        if (!srclen) {
+            desta = access_prepare(env, dest, cur_len, MMU_DATA_STORE, mmu_idx,
+                                   ra);
+            access_memset(env, &desta, pad, ra);
+        } else {
+            cur_len = MIN(MIN(srclen, -(src | TARGET_PAGE_MASK)), cur_len);
+
+            srca = access_prepare(env, src, cur_len, MMU_DATA_LOAD, mmu_idx,
+                                  ra);
+            desta = access_prepare(env, dest, cur_len, MMU_DATA_STORE, mmu_idx,
+                                   ra);
+            access_memmove(env, &desta, &srca, ra);
+            src = wrap_address(env, src + cur_len);
+            srclen -= cur_len;
+            env->regs[r2 + 1] = deposit64(env->regs[r2 + 1], 0, 24, srclen);
+            set_address_zero(env, r2, src);
+        }
+        dest = wrap_address(env, dest + cur_len);
+        destlen -= cur_len;
+        env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, destlen);
+        set_address_zero(env, r1, dest);
+
+        /*
+         * MVCL is interruptible. Return to the main loop if requested after
+         * writing back all state to registers. If no interrupt will get
+         * injected, we'll end up back in this handler and continue processing
+         * the remaining parts.
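+         * (All operand state - R1, R1+1, R2 and R2+1 - was written back
+         * just above, so a restart resumes exactly where the previous
+         * iteration stopped.)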
+ */ + if (destlen && unlikely(cpu_loop_exit_requested(cs))) { + cpu_loop_exit_restore(cs, ra); + } + } + return cc; +} + +/* move long extended */ +uint32_t HELPER(mvcle)(CPUS390XState *env, uint32_t r1, uint64_t a2, + uint32_t r3) +{ + uintptr_t ra = GETPC(); + uint64_t destlen = get_length(env, r1 + 1); + uint64_t dest = get_address(env, r1); + uint64_t srclen = get_length(env, r3 + 1); + uint64_t src = get_address(env, r3); + uint8_t pad = a2; + uint32_t cc; + + cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 1, ra); + + set_length(env, r1 + 1, destlen); + set_length(env, r3 + 1, srclen); + set_address(env, r1, dest); + set_address(env, r3, src); + + return cc; +} + +/* move long unicode */ +uint32_t HELPER(mvclu)(CPUS390XState *env, uint32_t r1, uint64_t a2, + uint32_t r3) +{ + uintptr_t ra = GETPC(); + uint64_t destlen = get_length(env, r1 + 1); + uint64_t dest = get_address(env, r1); + uint64_t srclen = get_length(env, r3 + 1); + uint64_t src = get_address(env, r3); + uint16_t pad = a2; + uint32_t cc; + + cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 2, ra); + + set_length(env, r1 + 1, destlen); + set_length(env, r3 + 1, srclen); + set_address(env, r1, dest); + set_address(env, r3, src); + + return cc; +} + +/* compare logical long helper */ +static inline uint32_t do_clcl(CPUS390XState *env, + uint64_t *src1, uint64_t *src1len, + uint64_t *src3, uint64_t *src3len, + uint16_t pad, uint64_t limit, + int wordsize, uintptr_t ra) +{ + uint64_t len = MAX(*src1len, *src3len); + uint32_t cc = 0; + + check_alignment(env, *src1len | *src3len, wordsize, ra); + + if (!len) { + return cc; + } + + /* Lest we fail to service interrupts in a timely manner, limit the + amount of work we're willing to do. */ + if (len > limit) { + len = limit; + cc = 3; + } + + for (; len; len -= wordsize) { + uint16_t v1 = pad; + uint16_t v3 = pad; + + if (*src1len) { + v1 = cpu_ldusize_data_ra(env, *src1, wordsize, ra); + } + if (*src3len) { + v3 = cpu_ldusize_data_ra(env, *src3, wordsize, ra); + } + + if (v1 != v3) { + cc = (v1 < v3) ? 
1 : 2; + break; + } + + if (*src1len) { + *src1 += wordsize; + *src1len -= wordsize; + } + if (*src3len) { + *src3 += wordsize; + *src3len -= wordsize; + } + } + + return cc; +} + + +/* compare logical long */ +uint32_t HELPER(clcl)(CPUS390XState *env, uint32_t r1, uint32_t r2) +{ + uintptr_t ra = GETPC(); + uint64_t src1len = extract64(env->regs[r1 + 1], 0, 24); + uint64_t src1 = get_address(env, r1); + uint64_t src3len = extract64(env->regs[r2 + 1], 0, 24); + uint64_t src3 = get_address(env, r2); + uint8_t pad = env->regs[r2 + 1] >> 24; + uint32_t cc; + + cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, -1, 1, ra); + + env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, src1len); + env->regs[r2 + 1] = deposit64(env->regs[r2 + 1], 0, 24, src3len); + set_address(env, r1, src1); + set_address(env, r2, src3); + + return cc; +} + +/* compare logical long extended memcompare insn with padding */ +uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2, + uint32_t r3) +{ + uintptr_t ra = GETPC(); + uint64_t src1len = get_length(env, r1 + 1); + uint64_t src1 = get_address(env, r1); + uint64_t src3len = get_length(env, r3 + 1); + uint64_t src3 = get_address(env, r3); + uint8_t pad = a2; + uint32_t cc; + + cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, 0x2000, 1, ra); + + set_length(env, r1 + 1, src1len); + set_length(env, r3 + 1, src3len); + set_address(env, r1, src1); + set_address(env, r3, src3); + + return cc; +} + +/* compare logical long unicode memcompare insn with padding */ +uint32_t HELPER(clclu)(CPUS390XState *env, uint32_t r1, uint64_t a2, + uint32_t r3) +{ + uintptr_t ra = GETPC(); + uint64_t src1len = get_length(env, r1 + 1); + uint64_t src1 = get_address(env, r1); + uint64_t src3len = get_length(env, r3 + 1); + uint64_t src3 = get_address(env, r3); + uint16_t pad = a2; + uint32_t cc = 0; + + cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, 0x1000, 2, ra); + + set_length(env, r1 + 1, src1len); + set_length(env, r3 + 1, src3len); + set_address(env, r1, src1); + set_address(env, r3, src3); + + return cc; +} + +/* checksum */ +uint64_t HELPER(cksm)(CPUS390XState *env, uint64_t r1, + uint64_t src, uint64_t src_len) +{ + uintptr_t ra = GETPC(); + uint64_t max_len, len; + uint64_t cksm = (uint32_t)r1; + + /* Lest we fail to service interrupts in a timely manner, limit the + amount of work we're willing to do. For now, let's cap at 8k. */ + max_len = (src_len > 0x2000 ? 0x2000 : src_len); + + /* Process full words as available. */ + for (len = 0; len + 4 <= max_len; len += 4, src += 4) { + cksm += (uint32_t)cpu_ldl_data_ra(env, src, ra); + } + + switch (max_len - len) { + case 1: + cksm += cpu_ldub_data_ra(env, src, ra) << 24; + len += 1; + break; + case 2: + cksm += cpu_lduw_data_ra(env, src, ra) << 16; + len += 2; + break; + case 3: + cksm += cpu_lduw_data_ra(env, src, ra) << 16; + cksm += cpu_ldub_data_ra(env, src + 2, ra) << 8; + len += 3; + break; + } + + /* Fold the carry from the checksum. Note that we can see carry-out + during folding more than once (but probably not more than twice). */ + while (cksm > 0xffffffffull) { + cksm = (uint32_t)cksm + (cksm >> 32); + } + + /* Indicate whether or not we've processed everything. */ + env->cc_op = (len == src_len ? 0 : 3); + + /* Return both cksm and processed length. 
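+       (retxl is the s390x helper convention for a second 64-bit result:
+       it carries the checksum for R1, while the normal return value is
+       the number of bytes consumed, used to advance R2/R2+1.)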
*/ + env->retxl = cksm; + return len; +} + +void HELPER(pack)(CPUS390XState *env, uint32_t len, uint64_t dest, uint64_t src) +{ + uintptr_t ra = GETPC(); + int len_dest = len >> 4; + int len_src = len & 0xf; + uint8_t b; + + dest += len_dest; + src += len_src; + + /* last byte is special, it only flips the nibbles */ + b = cpu_ldub_data_ra(env, src, ra); + cpu_stb_data_ra(env, dest, (b << 4) | (b >> 4), ra); + src--; + len_src--; + + /* now pack every value */ + while (len_dest > 0) { + b = 0; + + if (len_src >= 0) { + b = cpu_ldub_data_ra(env, src, ra) & 0x0f; + src--; + len_src--; + } + if (len_src >= 0) { + b |= cpu_ldub_data_ra(env, src, ra) << 4; + src--; + len_src--; + } + + len_dest--; + dest--; + cpu_stb_data_ra(env, dest, b, ra); + } +} + +static inline void do_pkau(CPUS390XState *env, uint64_t dest, uint64_t src, + uint32_t srclen, int ssize, uintptr_t ra) +{ + int i; + /* The destination operand is always 16 bytes long. */ + const int destlen = 16; + + /* The operands are processed from right to left. */ + src += srclen - 1; + dest += destlen - 1; + + for (i = 0; i < destlen; i++) { + uint8_t b = 0; + + /* Start with a positive sign */ + if (i == 0) { + b = 0xc; + } else if (srclen > ssize) { + b = cpu_ldub_data_ra(env, src, ra) & 0x0f; + src -= ssize; + srclen -= ssize; + } + + if (srclen > ssize) { + b |= cpu_ldub_data_ra(env, src, ra) << 4; + src -= ssize; + srclen -= ssize; + } + + cpu_stb_data_ra(env, dest, b, ra); + dest--; + } +} + + +void HELPER(pka)(CPUS390XState *env, uint64_t dest, uint64_t src, + uint32_t srclen) +{ + do_pkau(env, dest, src, srclen, 1, GETPC()); +} + +void HELPER(pku)(CPUS390XState *env, uint64_t dest, uint64_t src, + uint32_t srclen) +{ + do_pkau(env, dest, src, srclen, 2, GETPC()); +} + +void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest, + uint64_t src) +{ + uintptr_t ra = GETPC(); + int len_dest = len >> 4; + int len_src = len & 0xf; + uint8_t b; + int second_nibble = 0; + + dest += len_dest; + src += len_src; + + /* last byte is special, it only flips the nibbles */ + b = cpu_ldub_data_ra(env, src, ra); + cpu_stb_data_ra(env, dest, (b << 4) | (b >> 4), ra); + src--; + len_src--; + + /* now pad every nibble with 0xf0 */ + + while (len_dest > 0) { + uint8_t cur_byte = 0; + + if (len_src > 0) { + cur_byte = cpu_ldub_data_ra(env, src, ra); + } + + len_dest--; + dest--; + + /* only advance one nibble at a time */ + if (second_nibble) { + cur_byte >>= 4; + len_src--; + src--; + } + second_nibble = !second_nibble; + + /* digit */ + cur_byte = (cur_byte & 0xf); + /* zone bits */ + cur_byte |= 0xf0; + + cpu_stb_data_ra(env, dest, cur_byte, ra); + } +} + +static inline uint32_t do_unpkau(CPUS390XState *env, uint64_t dest, + uint32_t destlen, int dsize, uint64_t src, + uintptr_t ra) +{ + int i; + uint32_t cc; + uint8_t b; + /* The source operand is always 16 bytes long. */ + const int srclen = 16; + + /* The operands are processed from right to left. */ + src += srclen - 1; + dest += destlen - dsize; + + /* Check for the sign. */ + b = cpu_ldub_data_ra(env, src, ra); + src--; + switch (b & 0xf) { + case 0xa: + case 0xc: + case 0xe ... 0xf: + cc = 0; /* plus */ + break; + case 0xb: + case 0xd: + cc = 1; /* minus */ + break; + default: + case 0x0 ... 0x9: + cc = 3; /* invalid */ + break; + } + + /* Now pad every nibble with 0x30, advancing one nibble at a time. */ + for (i = 0; i < destlen; i += dsize) { + if (i == (31 * dsize)) { + /* If length is 32/64 bytes, the leftmost byte is 0. 
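+               (A 16-byte source holds 32 nibbles: one sign plus 31
+               digits. A full-length destination wants 32 digits, so
+               the extra, leftmost digit can only be zero.)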
*/ + b = 0; + } else if (i % (2 * dsize)) { + b = cpu_ldub_data_ra(env, src, ra); + src--; + } else { + b >>= 4; + } + cpu_stsize_data_ra(env, dest, 0x30 + (b & 0xf), dsize, ra); + dest -= dsize; + } + + return cc; +} + +uint32_t HELPER(unpka)(CPUS390XState *env, uint64_t dest, uint32_t destlen, + uint64_t src) +{ + return do_unpkau(env, dest, destlen, 1, src, GETPC()); +} + +uint32_t HELPER(unpku)(CPUS390XState *env, uint64_t dest, uint32_t destlen, + uint64_t src) +{ + return do_unpkau(env, dest, destlen, 2, src, GETPC()); +} + +uint32_t HELPER(tp)(CPUS390XState *env, uint64_t dest, uint32_t destlen) +{ + uintptr_t ra = GETPC(); + uint32_t cc = 0; + int i; + + for (i = 0; i < destlen; i++) { + uint8_t b = cpu_ldub_data_ra(env, dest + i, ra); + /* digit */ + cc |= (b & 0xf0) > 0x90 ? 2 : 0; + + if (i == (destlen - 1)) { + /* sign */ + cc |= (b & 0xf) < 0xa ? 1 : 0; + } else { + /* digit */ + cc |= (b & 0xf) > 0x9 ? 2 : 0; + } + } + + return cc; +} + +static uint32_t do_helper_tr(CPUS390XState *env, uint32_t len, uint64_t array, + uint64_t trans, uintptr_t ra) +{ + uint32_t i; + + for (i = 0; i <= len; i++) { + uint8_t byte = cpu_ldub_data_ra(env, array + i, ra); + uint8_t new_byte = cpu_ldub_data_ra(env, trans + byte, ra); + cpu_stb_data_ra(env, array + i, new_byte, ra); + } + + return env->cc_op; +} + +void HELPER(tr)(CPUS390XState *env, uint32_t len, uint64_t array, + uint64_t trans) +{ + do_helper_tr(env, len, array, trans, GETPC()); +} + +uint64_t HELPER(tre)(CPUS390XState *env, uint64_t array, + uint64_t len, uint64_t trans) +{ + uintptr_t ra = GETPC(); + uint8_t end = env->regs[0] & 0xff; + uint64_t l = len; + uint64_t i; + uint32_t cc = 0; + + if (!(env->psw.mask & PSW_MASK_64)) { + array &= 0x7fffffff; + l = (uint32_t)l; + } + + /* Lest we fail to service interrupts in a timely manner, limit the + amount of work we're willing to do. For now, let's cap at 8k. */ + if (l > 0x2000) { + l = 0x2000; + cc = 3; + } + + for (i = 0; i < l; i++) { + uint8_t byte, new_byte; + + byte = cpu_ldub_data_ra(env, array + i, ra); + + if (byte == end) { + cc = 1; + break; + } + + new_byte = cpu_ldub_data_ra(env, trans + byte, ra); + cpu_stb_data_ra(env, array + i, new_byte, ra); + } + + env->cc_op = cc; + env->retxl = len - i; + return array + i; +} + +static inline uint32_t do_helper_trt(CPUS390XState *env, int len, + uint64_t array, uint64_t trans, + int inc, uintptr_t ra) +{ + int i; + + for (i = 0; i <= len; i++) { + uint8_t byte = cpu_ldub_data_ra(env, array + i * inc, ra); + uint8_t sbyte = cpu_ldub_data_ra(env, trans + byte, ra); + + if (sbyte != 0) { + set_address(env, 1, array + i * inc); + env->regs[2] = deposit64(env->regs[2], 0, 8, sbyte); + return (i == len) ? 
2 : 1; + } + } + + return 0; +} + +static uint32_t do_helper_trt_fwd(CPUS390XState *env, uint32_t len, + uint64_t array, uint64_t trans, + uintptr_t ra) +{ + return do_helper_trt(env, len, array, trans, 1, ra); +} + +uint32_t HELPER(trt)(CPUS390XState *env, uint32_t len, uint64_t array, + uint64_t trans) +{ + return do_helper_trt(env, len, array, trans, 1, GETPC()); +} + +static uint32_t do_helper_trt_bkwd(CPUS390XState *env, uint32_t len, + uint64_t array, uint64_t trans, + uintptr_t ra) +{ + return do_helper_trt(env, len, array, trans, -1, ra); +} + +uint32_t HELPER(trtr)(CPUS390XState *env, uint32_t len, uint64_t array, + uint64_t trans) +{ + return do_helper_trt(env, len, array, trans, -1, GETPC()); +} + +/* Translate one/two to one/two */ +uint32_t HELPER(trXX)(CPUS390XState *env, uint32_t r1, uint32_t r2, + uint32_t tst, uint32_t sizes) +{ + uintptr_t ra = GETPC(); + int dsize = (sizes & 1) ? 1 : 2; + int ssize = (sizes & 2) ? 1 : 2; + uint64_t tbl = get_address(env, 1); + uint64_t dst = get_address(env, r1); + uint64_t len = get_length(env, r1 + 1); + uint64_t src = get_address(env, r2); + uint32_t cc = 3; + int i; + + /* The lower address bits of TBL are ignored. For TROO, TROT, it's + the low 3 bits (double-word aligned). For TRTO, TRTT, it's either + the low 12 bits (4K, without ETF2-ENH) or 3 bits (with ETF2-ENH). */ + if (ssize == 2 && !s390_has_feat(env->uc, S390_FEAT_ETF2_ENH)) { + tbl &= -4096; + } else { + tbl &= -8; + } + + check_alignment(env, len, ssize, ra); + + /* Lest we fail to service interrupts in a timely manner, */ + /* limit the amount of work we're willing to do. */ + for (i = 0; i < 0x2000; i++) { + uint16_t sval = cpu_ldusize_data_ra(env, src, ssize, ra); + uint64_t tble = tbl + (sval * dsize); + uint16_t dval = cpu_ldusize_data_ra(env, tble, dsize, ra); + if (dval == tst) { + cc = 1; + break; + } + cpu_stsize_data_ra(env, dst, dval, dsize, ra); + + len -= ssize; + src += ssize; + dst += dsize; + + if (len == 0) { + cc = 0; + break; + } + } + + set_address(env, r1, dst); + set_length(env, r1 + 1, len); + set_address(env, r2, src); + + return cc; +} + +void HELPER(cdsg)(CPUS390XState *env, uint64_t addr, + uint32_t r1, uint32_t r3) +{ + uintptr_t ra = GETPC(); + Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]); + Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]); + Int128 oldv; + uint64_t oldh, oldl; + bool fail; + + check_alignment(env, addr, 16, ra); + + oldh = cpu_ldq_data_ra(env, addr + 0, ra); + oldl = cpu_ldq_data_ra(env, addr + 8, ra); + + oldv = int128_make128(oldl, oldh); + fail = !int128_eq(oldv, cmpv); + if (fail) { + newv = oldv; + } + + cpu_stq_data_ra(env, addr + 0, int128_gethi(newv), ra); + cpu_stq_data_ra(env, addr + 8, int128_getlo(newv), ra); + + env->cc_op = fail; + env->regs[r1] = int128_gethi(oldv); + env->regs[r1 + 1] = int128_getlo(oldv); +} + +void HELPER(cdsg_parallel)(CPUS390XState *env, uint64_t addr, + uint32_t r1, uint32_t r3) +{ + uintptr_t ra = GETPC(); + Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]); + Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]); + int mem_idx; + TCGMemOpIdx oi; + Int128 oldv; + bool fail; + + assert(HAVE_CMPXCHG128); + + mem_idx = cpu_mmu_index(env, false); + oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); + oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra); + fail = !int128_eq(oldv, cmpv); + + env->cc_op = fail; + env->regs[r1] = int128_gethi(oldv); + env->regs[r1 + 1] = int128_getlo(oldv); +} + +static uint32_t 
do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1, + uint64_t a2, bool parallel) +{ + uint32_t mem_idx = cpu_mmu_index(env, false); + uintptr_t ra = GETPC(); + uint32_t fc = extract32(env->regs[0], 0, 8); + uint32_t sc = extract32(env->regs[0], 8, 8); + uint64_t pl = get_address(env, 1) & -16; + uint64_t svh, svl; + uint32_t cc; + + /* Sanity check the function code and storage characteristic. */ + if (fc > 1 || sc > 3) { + if (!s390_has_feat(env->uc, S390_FEAT_COMPARE_AND_SWAP_AND_STORE_2)) { + goto spec_exception; + } + if (fc > 2 || sc > 4 || (fc == 2 && (r3 & 1))) { + goto spec_exception; + } + } + + /* Sanity check the alignments. */ + if (extract32(a1, 0, fc + 2) || extract32(a2, 0, sc)) { + goto spec_exception; + } + + /* Sanity check writability of the store address. */ + probe_write(env, a2, 1 << sc, mem_idx, ra); + + /* + * Note that the compare-and-swap is atomic, and the store is atomic, + * but the complete operation is not. Therefore we do not need to + * assert serial context in order to implement this. That said, + * restart early if we can't support either operation that is supposed + * to be atomic. + */ + if (parallel) { + uint32_t max = 2; +#ifdef CONFIG_ATOMIC64 + max = 3; +#endif + if ((HAVE_CMPXCHG128 ? 0 : fc + 2 > max) || + (HAVE_ATOMIC128 ? 0 : sc > max)) { + cpu_loop_exit_atomic(env_cpu(env), ra); + } + } + + /* All loads happen before all stores. For simplicity, load the entire + store value area from the parameter list. */ + svh = cpu_ldq_data_ra(env, pl + 16, ra); + svl = cpu_ldq_data_ra(env, pl + 24, ra); + + switch (fc) { + case 0: + { + uint32_t nv = cpu_ldl_data_ra(env, pl, ra); + uint32_t cv = env->regs[r3]; + uint32_t ov; + + if (parallel) { + TCGMemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mem_idx); + ov = helper_atomic_cmpxchgl_be_mmu(env, a1, cv, nv, oi, ra); + } else { + ov = cpu_ldl_data_ra(env, a1, ra); + cpu_stl_data_ra(env, a1, (ov == cv ? nv : ov), ra); + } + cc = (ov != cv); + env->regs[r3] = deposit64(env->regs[r3], 32, 32, ov); + } + break; + + case 1: + { + uint64_t nv = cpu_ldq_data_ra(env, pl, ra); + uint64_t cv = env->regs[r3]; + uint64_t ov; + + if (parallel) { +#ifdef CONFIG_ATOMIC64 + TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN, mem_idx); + ov = helper_atomic_cmpxchgq_be_mmu(env, a1, cv, nv, oi, ra); +#else + /* Note that we asserted !parallel above. */ + g_assert_not_reached(); +#endif + } else { + ov = cpu_ldq_data_ra(env, a1, ra); + cpu_stq_data_ra(env, a1, (ov == cv ? nv : ov), ra); + } + cc = (ov != cv); + env->regs[r3] = ov; + } + break; + + case 2: + { + uint64_t nvh = cpu_ldq_data_ra(env, pl, ra); + uint64_t nvl = cpu_ldq_data_ra(env, pl + 8, ra); + Int128 nv = int128_make128(nvl, nvh); + Int128 cv = int128_make128(env->regs[r3 + 1], env->regs[r3]); + Int128 ov; + + if (!parallel) { + uint64_t oh = cpu_ldq_data_ra(env, a1 + 0, ra); + uint64_t ol = cpu_ldq_data_ra(env, a1 + 8, ra); + + ov = int128_make128(ol, oh); + cc = !int128_eq(ov, cv); + if (cc) { + nv = ov; + } + + cpu_stq_data_ra(env, a1 + 0, int128_gethi(nv), ra); + cpu_stq_data_ra(env, a1 + 8, int128_getlo(nv), ra); + } else if (HAVE_CMPXCHG128) { + TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); + ov = helper_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra); + cc = !int128_eq(ov, cv); + } else { + /* Note that we asserted !parallel above. 
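+                   (The cpu_loop_exit_atomic() check near the top of this
+                   function already rejected any parallel execution that
+                   lacks 128-bit cmpxchg, so this path cannot be reached.)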
*/ + g_assert_not_reached(); + } + + env->regs[r3 + 0] = int128_gethi(ov); + env->regs[r3 + 1] = int128_getlo(ov); + } + break; + + default: + g_assert_not_reached(); + } + + /* Store only if the comparison succeeded. Note that above we use a pair + of 64-bit big-endian loads, so for sc < 3 we must extract the value + from the most-significant bits of svh. */ + if (cc == 0) { + switch (sc) { + case 0: + cpu_stb_data_ra(env, a2, svh >> 56, ra); + break; + case 1: + cpu_stw_data_ra(env, a2, svh >> 48, ra); + break; + case 2: + cpu_stl_data_ra(env, a2, svh >> 32, ra); + break; + case 3: + cpu_stq_data_ra(env, a2, svh, ra); + break; + case 4: + if (!parallel) { + cpu_stq_data_ra(env, a2 + 0, svh, ra); + cpu_stq_data_ra(env, a2 + 8, svl, ra); + } else if (HAVE_ATOMIC128) { + TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); + Int128 sv = int128_make128(svl, svh); + helper_atomic_sto_be_mmu(env, a2, sv, oi, ra); + } else { + /* Note that we asserted !parallel above. */ + g_assert_not_reached(); + } + break; + default: + g_assert_not_reached(); + } + } + + return cc; + + spec_exception: + tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); +} + +uint32_t HELPER(csst)(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2) +{ + return do_csst(env, r3, a1, a2, false); +} + +uint32_t HELPER(csst_parallel)(CPUS390XState *env, uint32_t r3, uint64_t a1, + uint64_t a2) +{ + return do_csst(env, r3, a1, a2, true); +} + +void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3) +{ + uintptr_t ra = GETPC(); + bool PERchanged = false; + uint64_t src = a2; + uint32_t i; + + if (src & 0x7) { + tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); + } + + for (i = r1;; i = (i + 1) % 16) { + uint64_t val = cpu_ldq_data_ra(env, src, ra); + if (env->cregs[i] != val && i >= 9 && i <= 11) { + PERchanged = true; + } + env->cregs[i] = val; + HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n", + i, src, val); + src += sizeof(uint64_t); + + if (i == r3) { + break; + } + } + + if (PERchanged && env->psw.mask & PSW_MASK_PER) { + s390_cpu_recompute_watchpoints(env_cpu(env)); + } + + tlb_flush(env_cpu(env)); +} + +void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3) +{ + uintptr_t ra = GETPC(); + bool PERchanged = false; + uint64_t src = a2; + uint32_t i; + + if (src & 0x3) { + tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); + } + + for (i = r1;; i = (i + 1) % 16) { + uint32_t val = cpu_ldl_data_ra(env, src, ra); + if ((uint32_t)env->cregs[i] != val && i >= 9 && i <= 11) { + PERchanged = true; + } + env->cregs[i] = deposit64(env->cregs[i], 0, 32, val); + HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%x\n", i, src, val); + src += sizeof(uint32_t); + + if (i == r3) { + break; + } + } + + if (PERchanged && env->psw.mask & PSW_MASK_PER) { + s390_cpu_recompute_watchpoints(env_cpu(env)); + } + + tlb_flush(env_cpu(env)); +} + +void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3) +{ + uintptr_t ra = GETPC(); + uint64_t dest = a2; + uint32_t i; + + if (dest & 0x7) { + tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); + } + + for (i = r1;; i = (i + 1) % 16) { + cpu_stq_data_ra(env, dest, env->cregs[i], ra); + dest += sizeof(uint64_t); + + if (i == r3) { + break; + } + } +} + +void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3) +{ + uintptr_t ra = GETPC(); + uint64_t dest = a2; + uint32_t i; + + if (dest & 0x3) { + tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); + } + + for (i = r1;; 
i = (i + 1) % 16) { + cpu_stl_data_ra(env, dest, env->cregs[i], ra); + dest += sizeof(uint32_t); + + if (i == r3) { + break; + } + } +} + +uint32_t HELPER(testblock)(CPUS390XState *env, uint64_t real_addr) +{ + uintptr_t ra = GETPC(); + int i; + + real_addr = wrap_address(env, real_addr) & TARGET_PAGE_MASK; + + for (i = 0; i < TARGET_PAGE_SIZE; i += 8) { + cpu_stq_mmuidx_ra(env, real_addr + i, 0, MMU_REAL_IDX, ra); + } + + return 0; +} + +uint32_t HELPER(tprot)(CPUS390XState *env, uint64_t a1, uint64_t a2) +{ + S390CPU *cpu = env_archcpu(env); + CPUState *cs = env_cpu(env); + + /* + * TODO: we currently don't handle all access protection types + * (including access-list and key-controlled) as well as AR mode. + */ + if (!s390_cpu_virt_mem_check_write(cpu, a1, 0, 1)) { + /* Fetching permitted; storing permitted */ + return 0; + } + + if (env->int_pgm_code == PGM_PROTECTION) { + /* retry if reading is possible */ + cs->exception_index = -1; + if (!s390_cpu_virt_mem_check_read(cpu, a1, 0, 1)) { + /* Fetching permitted; storing not permitted */ + return 1; + } + } + + switch (env->int_pgm_code) { + case PGM_PROTECTION: + /* Fetching not permitted; storing not permitted */ + cs->exception_index = -1; + return 2; + case PGM_ADDRESSING: + case PGM_TRANS_SPEC: + /* exceptions forwarded to the guest */ + s390_cpu_virt_mem_handle_exc(cpu, GETPC()); + return 0; + } + + /* Translation not available */ + cs->exception_index = -1; + return 3; +} + +/* insert storage key extended */ +uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2) +{ + return 0; + +#if 0 + static S390SKeysState *ss; + static S390SKeysClass *skeyclass; + uint64_t addr = wrap_address(env, r2); + uint8_t key; + +#if 0 + if (addr > ram_size) { + return 0; + } +#endif + + if (unlikely(!ss)) { + ss = s390_get_skeys_device(); + skeyclass = S390_SKEYS_GET_CLASS(ss); + } + + if (skeyclass->get_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key)) { + return 0; + } + return key; +#endif +} + +/* set storage key extended */ +void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2) +{ +#if 0 + static S390SKeysState *ss; + static S390SKeysClass *skeyclass; + uint64_t addr = wrap_address(env, r2); + uint8_t key; + +#if 0 + if (addr > ram_size) { + return; + } +#endif + + if (unlikely(!ss)) { + ss = s390_get_skeys_device(); + skeyclass = S390_SKEYS_GET_CLASS(ss); + } + + key = (uint8_t) r1; + skeyclass->set_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key); + /* + * As we can only flush by virtual address and not all the entries + * that point to a physical address we have to flush the whole TLB. + */ + tlb_flush_all_cpus_synced(env_cpu(env)); +#endif +} + +/* reset reference bit extended */ +uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2) +{ + return 0; +#if 0 + static S390SKeysState *ss; + static S390SKeysClass *skeyclass; + uint8_t re, key; + +#if 0 + if (r2 > ram_size) { + return 0; + } +#endif + + if (unlikely(!ss)) { + ss = s390_get_skeys_device(); + skeyclass = S390_SKEYS_GET_CLASS(ss); + } + + if (skeyclass->get_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) { + return 0; + } + + re = key & (SK_R | SK_C); + key &= ~SK_R; + + if (skeyclass->set_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) { + return 0; + } + /* + * As we can only flush by virtual address and not all the entries + * that point to a physical address we have to flush the whole TLB. 
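+     * (Storage keys attach to physical pages while the TLB is indexed
+     * by virtual address, so there is no cheaper precise flush.)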
+ */ + tlb_flush_all_cpus_synced(env_cpu(env)); + + /* + * cc + * + * 0 Reference bit zero; change bit zero + * 1 Reference bit zero; change bit one + * 2 Reference bit one; change bit zero + * 3 Reference bit one; change bit one + */ + + return re >> 1; +#endif +} + +uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2) +{ + const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC; + S390Access srca, desta; + uintptr_t ra = GETPC(); + int cc = 0; + + HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n", + __func__, l, a1, a2); + + if (!(env->psw.mask & PSW_MASK_DAT) || !(env->cregs[0] & CR0_SECONDARY) || + psw_as == AS_HOME || psw_as == AS_ACCREG) { + s390_program_interrupt(env, PGM_SPECIAL_OP, ra); + } + + l = wrap_length32(env, l); + if (l > 256) { + /* max 256 */ + l = 256; + cc = 3; + } else if (!l) { + return cc; + } + + /* TODO: Access key handling */ + srca = access_prepare(env, a2, l, MMU_DATA_LOAD, MMU_PRIMARY_IDX, ra); + desta = access_prepare(env, a1, l, MMU_DATA_STORE, MMU_SECONDARY_IDX, ra); + access_memmove(env, &desta, &srca, ra); + return cc; +} + +uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2) +{ + const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC; + S390Access srca, desta; + uintptr_t ra = GETPC(); + int cc = 0; + + HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n", + __func__, l, a1, a2); + + if (!(env->psw.mask & PSW_MASK_DAT) || !(env->cregs[0] & CR0_SECONDARY) || + psw_as == AS_HOME || psw_as == AS_ACCREG) { + s390_program_interrupt(env, PGM_SPECIAL_OP, ra); + } + + l = wrap_length32(env, l); + if (l > 256) { + /* max 256 */ + l = 256; + cc = 3; + } else if (!l) { + return cc; + } + + /* TODO: Access key handling */ + srca = access_prepare(env, a2, l, MMU_DATA_LOAD, MMU_SECONDARY_IDX, ra); + desta = access_prepare(env, a1, l, MMU_DATA_STORE, MMU_PRIMARY_IDX, ra); + access_memmove(env, &desta, &srca, ra); + return cc; +} + +void HELPER(idte)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint32_t m4) +{ + CPUState *cs = env_cpu(env); + const uintptr_t ra = GETPC(); + uint64_t table, entry, raddr; + uint16_t entries, i, index = 0; + + if (r2 & 0xff000) { + tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); + } + + if (!(r2 & 0x800)) { + /* invalidation-and-clearing operation */ + table = r1 & ASCE_ORIGIN; + entries = (r2 & 0x7ff) + 1; + + switch (r1 & ASCE_TYPE_MASK) { + case ASCE_TYPE_REGION1: + index = (r2 >> 53) & 0x7ff; + break; + case ASCE_TYPE_REGION2: + index = (r2 >> 42) & 0x7ff; + break; + case ASCE_TYPE_REGION3: + index = (r2 >> 31) & 0x7ff; + break; + case ASCE_TYPE_SEGMENT: + index = (r2 >> 20) & 0x7ff; + break; + } + for (i = 0; i < entries; i++) { + /* addresses are not wrapped in 24/31bit mode but table index is */ + raddr = table + ((index + i) & 0x7ff) * sizeof(entry); + entry = cpu_ldq_mmuidx_ra(env, raddr, MMU_REAL_IDX, ra); + if (!(entry & REGION_ENTRY_I)) { + /* we are allowed to not store if already invalid */ + entry |= REGION_ENTRY_I; + cpu_stq_mmuidx_ra(env, raddr, entry, MMU_REAL_IDX, ra); + } + } + } + + /* We simply flush the complete tlb, therefore we can ignore r3. 
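+       A finer-grained implementation could use the ASCE designated by
+       r3 to invalidate only matching TLB entries; flushing everything
+       is a conservative over-approximation.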
*/ + if (m4 & 1) { + tlb_flush(cs); + } else { + tlb_flush_all_cpus_synced(cs); + } +} + +/* invalidate pte */ +void HELPER(ipte)(CPUS390XState *env, uint64_t pto, uint64_t vaddr, + uint32_t m4) +{ + CPUState *cs = env_cpu(env); + const uintptr_t ra = GETPC(); + uint64_t page = vaddr & TARGET_PAGE_MASK; + uint64_t pte_addr, pte; + + /* Compute the page table entry address */ + pte_addr = (pto & SEGMENT_ENTRY_ORIGIN); + pte_addr += VADDR_PAGE_TX(vaddr) * 8; + + /* Mark the page table entry as invalid */ + pte = cpu_ldq_mmuidx_ra(env, pte_addr, MMU_REAL_IDX, ra); + pte |= PAGE_ENTRY_I; + cpu_stq_mmuidx_ra(env, pte_addr, pte, MMU_REAL_IDX, ra); + + /* XXX we exploit the fact that Linux passes the exact virtual + address here - it's not obliged to! */ + if (m4 & 1) { + if (vaddr & ~VADDR_PAGE_TX_MASK) { + tlb_flush_page(cs, page); + /* XXX 31-bit hack */ + tlb_flush_page(cs, page ^ 0x80000000); + } else { + /* looks like we don't have a valid virtual address */ + tlb_flush(cs); + } + } else { + if (vaddr & ~VADDR_PAGE_TX_MASK) { + tlb_flush_page_all_cpus_synced(cs, page); + /* XXX 31-bit hack */ + tlb_flush_page_all_cpus_synced(cs, page ^ 0x80000000); + } else { + /* looks like we don't have a valid virtual address */ + tlb_flush_all_cpus_synced(cs); + } + } +} + +/* flush local tlb */ +void HELPER(ptlb)(CPUS390XState *env) +{ + tlb_flush(env_cpu(env)); +} + +/* flush global tlb */ +void HELPER(purge)(CPUS390XState *env) +{ + tlb_flush_all_cpus_synced(env_cpu(env)); +} + +/* load real address */ +uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr) +{ + uint64_t asc = env->psw.mask & PSW_MASK_ASC; + uint64_t ret, tec; + int flags, exc, cc; + + /* XXX incomplete - has more corner cases */ + if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) { + tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, GETPC()); + } + + exc = mmu_translate(env, addr, 0, asc, &ret, &flags, &tec); + if (exc) { + cc = 3; + ret = exc | 0x80000000; + } else { + cc = 0; + ret |= addr & ~TARGET_PAGE_MASK; + } + + env->cc_op = cc; + return ret; +} + +/* load pair from quadword */ +uint64_t HELPER(lpq)(CPUS390XState *env, uint64_t addr) +{ + uintptr_t ra = GETPC(); + uint64_t hi, lo; + + check_alignment(env, addr, 16, ra); + hi = cpu_ldq_data_ra(env, addr + 0, ra); + lo = cpu_ldq_data_ra(env, addr + 8, ra); + + env->retxl = lo; + return hi; +} + +uint64_t HELPER(lpq_parallel)(CPUS390XState *env, uint64_t addr) +{ + uintptr_t ra = GETPC(); + uint64_t hi, lo; + int mem_idx; + TCGMemOpIdx oi; + Int128 v; + + assert(HAVE_ATOMIC128); + + mem_idx = cpu_mmu_index(env, false); + oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); + v = helper_atomic_ldo_be_mmu(env, addr, oi, ra); + hi = int128_gethi(v); + lo = int128_getlo(v); + + env->retxl = lo; + return hi; +} + +/* store pair to quadword */ +void HELPER(stpq)(CPUS390XState *env, uint64_t addr, + uint64_t low, uint64_t high) +{ + uintptr_t ra = GETPC(); + + check_alignment(env, addr, 16, ra); + cpu_stq_data_ra(env, addr + 0, high, ra); + cpu_stq_data_ra(env, addr + 8, low, ra); +} + +void HELPER(stpq_parallel)(CPUS390XState *env, uint64_t addr, + uint64_t low, uint64_t high) +{ + uintptr_t ra = GETPC(); + int mem_idx; + TCGMemOpIdx oi; + Int128 v; + + assert(HAVE_ATOMIC128); + + mem_idx = cpu_mmu_index(env, false); + oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); + v = int128_make128(low, high); + helper_atomic_sto_be_mmu(env, addr, v, oi, ra); +} + +/* Execute instruction. This instruction executes an insn modified with + the contents of r1. 
It does not change the executed instruction in memory; + it does not change the program counter. + + Perform this by recording the modified instruction in env->ex_value. + This will be noticed by cpu_get_tb_cpu_state and thus tb translation. +*/ +void HELPER(ex)(CPUS390XState *env, uint32_t ilen, uint64_t r1, uint64_t addr) +{ + uint64_t insn = cpu_lduw_code(env, addr); + uint8_t opc = insn >> 8; + + /* Or in the contents of R1[56:63]. */ + insn |= r1 & 0xff; + + /* Load the rest of the instruction. */ + insn <<= 48; + switch (get_ilen(opc)) { + case 2: + break; + case 4: + insn |= (uint64_t)cpu_lduw_code(env, addr + 2) << 32; + break; + case 6: + insn |= (uint64_t)(uint32_t)cpu_ldl_code(env, addr + 2) << 16; + break; + default: + g_assert_not_reached(); + } + + /* The very most common cases can be sped up by avoiding a new TB. */ + if ((opc & 0xf0) == 0xd0) { + typedef uint32_t (*dx_helper)(CPUS390XState *, uint32_t, uint64_t, + uint64_t, uintptr_t); + static const dx_helper dx[16] = { + [0x0] = do_helper_trt_bkwd, + [0x2] = do_helper_mvc, + [0x4] = do_helper_nc, + [0x5] = do_helper_clc, + [0x6] = do_helper_oc, + [0x7] = do_helper_xc, + [0xc] = do_helper_tr, + [0xd] = do_helper_trt_fwd, + }; + dx_helper helper = dx[opc & 0xf]; + + if (helper) { + uint32_t l = extract64(insn, 48, 8); + uint32_t b1 = extract64(insn, 44, 4); + uint32_t d1 = extract64(insn, 32, 12); + uint32_t b2 = extract64(insn, 28, 4); + uint32_t d2 = extract64(insn, 16, 12); + uint64_t a1 = wrap_address(env, env->regs[b1] + d1); + uint64_t a2 = wrap_address(env, env->regs[b2] + d2); + + env->cc_op = helper(env, l, a1, a2, 0); + env->psw.addr += ilen; + return; + } + } else if (opc == 0x0a) { + env->int_svc_code = extract64(insn, 48, 8); + env->int_svc_ilen = ilen; + helper_exception(env, EXCP_SVC); + g_assert_not_reached(); + } + + /* Record the insn we want to execute as well as the ilen to use + during the execution of the target insn. This will also ensure + that ex_value is non-zero, which flags that we are in a state + that requires such execution. 
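+       (Illustration: the target insn is left-justified in the high bits
+       of ex_value and ilen (2, 4 or 6) lands in the low byte, so a
+       pending EX always makes ex_value non-zero.)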
*/ + env->ex_value = insn | ilen; +} + +uint32_t HELPER(mvcos)(CPUS390XState *env, uint64_t dest, uint64_t src, + uint64_t len) +{ + const uint8_t psw_key = (env->psw.mask & PSW_MASK_KEY) >> PSW_SHIFT_KEY; + const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC; + const uint64_t r0 = env->regs[0]; + const uintptr_t ra = GETPC(); + uint8_t dest_key, dest_as, dest_k, dest_a; + uint8_t src_key, src_as, src_k, src_a; + uint64_t val; + int cc = 0; + + HELPER_LOG("%s dest %" PRIx64 ", src %" PRIx64 ", len %" PRIx64 "\n", + __func__, dest, src, len); + + if (!(env->psw.mask & PSW_MASK_DAT)) { + tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, ra); + } + + /* OAC (operand access control) for the first operand -> dest */ + val = (r0 & 0xffff0000ULL) >> 16; + dest_key = (val >> 12) & 0xf; + dest_as = (val >> 6) & 0x3; + dest_k = (val >> 1) & 0x1; + dest_a = val & 0x1; + + /* OAC (operand access control) for the second operand -> src */ + val = (r0 & 0x0000ffffULL); + src_key = (val >> 12) & 0xf; + src_as = (val >> 6) & 0x3; + src_k = (val >> 1) & 0x1; + src_a = val & 0x1; + + if (!dest_k) { + dest_key = psw_key; + } + if (!src_k) { + src_key = psw_key; + } + if (!dest_a) { + dest_as = psw_as; + } + if (!src_a) { + src_as = psw_as; + } + + if (dest_a && dest_as == AS_HOME && (env->psw.mask & PSW_MASK_PSTATE)) { + tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, ra); + } + if (!(env->cregs[0] & CR0_SECONDARY) && + (dest_as == AS_SECONDARY || src_as == AS_SECONDARY)) { + tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, ra); + } + if (!psw_key_valid(env, dest_key) || !psw_key_valid(env, src_key)) { + tcg_s390_program_interrupt(env, PGM_PRIVILEGED, ra); + } + + len = wrap_length32(env, len); + if (len > 4096) { + cc = 3; + len = 4096; + } + + /* FIXME: AR-mode and proper problem state mode (using PSW keys) missing */ + if (src_as == AS_ACCREG || dest_as == AS_ACCREG || + (env->psw.mask & PSW_MASK_PSTATE)) { + qemu_log_mask(LOG_UNIMP, "%s: AR-mode and PSTATE support missing\n", + __func__); + tcg_s390_program_interrupt(env, PGM_ADDRESSING, ra); + } + + /* FIXME: Access using correct keys and AR-mode */ + if (len) { + S390Access srca = access_prepare(env, src, len, MMU_DATA_LOAD, + mmu_idx_from_as(src_as), ra); + S390Access desta = access_prepare(env, dest, len, MMU_DATA_STORE, + mmu_idx_from_as(dest_as), ra); + + access_memmove(env, &desta, &srca, ra); + } + + return cc; +} + +/* Decode a Unicode character. A return value < 0 indicates success, storing + the UTF-32 result into OCHAR and the input length into OLEN. A return + value >= 0 indicates failure, and the CC value to be returned. */ +typedef int (*decode_unicode_fn)(CPUS390XState *env, uint64_t addr, + uint64_t ilen, bool enh_check, uintptr_t ra, + uint32_t *ochar, uint32_t *olen); + +/* Encode a Unicode character. A return value < 0 indicates success, storing + the bytes into ADDR and the output length into OLEN. A return value >= 0 + indicates failure, and the CC value to be returned. */ +typedef int (*encode_unicode_fn)(CPUS390XState *env, uint64_t addr, + uint64_t ilen, uintptr_t ra, uint32_t c, + uint32_t *olen); + +static int decode_utf8(CPUS390XState *env, uint64_t addr, uint64_t ilen, + bool enh_check, uintptr_t ra, + uint32_t *ochar, uint32_t *olen) +{ + uint8_t s0, s1, s2, s3; + uint32_t c, l; + + if (ilen < 1) { + return 0; + } + s0 = cpu_ldub_data_ra(env, addr, ra); + if (s0 <= 0x7f) { + /* one byte character */ + l = 1; + c = s0; + } else if (s0 <= (enh_check ? 
0xc1 : 0xbf)) { + /* invalid character */ + return 2; + } else if (s0 <= 0xdf) { + /* two byte character */ + l = 2; + if (ilen < 2) { + return 0; + } + s1 = cpu_ldub_data_ra(env, addr + 1, ra); + c = s0 & 0x1f; + c = (c << 6) | (s1 & 0x3f); + if (enh_check && (s1 & 0xc0) != 0x80) { + return 2; + } + } else if (s0 <= 0xef) { + /* three byte character */ + l = 3; + if (ilen < 3) { + return 0; + } + s1 = cpu_ldub_data_ra(env, addr + 1, ra); + s2 = cpu_ldub_data_ra(env, addr + 2, ra); + c = s0 & 0x0f; + c = (c << 6) | (s1 & 0x3f); + c = (c << 6) | (s2 & 0x3f); + /* Fold the byte-by-byte range descriptions in the PoO into + tests against the complete value. It disallows encodings + that could be smaller, and the UTF-16 surrogates. */ + if (enh_check + && ((s1 & 0xc0) != 0x80 + || (s2 & 0xc0) != 0x80 + || c < 0x1000 + || (c >= 0xd800 && c <= 0xdfff))) { + return 2; + } + } else if (s0 <= (enh_check ? 0xf4 : 0xf7)) { + /* four byte character */ + l = 4; + if (ilen < 4) { + return 0; + } + s1 = cpu_ldub_data_ra(env, addr + 1, ra); + s2 = cpu_ldub_data_ra(env, addr + 2, ra); + s3 = cpu_ldub_data_ra(env, addr + 3, ra); + c = s0 & 0x07; + c = (c << 6) | (s1 & 0x3f); + c = (c << 6) | (s2 & 0x3f); + c = (c << 6) | (s3 & 0x3f); + /* See above. */ + if (enh_check + && ((s1 & 0xc0) != 0x80 + || (s2 & 0xc0) != 0x80 + || (s3 & 0xc0) != 0x80 + || c < 0x010000 + || c > 0x10ffff)) { + return 2; + } + } else { + /* invalid character */ + return 2; + } + + *ochar = c; + *olen = l; + return -1; +} + +static int decode_utf16(CPUS390XState *env, uint64_t addr, uint64_t ilen, + bool enh_check, uintptr_t ra, + uint32_t *ochar, uint32_t *olen) +{ + uint16_t s0, s1; + uint32_t c, l; + + if (ilen < 2) { + return 0; + } + s0 = cpu_lduw_data_ra(env, addr, ra); + if ((s0 & 0xfc00) != 0xd800) { + /* one word character */ + l = 2; + c = s0; + } else { + /* two word character */ + l = 4; + if (ilen < 4) { + return 0; + } + s1 = cpu_lduw_data_ra(env, addr + 2, ra); + c = extract32(s0, 6, 4) + 1; + c = (c << 6) | (s0 & 0x3f); + c = (c << 10) | (s1 & 0x3ff); + if (enh_check && (s1 & 0xfc00) != 0xdc00) { + /* invalid surrogate character */ + return 2; + } + } + + *ochar = c; + *olen = l; + return -1; +} + +static int decode_utf32(CPUS390XState *env, uint64_t addr, uint64_t ilen, + bool enh_check, uintptr_t ra, + uint32_t *ochar, uint32_t *olen) +{ + uint32_t c; + + if (ilen < 4) { + return 0; + } + c = cpu_ldl_data_ra(env, addr, ra); + if ((c >= 0xd800 && c <= 0xdbff) || c > 0x10ffff) { + /* invalid unicode character */ + return 2; + } + + *ochar = c; + *olen = 4; + return -1; +} + +static int encode_utf8(CPUS390XState *env, uint64_t addr, uint64_t ilen, + uintptr_t ra, uint32_t c, uint32_t *olen) +{ + uint8_t d[4]; + uint32_t l, i; + + if (c <= 0x7f) { + /* one byte character */ + l = 1; + d[0] = c; + } else if (c <= 0x7ff) { + /* two byte character */ + l = 2; + d[1] = 0x80 | extract32(c, 0, 6); + d[0] = 0xc0 | extract32(c, 6, 5); + } else if (c <= 0xffff) { + /* three byte character */ + l = 3; + d[2] = 0x80 | extract32(c, 0, 6); + d[1] = 0x80 | extract32(c, 6, 6); + d[0] = 0xe0 | extract32(c, 12, 4); + } else { + /* four byte character */ + l = 4; + d[3] = 0x80 | extract32(c, 0, 6); + d[2] = 0x80 | extract32(c, 6, 6); + d[1] = 0x80 | extract32(c, 12, 6); + d[0] = 0xf0 | extract32(c, 18, 3); + } + + if (ilen < l) { + return 1; + } + for (i = 0; i < l; ++i) { + cpu_stb_data_ra(env, addr + i, d[i], ra); + } + + *olen = l; + return -1; +} + +static int encode_utf16(CPUS390XState *env, uint64_t addr, uint64_t ilen, + uintptr_t 
ra, uint32_t c, uint32_t *olen) +{ + uint16_t d0, d1; + + if (c <= 0xffff) { + /* one word character */ + if (ilen < 2) { + return 1; + } + cpu_stw_data_ra(env, addr, c, ra); + *olen = 2; + } else { + /* two word character */ + if (ilen < 4) { + return 1; + } + d1 = 0xdc00 | extract32(c, 0, 10); + d0 = 0xd800 | extract32(c, 10, 6); + d0 = deposit32(d0, 6, 4, extract32(c, 16, 5) - 1); + cpu_stw_data_ra(env, addr + 0, d0, ra); + cpu_stw_data_ra(env, addr + 2, d1, ra); + *olen = 4; + } + + return -1; +} + +static int encode_utf32(CPUS390XState *env, uint64_t addr, uint64_t ilen, + uintptr_t ra, uint32_t c, uint32_t *olen) +{ + if (ilen < 4) { + return 1; + } + cpu_stl_data_ra(env, addr, c, ra); + *olen = 4; + return -1; +} + +static inline uint32_t convert_unicode(CPUS390XState *env, uint32_t r1, + uint32_t r2, uint32_t m3, uintptr_t ra, + decode_unicode_fn decode, + encode_unicode_fn encode) +{ + uint64_t dst = get_address(env, r1); + uint64_t dlen = get_length(env, r1 + 1); + uint64_t src = get_address(env, r2); + uint64_t slen = get_length(env, r2 + 1); + bool enh_check = m3 & 1; + int cc, i; + + /* Lest we fail to service interrupts in a timely manner, limit the + amount of work we're willing to do. For now, let's cap at 256. */ + for (i = 0; i < 256; ++i) { + uint32_t c, ilen, olen; + + cc = decode(env, src, slen, enh_check, ra, &c, &ilen); + if (unlikely(cc >= 0)) { + break; + } + cc = encode(env, dst, dlen, ra, c, &olen); + if (unlikely(cc >= 0)) { + break; + } + + src += ilen; + slen -= ilen; + dst += olen; + dlen -= olen; + cc = 3; + } + + set_address(env, r1, dst); + set_length(env, r1 + 1, dlen); + set_address(env, r2, src); + set_length(env, r2 + 1, slen); + + return cc; +} + +uint32_t HELPER(cu12)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3) +{ + return convert_unicode(env, r1, r2, m3, GETPC(), + decode_utf8, encode_utf16); +} + +uint32_t HELPER(cu14)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3) +{ + return convert_unicode(env, r1, r2, m3, GETPC(), + decode_utf8, encode_utf32); +} + +uint32_t HELPER(cu21)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3) +{ + return convert_unicode(env, r1, r2, m3, GETPC(), + decode_utf16, encode_utf8); +} + +uint32_t HELPER(cu24)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3) +{ + return convert_unicode(env, r1, r2, m3, GETPC(), + decode_utf16, encode_utf32); +} + +uint32_t HELPER(cu41)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3) +{ + return convert_unicode(env, r1, r2, m3, GETPC(), + decode_utf32, encode_utf8); +} + +uint32_t HELPER(cu42)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3) +{ + return convert_unicode(env, r1, r2, m3, GETPC(), + decode_utf32, encode_utf16); +} + +void probe_write_access(CPUS390XState *env, uint64_t addr, uint64_t len, + uintptr_t ra) +{ + /* test the actual access, not just any access to the page due to LAP */ + while (len) { + const uint64_t pagelen = -(addr | TARGET_PAGE_MASK); + const uint64_t curlen = MIN(pagelen, len); + + probe_write(env, addr, curlen, cpu_mmu_index(env, false), ra); + addr = wrap_address(env, addr + curlen); + len -= curlen; + } +} + +void HELPER(probe_write_access)(CPUS390XState *env, uint64_t addr, uint64_t len) +{ + probe_write_access(env, addr, len, GETPC()); +} diff --git a/qemu/target/s390x/misc_helper.c b/qemu/target/s390x/misc_helper.c new file mode 100644 index 00000000..53a34cc6 --- /dev/null +++ b/qemu/target/s390x/misc_helper.c @@ -0,0 +1,815 @@ +/* + * S/390 misc helper routines + * + * Copyright (c) 2009 
Ulrich Hecht
+ * Copyright (c) 2009 Alexander Graf
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internal.h"
+#include "exec/memory.h"
+#include "qemu/host-utils.h"
+#include "exec/helper-proto.h"
+#include "qemu/timer.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "tcg_s390x.h"
+#include "s390-tod.h"
+
+#include "sysemu/cpus.h"
+#include "sysemu/sysemu.h"
+#include "hw/s390x/ebcdic.h"
+//#include "hw/s390x/sclp.h"
+//#include "hw/s390x/s390_flic.h"
+#include "hw/s390x/ioinst.h"
+//#include "hw/s390x/s390-pci-inst.h"
+//#include "hw/s390x/tod.h"
+
+/* #define DEBUG_HELPER */
+#ifdef DEBUG_HELPER
+#define HELPER_LOG(x...) qemu_log(x)
+#else
+#define HELPER_LOG(x...)
+#endif
+
+/* Raise an exception statically from a TB. */
+void HELPER(exception)(CPUS390XState *env, uint32_t excp)
+{
+    CPUState *cs = env_cpu(env);
+
+    HELPER_LOG("%s: exception %d\n", __func__, excp);
+    cs->exception_index = excp;
+    cpu_loop_exit(cs);
+}
+
+/* Store CPU Timer (also used for EXTRACT CPU TIME) */
+uint64_t HELPER(stpt)(CPUS390XState *env)
+{
+    return time2tod(env->cputm - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
+}
+
+/* Store Clock */
+uint64_t HELPER(stck)(CPUS390XState *env)
+{
+#if 0
+    S390TODState *td = s390_get_todstate();
+    S390TODClass *tdc = S390_TOD_GET_CLASS(td);
+    S390TOD tod;
+
+    tdc->get(td, &tod, &error_abort);
+    return tod.low;
+#endif
+    return 0;
+}
+
+/* SCLP service call */
+uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2)
+{
+#if 0
+    qemu_mutex_lock_iothread();
+    int r = sclp_service_call(env, r1, r2);
+    qemu_mutex_unlock_iothread();
+    if (r < 0) {
+        tcg_s390_program_interrupt(env, -r, GETPC());
+    }
+    return r;
+#endif
+    return 0;
+}
+
+void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num)
+{
+#if 0
+    uint64_t r;
+
+    switch (num) {
+    case 0x500:
+        /* KVM hypercall */
+        qemu_mutex_lock_iothread();
+        r = s390_virtio_hypercall(env);
+        qemu_mutex_unlock_iothread();
+        break;
+    case 0x44:
+        /* yield */
+        r = 0;
+        break;
+    case 0x308:
+        /* ipl */
+        qemu_mutex_lock_iothread();
+        handle_diag_308(env, r1, r3, GETPC());
+        qemu_mutex_unlock_iothread();
+        r = 0;
+        break;
+    case 0x288:
+        /* time bomb (watchdog) */
+        r = handle_diag_288(env, r1, r3);
+        break;
+    default:
+        r = -1;
+        break;
+    }
+
+    if (r) {
+        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
+    }
+#endif
+}
+
+/* Set Prefix */
+void HELPER(spx)(CPUS390XState *env, uint64_t a1)
+{
+    CPUState *cs = env_cpu(env);
+    uint32_t prefix = a1 & 0x7fffe000;
+
+    env->psa = prefix;
+    HELPER_LOG("prefix: %#x\n", prefix);
+    tlb_flush_page(cs, 0);
+    tlb_flush_page(cs, TARGET_PAGE_SIZE);
+}
+
+static void update_ckc_timer(CPUS390XState *env)
+{
+#if 0
+    S390TODState *td = s390_get_todstate();
+    uint64_t time;
+
+    /* stop the timer and remove pending CKC IRQs */
+    timer_del(env->tod_timer);
g_assert(qemu_mutex_iothread_locked()); + env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR; + + /* the tod has to exceed the ckc, this can never happen if ckc is all 1's */ + if (env->ckc == -1ULL) { + return; + } + + /* difference between origins */ + time = env->ckc - td->base.low; + + /* nanoseconds */ + time = tod2time(time); + + timer_mod(env->tod_timer, time); +#endif +} + +/* Set Clock Comparator */ +void HELPER(sckc)(CPUS390XState *env, uint64_t ckc) +{ +#if 0 + env->ckc = ckc; + + qemu_mutex_lock_iothread(); + update_ckc_timer(env); + qemu_mutex_unlock_iothread(); +#endif +} + +void tcg_s390_tod_updated(CPUState *cs, run_on_cpu_data opaque) +{ + S390CPU *cpu = S390_CPU(cs); + + update_ckc_timer(&cpu->env); +} + +/* Set Clock */ +uint32_t HELPER(sck)(CPUS390XState *env, uint64_t tod_low) +{ +#if 0 + S390TODState *td = s390_get_todstate(); + S390TODClass *tdc = S390_TOD_GET_CLASS(td); + S390TOD tod = { + .high = 0, + .low = tod_low, + }; + + qemu_mutex_lock_iothread(); + tdc->set(td, &tod, &error_abort); + qemu_mutex_unlock_iothread(); +#endif + + return 0; +} + +/* Set Tod Programmable Field */ +void HELPER(sckpf)(CPUS390XState *env, uint64_t r0) +{ + uint32_t val = r0; + + if (val & 0xffff0000) { + tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC()); + } + env->todpr = val; +} + +/* Store Clock Comparator */ +uint64_t HELPER(stckc)(CPUS390XState *env) +{ + return env->ckc; +} + +/* Set CPU Timer */ +void HELPER(spt)(CPUS390XState *env, uint64_t time) +{ + if (time == -1ULL) { + return; + } + + /* nanoseconds */ + time = tod2time(time); + + env->cputm = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + time; + + // timer_mod(env->cpu_timer, env->cputm); +} + +/* Store System Information */ +uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0, uint64_t r0, uint64_t r1) +{ +#if 0 + const uintptr_t ra = GETPC(); + const uint32_t sel1 = r0 & STSI_R0_SEL1_MASK; + const uint32_t sel2 = r1 & STSI_R1_SEL2_MASK; + const MachineState *ms = MACHINE(qdev_get_machine()); + uint16_t total_cpus = 0, conf_cpus = 0, reserved_cpus = 0; + S390CPU *cpu = env_archcpu(env); + SysIB sysib = { }; + int i, cc = 0; + + if ((r0 & STSI_R0_FC_MASK) > STSI_R0_FC_LEVEL_3) { + /* invalid function code: no other checks are performed */ + return 3; + } + + if ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK)) { + tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); + } + + if ((r0 & STSI_R0_FC_MASK) == STSI_R0_FC_CURRENT) { + /* query the current level: no further checks are performed */ + env->regs[0] = STSI_R0_FC_LEVEL_3; + return 0; + } + + if (a0 & ~TARGET_PAGE_MASK) { + tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); + } + + /* count the cpus and split them into configured and reserved ones */ + for (i = 0; i < ms->possible_cpus->len; i++) { + total_cpus++; + if (ms->possible_cpus->cpus[i].cpu) { + conf_cpus++; + } else { + reserved_cpus++; + } + } + + /* + * In theory, we could report Level 1 / Level 2 as current. However, + * the Linux kernel will detect this as running under LPAR and assume + * that we have a sclp linemode console (which is always present on + * LPAR, but not the default for QEMU), therefore not displaying boot + * messages and making booting a Linux kernel under TCG harder. + * + * For now we fake the same SMP configuration on all levels. + * + * TODO: We could later make the level configurable via the machine + * and change defaults (linemode console) based on machine type + * and accelerator. 
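+ *
+ * Editor's illustration (an addition, not from the original comment): in
+ * the switch below, a guest issuing STSI with function code 3 and both
+ * selectors set to 2 - i.e. r0 = (3ULL << 28) | 2 and r1 = 2, assuming the
+ * usual STSI_R0_FC_LEVEL_3 / STSI_R0_SEL1_MASK encodings - is served the
+ * "VM CPUs" SYSIB 3.2.2 built in the STSI_R0_FC_LEVEL_3 case.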
+ */
+    switch (r0 & STSI_R0_FC_MASK) {
+    case STSI_R0_FC_LEVEL_1:
+        if ((sel1 == 1) && (sel2 == 1)) {
+            /* Basic Machine Configuration */
+            char type[5] = {};
+
+            ebcdic_put(sysib.sysib_111.manuf, "QEMU            ", 16);
+            /* same as machine type number in STORE CPU ID, but in EBCDIC */
+            snprintf(type, ARRAY_SIZE(type), "%X", cpu->model->def->type);
+            ebcdic_put(sysib.sysib_111.type, type, 4);
+            /* model number (not stored in STORE CPU ID for z/Architecture) */
+            ebcdic_put(sysib.sysib_111.model, "QEMU            ", 16);
+            ebcdic_put(sysib.sysib_111.sequence, "QEMU            ", 16);
+            ebcdic_put(sysib.sysib_111.plant, "QEMU", 4);
+        } else if ((sel1 == 2) && (sel2 == 1)) {
+            /* Basic Machine CPU */
+            ebcdic_put(sysib.sysib_121.sequence, "QEMUQEMUQEMUQEMU", 16);
+            ebcdic_put(sysib.sysib_121.plant, "QEMU", 4);
+            sysib.sysib_121.cpu_addr = cpu_to_be16(env->core_id);
+        } else if ((sel1 == 2) && (sel2 == 2)) {
+            /* Basic Machine CPUs */
+            sysib.sysib_122.capability = cpu_to_be32(0x443afc29);
+            sysib.sysib_122.total_cpus = cpu_to_be16(total_cpus);
+            sysib.sysib_122.conf_cpus = cpu_to_be16(conf_cpus);
+            sysib.sysib_122.reserved_cpus = cpu_to_be16(reserved_cpus);
+        } else {
+            cc = 3;
+        }
+        break;
+    case STSI_R0_FC_LEVEL_2:
+        if ((sel1 == 2) && (sel2 == 1)) {
+            /* LPAR CPU */
+            ebcdic_put(sysib.sysib_221.sequence, "QEMUQEMUQEMUQEMU", 16);
+            ebcdic_put(sysib.sysib_221.plant, "QEMU", 4);
+            sysib.sysib_221.cpu_addr = cpu_to_be16(env->core_id);
+        } else if ((sel1 == 2) && (sel2 == 2)) {
+            /* LPAR CPUs */
+            sysib.sysib_222.lcpuc = 0x80; /* dedicated */
+            sysib.sysib_222.total_cpus = cpu_to_be16(total_cpus);
+            sysib.sysib_222.conf_cpus = cpu_to_be16(conf_cpus);
+            sysib.sysib_222.reserved_cpus = cpu_to_be16(reserved_cpus);
+            ebcdic_put(sysib.sysib_222.name, "QEMU    ", 8);
+            sysib.sysib_222.caf = cpu_to_be32(1000);
+            sysib.sysib_222.dedicated_cpus = cpu_to_be16(conf_cpus);
+        } else {
+            cc = 3;
+        }
+        break;
+    case STSI_R0_FC_LEVEL_3:
+        if ((sel1 == 2) && (sel2 == 2)) {
+            /* VM CPUs */
+            sysib.sysib_322.count = 1;
+            sysib.sysib_322.vm[0].total_cpus = cpu_to_be16(total_cpus);
+            sysib.sysib_322.vm[0].conf_cpus = cpu_to_be16(conf_cpus);
+            sysib.sysib_322.vm[0].reserved_cpus = cpu_to_be16(reserved_cpus);
+            sysib.sysib_322.vm[0].caf = cpu_to_be32(1000);
+            /* Linux kernel uses this to distinguish us from z/VM */
+            ebcdic_put(sysib.sysib_322.vm[0].cpi, "KVM/Linux       ", 16);
+            sysib.sysib_322.vm[0].ext_name_encoding = 2; /* UTF-8 */
+
+            /* If our VM has a name, use the real name */
+            if (qemu_name) {
+                memset(sysib.sysib_322.vm[0].name, 0x40,
+                       sizeof(sysib.sysib_322.vm[0].name));
+                ebcdic_put(sysib.sysib_322.vm[0].name, qemu_name,
+                           MIN(sizeof(sysib.sysib_322.vm[0].name),
+                               strlen(qemu_name)));
+                strncpy((char *)sysib.sysib_322.ext_names[0], qemu_name,
+                        sizeof(sysib.sysib_322.ext_names[0]));
+            } else {
+                ebcdic_put(sysib.sysib_322.vm[0].name, "TCGguest", 8);
+                strcpy((char *)sysib.sysib_322.ext_names[0], "TCGguest");
+            }
+
+            /* add the uuid */
+            memcpy(sysib.sysib_322.vm[0].uuid, &qemu_uuid,
+                   sizeof(sysib.sysib_322.vm[0].uuid));
+        } else {
+            cc = 3;
+        }
+        break;
+    }
+
+    if (cc == 0) {
+        if (s390_cpu_virt_mem_write(cpu, a0, 0, &sysib, sizeof(sysib))) {
+            s390_cpu_virt_mem_handle_exc(cpu, ra);
+        }
+    }
+
+    return cc;
+#endif
+
+    return 0;
+}
+
+uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1,
+                      uint32_t r3)
+{
+#if 0
+    int cc;
+
+    /* TODO: needed to inject interrupts - push further down */
+    qemu_mutex_lock_iothread();
+    cc = handle_sigp(env, order_code & SIGP_ORDER_MASK, r1, r3);
+    qemu_mutex_unlock_iothread();
+
+    return
cc; +#endif + return 0; +} + +void HELPER(xsch)(CPUS390XState *env, uint64_t r1) +{ +#if 0 + S390CPU *cpu = env_archcpu(env); + qemu_mutex_lock_iothread(); + ioinst_handle_xsch(cpu, r1, GETPC()); + qemu_mutex_unlock_iothread(); +#endif +} + +void HELPER(csch)(CPUS390XState *env, uint64_t r1) +{ +#if 0 + S390CPU *cpu = env_archcpu(env); + qemu_mutex_lock_iothread(); + ioinst_handle_csch(cpu, r1, GETPC()); + qemu_mutex_unlock_iothread(); +#endif +} + +void HELPER(hsch)(CPUS390XState *env, uint64_t r1) +{ +#if 0 + S390CPU *cpu = env_archcpu(env); + qemu_mutex_lock_iothread(); + ioinst_handle_hsch(cpu, r1, GETPC()); + qemu_mutex_unlock_iothread(); +#endif +} + +void HELPER(msch)(CPUS390XState *env, uint64_t r1, uint64_t inst) +{ +#if 0 + S390CPU *cpu = env_archcpu(env); + qemu_mutex_lock_iothread(); + ioinst_handle_msch(cpu, r1, inst >> 16, GETPC()); + qemu_mutex_unlock_iothread(); +#endif +} + +void HELPER(rchp)(CPUS390XState *env, uint64_t r1) +{ +#if 0 + S390CPU *cpu = env_archcpu(env); + qemu_mutex_lock_iothread(); + ioinst_handle_rchp(cpu, r1, GETPC()); + qemu_mutex_unlock_iothread(); +#endif +} + +void HELPER(rsch)(CPUS390XState *env, uint64_t r1) +{ +#if 0 + S390CPU *cpu = env_archcpu(env); + qemu_mutex_lock_iothread(); + ioinst_handle_rsch(cpu, r1, GETPC()); + qemu_mutex_unlock_iothread(); +#endif +} + +void HELPER(sal)(CPUS390XState *env, uint64_t r1) +{ +#if 0 + S390CPU *cpu = env_archcpu(env); + + qemu_mutex_lock_iothread(); + ioinst_handle_sal(cpu, r1, GETPC()); + qemu_mutex_unlock_iothread(); +#endif +} + +void HELPER(schm)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint64_t inst) +{ +#if 0 + S390CPU *cpu = env_archcpu(env); + + qemu_mutex_lock_iothread(); + ioinst_handle_schm(cpu, r1, r2, inst >> 16, GETPC()); + qemu_mutex_unlock_iothread(); +#endif +} + +void HELPER(ssch)(CPUS390XState *env, uint64_t r1, uint64_t inst) +{ +#if 0 + S390CPU *cpu = env_archcpu(env); + qemu_mutex_lock_iothread(); + ioinst_handle_ssch(cpu, r1, inst >> 16, GETPC()); + qemu_mutex_unlock_iothread(); +#endif +} + +void HELPER(stcrw)(CPUS390XState *env, uint64_t inst) +{ +#if 0 + S390CPU *cpu = env_archcpu(env); + + qemu_mutex_lock_iothread(); + ioinst_handle_stcrw(cpu, inst >> 16, GETPC()); + qemu_mutex_unlock_iothread(); +#endif +} + +void HELPER(stsch)(CPUS390XState *env, uint64_t r1, uint64_t inst) +{ +#if 0 + S390CPU *cpu = env_archcpu(env); + qemu_mutex_lock_iothread(); + ioinst_handle_stsch(cpu, r1, inst >> 16, GETPC()); + qemu_mutex_unlock_iothread(); +#endif +} + +uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr) +{ +#if 0 + const uintptr_t ra = GETPC(); + S390CPU *cpu = env_archcpu(env); + QEMUS390FLICState *flic = s390_get_qemu_flic(s390_get_flic()); + QEMUS390FlicIO *io = NULL; + LowCore *lowcore; + + if (addr & 0x3) { + tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); + } + + qemu_mutex_lock_iothread(); + io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]); + if (!io) { + qemu_mutex_unlock_iothread(); + return 0; + } + + if (addr) { + struct { + uint16_t id; + uint16_t nr; + uint32_t parm; + } intc = { + .id = cpu_to_be16(io->id), + .nr = cpu_to_be16(io->nr), + .parm = cpu_to_be32(io->parm), + }; + + if (s390_cpu_virt_mem_write(cpu, addr, 0, &intc, sizeof(intc))) { + /* writing failed, reinject and properly clean up */ + s390_io_interrupt(io->id, io->nr, io->parm, io->word); + qemu_mutex_unlock_iothread(); + g_free(io); + s390_cpu_virt_mem_handle_exc(cpu, ra); + return 0; + } + } else { + /* no protection applies */ + lowcore = cpu_map_lowcore(env); + lowcore->subchannel_id = 
cpu_to_be16(io->id); + lowcore->subchannel_nr = cpu_to_be16(io->nr); + lowcore->io_int_parm = cpu_to_be32(io->parm); + lowcore->io_int_word = cpu_to_be32(io->word); + cpu_unmap_lowcore(env, lowcore); + } + + g_free(io); + qemu_mutex_unlock_iothread(); +#endif + return 1; +} + +void HELPER(tsch)(CPUS390XState *env, uint64_t r1, uint64_t inst) +{ +#if 0 + S390CPU *cpu = env_archcpu(env); + qemu_mutex_lock_iothread(); + ioinst_handle_tsch(cpu, r1, inst >> 16, GETPC()); + qemu_mutex_unlock_iothread(); +#endif +} + +void HELPER(chsc)(CPUS390XState *env, uint64_t inst) +{ +#if 0 + S390CPU *cpu = env_archcpu(env); + qemu_mutex_lock_iothread(); + ioinst_handle_chsc(cpu, inst >> 16, GETPC()); + qemu_mutex_unlock_iothread(); +#endif +} + +void HELPER(per_check_exception)(CPUS390XState *env) +{ + if (env->per_perc_atmid) { + tcg_s390_program_interrupt(env, PGM_PER, GETPC()); + } +} + +/* Check if an address is within the PER starting address and the PER + ending address. The address range might loop. */ +static inline bool get_per_in_range(CPUS390XState *env, uint64_t addr) +{ + if (env->cregs[10] <= env->cregs[11]) { + return env->cregs[10] <= addr && addr <= env->cregs[11]; + } else { + return env->cregs[10] <= addr || addr <= env->cregs[11]; + } +} + +void HELPER(per_branch)(CPUS390XState *env, uint64_t from, uint64_t to) +{ + if ((env->cregs[9] & PER_CR9_EVENT_BRANCH)) { + if (!(env->cregs[9] & PER_CR9_CONTROL_BRANCH_ADDRESS) + || get_per_in_range(env, to)) { + env->per_address = from; + env->per_perc_atmid = PER_CODE_EVENT_BRANCH | get_per_atmid(env); + } + } +} + +void HELPER(per_ifetch)(CPUS390XState *env, uint64_t addr) +{ + if ((env->cregs[9] & PER_CR9_EVENT_IFETCH) && get_per_in_range(env, addr)) { + env->per_address = addr; + env->per_perc_atmid = PER_CODE_EVENT_IFETCH | get_per_atmid(env); + + /* If the instruction has to be nullified, trigger the + exception immediately. */ + if (env->cregs[9] & PER_CR9_EVENT_NULLIFICATION) { + CPUState *cs = env_cpu(env); + + env->per_perc_atmid |= PER_CODE_EVENT_NULLIFICATION; + env->int_pgm_code = PGM_PER; + env->int_pgm_ilen = get_ilen(cpu_ldub_code(env, addr)); + + cs->exception_index = EXCP_PGM; + cpu_loop_exit(cs); + } + } +} + +void HELPER(per_store_real)(CPUS390XState *env) +{ + if ((env->cregs[9] & PER_CR9_EVENT_STORE) && + (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) { + /* PSW is saved just before calling the helper. 
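+         *
+         * Editor's sketch of the wider PER flow (inferred from the helpers
+         * above, not part of the original comment): translate.c arms these
+         * events through cregs[9]; with PER_CR9_EVENT_STORE and
+         * PER_CR9_EVENT_STORE_REAL set, this helper latches env->per_address
+         * and env->per_perc_atmid, and HELPER(per_check_exception) raises
+         * PGM_PER once the instruction has completed.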
+         */
+        env->per_address = env->psw.addr;
+        env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
+    }
+}
+
+static uint8_t stfl_bytes[2048];
+static unsigned int used_stfl_bytes;
+
+static void prepare_stfl(void)
+{
+#if 0
+    static bool initialized;
+    int i;
+
+    /* racy, but we don't care, the same values are always written */
+    if (initialized) {
+        return;
+    }
+
+    s390_get_feat_block(S390_FEAT_TYPE_STFL, stfl_bytes);
+    for (i = 0; i < sizeof(stfl_bytes); i++) {
+        if (stfl_bytes[i]) {
+            used_stfl_bytes = i + 1;
+        }
+    }
+    initialized = true;
+#endif
+}
+
+void HELPER(stfl)(CPUS390XState *env)
+{
+    LowCore *lowcore;
+
+    lowcore = cpu_map_lowcore(env);
+    prepare_stfl();
+    memcpy(&lowcore->stfl_fac_list, stfl_bytes, sizeof(lowcore->stfl_fac_list));
+    cpu_unmap_lowcore(env, lowcore);
+}
+
+uint32_t HELPER(stfle)(CPUS390XState *env, uint64_t addr)
+{
+    const uintptr_t ra = GETPC();
+    const int count_bytes = ((env->regs[0] & 0xff) + 1) * 8;
+    int max_bytes;
+    int i;
+
+    if (addr & 0x7) {
+        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+    }
+
+    prepare_stfl();
+    max_bytes = ROUND_UP(used_stfl_bytes, 8);
+
+    /*
+     * The PoP says that doublewords beyond the highest-numbered facility
+     * bit may or may not be stored. However, existing hardware appears to
+     * not store the words, and existing software depends on that.
+     */
+    for (i = 0; i < MIN(count_bytes, max_bytes); ++i) {
+        cpu_stb_data_ra(env, addr + i, stfl_bytes[i], ra);
+    }
+
+    env->regs[0] = deposit64(env->regs[0], 0, 8, (max_bytes / 8) - 1);
+    return count_bytes >= max_bytes ? 0 : 3;
+}
+
+/*
+ * Note: we ignore any return code of the functions called for the pci
+ * instructions, as the only time they return !0 is when the stub is
+ * called, and in that case we didn't even offer the zpci facility.
+ * The only exception is SIC, where program checks need to be handled
+ * by the caller.
+ */ +void HELPER(clp)(CPUS390XState *env, uint32_t r2) +{ +#if 0 + S390CPU *cpu = env_archcpu(env); + + qemu_mutex_lock_iothread(); + clp_service_call(cpu, r2, GETPC()); + qemu_mutex_unlock_iothread(); +#endif +} + +void HELPER(pcilg)(CPUS390XState *env, uint32_t r1, uint32_t r2) +{ +#if 0 + S390CPU *cpu = env_archcpu(env); + + qemu_mutex_lock_iothread(); + pcilg_service_call(cpu, r1, r2, GETPC()); + qemu_mutex_unlock_iothread(); +#endif +} + +void HELPER(pcistg)(CPUS390XState *env, uint32_t r1, uint32_t r2) +{ +#if 0 + S390CPU *cpu = env_archcpu(env); + + qemu_mutex_lock_iothread(); + pcistg_service_call(cpu, r1, r2, GETPC()); + qemu_mutex_unlock_iothread(); +#endif +} + +void HELPER(stpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba, + uint32_t ar) +{ +#if 0 + S390CPU *cpu = env_archcpu(env); + + qemu_mutex_lock_iothread(); + stpcifc_service_call(cpu, r1, fiba, ar, GETPC()); + qemu_mutex_unlock_iothread(); +#endif +} + +void HELPER(sic)(CPUS390XState *env, uint64_t r1, uint64_t r3) +{ +#if 0 + int r; + + qemu_mutex_lock_iothread(); + r = css_do_sic(env, (r3 >> 27) & 0x7, r1 & 0xffff); + qemu_mutex_unlock_iothread(); + /* css_do_sic() may actually return a PGM_xxx value to inject */ + if (r) { + tcg_s390_program_interrupt(env, -r, GETPC()); + } +#endif +} + +void HELPER(rpcit)(CPUS390XState *env, uint32_t r1, uint32_t r2) +{ +#if 0 + S390CPU *cpu = env_archcpu(env); + + qemu_mutex_lock_iothread(); + rpcit_service_call(cpu, r1, r2, GETPC()); + qemu_mutex_unlock_iothread(); +#endif +} + +void HELPER(pcistb)(CPUS390XState *env, uint32_t r1, uint32_t r3, + uint64_t gaddr, uint32_t ar) +{ +#if 0 + S390CPU *cpu = env_archcpu(env); + + qemu_mutex_lock_iothread(); + pcistb_service_call(cpu, r1, r3, gaddr, ar, GETPC()); + qemu_mutex_unlock_iothread(); +#endif +} + +void HELPER(mpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba, + uint32_t ar) +{ +#if 0 + S390CPU *cpu = env_archcpu(env); + + qemu_mutex_lock_iothread(); + mpcifc_service_call(cpu, r1, fiba, ar, GETPC()); + qemu_mutex_unlock_iothread(); +#endif +} diff --git a/qemu/target/s390x/mmu_helper.c b/qemu/target/s390x/mmu_helper.c new file mode 100644 index 00000000..f9dc73e8 --- /dev/null +++ b/qemu/target/s390x/mmu_helper.c @@ -0,0 +1,554 @@ +/* + * S390x MMU related functions + * + * Copyright (c) 2011 Alexander Graf + * Copyright (c) 2015 Thomas Huth, IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */
+
+#include "qemu/osdep.h"
+//#include "exec/address-spaces.h"
+#include "cpu.h"
+#include "internal.h"
+#include "sysemu/tcg.h"
+#include "exec/exec-all.h"
+//#include "hw/hw.h"
+#include "hw/s390x/storage-keys.h"
+
+/* Fetch/store bits in the translation exception code: */
+#define FS_READ  0x800
+#define FS_WRITE 0x400
+
+static void trigger_access_exception(CPUS390XState *env, uint32_t type,
+                                     uint64_t tec)
+{
+    CPUState *cs = env_cpu(env);
+    if (type != PGM_ADDRESSING) {
+#ifdef UNICORN_ARCH_POSTFIX
+        glue(stq_phys, UNICORN_ARCH_POSTFIX)(env->uc, cs->as,
+            env->psa + offsetof(LowCore, trans_exc_code), tec);
+#else
+        stq_phys(env->uc, cs->as,
+            env->psa + offsetof(LowCore, trans_exc_code), tec);
+#endif
+    }
+    trigger_pgm_exception(env, type);
+}
+
+/* check whether the address would be protected by Low-Address Protection */
+static bool is_low_address(uint64_t addr)
+{
+    return addr <= 511 || (addr >= 4096 && addr <= 4607);
+}
+
+/* check whether Low-Address Protection is enabled for mmu_translate() */
+static bool lowprot_enabled(const CPUS390XState *env, uint64_t asc)
+{
+    if (!(env->cregs[0] & CR0_LOWPROT)) {
+        return false;
+    }
+    if (!(env->psw.mask & PSW_MASK_DAT)) {
+        return true;
+    }
+
+    /* Check the private-space control bit */
+    switch (asc) {
+    case PSW_ASC_PRIMARY:
+        return !(env->cregs[1] & ASCE_PRIVATE_SPACE);
+    case PSW_ASC_SECONDARY:
+        return !(env->cregs[7] & ASCE_PRIVATE_SPACE);
+    case PSW_ASC_HOME:
+        return !(env->cregs[13] & ASCE_PRIVATE_SPACE);
+    default:
+        /* We don't support access register mode */
+        // error_report("unsupported addressing mode");
+        exit(1);
+    }
+}
+
+/**
+ * Translate real address to absolute (= physical)
+ * address by taking care of the prefix mapping.
+ */
+target_ulong mmu_real2abs(CPUS390XState *env, target_ulong raddr)
+{
+    if (raddr < 0x2000) {
+        return raddr + env->psa;    /* Map the lowcore. */
+    } else if (raddr >= env->psa && raddr < env->psa + 0x2000) {
+        return raddr - env->psa;    /* Map the 0 page. */
+    }
+    return raddr;
+}
+
+static inline bool read_table_entry(CPUS390XState *env, hwaddr gaddr,
+                                    uint64_t *entry)
+{
+    CPUState *cs = env_cpu(env);
+
+    /*
+     * According to the PoP, these table addresses are "unpredictably real
+     * or absolute". Also, "it is unpredictable whether the address wraps
+     * or an addressing exception is recognized".
+     *
+     * We treat them as absolute addresses and don't wrap them.
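+     *
+     * Worked example (editor's illustration): for a region-second-table
+     * lookup, mmu_translate_asce() below computes
+     *     gaddr = (asce & ASCE_ORIGIN) + VADDR_REGION2_TX(vaddr) * 8
+     * and hands it to this helper, which fetches the 8-byte big-endian
+     * entry and converts it with be64_to_cpu() before the REGION_ENTRY_*
+     * masks are applied.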
+ */ + if (unlikely(address_space_read(cs->as, gaddr, MEMTXATTRS_UNSPECIFIED, + entry, sizeof(*entry)) != + MEMTX_OK)) { + return false; + } + *entry = be64_to_cpu(*entry); + return true; +} + +static int mmu_translate_asce(CPUS390XState *env, target_ulong vaddr, + uint64_t asc, uint64_t asce, target_ulong *raddr, + int *flags, int rw) +{ + const bool edat1 = (env->cregs[0] & CR0_EDAT) && + s390_has_feat(env->uc, S390_FEAT_EDAT); + const bool edat2 = edat1 && s390_has_feat(env->uc, S390_FEAT_EDAT_2); + const bool iep = (env->cregs[0] & CR0_IEP) && + s390_has_feat(env->uc, S390_FEAT_INSTRUCTION_EXEC_PROT); + const int asce_tl = asce & ASCE_TABLE_LENGTH; + const int asce_p = asce & ASCE_PRIVATE_SPACE; + hwaddr gaddr = asce & ASCE_ORIGIN; + uint64_t entry; + + if (asce & ASCE_REAL_SPACE) { + /* direct mapping */ + *raddr = vaddr; + return 0; + } + + switch (asce & ASCE_TYPE_MASK) { + case ASCE_TYPE_REGION1: + if (VADDR_REGION1_TL(vaddr) > asce_tl) { + return PGM_REG_FIRST_TRANS; + } + gaddr += VADDR_REGION1_TX(vaddr) * 8; + break; + case ASCE_TYPE_REGION2: + if (VADDR_REGION1_TX(vaddr)) { + return PGM_ASCE_TYPE; + } + if (VADDR_REGION2_TL(vaddr) > asce_tl) { + return PGM_REG_SEC_TRANS; + } + gaddr += VADDR_REGION2_TX(vaddr) * 8; + break; + case ASCE_TYPE_REGION3: + if (VADDR_REGION1_TX(vaddr) || VADDR_REGION2_TX(vaddr)) { + return PGM_ASCE_TYPE; + } + if (VADDR_REGION3_TL(vaddr) > asce_tl) { + return PGM_REG_THIRD_TRANS; + } + gaddr += VADDR_REGION3_TX(vaddr) * 8; + break; + case ASCE_TYPE_SEGMENT: + if (VADDR_REGION1_TX(vaddr) || VADDR_REGION2_TX(vaddr) || + VADDR_REGION3_TX(vaddr)) { + return PGM_ASCE_TYPE; + } + if (VADDR_SEGMENT_TL(vaddr) > asce_tl) { + return PGM_SEGMENT_TRANS; + } + gaddr += VADDR_SEGMENT_TX(vaddr) * 8; + break; + } + + switch (asce & ASCE_TYPE_MASK) { + case ASCE_TYPE_REGION1: + if (!read_table_entry(env, gaddr, &entry)) { + return PGM_ADDRESSING; + } + if (entry & REGION_ENTRY_I) { + return PGM_REG_FIRST_TRANS; + } + if ((entry & REGION_ENTRY_TT) != REGION_ENTRY_TT_REGION1) { + return PGM_TRANS_SPEC; + } + if (VADDR_REGION2_TL(vaddr) < (entry & REGION_ENTRY_TF) >> 6 || + VADDR_REGION2_TL(vaddr) > (entry & REGION_ENTRY_TL)) { + return PGM_REG_SEC_TRANS; + } + if (edat1 && (entry & REGION_ENTRY_P)) { + *flags &= ~PAGE_WRITE; + } + gaddr = (entry & REGION_ENTRY_ORIGIN) + VADDR_REGION2_TX(vaddr) * 8; + /* fall through */ + case ASCE_TYPE_REGION2: + if (!read_table_entry(env, gaddr, &entry)) { + return PGM_ADDRESSING; + } + if (entry & REGION_ENTRY_I) { + return PGM_REG_SEC_TRANS; + } + if ((entry & REGION_ENTRY_TT) != REGION_ENTRY_TT_REGION2) { + return PGM_TRANS_SPEC; + } + if (VADDR_REGION3_TL(vaddr) < (entry & REGION_ENTRY_TF) >> 6 || + VADDR_REGION3_TL(vaddr) > (entry & REGION_ENTRY_TL)) { + return PGM_REG_THIRD_TRANS; + } + if (edat1 && (entry & REGION_ENTRY_P)) { + *flags &= ~PAGE_WRITE; + } + gaddr = (entry & REGION_ENTRY_ORIGIN) + VADDR_REGION3_TX(vaddr) * 8; + /* fall through */ + case ASCE_TYPE_REGION3: + if (!read_table_entry(env, gaddr, &entry)) { + return PGM_ADDRESSING; + } + if (entry & REGION_ENTRY_I) { + return PGM_REG_THIRD_TRANS; + } + if ((entry & REGION_ENTRY_TT) != REGION_ENTRY_TT_REGION3) { + return PGM_TRANS_SPEC; + } + if (edat2 && (entry & REGION3_ENTRY_CR) && asce_p) { + return PGM_TRANS_SPEC; + } + if (edat1 && (entry & REGION_ENTRY_P)) { + *flags &= ~PAGE_WRITE; + } + if (edat2 && (entry & REGION3_ENTRY_FC)) { + if (iep && (entry & REGION3_ENTRY_IEP)) { + *flags &= ~PAGE_EXEC; + } + *raddr = (entry & REGION3_ENTRY_RFAA) | + (vaddr & 
~REGION3_ENTRY_RFAA);
+            return 0;
+        }
+        if (VADDR_SEGMENT_TL(vaddr) < (entry & REGION_ENTRY_TF) >> 6 ||
+            VADDR_SEGMENT_TL(vaddr) > (entry & REGION_ENTRY_TL)) {
+            return PGM_SEGMENT_TRANS;
+        }
+        gaddr = (entry & REGION_ENTRY_ORIGIN) + VADDR_SEGMENT_TX(vaddr) * 8;
+        /* fall through */
+    case ASCE_TYPE_SEGMENT:
+        if (!read_table_entry(env, gaddr, &entry)) {
+            return PGM_ADDRESSING;
+        }
+        if (entry & SEGMENT_ENTRY_I) {
+            return PGM_SEGMENT_TRANS;
+        }
+        if ((entry & SEGMENT_ENTRY_TT) != SEGMENT_ENTRY_TT_SEGMENT) {
+            return PGM_TRANS_SPEC;
+        }
+        if ((entry & SEGMENT_ENTRY_CS) && asce_p) {
+            return PGM_TRANS_SPEC;
+        }
+        if (entry & SEGMENT_ENTRY_P) {
+            *flags &= ~PAGE_WRITE;
+        }
+        if (edat1 && (entry & SEGMENT_ENTRY_FC)) {
+            if (iep && (entry & SEGMENT_ENTRY_IEP)) {
+                *flags &= ~PAGE_EXEC;
+            }
+            *raddr = (entry & SEGMENT_ENTRY_SFAA) |
+                     (vaddr & ~SEGMENT_ENTRY_SFAA);
+            return 0;
+        }
+        gaddr = (entry & SEGMENT_ENTRY_ORIGIN) + VADDR_PAGE_TX(vaddr) * 8;
+        break;
+    }
+
+    if (!read_table_entry(env, gaddr, &entry)) {
+        return PGM_ADDRESSING;
+    }
+    if (entry & PAGE_ENTRY_I) {
+        return PGM_PAGE_TRANS;
+    }
+    if (entry & PAGE_ENTRY_0) {
+        return PGM_TRANS_SPEC;
+    }
+    if (entry & PAGE_ENTRY_P) {
+        *flags &= ~PAGE_WRITE;
+    }
+    if (iep && (entry & PAGE_ENTRY_IEP)) {
+        *flags &= ~PAGE_EXEC;
+    }
+
+    *raddr = entry & TARGET_PAGE_MASK;
+    return 0;
+}
+
+static void mmu_handle_skey(target_ulong addr, int rw, int *flags)
+{
+    static S390SKeysClass *skeyclass;
+    static S390SKeysState *ss;
+    uint8_t key;
+    int rc;
+
+#if 0
+    if (unlikely(addr >= ram_size)) {
+        return;
+    }
+#endif
+
+    if (unlikely(!ss)) {
+        // ss = s390_get_skeys_device();
+        // skeyclass = S390_SKEYS_GET_CLASS(ss);
+        /*
+         * The storage-key device is not wired up in this port yet; without
+         * it, skeyclass below would be dereferenced while still NULL, so
+         * bail out until the device is hooked up.
+         */
+        return;
+    }
+
+    /*
+     * Whenever we create a new TLB entry, we set the storage key reference
+     * bit. In case we allow write accesses, we set the storage key change
+     * bit. Whenever the guest changes the storage key, we have to flush the
+     * TLBs of all CPUs (the whole TLB or all affected entries), so that the
+     * next reference/change will result in an MMU fault and make us properly
+     * update the storage key here.
+     *
+     * Note 1: "record of references ... is not necessarily accurate",
+     *         "change bit may be set in case no storing has occurred".
+     *         -> We can set reference/change bits even on exceptions.
+     * Note 2: certain accesses seem to ignore storage keys. For example,
+     *         DAT translation does not set reference bits for table accesses.
+     *
+     * TODO: key-controlled protection. Only CPU accesses make use of the
+     *       PSW key. CSS accesses are different - we have to pass in the key.
+     *
+     * TODO: we have races between getting and setting the key.
+     */
+    rc = skeyclass->get_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key);
+    if (rc) {
+        // trace_get_skeys_nonzero(rc);
+        return;
+    }
+
+    switch (rw) {
+    case MMU_DATA_LOAD:
+    case MMU_INST_FETCH:
+        /*
+         * The TLB entry has to remain write-protected on read-faults if
+         * the storage key does not indicate a change already. Otherwise
+         * we might miss setting the change bit on write accesses.
+         */
+        if (!(key & SK_C)) {
+            *flags &= ~PAGE_WRITE;
+        }
+        break;
+    case MMU_DATA_STORE:
+        key |= SK_C;
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    /* Any store/fetch sets the reference bit */
+    key |= SK_R;
+
+    rc = skeyclass->set_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key);
+    if (rc) {
+        // trace_set_skeys_nonzero(rc);
+    }
+}
+
+/**
+ * Translate a virtual (logical) address into a physical (absolute) address.
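+ *
+ * A minimal usage sketch (editor's illustration, mirroring how
+ * translate_pages() further below drives this function):
+ *
+ *     target_ulong raddr;
+ *     int flags;
+ *     uint64_t tec;
+ *     int exc = mmu_translate(env, vaddr, MMU_DATA_LOAD,
+ *                             env->psw.mask & PSW_MASK_ASC,
+ *                             &raddr, &flags, &tec);
+ *     if (exc) {
+ *         trigger_access_exception(env, exc, tec);
+ *     }
+ *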
+ * @param vaddr the virtual address + * @param rw 0 = read, 1 = write, 2 = code fetch + * @param asc address space control (one of the PSW_ASC_* modes) + * @param raddr the translated address is stored to this pointer + * @param flags the PAGE_READ/WRITE/EXEC flags are stored to this pointer + * @param exc true = inject a program check if a fault occurred + * @return 0 = success, != 0, the exception to raise + */ +int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc, + target_ulong *raddr, int *flags, uint64_t *tec) +{ + uint64_t asce; + int r; + + *tec = (vaddr & TARGET_PAGE_MASK) | (asc >> 46) | + (rw == MMU_DATA_STORE ? FS_WRITE : FS_READ); + *flags = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + + if (is_low_address(vaddr & TARGET_PAGE_MASK) && lowprot_enabled(env, asc)) { + /* + * If any part of this page is currently protected, make sure the + * TLB entry will not be reused. + * + * As the protected range is always the first 512 bytes of the + * two first pages, we are able to catch all writes to these areas + * just by looking at the start address (triggering the tlb miss). + */ + *flags |= PAGE_WRITE_INV; + if (is_low_address(vaddr) && rw == MMU_DATA_STORE) { + /* LAP sets bit 56 */ + *tec |= 0x80; + return PGM_PROTECTION; + } + } + + vaddr &= TARGET_PAGE_MASK; + + if (!(env->psw.mask & PSW_MASK_DAT)) { + *raddr = vaddr; + goto nodat; + } + + switch (asc) { + case PSW_ASC_PRIMARY: + asce = env->cregs[1]; + break; + case PSW_ASC_HOME: + asce = env->cregs[13]; + break; + case PSW_ASC_SECONDARY: + asce = env->cregs[7]; + break; + case PSW_ASC_ACCREG: + default: + // hw_error("guest switched to unknown asc mode\n"); + break; + } + + /* perform the DAT translation */ + r = mmu_translate_asce(env, vaddr, asc, asce, raddr, flags, rw); + if (unlikely(r)) { + return r; + } + + /* check for DAT protection */ + if (unlikely(rw == MMU_DATA_STORE && !(*flags & PAGE_WRITE))) { + /* DAT sets bit 61 only */ + *tec |= 0x4; + return PGM_PROTECTION; + } + + /* check for Instruction-Execution-Protection */ + if (unlikely(rw == MMU_INST_FETCH && !(*flags & PAGE_EXEC))) { + /* IEP sets bit 56 and 61 */ + *tec |= 0x84; + return PGM_PROTECTION; + } + +nodat: + /* Convert real address -> absolute address */ + *raddr = mmu_real2abs(env, *raddr); + + mmu_handle_skey(*raddr, rw, flags); + return 0; +} + +/** + * translate_pages: Translate a set of consecutive logical page addresses + * to absolute addresses. This function is used for TCG and old KVM without + * the MEMOP interface. + */ +static int translate_pages(S390CPU *cpu, vaddr addr, int nr_pages, + target_ulong *pages, bool is_write, uint64_t *tec) +{ + uint64_t asc = cpu->env.psw.mask & PSW_MASK_ASC; + CPUS390XState *env = &cpu->env; + int ret, i, pflags; + + for (i = 0; i < nr_pages; i++) { + ret = mmu_translate(env, addr, is_write, asc, &pages[i], &pflags, tec); + if (ret) { + return ret; + } + if (!address_space_access_valid(env_cpu(env)->as, pages[i], + TARGET_PAGE_SIZE, is_write, + MEMTXATTRS_UNSPECIFIED)) { + *tec = 0; /* unused */ + return PGM_ADDRESSING; + } + addr += TARGET_PAGE_SIZE; + } + + return 0; +} + +/** + * s390_cpu_virt_mem_rw: + * @laddr: the logical start address + * @ar: the access register number + * @hostbuf: buffer in host memory. NULL = do only checks w/o copying + * @len: length that should be transferred + * @is_write: true = write, false = read + * Returns: 0 on success, non-zero if an exception occurred + * + * Copy from/to guest memory using logical addresses. 
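+ * (Editor's note, an illustration rather than code from this patch: a
+ * typical caller reads guest memory with
+ * s390_cpu_virt_mem_rw(cpu, laddr, 0, buf, len, false) and, on a
+ * non-zero return, unwinds via s390_cpu_virt_mem_handle_exc(cpu, ra).)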
Note that we inject a + * program interrupt in case there is an error while accessing the memory. + * + * This function will always return (also for TCG), make sure to call + * s390_cpu_virt_mem_handle_exc() to properly exit the CPU loop. + */ +int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf, + int len, bool is_write) +{ + int currlen, nr_pages, i; + target_ulong *pages; + uint64_t tec; + int ret; + CPUS390XState *env = &cpu->env; + + nr_pages = (((laddr & ~TARGET_PAGE_MASK) + len - 1) >> TARGET_PAGE_BITS) + + 1; + pages = g_malloc(nr_pages * sizeof(*pages)); + + ret = translate_pages(cpu, laddr, nr_pages, pages, is_write, &tec); + if (ret) { + trigger_access_exception(&cpu->env, ret, tec); + } else if (hostbuf != NULL) { + /* Copy data by stepping through the area page by page */ + for (i = 0; i < nr_pages; i++) { + currlen = MIN(len, TARGET_PAGE_SIZE - (laddr % TARGET_PAGE_SIZE)); + cpu_physical_memory_rw(env_cpu(env)->as, pages[i] | (laddr & ~TARGET_PAGE_MASK), + hostbuf, currlen, is_write); + laddr += currlen; + hostbuf += currlen; + len -= currlen; + } + } + + g_free(pages); + return ret; +} + +void s390_cpu_virt_mem_handle_exc(S390CPU *cpu, uintptr_t ra) +{ + /* KVM will handle the interrupt automatically, TCG has to exit the TB */ + cpu_loop_exit_restore(CPU(cpu), ra); +} + +/** + * Translate a real address into a physical (absolute) address. + * @param raddr the real address + * @param rw 0 = read, 1 = write, 2 = code fetch + * @param addr the translated address is stored to this pointer + * @param flags the PAGE_READ/WRITE/EXEC flags are stored to this pointer + * @return 0 = success, != 0, the exception to raise + */ +int mmu_translate_real(CPUS390XState *env, target_ulong raddr, int rw, + target_ulong *addr, int *flags, uint64_t *tec) +{ + const bool lowprot_enabled = env->cregs[0] & CR0_LOWPROT; + + *flags = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + if (is_low_address(raddr & TARGET_PAGE_MASK) && lowprot_enabled) { + /* see comment in mmu_translate() how this works */ + *flags |= PAGE_WRITE_INV; + if (is_low_address(raddr) && rw == MMU_DATA_STORE) { + /* LAP sets bit 56 */ + *tec = (raddr & TARGET_PAGE_MASK) | FS_WRITE | 0x80; + return PGM_PROTECTION; + } + } + + *addr = mmu_real2abs(env, raddr & TARGET_PAGE_MASK); + + mmu_handle_skey(*addr, rw, flags); + return 0; +} diff --git a/qemu/target/s390x/s390-tod.h b/qemu/target/s390x/s390-tod.h new file mode 100644 index 00000000..8b74d6a6 --- /dev/null +++ b/qemu/target/s390x/s390-tod.h @@ -0,0 +1,29 @@ +/* + * TOD (Time Of Day) clock + * + * Copyright 2018 Red Hat, Inc. + * Author(s): David Hildenbrand + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef TARGET_S390_TOD_H +#define TARGET_S390_TOD_H + +/* The value of the TOD clock for 1.1.1970. */ +#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL + +/* Converts ns to s390's clock format */ +static inline uint64_t time2tod(uint64_t ns) +{ + return (ns << 9) / 125 + (((ns & 0xff80000000000000ull) / 125) << 9); +} + +/* Converts s390's clock format to ns */ +static inline uint64_t tod2time(uint64_t t) +{ + return ((t >> 9) * 125) + (((t & 0x1ff) * 125) >> 9); +} + +#endif diff --git a/qemu/target/s390x/sigp.c b/qemu/target/s390x/sigp.c new file mode 100644 index 00000000..6401ed31 --- /dev/null +++ b/qemu/target/s390x/sigp.c @@ -0,0 +1,466 @@ +/* + * s390x SIGP instruction handling + * + * Copyright (c) 2009 Alexander Graf + * Copyright IBM Corp. 
2012
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internal.h"
+//#include "exec/address-spaces.h"
+#include "exec/exec-all.h"
+#include "sysemu/tcg.h"
+
+typedef struct SigpInfo {
+    uint64_t param;
+    int cc;
+    uint64_t *status_reg;
+} SigpInfo;
+
+static void set_sigp_status(SigpInfo *si, uint64_t status)
+{
+    *si->status_reg &= 0xffffffff00000000ULL;
+    *si->status_reg |= status;
+    si->cc = SIGP_CC_STATUS_STORED;
+}
+
+static void sigp_sense(S390CPU *dst_cpu, SigpInfo *si)
+{
+    uint8_t state = s390_cpu_get_state(dst_cpu);
+    bool ext_call = dst_cpu->env.pending_int & INTERRUPT_EXTERNAL_CALL;
+    uint64_t status = 0;
+
+    /* sensing without locks is racy, but it's the same for real hw */
+    if (state != S390_CPU_STATE_STOPPED && !ext_call) {
+        si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+    } else {
+        if (ext_call) {
+            status |= SIGP_STAT_EXT_CALL_PENDING;
+        }
+        if (state == S390_CPU_STATE_STOPPED) {
+            status |= SIGP_STAT_STOPPED;
+        }
+        set_sigp_status(si, status);
+    }
+}
+
+static void sigp_external_call(S390CPU *src_cpu, S390CPU *dst_cpu, SigpInfo *si)
+{
+    int ret;
+
+    ret = cpu_inject_external_call(dst_cpu, src_cpu->env.core_id);
+    if (!ret) {
+        si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+    } else {
+        set_sigp_status(si, SIGP_STAT_EXT_CALL_PENDING);
+    }
+}
+
+static void sigp_emergency(S390CPU *src_cpu, S390CPU *dst_cpu, SigpInfo *si)
+{
+    cpu_inject_emergency_signal(dst_cpu, src_cpu->env.core_id);
+    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+#if 0
+static void sigp_start(CPUState *cs, run_on_cpu_data arg)
+{
+    S390CPU *cpu = S390_CPU(cs);
+    SigpInfo *si = arg.host_ptr;
+
+    if (s390_cpu_get_state(cpu) != S390_CPU_STATE_STOPPED) {
+        si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+        return;
+    }
+
+    s390_cpu_set_state(S390_CPU_STATE_OPERATING, cpu);
+    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+static void sigp_stop(CPUState *cs, run_on_cpu_data arg)
+{
+    S390CPU *cpu = S390_CPU(cs);
+    SigpInfo *si = arg.host_ptr;
+
+    if (s390_cpu_get_state(cpu) != S390_CPU_STATE_OPERATING) {
+        si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+        return;
+    }
+
+    /* disabled wait - sleeping in user space */
+    if (cs->halted) {
+        s390_cpu_set_state(S390_CPU_STATE_STOPPED, cpu);
+    } else {
+        /* execute the stop function */
+        cpu->env.sigp_order = SIGP_STOP;
+        cpu_inject_stop(cpu);
+    }
+    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+static void sigp_stop_and_store_status(CPUState *cs, run_on_cpu_data arg)
+{
+    S390CPU *cpu = S390_CPU(cs);
+    SigpInfo *si = arg.host_ptr;
+
+    /* disabled wait - sleeping in user space */
+    if (s390_cpu_get_state(cpu) == S390_CPU_STATE_OPERATING && cs->halted) {
+        s390_cpu_set_state(S390_CPU_STATE_STOPPED, cpu);
+    }
+
+    switch (s390_cpu_get_state(cpu)) {
+    case S390_CPU_STATE_OPERATING:
+        cpu->env.sigp_order = SIGP_STOP_STORE_STATUS;
+        cpu_inject_stop(cpu);
+        /* store will be performed in do_stop_interrupt() */
+        break;
+    case S390_CPU_STATE_STOPPED:
+        /* already stopped, just store the status */
+        // cpu_synchronize_state(cs);
+        s390_store_status(cpu, S390_STORE_STATUS_DEF_ADDR, true);
+        break;
+    }
+    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+static void sigp_store_status_at_address(CPUState *cs, run_on_cpu_data arg)
+{
+    S390CPU *cpu = S390_CPU(cs);
+    SigpInfo *si = arg.host_ptr;
+    uint32_t address = si->param & 0x7ffffe00u;
+
+    /* cpu has to be stopped */
+    if (s390_cpu_get_state(cpu) != S390_CPU_STATE_STOPPED) {
+        set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
+        return;
+ } + + // cpu_synchronize_state(cs); + + if (s390_store_status(cpu, address, false)) { + set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER); + return; + } + si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; +} + +#define ADTL_SAVE_LC_MASK 0xfUL +static void sigp_store_adtl_status(CPUState *cs, run_on_cpu_data arg) +{ + S390CPU *cpu = S390_CPU(cs); + SigpInfo *si = arg.host_ptr; + uint8_t lc = si->param & ADTL_SAVE_LC_MASK; + hwaddr addr = si->param & ~ADTL_SAVE_LC_MASK; + hwaddr len = 1UL << (lc ? lc : 10); + + if (!s390_has_feat(S390_FEAT_VECTOR) && + !s390_has_feat(S390_FEAT_GUARDED_STORAGE)) { + set_sigp_status(si, SIGP_STAT_INVALID_ORDER); + return; + } + + /* cpu has to be stopped */ + if (s390_cpu_get_state(cpu) != S390_CPU_STATE_STOPPED) { + set_sigp_status(si, SIGP_STAT_INCORRECT_STATE); + return; + } + + /* address must be aligned to length */ + if (addr & (len - 1)) { + set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER); + return; + } + + /* no GS: only lc == 0 is valid */ + if (!s390_has_feat(S390_FEAT_GUARDED_STORAGE) && + lc != 0) { + set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER); + return; + } + + /* GS: 0, 10, 11, 12 are valid */ + if (s390_has_feat(S390_FEAT_GUARDED_STORAGE) && + lc != 0 && + lc != 10 && + lc != 11 && + lc != 12) { + set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER); + return; + } + + // cpu_synchronize_state(cs); + + if (s390_store_adtl_status(cpu, addr, len)) { + set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER); + return; + } + si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; +} + +static void sigp_restart(CPUState *cs, run_on_cpu_data arg) +{ + S390CPU *cpu = S390_CPU(cs); + SigpInfo *si = arg.host_ptr; + + switch (s390_cpu_get_state(cpu)) { + case S390_CPU_STATE_STOPPED: + /* the restart irq has to be delivered prior to any other pending irq */ + // cpu_synchronize_state(cs); + /* + * Set OPERATING (and unhalting) before loading the restart PSW. + * load_psw() will then properly halt the CPU again if necessary (TCG). 
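+         *
+         * (Editor's illustration: a SIGP RESTART sent to a CPU stopped in
+         * disabled wait would therefore first become OPERATING here and
+         * then have do_restart_interrupt() exchange the PSWs through the
+         * lowcore, which may halt it again if the restart-new PSW is a
+         * wait PSW.)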
+ */ + s390_cpu_set_state(S390_CPU_STATE_OPERATING, cpu); + do_restart_interrupt(&cpu->env); + break; + case S390_CPU_STATE_OPERATING: + cpu_inject_restart(cpu); + break; + } + si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; +} + +static void sigp_initial_cpu_reset(CPUState *cs, run_on_cpu_data arg) +{ + S390CPU *cpu = S390_CPU(cs); + S390CPUClass *scc = S390_CPU_GET_CLASS(cpu); + SigpInfo *si = arg.host_ptr; + + // cpu_synchronize_state(cs); + scc->reset(cs, S390_CPU_RESET_INITIAL); + // cpu_synchronize_post_reset(cs); + si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; +} + +static void sigp_cpu_reset(CPUState *cs, run_on_cpu_data arg) +{ + S390CPU *cpu = S390_CPU(cs); + S390CPUClass *scc = S390_CPU_GET_CLASS(cpu); + SigpInfo *si = arg.host_ptr; + + // cpu_synchronize_state(cs); + scc->reset(cs, S390_CPU_RESET_NORMAL); + // cpu_synchronize_post_reset(cs); + si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; +} + +static void sigp_set_prefix(CPUState *cs, run_on_cpu_data arg) +{ + S390CPU *cpu = S390_CPU(cs); + CPUS390XState *env = &cpu->env; + SigpInfo *si = arg.host_ptr; + uint32_t addr = si->param & 0x7fffe000u; + + // cpu_synchronize_state(cs); + + if (!address_space_access_valid(env_cpu(env)->as, addr, + sizeof(struct LowCore), false, + MEMTXATTRS_UNSPECIFIED)) { + set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER); + return; + } + + /* cpu has to be stopped */ + if (s390_cpu_get_state(cpu) != S390_CPU_STATE_STOPPED) { + set_sigp_status(si, SIGP_STAT_INCORRECT_STATE); + return; + } + + cpu->env.psa = addr; + tlb_flush(cs); + // cpu_synchronize_post_init(cs); + si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; +} +#endif + +static void sigp_cond_emergency(S390CPU *src_cpu, S390CPU *dst_cpu, + SigpInfo *si) +{ + const uint64_t psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT; + uint16_t p_asn, s_asn, asn; + uint64_t psw_addr, psw_mask; + bool idle; + + /* this looks racy, but these values are only used when STOPPED */ + idle = CPU(dst_cpu)->halted; + psw_addr = dst_cpu->env.psw.addr; + psw_mask = dst_cpu->env.psw.mask; + asn = si->param; + p_asn = dst_cpu->env.cregs[4] & 0xffff; /* Primary ASN */ + s_asn = dst_cpu->env.cregs[3] & 0xffff; /* Secondary ASN */ + + if (s390_cpu_get_state(dst_cpu) != S390_CPU_STATE_STOPPED || + (psw_mask & psw_int_mask) != psw_int_mask || + (idle && psw_addr != 0) || + (!idle && (asn == p_asn || asn == s_asn))) { + cpu_inject_emergency_signal(dst_cpu, src_cpu->env.core_id); + } else { + set_sigp_status(si, SIGP_STAT_INCORRECT_STATE); + } + + si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; +} + +static void sigp_sense_running(S390CPU *dst_cpu, SigpInfo *si) +{ + /* sensing without locks is racy, but it's the same for real hw */ + //if (!s390_has_feat(S390_FEAT_SENSE_RUNNING_STATUS)) { + // set_sigp_status(si, SIGP_STAT_INVALID_ORDER); + // return; + //} + + /* If halted (which includes also STOPPED), it is not running */ + if (CPU(dst_cpu)->halted) { + set_sigp_status(si, SIGP_STAT_NOT_RUNNING); + } else { + si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; + } +} + +static int handle_sigp_single_dst(S390CPU *cpu, S390CPU *dst_cpu, uint8_t order, + uint64_t param, uint64_t *status_reg) +{ + SigpInfo si = { + .param = param, + .status_reg = status_reg, + }; + + /* cpu available? 
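+     *
+     * Editor's note on the condition codes used below (all taken from
+     * this function): SIGP_CC_ORDER_CODE_ACCEPTED for an accepted order,
+     * SIGP_CC_STATUS_STORED when details were placed into *status_reg,
+     * SIGP_CC_BUSY while an earlier order is still pending, and
+     * SIGP_CC_NOT_OPERATIONAL when the addressed CPU does not exist.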
*/ + if (dst_cpu == NULL) { + return SIGP_CC_NOT_OPERATIONAL; + } + + /* only resets can break pending orders */ + if (dst_cpu->env.sigp_order != 0 && + order != SIGP_CPU_RESET && + order != SIGP_INITIAL_CPU_RESET) { + return SIGP_CC_BUSY; + } + + switch (order) { + case SIGP_SENSE: + sigp_sense(dst_cpu, &si); + break; + case SIGP_EXTERNAL_CALL: + sigp_external_call(cpu, dst_cpu, &si); + break; + case SIGP_EMERGENCY: + sigp_emergency(cpu, dst_cpu, &si); + break; + case SIGP_START: + //run_on_cpu(CPU(dst_cpu), sigp_start, RUN_ON_CPU_HOST_PTR(&si)); + break; + case SIGP_STOP: + //run_on_cpu(CPU(dst_cpu), sigp_stop, RUN_ON_CPU_HOST_PTR(&si)); + break; + case SIGP_RESTART: + //run_on_cpu(CPU(dst_cpu), sigp_restart, RUN_ON_CPU_HOST_PTR(&si)); + break; + case SIGP_STOP_STORE_STATUS: + //run_on_cpu(CPU(dst_cpu), sigp_stop_and_store_status, RUN_ON_CPU_HOST_PTR(&si)); + break; + case SIGP_STORE_STATUS_ADDR: + //run_on_cpu(CPU(dst_cpu), sigp_store_status_at_address, RUN_ON_CPU_HOST_PTR(&si)); + break; + case SIGP_STORE_ADTL_STATUS: + //run_on_cpu(CPU(dst_cpu), sigp_store_adtl_status, RUN_ON_CPU_HOST_PTR(&si)); + break; + case SIGP_SET_PREFIX: + //run_on_cpu(CPU(dst_cpu), sigp_set_prefix, RUN_ON_CPU_HOST_PTR(&si)); + break; + case SIGP_INITIAL_CPU_RESET: + //run_on_cpu(CPU(dst_cpu), sigp_initial_cpu_reset, RUN_ON_CPU_HOST_PTR(&si)); + break; + case SIGP_CPU_RESET: + //run_on_cpu(CPU(dst_cpu), sigp_cpu_reset, RUN_ON_CPU_HOST_PTR(&si)); + break; + case SIGP_COND_EMERGENCY: + sigp_cond_emergency(cpu, dst_cpu, &si); + break; + case SIGP_SENSE_RUNNING: + sigp_sense_running(dst_cpu, &si); + break; + default: + set_sigp_status(&si, SIGP_STAT_INVALID_ORDER); + } + + return si.cc; +} + +static int sigp_set_architecture(S390CPU *cpu, uint32_t param, + uint64_t *status_reg) +{ + bool all_stopped = true; + +#if 0 + CPU_FOREACH(cur_cs) { + cur_cpu = S390_CPU(cur_cs); + + if (cur_cpu == cpu) { + continue; + } + if (s390_cpu_get_state(cur_cpu) != S390_CPU_STATE_STOPPED) { + all_stopped = false; + } + } +#endif + + all_stopped = false; + *status_reg &= 0xffffffff00000000ULL; + + /* Reject set arch order, with czam we're always in z/Arch mode. */ + *status_reg |= (all_stopped ? SIGP_STAT_INVALID_PARAMETER : + SIGP_STAT_INCORRECT_STATE); + return SIGP_CC_STATUS_STORED; +} + +int handle_sigp(CPUS390XState *env, uint8_t order, uint64_t r1, uint64_t r3) +{ + uint64_t *status_reg = &env->regs[r1]; + uint64_t param = (r1 % 2) ? 
env->regs[r1] : env->regs[r1 + 1]; + S390CPU *cpu = env_archcpu(env); + S390CPU *dst_cpu = NULL; + int ret; + + switch (order) { + case SIGP_SET_ARCH: + ret = sigp_set_architecture(cpu, param, status_reg); + break; + default: + /* all other sigp orders target a single vcpu */ + dst_cpu = s390_cpu_addr2state(env->regs[r3]); + ret = handle_sigp_single_dst(cpu, dst_cpu, order, param, status_reg); + } + + return ret; +} + +int s390_cpu_restart(S390CPU *cpu) +{ + //SigpInfo si = {}; + + //run_on_cpu(CPU(cpu), sigp_restart, RUN_ON_CPU_HOST_PTR(&si)); + return 0; +} + +void do_stop_interrupt(CPUS390XState *env) +{ + S390CPU *cpu = env_archcpu(env); + + if (s390_cpu_set_state(S390_CPU_STATE_STOPPED, cpu) == 0) { + // qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + } + if (cpu->env.sigp_order == SIGP_STOP_STORE_STATUS) { + s390_store_status(cpu, S390_STORE_STATUS_DEF_ADDR, true); + } + env->sigp_order = 0; + env->pending_int &= ~INTERRUPT_STOP; +} + +void s390_init_sigp(void) +{ +} diff --git a/qemu/target/s390x/tcg-stub.c b/qemu/target/s390x/tcg-stub.c new file mode 100644 index 00000000..d22c8988 --- /dev/null +++ b/qemu/target/s390x/tcg-stub.c @@ -0,0 +1,30 @@ +/* + * QEMU TCG support -- s390x specific function stubs. + * + * Copyright (C) 2018 Red Hat Inc + * + * Authors: + * David Hildenbrand + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "cpu.h" +#include "tcg_s390x.h" + +void tcg_s390_tod_updated(CPUState *cs, run_on_cpu_data opaque) +{ +} +void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env, + uint32_t code, uintptr_t ra) +{ + g_assert_not_reached(); +} +void QEMU_NORETURN tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc, + uintptr_t ra) +{ + g_assert_not_reached(); +} diff --git a/qemu/target/s390x/tcg_s390x.h b/qemu/target/s390x/tcg_s390x.h new file mode 100644 index 00000000..2f54ccb0 --- /dev/null +++ b/qemu/target/s390x/tcg_s390x.h @@ -0,0 +1,24 @@ +/* + * QEMU TCG support -- s390x specific functions. + * + * Copyright 2018 Red Hat, Inc. + * + * Authors: + * David Hildenbrand + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef TCG_S390X_H +#define TCG_S390X_H + +void tcg_s390_tod_updated(CPUState *cs, run_on_cpu_data opaque); +void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env, + uint32_t code, uintptr_t ra); +void QEMU_NORETURN tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc, + uintptr_t ra); +void QEMU_NORETURN tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc, + uintptr_t ra); + +#endif /* TCG_S390X_H */ diff --git a/qemu/target/s390x/translate.c b/qemu/target/s390x/translate.c new file mode 100644 index 00000000..4eed38c2 --- /dev/null +++ b/qemu/target/s390x/translate.c @@ -0,0 +1,6938 @@ +/* + * S/390 translation + * + * Copyright (c) 2009 Ulrich Hecht + * Copyright (c) 2010 Alexander Graf + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +/* #define DEBUG_INLINE_BRANCHES */ +#define S390X_DEBUG_DISAS +/* #define S390X_DEBUG_DISAS_VERBOSE */ + +#ifdef S390X_DEBUG_DISAS_VERBOSE +# define LOG_DISAS(...) qemu_log(__VA_ARGS__) +#else +# define LOG_DISAS(...) do { } while (0) +#endif + +#include "qemu/osdep.h" +#include "cpu.h" +#include "internal.h" +#include "exec/exec-all.h" +#include "tcg/tcg-op.h" +#include "tcg/tcg-op-gvec.h" +#include "qemu/log.h" +#include "qemu/host-utils.h" +#include "exec/cpu_ldst.h" +#include "exec/gen-icount.h" +#include "exec/helper-proto.h" +#include "exec/helper-gen.h" + +#include "exec/translator.h" +#include "qemu/atomic128.h" + + +/* Information that (most) every instruction needs to manipulate. */ +typedef struct DisasContext DisasContext; +typedef struct DisasInsn DisasInsn; +typedef struct DisasFields DisasFields; + +/* + * Define a structure to hold the decoded fields. We'll store each inside + * an array indexed by an enum. In order to conserve memory, we'll arrange + * for fields that do not exist at the same time to overlap, thus the "C" + * for compact. For checking purposes there is an "O" for original index + * as well that will be applied to availability bitmaps. + */ + +enum DisasFieldIndexO { + FLD_O_r1, + FLD_O_r2, + FLD_O_r3, + FLD_O_m1, + FLD_O_m3, + FLD_O_m4, + FLD_O_m5, + FLD_O_m6, + FLD_O_b1, + FLD_O_b2, + FLD_O_b4, + FLD_O_d1, + FLD_O_d2, + FLD_O_d4, + FLD_O_x2, + FLD_O_l1, + FLD_O_l2, + FLD_O_i1, + FLD_O_i2, + FLD_O_i3, + FLD_O_i4, + FLD_O_i5, + FLD_O_v1, + FLD_O_v2, + FLD_O_v3, + FLD_O_v4, +}; + +enum DisasFieldIndexC { + FLD_C_r1 = 0, + FLD_C_m1 = 0, + FLD_C_b1 = 0, + FLD_C_i1 = 0, + FLD_C_v1 = 0, + + FLD_C_r2 = 1, + FLD_C_b2 = 1, + FLD_C_i2 = 1, + + FLD_C_r3 = 2, + FLD_C_m3 = 2, + FLD_C_i3 = 2, + FLD_C_v3 = 2, + + FLD_C_m4 = 3, + FLD_C_b4 = 3, + FLD_C_i4 = 3, + FLD_C_l1 = 3, + FLD_C_v4 = 3, + + FLD_C_i5 = 4, + FLD_C_d1 = 4, + FLD_C_m5 = 4, + + FLD_C_d2 = 5, + FLD_C_m6 = 5, + + FLD_C_d4 = 6, + FLD_C_x2 = 6, + FLD_C_l2 = 6, + FLD_C_v2 = 6, + + NUM_C_FIELD = 7 +}; + +struct DisasFields { + uint64_t raw_insn; + unsigned op:8; + unsigned op2:8; + unsigned presentC:16; + unsigned int presentO; + int c[NUM_C_FIELD]; +}; + +struct DisasContext { + DisasContextBase base; + const DisasInsn *insn; + DisasFields fields; + uint64_t ex_value; + /* + * During translate_one(), pc_tmp is used to determine the instruction + * to be executed after base.pc_next - e.g. next sequential instruction + * or a branch target. + */ + uint64_t pc_tmp; + uint32_t ilen; + enum cc_op cc_op; + bool do_debug; + + // Unicorn + struct uc_struct *uc; +}; + +/* Information carried about a condition to be evaluated. 
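+ *
+ * For example (editor's illustration, not from the original source): a
+ * 64-bit signed "less than" between two registers could be described as
+ * cond = TCG_COND_LT, is_64 = true, u.s64.a/u.s64.b holding the operands,
+ * with g1/g2 marking operands that are global TCG values the consumer
+ * must not free.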
*/ +typedef struct { + TCGCond cond:8; + bool is_64; + bool g1; + bool g2; + union { + struct { TCGv_i64 a, b; } s64; + struct { TCGv_i32 a, b; } s32; + } u; +} DisasCompare; + +#ifdef DEBUG_INLINE_BRANCHES +static uint64_t inline_branch_hit[CC_OP_MAX]; +static uint64_t inline_branch_miss[CC_OP_MAX]; +#endif + +static void pc_to_link_info(TCGContext *tcg_ctx, TCGv_i64 out, DisasContext *s, uint64_t pc) +{ + TCGv_i64 tmp; + + if (s->base.tb->flags & FLAG_MASK_32) { + if (s->base.tb->flags & FLAG_MASK_64) { + tcg_gen_movi_i64(tcg_ctx, out, pc); + return; + } + pc |= 0x80000000; + } + assert(!(s->base.tb->flags & FLAG_MASK_64)); + tmp = tcg_const_i64(tcg_ctx, pc); + tcg_gen_deposit_i64(tcg_ctx, out, out, tmp, 0, 32); + tcg_temp_free_i64(tcg_ctx, tmp); +} + +static TCGv_i64 psw_addr; +static TCGv_i64 psw_mask; +static TCGv_i64 gbea; + +static TCGv_i32 cc_op; +static TCGv_i64 cc_src; +static TCGv_i64 cc_dst; +static TCGv_i64 cc_vr; + +static char cpu_reg_names[16][4]; +static TCGv_i64 regs[16]; + +void s390x_translate_init(struct uc_struct *uc) +{ + TCGContext *tcg_ctx = uc->tcg_ctx; + int i; + + psw_addr = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUS390XState, psw.addr), + "psw_addr"); + psw_mask = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUS390XState, psw.mask), + "psw_mask"); + gbea = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUS390XState, gbea), + "gbea"); + + cc_op = tcg_global_mem_new_i32(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUS390XState, cc_op), + "cc_op"); + cc_src = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUS390XState, cc_src), + "cc_src"); + cc_dst = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUS390XState, cc_dst), + "cc_dst"); + cc_vr = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUS390XState, cc_vr), + "cc_vr"); + + for (i = 0; i < 16; i++) { + snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i); + regs[i] = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUS390XState, regs[i]), + cpu_reg_names[i]); + } +} + +static inline int vec_full_reg_offset(uint8_t reg) +{ + g_assert(reg < 32); + return offsetof(CPUS390XState, vregs[reg][0]); +} + +static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es) +{ + /* Convert element size (es) - e.g. MO_8 - to bytes */ + const uint8_t bytes = 1 << es; + int offs = enr * bytes; + + /* + * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte + * of the 16 byte vector, on both, little and big endian systems. + * + * Big Endian (target/possible host) + * B: [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15] + * HW: [ 0][ 1][ 2][ 3] - [ 4][ 5][ 6][ 7] + * W: [ 0][ 1] - [ 2][ 3] + * DW: [ 0] - [ 1] + * + * Little Endian (possible host) + * B: [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8] + * HW: [ 3][ 2][ 1][ 0] - [ 7][ 6][ 5][ 4] + * W: [ 1][ 0] - [ 3][ 2] + * DW: [ 0] - [ 1] + * + * For 16 byte elements, the two 8 byte halves will not form a host + * int128 if the host is little endian, since they're in the wrong order. + * Some operations (e.g. xor) do not care. For operations like addition, + * the two 8 byte elements have to be loaded separately. Let's force all + * 16 byte operations to handle it in a special way. 
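+     *
+     * Worked example (editor's illustration): with es = MO_32 and enr = 1,
+     * offs starts out as 4; on a little-endian host the XOR with
+     * (8 - bytes) = 4 below turns that into byte offset 0 of the first
+     * 8-byte half - exactly where element 1 of the big-endian W row
+     * above lives.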
+ */ + g_assert(es <= MO_64); +#ifndef HOST_WORDS_BIGENDIAN + offs ^= (8 - bytes); +#endif + return offs + vec_full_reg_offset(reg); +} + +static inline int freg64_offset(uint8_t reg) +{ + g_assert(reg < 16); + return vec_reg_offset(reg, 0, MO_64); +} + +static inline int freg32_offset(uint8_t reg) +{ + g_assert(reg < 16); + return vec_reg_offset(reg, 0, MO_32); +} + +static TCGv_i64 load_reg(TCGContext *tcg_ctx, int reg) +{ + TCGv_i64 r = tcg_temp_new_i64(tcg_ctx); + tcg_gen_mov_i64(tcg_ctx, r, regs[reg]); + return r; +} + +static TCGv_i64 load_freg(TCGContext *tcg_ctx, int reg) +{ + TCGv_i64 r = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_ld_i64(tcg_ctx, r, tcg_ctx->cpu_env, freg64_offset(reg)); + return r; +} + +static TCGv_i64 load_freg32_i64(TCGContext *tcg_ctx, int reg) +{ + TCGv_i64 r = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_ld32u_i64(tcg_ctx, r, tcg_ctx->cpu_env, freg32_offset(reg)); + return r; +} + +static void store_reg(TCGContext *tcg_ctx, int reg, TCGv_i64 v) +{ + tcg_gen_mov_i64(tcg_ctx, regs[reg], v); +} + +static void store_freg(TCGContext *tcg_ctx, int reg, TCGv_i64 v) +{ + tcg_gen_st_i64(tcg_ctx, v, tcg_ctx->cpu_env, freg64_offset(reg)); +} + +static void store_reg32_i64(TCGContext *tcg_ctx, int reg, TCGv_i64 v) +{ + /* 32 bit register writes keep the upper half */ + tcg_gen_deposit_i64(tcg_ctx, regs[reg], regs[reg], v, 0, 32); +} + +static void store_reg32h_i64(TCGContext *tcg_ctx, int reg, TCGv_i64 v) +{ + tcg_gen_deposit_i64(tcg_ctx, regs[reg], regs[reg], v, 32, 32); +} + +static void store_freg32_i64(TCGContext *tcg_ctx, int reg, TCGv_i64 v) +{ + tcg_gen_st32_i64(tcg_ctx, v, tcg_ctx->cpu_env, freg32_offset(reg)); +} + +static void return_low128(TCGContext *tcg_ctx, TCGv_i64 dest) +{ + tcg_gen_ld_i64(tcg_ctx, dest, tcg_ctx->cpu_env, offsetof(CPUS390XState, retxl)); +} + +static void update_psw_addr(DisasContext *s) +{ + /* psw.addr */ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_movi_i64(tcg_ctx, psw_addr, s->base.pc_next); +} + +static void per_branch(DisasContext *s, bool to_next) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_movi_i64(tcg_ctx, gbea, s->base.pc_next); + + if (s->base.tb->flags & FLAG_MASK_PER) { + TCGv_i64 next_pc = to_next ? 
tcg_const_i64(tcg_ctx, s->pc_tmp) : psw_addr;
+        gen_helper_per_branch(tcg_ctx, tcg_ctx->cpu_env, gbea, next_pc);
+        if (to_next) {
+            tcg_temp_free_i64(tcg_ctx, next_pc);
+        }
+    }
+}
+
+static void per_branch_cond(DisasContext *s, TCGCond cond,
+                            TCGv_i64 arg1, TCGv_i64 arg2)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+
+    if (s->base.tb->flags & FLAG_MASK_PER) {
+        TCGLabel *lab = gen_new_label(tcg_ctx);
+        tcg_gen_brcond_i64(tcg_ctx, tcg_invert_cond(cond), arg1, arg2, lab);
+
+        tcg_gen_movi_i64(tcg_ctx, gbea, s->base.pc_next);
+        gen_helper_per_branch(tcg_ctx, tcg_ctx->cpu_env, gbea, psw_addr);
+
+        gen_set_label(tcg_ctx, lab);
+    } else {
+        TCGv_i64 pc = tcg_const_i64(tcg_ctx, s->base.pc_next);
+        tcg_gen_movcond_i64(tcg_ctx, cond, gbea, arg1, arg2, gbea, pc);
+        tcg_temp_free_i64(tcg_ctx, pc);
+    }
+}
+
+static void per_breaking_event(DisasContext *s)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    tcg_gen_movi_i64(tcg_ctx, gbea, s->base.pc_next);
+}
+
+static void update_cc_op(DisasContext *s)
+{
+    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
+        TCGContext *tcg_ctx = s->uc->tcg_ctx;
+        tcg_gen_movi_i32(tcg_ctx, cc_op, s->cc_op);
+    }
+}
+
+static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
+{
+    return (uint64_t)cpu_lduw_code(env, pc);
+}
+
+static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
+{
+    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
+}
+
+static int get_mem_index(DisasContext *s)
+{
+    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
+        return MMU_REAL_IDX;
+    }
+
+    switch (s->base.tb->flags & FLAG_MASK_ASC) {
+    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
+        return MMU_PRIMARY_IDX;
+    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
+        return MMU_SECONDARY_IDX;
+    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
+        return MMU_HOME_IDX;
+    default:
+        tcg_abort();
+        break;
+    }
+}
+
+static void gen_exception(TCGContext *tcg_ctx, int excp)
+{
+    TCGv_i32 tmp = tcg_const_i32(tcg_ctx, excp);
+    gen_helper_exception(tcg_ctx, tcg_ctx->cpu_env, tmp);
+    tcg_temp_free_i32(tcg_ctx, tmp);
+}
+
+static void gen_program_exception(DisasContext *s, int code)
+{
+    TCGv_i32 tmp;
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+
+    /* Remember what pgm exception this was.  */
+    tmp = tcg_const_i32(tcg_ctx, code);
+    tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUS390XState, int_pgm_code));
+    tcg_temp_free_i32(tcg_ctx, tmp);
+
+    tmp = tcg_const_i32(tcg_ctx, s->ilen);
+    tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
+    tcg_temp_free_i32(tcg_ctx, tmp);
+
+    /* update the psw */
+    update_psw_addr(s);
+
+    /* Save off cc.  */
+    update_cc_op(s);
+
+    /* Trigger exception. 
*/
+    gen_exception(tcg_ctx, EXCP_PGM);
+}
+
+static inline void gen_illegal_opcode(DisasContext *s)
+{
+    gen_program_exception(s, PGM_OPERATION);
+}
+
+static inline void gen_data_exception(TCGContext *tcg_ctx, uint8_t dxc)
+{
+    TCGv_i32 tmp = tcg_const_i32(tcg_ctx, dxc);
+    gen_helper_data_exception(tcg_ctx, tcg_ctx->cpu_env, tmp);
+    tcg_temp_free_i32(tcg_ctx, tmp);
+}
+
+static inline void gen_trap(DisasContext *s)
+{
+    /* Set DXC to 0xff */
+    gen_data_exception(s->uc->tcg_ctx, 0xff);
+}
+
+static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
+                                  int64_t imm)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+
+    tcg_gen_addi_i64(tcg_ctx, dst, src, imm);
+    if (!(s->base.tb->flags & FLAG_MASK_64)) {
+        if (s->base.tb->flags & FLAG_MASK_32) {
+            tcg_gen_andi_i64(tcg_ctx, dst, dst, 0x7fffffff);
+        } else {
+            tcg_gen_andi_i64(tcg_ctx, dst, dst, 0x00ffffff);
+        }
+    }
+}
+
+static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx);
+
+    /*
+     * Note that d2 is limited to 20 bits, signed. If we crop negative
+     * displacements early, we create larger immediate addends.
+     */
+    if (b2 && x2) {
+        tcg_gen_add_i64(tcg_ctx, tmp, regs[b2], regs[x2]);
+        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
+    } else if (b2) {
+        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
+    } else if (x2) {
+        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
+    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
+        if (s->base.tb->flags & FLAG_MASK_32) {
+            tcg_gen_movi_i64(tcg_ctx, tmp, d2 & 0x7fffffff);
+        } else {
+            tcg_gen_movi_i64(tcg_ctx, tmp, d2 & 0x00ffffff);
+        }
+    } else {
+        tcg_gen_movi_i64(tcg_ctx, tmp, d2);
+    }
+
+    return tmp;
+}
+
+static inline bool live_cc_data(DisasContext *s)
+{
+    return (s->cc_op != CC_OP_DYNAMIC
+            && s->cc_op != CC_OP_STATIC
+            && s->cc_op > 3);
+}
+
+static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
+{
+    if (live_cc_data(s)) {
+        TCGContext *tcg_ctx = s->uc->tcg_ctx;
+
+        tcg_gen_discard_i64(tcg_ctx, cc_src);
+        tcg_gen_discard_i64(tcg_ctx, cc_dst);
+        tcg_gen_discard_i64(tcg_ctx, cc_vr);
+    }
+    s->cc_op = CC_OP_CONST0 + val;
+}
+
+static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+
+    if (live_cc_data(s)) {
+        tcg_gen_discard_i64(tcg_ctx, cc_src);
+        tcg_gen_discard_i64(tcg_ctx, cc_vr);
+    }
+    tcg_gen_mov_i64(tcg_ctx, cc_dst, dst);
+    s->cc_op = op;
+}
+
+static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
+                                  TCGv_i64 dst)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+
+    if (live_cc_data(s)) {
+        tcg_gen_discard_i64(tcg_ctx, cc_vr);
+    }
+    tcg_gen_mov_i64(tcg_ctx, cc_src, src);
+    tcg_gen_mov_i64(tcg_ctx, cc_dst, dst);
+    s->cc_op = op;
+}
+
+static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
+                                  TCGv_i64 dst, TCGv_i64 vr)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+
+    tcg_gen_mov_i64(tcg_ctx, cc_src, src);
+    tcg_gen_mov_i64(tcg_ctx, cc_dst, dst);
+    tcg_gen_mov_i64(tcg_ctx, cc_vr, vr);
+    s->cc_op = op;
+}
+
+static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
+{
+    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
+}
+
+static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
+{
+    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
+}
+
+static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
+{
+    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
+}
+
+static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
+{
+    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
+}
+
+/* CC 
value is in env->cc_op */ +static void set_cc_static(DisasContext *s) +{ + if (live_cc_data(s)) { + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + tcg_gen_discard_i64(tcg_ctx, cc_src); + tcg_gen_discard_i64(tcg_ctx, cc_dst); + tcg_gen_discard_i64(tcg_ctx, cc_vr); + } + s->cc_op = CC_OP_STATIC; +} + +/* calculates cc into cc_op */ +static void gen_op_calc_cc(DisasContext *s) +{ + TCGv_i32 local_cc_op = NULL; + TCGv_i64 dummy = NULL; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + switch (s->cc_op) { + default: + dummy = tcg_const_i64(tcg_ctx, 0); + /* FALLTHRU */ + case CC_OP_ADD_64: + case CC_OP_ADDU_64: + case CC_OP_ADDC_64: + case CC_OP_SUB_64: + case CC_OP_SUBU_64: + case CC_OP_SUBB_64: + case CC_OP_ADD_32: + case CC_OP_ADDU_32: + case CC_OP_ADDC_32: + case CC_OP_SUB_32: + case CC_OP_SUBU_32: + case CC_OP_SUBB_32: + local_cc_op = tcg_const_i32(tcg_ctx, s->cc_op); + break; + case CC_OP_CONST0: + case CC_OP_CONST1: + case CC_OP_CONST2: + case CC_OP_CONST3: + case CC_OP_STATIC: + case CC_OP_DYNAMIC: + break; + } + + switch (s->cc_op) { + case CC_OP_CONST0: + case CC_OP_CONST1: + case CC_OP_CONST2: + case CC_OP_CONST3: + /* s->cc_op is the cc value */ + tcg_gen_movi_i32(tcg_ctx, cc_op, s->cc_op - CC_OP_CONST0); + break; + case CC_OP_STATIC: + /* env->cc_op already is the cc value */ + break; + case CC_OP_NZ: + case CC_OP_ABS_64: + case CC_OP_NABS_64: + case CC_OP_ABS_32: + case CC_OP_NABS_32: + case CC_OP_LTGT0_32: + case CC_OP_LTGT0_64: + case CC_OP_COMP_32: + case CC_OP_COMP_64: + case CC_OP_NZ_F32: + case CC_OP_NZ_F64: + case CC_OP_FLOGR: + case CC_OP_LCBB: + /* 1 argument */ + gen_helper_calc_cc(tcg_ctx, cc_op, tcg_ctx->cpu_env, local_cc_op, dummy, cc_dst, dummy); + break; + case CC_OP_ICM: + case CC_OP_LTGT_32: + case CC_OP_LTGT_64: + case CC_OP_LTUGTU_32: + case CC_OP_LTUGTU_64: + case CC_OP_TM_32: + case CC_OP_TM_64: + case CC_OP_SLA_32: + case CC_OP_SLA_64: + case CC_OP_NZ_F128: + case CC_OP_VC: + /* 2 arguments */ + gen_helper_calc_cc(tcg_ctx, cc_op, tcg_ctx->cpu_env, local_cc_op, cc_src, cc_dst, dummy); + break; + case CC_OP_ADD_64: + case CC_OP_ADDU_64: + case CC_OP_ADDC_64: + case CC_OP_SUB_64: + case CC_OP_SUBU_64: + case CC_OP_SUBB_64: + case CC_OP_ADD_32: + case CC_OP_ADDU_32: + case CC_OP_ADDC_32: + case CC_OP_SUB_32: + case CC_OP_SUBU_32: + case CC_OP_SUBB_32: + /* 3 arguments */ + gen_helper_calc_cc(tcg_ctx, cc_op, tcg_ctx->cpu_env, local_cc_op, cc_src, cc_dst, cc_vr); + break; + case CC_OP_DYNAMIC: + /* unknown operation - assume 3 arguments and cc_op in env */ + gen_helper_calc_cc(tcg_ctx, cc_op, tcg_ctx->cpu_env, cc_op, cc_src, cc_dst, cc_vr); + break; + default: + tcg_abort(); + } + + if (local_cc_op) { + tcg_temp_free_i32(tcg_ctx, local_cc_op); + } + if (dummy) { + tcg_temp_free_i64(tcg_ctx, dummy); + } + + /* We now have cc in cc_op as constant */ + set_cc_static(s); +} + +static bool use_exit_tb(DisasContext *s) +{ + return s->base.singlestep_enabled || + (tb_cflags(s->base.tb) & CF_LAST_IO) || + (s->base.tb->flags & FLAG_MASK_PER); +} + +static bool use_goto_tb(DisasContext *s, uint64_t dest) +{ + if (unlikely(use_exit_tb(s))) { + return false; + } + return (dest & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) || + (dest & TARGET_PAGE_MASK) == (s->base.pc_next & TARGET_PAGE_MASK); +} + +static void account_noninline_branch(DisasContext *s, int cc_op) +{ +#ifdef DEBUG_INLINE_BRANCHES + inline_branch_miss[cc_op]++; +#endif +} + +static void account_inline_branch(DisasContext *s, int cc_op) +{ +#ifdef DEBUG_INLINE_BRANCHES + inline_branch_hit[cc_op]++; +#endif +} + 
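+/*
+ * Editor's note (not in the original patch): the 4-bit mask indexing the
+ * tables below has one bit per condition-code value: 8 tests CC 0, 4
+ * tests CC 1, 2 tests CC 2 and 1 tests CC 3; a branch is taken if the
+ * bit for the current CC is set.  After a signed compare, CC 0 means
+ * "equal", CC 1 "first operand low" and CC 2 "first operand high",
+ * which is why e.g. mask 8 maps to TCG_COND_EQ and mask 4 | 2 maps to
+ * TCG_COND_NE in ltgt_cond[].
+ */
+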
+/* Table of mask values to comparison codes, given a comparison as input. + For such, CC=3 should not be possible. */ +static const TCGCond ltgt_cond[16] = { + TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */ + TCG_COND_GT, TCG_COND_GT, /* | | GT | x */ + TCG_COND_LT, TCG_COND_LT, /* | LT | | x */ + TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */ + TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */ + TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */ + TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */ + TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */ +}; + +/* Table of mask values to comparison codes, given a logic op as input. + For such, only CC=0 and CC=1 should be possible. */ +static const TCGCond nz_cond[16] = { + TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */ + TCG_COND_NEVER, TCG_COND_NEVER, + TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */ + TCG_COND_NE, TCG_COND_NE, + TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */ + TCG_COND_EQ, TCG_COND_EQ, + TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */ + TCG_COND_ALWAYS, TCG_COND_ALWAYS, +}; + +/* Interpret MASK in terms of S->CC_OP, and fill in C with all the + details required to generate a TCG comparison. */ +static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask) +{ + TCGCond cond; + enum cc_op old_cc_op = s->cc_op; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + if (mask == 15 || mask == 0) { + c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER); + c->u.s32.a = cc_op; + c->u.s32.b = cc_op; + c->g1 = c->g2 = true; + c->is_64 = false; + return; + } + + /* Find the TCG condition for the mask + cc op. */ + switch (old_cc_op) { + case CC_OP_LTGT0_32: + case CC_OP_LTGT0_64: + case CC_OP_LTGT_32: + case CC_OP_LTGT_64: + cond = ltgt_cond[mask]; + if (cond == TCG_COND_NEVER) { + goto do_dynamic; + } + account_inline_branch(s, old_cc_op); + break; + + case CC_OP_LTUGTU_32: + case CC_OP_LTUGTU_64: + cond = tcg_unsigned_cond(ltgt_cond[mask]); + if (cond == TCG_COND_NEVER) { + goto do_dynamic; + } + account_inline_branch(s, old_cc_op); + break; + + case CC_OP_NZ: + cond = nz_cond[mask]; + if (cond == TCG_COND_NEVER) { + goto do_dynamic; + } + account_inline_branch(s, old_cc_op); + break; + + case CC_OP_TM_32: + case CC_OP_TM_64: + switch (mask) { + case 8: + cond = TCG_COND_EQ; + break; + case 4 | 2 | 1: + cond = TCG_COND_NE; + break; + default: + goto do_dynamic; + } + account_inline_branch(s, old_cc_op); + break; + + case CC_OP_ICM: + switch (mask) { + case 8: + cond = TCG_COND_EQ; + break; + case 4 | 2 | 1: + case 4 | 2: + cond = TCG_COND_NE; + break; + default: + goto do_dynamic; + } + account_inline_branch(s, old_cc_op); + break; + + case CC_OP_FLOGR: + switch (mask & 0xa) { + case 8: /* src == 0 -> no one bit found */ + cond = TCG_COND_EQ; + break; + case 2: /* src != 0 -> one bit found */ + cond = TCG_COND_NE; + break; + default: + goto do_dynamic; + } + account_inline_branch(s, old_cc_op); + break; + + case CC_OP_ADDU_32: + case CC_OP_ADDU_64: + switch (mask) { + case 8 | 2: /* vr == 0 */ + cond = TCG_COND_EQ; + break; + case 4 | 1: /* vr != 0 */ + cond = TCG_COND_NE; + break; + case 8 | 4: /* no carry -> vr >= src */ + cond = TCG_COND_GEU; + break; + case 2 | 1: /* carry -> vr < src */ + cond = TCG_COND_LTU; + break; + default: + goto do_dynamic; + } + account_inline_branch(s, old_cc_op); + break; + + case CC_OP_SUBU_32: + case CC_OP_SUBU_64: + /* Note that CC=0 is impossible; treat it as dont-care. 
*/ + switch (mask & 7) { + case 2: /* zero -> op1 == op2 */ + cond = TCG_COND_EQ; + break; + case 4 | 1: /* !zero -> op1 != op2 */ + cond = TCG_COND_NE; + break; + case 4: /* borrow (!carry) -> op1 < op2 */ + cond = TCG_COND_LTU; + break; + case 2 | 1: /* !borrow (carry) -> op1 >= op2 */ + cond = TCG_COND_GEU; + break; + default: + goto do_dynamic; + } + account_inline_branch(s, old_cc_op); + break; + + default: + do_dynamic: + /* Calculate cc value. */ + gen_op_calc_cc(s); + /* FALLTHRU */ + + case CC_OP_STATIC: + /* Jump based on CC. We'll load up the real cond below; + the assignment here merely avoids a compiler warning. */ + account_noninline_branch(s, old_cc_op); + old_cc_op = CC_OP_STATIC; + cond = TCG_COND_NEVER; + break; + } + + /* Load up the arguments of the comparison. */ + c->is_64 = true; + c->g1 = c->g2 = false; + switch (old_cc_op) { + case CC_OP_LTGT0_32: + c->is_64 = false; + c->u.s32.a = tcg_temp_new_i32(tcg_ctx); + tcg_gen_extrl_i64_i32(tcg_ctx, c->u.s32.a, cc_dst); + c->u.s32.b = tcg_const_i32(tcg_ctx, 0); + break; + case CC_OP_LTGT_32: + case CC_OP_LTUGTU_32: + case CC_OP_SUBU_32: + c->is_64 = false; + c->u.s32.a = tcg_temp_new_i32(tcg_ctx); + tcg_gen_extrl_i64_i32(tcg_ctx, c->u.s32.a, cc_src); + c->u.s32.b = tcg_temp_new_i32(tcg_ctx); + tcg_gen_extrl_i64_i32(tcg_ctx, c->u.s32.b, cc_dst); + break; + + case CC_OP_LTGT0_64: + case CC_OP_NZ: + case CC_OP_FLOGR: + c->u.s64.a = cc_dst; + c->u.s64.b = tcg_const_i64(tcg_ctx, 0); + c->g1 = true; + break; + case CC_OP_LTGT_64: + case CC_OP_LTUGTU_64: + case CC_OP_SUBU_64: + c->u.s64.a = cc_src; + c->u.s64.b = cc_dst; + c->g1 = c->g2 = true; + break; + + case CC_OP_TM_32: + case CC_OP_TM_64: + case CC_OP_ICM: + c->u.s64.a = tcg_temp_new_i64(tcg_ctx); + c->u.s64.b = tcg_const_i64(tcg_ctx, 0); + tcg_gen_and_i64(tcg_ctx, c->u.s64.a, cc_src, cc_dst); + break; + + case CC_OP_ADDU_32: + c->is_64 = false; + c->u.s32.a = tcg_temp_new_i32(tcg_ctx); + c->u.s32.b = tcg_temp_new_i32(tcg_ctx); + tcg_gen_extrl_i64_i32(tcg_ctx, c->u.s32.a, cc_vr); + if (cond == TCG_COND_EQ || cond == TCG_COND_NE) { + tcg_gen_movi_i32(tcg_ctx, c->u.s32.b, 0); + } else { + tcg_gen_extrl_i64_i32(tcg_ctx, c->u.s32.b, cc_src); + } + break; + + case CC_OP_ADDU_64: + c->u.s64.a = cc_vr; + c->g1 = true; + if (cond == TCG_COND_EQ || cond == TCG_COND_NE) { + c->u.s64.b = tcg_const_i64(tcg_ctx, 0); + } else { + c->u.s64.b = cc_src; + c->g2 = true; + } + break; + + case CC_OP_STATIC: + c->is_64 = false; + c->u.s32.a = cc_op; + c->g1 = true; + switch (mask) { + case 0x8 | 0x4 | 0x2: /* cc != 3 */ + cond = TCG_COND_NE; + c->u.s32.b = tcg_const_i32(tcg_ctx, 3); + break; + case 0x8 | 0x4 | 0x1: /* cc != 2 */ + cond = TCG_COND_NE; + c->u.s32.b = tcg_const_i32(tcg_ctx, 2); + break; + case 0x8 | 0x2 | 0x1: /* cc != 1 */ + cond = TCG_COND_NE; + c->u.s32.b = tcg_const_i32(tcg_ctx, 1); + break; + case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */ + cond = TCG_COND_EQ; + c->g1 = false; + c->u.s32.a = tcg_temp_new_i32(tcg_ctx); + c->u.s32.b = tcg_const_i32(tcg_ctx, 0); + tcg_gen_andi_i32(tcg_ctx, c->u.s32.a, cc_op, 1); + break; + case 0x8 | 0x4: /* cc < 2 */ + cond = TCG_COND_LTU; + c->u.s32.b = tcg_const_i32(tcg_ctx, 2); + break; + case 0x8: /* cc == 0 */ + cond = TCG_COND_EQ; + c->u.s32.b = tcg_const_i32(tcg_ctx, 0); + break; + case 0x4 | 0x2 | 0x1: /* cc != 0 */ + cond = TCG_COND_NE; + c->u.s32.b = tcg_const_i32(tcg_ctx, 0); + break; + case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */ + cond = TCG_COND_NE; + c->g1 = false; + c->u.s32.a = tcg_temp_new_i32(tcg_ctx); 
+ c->u.s32.b = tcg_const_i32(tcg_ctx, 0); + tcg_gen_andi_i32(tcg_ctx, c->u.s32.a, cc_op, 1); + break; + case 0x4: /* cc == 1 */ + cond = TCG_COND_EQ; + c->u.s32.b = tcg_const_i32(tcg_ctx, 1); + break; + case 0x2 | 0x1: /* cc > 1 */ + cond = TCG_COND_GTU; + c->u.s32.b = tcg_const_i32(tcg_ctx, 1); + break; + case 0x2: /* cc == 2 */ + cond = TCG_COND_EQ; + c->u.s32.b = tcg_const_i32(tcg_ctx, 2); + break; + case 0x1: /* cc == 3 */ + cond = TCG_COND_EQ; + c->u.s32.b = tcg_const_i32(tcg_ctx, 3); + break; + default: + /* CC is masked by something else: (8 >> cc) & mask. */ + cond = TCG_COND_NE; + c->g1 = false; + c->u.s32.a = tcg_const_i32(tcg_ctx, 8); + c->u.s32.b = tcg_const_i32(tcg_ctx, 0); + tcg_gen_shr_i32(tcg_ctx, c->u.s32.a, c->u.s32.a, cc_op); + tcg_gen_andi_i32(tcg_ctx, c->u.s32.a, c->u.s32.a, mask); + break; + } + break; + + default: + abort(); + } + c->cond = cond; +} + +static void free_compare(TCGContext *tcg_ctx, DisasCompare *c) +{ + if (!c->g1) { + if (c->is_64) { + tcg_temp_free_i64(tcg_ctx, c->u.s64.a); + } else { + tcg_temp_free_i32(tcg_ctx, c->u.s32.a); + } + } + if (!c->g2) { + if (c->is_64) { + tcg_temp_free_i64(tcg_ctx, c->u.s64.b); + } else { + tcg_temp_free_i32(tcg_ctx, c->u.s32.b); + } + } +} + +/* ====================================================================== */ +/* Define the insn format enumeration. */ +#define F0(N) FMT_##N, +#define F1(N, X1) F0(N) +#define F2(N, X1, X2) F0(N) +#define F3(N, X1, X2, X3) F0(N) +#define F4(N, X1, X2, X3, X4) F0(N) +#define F5(N, X1, X2, X3, X4, X5) F0(N) +#define F6(N, X1, X2, X3, X4, X5, X6) F0(N) + +typedef enum { +#include "insn-format.def" +} DisasFormat; + +#undef F0 +#undef F1 +#undef F2 +#undef F3 +#undef F4 +#undef F5 +#undef F6 + +/* This is the way fields are to be accessed out of DisasFields. */ +#define have_field(S, F) have_field1((S), FLD_O_##F) +#define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F) + +static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c) +{ + return (s->fields.presentO >> c) & 1; +} + +static int get_field1(const DisasContext *s, enum DisasFieldIndexO o, + enum DisasFieldIndexC c) +{ + assert(have_field1(s, o)); + return s->fields.c[c]; +} + +/* Describe the layout of each field in each format. 
*/
+typedef struct DisasField {
+    unsigned int beg:8;
+    unsigned int size:8;
+    unsigned int type:2;
+    unsigned int indexC:6;
+    enum DisasFieldIndexO indexO:8;
+} DisasField;
+
+typedef struct DisasFormatInfo {
+    DisasField op[NUM_C_FIELD];
+} DisasFormatInfo;
+
+#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
+#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
+#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
+#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
+                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
+#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
+                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
+                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
+#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
+                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
+#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
+                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
+                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
+#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
+#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }
+
+#define F0(N)                     { { } },
+#define F1(N, X1)                 { { X1 } },
+#define F2(N, X1, X2)             { { X1, X2 } },
+#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
+#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
+#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
+#define F6(N, X1, X2, X3, X4, X5, X6) { { X1, X2, X3, X4, X5, X6 } },
+
+static const DisasFormatInfo format_info[] = {
+#include "insn-format.def"
+};
+
+#undef F0
+#undef F1
+#undef F2
+#undef F3
+#undef F4
+#undef F5
+#undef F6
+#undef R
+#undef M
+#undef V
+#undef BD
+#undef BXD
+#undef BDL
+#undef BXDL
+#undef I
+#undef L
+
+/* Generally, we'll extract operands into these structures, operate upon
+   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
+   of routines below for more details.  */
+typedef struct {
+    bool g_out, g_out2, g_in1, g_in2;
+    TCGv_i64 out, out2, in1, in2;
+    TCGv_i64 addr1;
+} DisasOps;
+
+/* Instructions can place constraints on their operands, raising specification
+   exceptions if they are violated.  To make this easy to automate, each "in1",
+   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
+   of the following, or 0.  To make this easy to document, we'll put the
+   SPEC_<name> defines next to <name>.  */
+
+#define SPEC_r1_even    1
+#define SPEC_r2_even    2
+#define SPEC_r3_even    4
+#define SPEC_r1_f128    8
+#define SPEC_r2_f128    16
+
+/* Return values from translate_one, indicating the state of the TB.  */
+
+/* We are not using a goto_tb (for whatever reason), but have updated
+   the PC (for whatever reason), so there's no need to do it again on
+   exiting the TB.  */
+#define DISAS_PC_UPDATED        DISAS_TARGET_0
+
+/* We have emitted one or more goto_tb.  No fixup required.  */
+#define DISAS_GOTO_TB           DISAS_TARGET_1
+
+/* We have updated the PC and CC values.  */
+#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2
+
+/* We are exiting the TB, but have neither emitted a goto_tb, nor
+   updated the PC for the next instruction to be executed.  */
+#define DISAS_PC_STALE          DISAS_TARGET_3
+
+/* We are exiting the TB to the main loop. 
*/ +#define DISAS_PC_STALE_NOCHAIN DISAS_TARGET_4 + +#define DISAS_UNICORN_HALT DISAS_TARGET_11 + +/* Instruction flags */ +#define IF_AFP1 0x0001 /* r1 is a fp reg for HFP/FPS instructions */ +#define IF_AFP2 0x0002 /* r2 is a fp reg for HFP/FPS instructions */ +#define IF_AFP3 0x0004 /* r3 is a fp reg for HFP/FPS instructions */ +#define IF_BFP 0x0008 /* binary floating point instruction */ +#define IF_DFP 0x0010 /* decimal floating point instruction */ +#define IF_PRIV 0x0020 /* privileged instruction */ +#define IF_VEC 0x0040 /* vector instruction */ + +struct DisasInsn { + unsigned opc:16; + unsigned flags:16; + DisasFormat fmt:8; + unsigned fac:8; + unsigned spec:8; + + const char *name; + + /* Pre-process arguments before HELP_OP. */ + void (*help_in1)(DisasContext *, DisasOps *); + void (*help_in2)(DisasContext *, DisasOps *); + void (*help_prep)(DisasContext *, DisasOps *); + + /* + * Post-process output after HELP_OP. + * Note that these are not called if HELP_OP returns DISAS_NORETURN. + */ + void (*help_wout)(DisasContext *, DisasOps *); + void (*help_cout)(DisasContext *, DisasOps *); + + /* Implement the operation itself. */ + DisasJumpType (*help_op)(DisasContext *, DisasOps *); + + uint64_t data; +}; + +/* ====================================================================== */ +/* Miscellaneous helpers, used by several operations. */ + +static void help_l2_shift(DisasContext *s, DisasOps *o, int mask) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + int b2 = get_field(s, b2); + int d2 = get_field(s, d2); + + if (b2 == 0) { + o->in2 = tcg_const_i64(tcg_ctx, d2 & mask); + } else { + o->in2 = get_address(s, 0, b2, d2); + tcg_gen_andi_i64(tcg_ctx, o->in2, o->in2, mask); + } +} + +static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + if (dest == s->pc_tmp) { + per_branch(s, true); + return DISAS_NEXT; + } + if (use_goto_tb(s, dest)) { + update_cc_op(s); + per_breaking_event(s); + tcg_gen_goto_tb(tcg_ctx, 0); + tcg_gen_movi_i64(tcg_ctx, psw_addr, dest); + tcg_gen_exit_tb(tcg_ctx, s->base.tb, 0); + return DISAS_GOTO_TB; + } else { + tcg_gen_movi_i64(tcg_ctx, psw_addr, dest); + per_branch(s, false); + return DISAS_PC_UPDATED; + } +} + +static DisasJumpType help_branch(DisasContext *s, DisasCompare *c, + bool is_imm, int imm, TCGv_i64 cdest) +{ + DisasJumpType ret; + uint64_t dest = s->base.pc_next + 2 * imm; + TCGLabel *lab; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + /* Take care of the special cases first. */ + if (c->cond == TCG_COND_NEVER) { + ret = DISAS_NEXT; + goto egress; + } + if (is_imm) { + if (dest == s->pc_tmp) { + /* Branch to next. */ + per_branch(s, true); + ret = DISAS_NEXT; + goto egress; + } + if (c->cond == TCG_COND_ALWAYS) { + ret = help_goto_direct(s, dest); + goto egress; + } + } else { + if (!cdest) { + /* E.g. bcr %r0 -> no branch. */ + ret = DISAS_NEXT; + goto egress; + } + if (c->cond == TCG_COND_ALWAYS) { + tcg_gen_mov_i64(tcg_ctx, psw_addr, cdest); + per_branch(s, false); + ret = DISAS_PC_UPDATED; + goto egress; + } + } + + if (use_goto_tb(s, s->pc_tmp)) { + if (is_imm && use_goto_tb(s, dest)) { + /* Both exits can use goto_tb. */ + update_cc_op(s); + + lab = gen_new_label(tcg_ctx); + if (c->is_64) { + tcg_gen_brcond_i64(tcg_ctx, c->cond, c->u.s64.a, c->u.s64.b, lab); + } else { + tcg_gen_brcond_i32(tcg_ctx, c->cond, c->u.s32.a, c->u.s32.b, lab); + } + + /* Branch not taken. 
*/ + tcg_gen_goto_tb(tcg_ctx, 0); + tcg_gen_movi_i64(tcg_ctx, psw_addr, s->pc_tmp); + tcg_gen_exit_tb(tcg_ctx, s->base.tb, 0); + + /* Branch taken. */ + gen_set_label(tcg_ctx, lab); + per_breaking_event(s); + tcg_gen_goto_tb(tcg_ctx, 1); + tcg_gen_movi_i64(tcg_ctx, psw_addr, dest); + tcg_gen_exit_tb(tcg_ctx, s->base.tb, 1); + + ret = DISAS_GOTO_TB; + } else { + /* Fallthru can use goto_tb, but taken branch cannot. */ + /* Store taken branch destination before the brcond. This + avoids having to allocate a new local temp to hold it. + We'll overwrite this in the not taken case anyway. */ + if (!is_imm) { + tcg_gen_mov_i64(tcg_ctx, psw_addr, cdest); + } + + lab = gen_new_label(tcg_ctx); + if (c->is_64) { + tcg_gen_brcond_i64(tcg_ctx, c->cond, c->u.s64.a, c->u.s64.b, lab); + } else { + tcg_gen_brcond_i32(tcg_ctx, c->cond, c->u.s32.a, c->u.s32.b, lab); + } + + /* Branch not taken. */ + update_cc_op(s); + tcg_gen_goto_tb(tcg_ctx, 0); + tcg_gen_movi_i64(tcg_ctx, psw_addr, s->pc_tmp); + tcg_gen_exit_tb(tcg_ctx, s->base.tb, 0); + + gen_set_label(tcg_ctx, lab); + if (is_imm) { + tcg_gen_movi_i64(tcg_ctx, psw_addr, dest); + } + per_breaking_event(s); + ret = DISAS_PC_UPDATED; + } + } else { + /* Fallthru cannot use goto_tb. This by itself is vanishingly rare. + Most commonly we're single-stepping or some other condition that + disables all use of goto_tb. Just update the PC and exit. */ + + TCGv_i64 next = tcg_const_i64(tcg_ctx, s->pc_tmp); + if (is_imm) { + cdest = tcg_const_i64(tcg_ctx, dest); + } + + if (c->is_64) { + tcg_gen_movcond_i64(tcg_ctx, c->cond, psw_addr, c->u.s64.a, c->u.s64.b, + cdest, next); + per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b); + } else { + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 z = tcg_const_i64(tcg_ctx, 0); + tcg_gen_setcond_i32(tcg_ctx, c->cond, t0, c->u.s32.a, c->u.s32.b); + tcg_gen_extu_i32_i64(tcg_ctx, t1, t0); + tcg_temp_free_i32(tcg_ctx, t0); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_NE, psw_addr, t1, z, cdest, next); + per_branch_cond(s, TCG_COND_NE, t1, z); + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, z); + } + + if (is_imm) { + tcg_temp_free_i64(tcg_ctx, cdest); + } + tcg_temp_free_i64(tcg_ctx, next); + + ret = DISAS_PC_UPDATED; + } + + egress: + free_compare(tcg_ctx, c); + return ret; +} + +/* ====================================================================== */ +/* The operations. These perform the bulk of the work for any insn, + usually after the operands have been loaded and output initialized. 
*/ + +static DisasJumpType op_abs(DisasContext *s, DisasOps *o) +{ + tcg_gen_abs_i64(s->uc->tcg_ctx, o->out, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_absf32(DisasContext *s, DisasOps *o) +{ + tcg_gen_andi_i64(s->uc->tcg_ctx, o->out, o->in2, 0x7fffffffull); + return DISAS_NEXT; +} + +static DisasJumpType op_absf64(DisasContext *s, DisasOps *o) +{ + tcg_gen_andi_i64(s->uc->tcg_ctx, o->out, o->in2, 0x7fffffffffffffffull); + return DISAS_NEXT; +} + +static DisasJumpType op_absf128(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + tcg_gen_andi_i64(tcg_ctx, o->out, o->in1, 0x7fffffffffffffffull); + tcg_gen_mov_i64(tcg_ctx, o->out2, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_add(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + tcg_gen_add_i64(tcg_ctx, o->out, o->in1, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_addc(DisasContext *s, DisasOps *o) +{ + DisasCompare cmp; + TCGv_i64 carry; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + tcg_gen_add_i64(tcg_ctx, o->out, o->in1, o->in2); + + /* The carry flag is the msb of CC, therefore the branch mask that would + create that comparison is 3. Feeding the generated comparison to + setcond produces the carry flag that we desire. */ + disas_jcc(s, &cmp, 3); + carry = tcg_temp_new_i64(tcg_ctx); + if (cmp.is_64) { + tcg_gen_setcond_i64(tcg_ctx, cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b); + } else { + TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); + tcg_gen_setcond_i32(tcg_ctx, cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b); + tcg_gen_extu_i32_i64(tcg_ctx, carry, t); + tcg_temp_free_i32(tcg_ctx, t); + } + free_compare(tcg_ctx, &cmp); + + tcg_gen_add_i64(tcg_ctx, o->out, o->out, carry); + tcg_temp_free_i64(tcg_ctx, carry); + return DISAS_NEXT; +} + +static DisasJumpType op_asi(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in1 = tcg_temp_new_i64(tcg_ctx); + + if (!s390_has_feat(s->uc, S390_FEAT_STFLE_45)) { + tcg_gen_qemu_ld_tl(tcg_ctx, o->in1, o->addr1, get_mem_index(s), s->insn->data); + } else { + /* Perform the atomic addition in memory. */ + tcg_gen_atomic_fetch_add_i64(tcg_ctx, o->in1, o->addr1, o->in2, get_mem_index(s), + s->insn->data); + } + + /* Recompute also for atomic case: needed for setting CC. 
*/ + tcg_gen_add_i64(tcg_ctx, o->out, o->in1, o->in2); + + if (!s390_has_feat(s->uc, S390_FEAT_STFLE_45)) { + tcg_gen_qemu_st_tl(tcg_ctx, o->out, o->addr1, get_mem_index(s), s->insn->data); + } + return DISAS_NEXT; +} + +static DisasJumpType op_aeb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_aeb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_adb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_adb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_axb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_axb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->out, o->out2, o->in1, o->in2); + return_low128(tcg_ctx, o->out2); + return DISAS_NEXT; +} + +static DisasJumpType op_and(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_and_i64(tcg_ctx, o->out, o->in1, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_andi(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int shift = s->insn->data & 0xff; + int size = s->insn->data >> 8; + uint64_t mask = ((1ull << size) - 1) << shift; + + assert(!o->g_in2); + tcg_gen_shli_i64(tcg_ctx, o->in2, o->in2, shift); + tcg_gen_ori_i64(tcg_ctx, o->in2, o->in2, ~mask); + tcg_gen_and_i64(tcg_ctx, o->out, o->in1, o->in2); + + /* Produce the CC from only the bits manipulated. */ + tcg_gen_andi_i64(tcg_ctx, cc_dst, o->out, mask); + set_cc_nz_u64(s, cc_dst); + return DISAS_NEXT; +} + +static DisasJumpType op_ni(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in1 = tcg_temp_new_i64(tcg_ctx); + + if (!s390_has_feat(s->uc, S390_FEAT_INTERLOCKED_ACCESS_2)) { + tcg_gen_qemu_ld_tl(tcg_ctx, o->in1, o->addr1, get_mem_index(s), s->insn->data); + } else { + /* Perform the atomic operation in memory. */ + tcg_gen_atomic_fetch_and_i64(tcg_ctx, o->in1, o->addr1, o->in2, get_mem_index(s), + s->insn->data); + } + + /* Recompute also for atomic case: needed for setting CC. 
*/ + tcg_gen_and_i64(tcg_ctx, o->out, o->in1, o->in2); + + if (!s390_has_feat(s->uc, S390_FEAT_INTERLOCKED_ACCESS_2)) { + tcg_gen_qemu_st_tl(tcg_ctx, o->out, o->addr1, get_mem_index(s), s->insn->data); + } + return DISAS_NEXT; +} + +static DisasJumpType op_bas(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + pc_to_link_info(tcg_ctx, o->out, s, s->pc_tmp); + if (o->in2) { + tcg_gen_mov_i64(tcg_ctx, psw_addr, o->in2); + per_branch(s, false); + return DISAS_PC_UPDATED; + } else { + return DISAS_NEXT; + } +} + +static void save_link_info(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 t; + + if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) { + pc_to_link_info(tcg_ctx, o->out, s, s->pc_tmp); + return; + } + gen_op_calc_cc(s); + tcg_gen_andi_i64(tcg_ctx, o->out, o->out, 0xffffffff00000000ull); + tcg_gen_ori_i64(tcg_ctx, o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp); + t = tcg_temp_new_i64(tcg_ctx); + tcg_gen_shri_i64(tcg_ctx, t, psw_mask, 16); + tcg_gen_andi_i64(tcg_ctx, t, t, 0x0f000000); + tcg_gen_or_i64(tcg_ctx, o->out, o->out, t); + tcg_gen_extu_i32_i64(tcg_ctx, t, cc_op); + tcg_gen_shli_i64(tcg_ctx, t, t, 28); + tcg_gen_or_i64(tcg_ctx, o->out, o->out, t); + tcg_temp_free_i64(tcg_ctx, t); +} + +static DisasJumpType op_bal(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + save_link_info(s, o); + if (o->in2) { + tcg_gen_mov_i64(tcg_ctx, psw_addr, o->in2); + per_branch(s, false); + return DISAS_PC_UPDATED; + } else { + return DISAS_NEXT; + } +} + +static DisasJumpType op_basi(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + pc_to_link_info(tcg_ctx, o->out, s, s->pc_tmp); + return help_goto_direct(s, s->base.pc_next + 2 * get_field(s, i2)); +} + +static DisasJumpType op_bc(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int m1 = get_field(s, m1); + bool is_imm = have_field(s, i2); + int imm = is_imm ? get_field(s, i2) : 0; + DisasCompare c; + + /* BCR with R2 = 0 causes no branching */ + if (have_field(s, r2) && get_field(s, r2) == 0) { + if (m1 == 14) { + /* Perform serialization */ + /* FIXME: check for fast-BCR-serialization facility */ + tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_SC); + } + if (m1 == 15) { + /* Perform serialization */ + /* FIXME: perform checkpoint-synchronisation */ + tcg_gen_mb(tcg_ctx, TCG_MO_ALL | TCG_BAR_SC); + } + return DISAS_NEXT; + } + + disas_jcc(s, &c, m1); + return help_branch(s, &c, is_imm, imm, o->in2); +} + +static DisasJumpType op_bct32(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int r1 = get_field(s, r1); + bool is_imm = have_field(s, i2); + int imm = is_imm ? 
get_field(s, i2) : 0; + DisasCompare c; + TCGv_i64 t; + + c.cond = TCG_COND_NE; + c.is_64 = false; + c.g1 = false; + c.g2 = false; + + t = tcg_temp_new_i64(tcg_ctx); + tcg_gen_subi_i64(tcg_ctx, t, regs[r1], 1); + store_reg32_i64(tcg_ctx, r1, t); + c.u.s32.a = tcg_temp_new_i32(tcg_ctx); + c.u.s32.b = tcg_const_i32(tcg_ctx, 0); + tcg_gen_extrl_i64_i32(tcg_ctx, c.u.s32.a, t); + tcg_temp_free_i64(tcg_ctx, t); + + return help_branch(s, &c, is_imm, imm, o->in2); +} + +static DisasJumpType op_bcth(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int r1 = get_field(s, r1); + int imm = get_field(s, i2); + DisasCompare c; + TCGv_i64 t; + + c.cond = TCG_COND_NE; + c.is_64 = false; + c.g1 = false; + c.g2 = false; + + t = tcg_temp_new_i64(tcg_ctx); + tcg_gen_shri_i64(tcg_ctx, t, regs[r1], 32); + tcg_gen_subi_i64(tcg_ctx, t, t, 1); + store_reg32h_i64(tcg_ctx, r1, t); + c.u.s32.a = tcg_temp_new_i32(tcg_ctx); + c.u.s32.b = tcg_const_i32(tcg_ctx, 0); + tcg_gen_extrl_i64_i32(tcg_ctx, c.u.s32.a, t); + tcg_temp_free_i64(tcg_ctx, t); + + return help_branch(s, &c, 1, imm, o->in2); +} + +static DisasJumpType op_bct64(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int r1 = get_field(s, r1); + bool is_imm = have_field(s, i2); + int imm = is_imm ? get_field(s, i2) : 0; + DisasCompare c; + + c.cond = TCG_COND_NE; + c.is_64 = true; + c.g1 = true; + c.g2 = false; + + tcg_gen_subi_i64(tcg_ctx, regs[r1], regs[r1], 1); + c.u.s64.a = regs[r1]; + c.u.s64.b = tcg_const_i64(tcg_ctx, 0); + + return help_branch(s, &c, is_imm, imm, o->in2); +} + +static DisasJumpType op_bx32(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int r1 = get_field(s, r1); + int r3 = get_field(s, r3); + bool is_imm = have_field(s, i2); + int imm = is_imm ? get_field(s, i2) : 0; + DisasCompare c; + TCGv_i64 t; + + c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT); + c.is_64 = false; + c.g1 = false; + c.g2 = false; + + t = tcg_temp_new_i64(tcg_ctx); + tcg_gen_add_i64(tcg_ctx, t, regs[r1], regs[r3]); + c.u.s32.a = tcg_temp_new_i32(tcg_ctx); + c.u.s32.b = tcg_temp_new_i32(tcg_ctx); + tcg_gen_extrl_i64_i32(tcg_ctx, c.u.s32.a, t); + tcg_gen_extrl_i64_i32(tcg_ctx, c.u.s32.b, regs[r3 | 1]); + store_reg32_i64(tcg_ctx, r1, t); + tcg_temp_free_i64(tcg_ctx, t); + + return help_branch(s, &c, is_imm, imm, o->in2); +} + +static DisasJumpType op_bx64(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int r1 = get_field(s, r1); + int r3 = get_field(s, r3); + bool is_imm = have_field(s, i2); + int imm = is_imm ? get_field(s, i2) : 0; + DisasCompare c; + + c.cond = (s->insn->data ? 
TCG_COND_LE : TCG_COND_GT); + c.is_64 = true; + + if (r1 == (r3 | 1)) { + c.u.s64.b = load_reg(tcg_ctx, r3 | 1); + c.g2 = false; + } else { + c.u.s64.b = regs[r3 | 1]; + c.g2 = true; + } + + tcg_gen_add_i64(tcg_ctx, regs[r1], regs[r1], regs[r3]); + c.u.s64.a = regs[r1]; + c.g1 = true; + + return help_branch(s, &c, is_imm, imm, o->in2); +} + +static DisasJumpType op_cj(DisasContext *s, DisasOps *o) +{ + int imm, m3 = get_field(s, m3); + bool is_imm; + DisasCompare c; + + c.cond = ltgt_cond[m3]; + if (s->insn->data) { + c.cond = tcg_unsigned_cond(c.cond); + } + c.is_64 = c.g1 = c.g2 = true; + c.u.s64.a = o->in1; + c.u.s64.b = o->in2; + + is_imm = have_field(s, i4); + if (is_imm) { + imm = get_field(s, i4); + } else { + imm = 0; + o->out = get_address(s, 0, get_field(s, b4), + get_field(s, d4)); + } + + return help_branch(s, &c, is_imm, imm, o->out); +} + +static DisasJumpType op_ceb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_ceb(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->in1, o->in2); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_cdb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_cdb(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->in1, o->in2); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_cxb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_cxb(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->out, o->out2, o->in1, o->in2); + set_cc_static(s); + return DISAS_NEXT; +} + +static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe, + bool m4_with_fpe) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const bool fpe = s390_has_feat(s->uc, S390_FEAT_FLOATING_POINT_EXT); + uint8_t m3 = get_field(s, m3); + uint8_t m4 = get_field(s, m4); + + /* m3 field was introduced with FPE */ + if (!fpe && m3_with_fpe) { + m3 = 0; + } + /* m4 field was introduced with FPE */ + if (!fpe && m4_with_fpe) { + m4 = 0; + } + + /* Check for valid rounding modes. Mode 3 was introduced later. 
*/ + if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) { + gen_program_exception(s, PGM_SPECIFICATION); + return NULL; + } + + return tcg_const_i32(tcg_ctx, deposit32(m3, 4, 4, m4)); +} + +static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 m34 = fpinst_extract_m34(s, false, true); + + if (!m34) { + return DISAS_NORETURN; + } + gen_helper_cfeb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2, m34); + tcg_temp_free_i32(tcg_ctx, m34); + gen_set_cc_nz_f32(s, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 m34 = fpinst_extract_m34(s, false, true); + + if (!m34) { + return DISAS_NORETURN; + } + gen_helper_cfdb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2, m34); + tcg_temp_free_i32(tcg_ctx, m34); + gen_set_cc_nz_f64(s, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 m34 = fpinst_extract_m34(s, false, true); + + if (!m34) { + return DISAS_NORETURN; + } + gen_helper_cfxb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2, m34); + tcg_temp_free_i32(tcg_ctx, m34); + gen_set_cc_nz_f128(s, o->in1, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 m34 = fpinst_extract_m34(s, false, true); + + if (!m34) { + return DISAS_NORETURN; + } + gen_helper_cgeb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2, m34); + tcg_temp_free_i32(tcg_ctx, m34); + gen_set_cc_nz_f32(s, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 m34 = fpinst_extract_m34(s, false, true); + + if (!m34) { + return DISAS_NORETURN; + } + gen_helper_cgdb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2, m34); + tcg_temp_free_i32(tcg_ctx, m34); + gen_set_cc_nz_f64(s, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 m34 = fpinst_extract_m34(s, false, true); + + if (!m34) { + return DISAS_NORETURN; + } + gen_helper_cgxb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2, m34); + tcg_temp_free_i32(tcg_ctx, m34); + gen_set_cc_nz_f128(s, o->in1, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 m34 = fpinst_extract_m34(s, false, false); + + if (!m34) { + return DISAS_NORETURN; + } + gen_helper_clfeb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2, m34); + tcg_temp_free_i32(tcg_ctx, m34); + gen_set_cc_nz_f32(s, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 m34 = fpinst_extract_m34(s, false, false); + + if (!m34) { + return DISAS_NORETURN; + } + gen_helper_clfdb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2, m34); + tcg_temp_free_i32(tcg_ctx, m34); + gen_set_cc_nz_f64(s, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 m34 = fpinst_extract_m34(s, false, false); + + if (!m34) { + return DISAS_NORETURN; + } + gen_helper_clfxb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2, m34); + tcg_temp_free_i32(tcg_ctx, m34); + gen_set_cc_nz_f128(s, o->in1, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o) +{ 
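+    /* Editor's note: op_clgeb and the other op_cl*b helpers below are
+       the CONVERT TO LOGICAL (unsigned result) counterparts of the
+       signed CONVERT TO FIXED helpers (op_cfeb etc.) above.  */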
+ TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 m34 = fpinst_extract_m34(s, false, false); + + if (!m34) { + return DISAS_NORETURN; + } + gen_helper_clgeb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2, m34); + tcg_temp_free_i32(tcg_ctx, m34); + gen_set_cc_nz_f32(s, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 m34 = fpinst_extract_m34(s, false, false); + + if (!m34) { + return DISAS_NORETURN; + } + gen_helper_clgdb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2, m34); + tcg_temp_free_i32(tcg_ctx, m34); + gen_set_cc_nz_f64(s, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 m34 = fpinst_extract_m34(s, false, false); + + if (!m34) { + return DISAS_NORETURN; + } + gen_helper_clgxb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2, m34); + tcg_temp_free_i32(tcg_ctx, m34); + gen_set_cc_nz_f128(s, o->in1, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_cegb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 m34 = fpinst_extract_m34(s, true, true); + + if (!m34) { + return DISAS_NORETURN; + } + gen_helper_cegb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2, m34); + tcg_temp_free_i32(tcg_ctx, m34); + return DISAS_NEXT; +} + +static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 m34 = fpinst_extract_m34(s, true, true); + + if (!m34) { + return DISAS_NORETURN; + } + gen_helper_cdgb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2, m34); + tcg_temp_free_i32(tcg_ctx, m34); + return DISAS_NEXT; +} + +static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 m34 = fpinst_extract_m34(s, true, true); + + if (!m34) { + return DISAS_NORETURN; + } + gen_helper_cxgb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2, m34); + tcg_temp_free_i32(tcg_ctx, m34); + return_low128(tcg_ctx, o->out2); + return DISAS_NEXT; +} + +static DisasJumpType op_celgb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 m34 = fpinst_extract_m34(s, false, false); + + if (!m34) { + return DISAS_NORETURN; + } + gen_helper_celgb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2, m34); + tcg_temp_free_i32(tcg_ctx, m34); + return DISAS_NEXT; +} + +static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 m34 = fpinst_extract_m34(s, false, false); + + if (!m34) { + return DISAS_NORETURN; + } + gen_helper_cdlgb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2, m34); + tcg_temp_free_i32(tcg_ctx, m34); + return DISAS_NEXT; +} + +static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 m34 = fpinst_extract_m34(s, false, false); + + if (!m34) { + return DISAS_NORETURN; + } + gen_helper_cxlgb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2, m34); + tcg_temp_free_i32(tcg_ctx, m34); + return_low128(tcg_ctx, o->out2); + return DISAS_NEXT; +} + +static DisasJumpType op_cksm(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int r2 = get_field(s, r2); + TCGv_i64 len = tcg_temp_new_i64(tcg_ctx); + + gen_helper_cksm(tcg_ctx, len, tcg_ctx->cpu_env, o->in1, o->in2, regs[r2 + 1]); + set_cc_static(s); + return_low128(tcg_ctx, o->out); + + tcg_gen_add_i64(tcg_ctx, regs[r2], regs[r2], len); + tcg_gen_sub_i64(tcg_ctx, regs[r2 + 1], regs[r2 + 1], len); + tcg_temp_free_i64(tcg_ctx, 
len); + + return DISAS_NEXT; +} + +static DisasJumpType op_clc(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int l = get_field(s, l1); + TCGv_i32 vl; + + switch (l + 1) { + case 1: + tcg_gen_qemu_ld8u(tcg_ctx, cc_src, o->addr1, get_mem_index(s)); + tcg_gen_qemu_ld8u(tcg_ctx, cc_dst, o->in2, get_mem_index(s)); + break; + case 2: + tcg_gen_qemu_ld16u(tcg_ctx, cc_src, o->addr1, get_mem_index(s)); + tcg_gen_qemu_ld16u(tcg_ctx, cc_dst, o->in2, get_mem_index(s)); + break; + case 4: + tcg_gen_qemu_ld32u(tcg_ctx, cc_src, o->addr1, get_mem_index(s)); + tcg_gen_qemu_ld32u(tcg_ctx, cc_dst, o->in2, get_mem_index(s)); + break; + case 8: + tcg_gen_qemu_ld64(tcg_ctx, cc_src, o->addr1, get_mem_index(s)); + tcg_gen_qemu_ld64(tcg_ctx, cc_dst, o->in2, get_mem_index(s)); + break; + default: + vl = tcg_const_i32(tcg_ctx, l); + gen_helper_clc(tcg_ctx, cc_op, tcg_ctx->cpu_env, vl, o->addr1, o->in2); + tcg_temp_free_i32(tcg_ctx, vl); + set_cc_static(s); + return DISAS_NEXT; + } + gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst); + return DISAS_NEXT; +} + +static DisasJumpType op_clcl(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int r1 = get_field(s, r1); + int r2 = get_field(s, r2); + TCGv_i32 t1, t2; + + /* r1 and r2 must be even. */ + if (r1 & 1 || r2 & 1) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + t1 = tcg_const_i32(tcg_ctx, r1); + t2 = tcg_const_i32(tcg_ctx, r2); + gen_helper_clcl(tcg_ctx, cc_op, tcg_ctx->cpu_env, t1, t2); + tcg_temp_free_i32(tcg_ctx, t1); + tcg_temp_free_i32(tcg_ctx, t2); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_clcle(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int r1 = get_field(s, r1); + int r3 = get_field(s, r3); + TCGv_i32 t1, t3; + + /* r1 and r3 must be even. */ + if (r1 & 1 || r3 & 1) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + t1 = tcg_const_i32(tcg_ctx, r1); + t3 = tcg_const_i32(tcg_ctx, r3); + gen_helper_clcle(tcg_ctx, cc_op, tcg_ctx->cpu_env, t1, o->in2, t3); + tcg_temp_free_i32(tcg_ctx, t1); + tcg_temp_free_i32(tcg_ctx, t3); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_clclu(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int r1 = get_field(s, r1); + int r3 = get_field(s, r3); + TCGv_i32 t1, t3; + + /* r1 and r3 must be even. 
*/ + if (r1 & 1 || r3 & 1) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + t1 = tcg_const_i32(tcg_ctx, r1); + t3 = tcg_const_i32(tcg_ctx, r3); + gen_helper_clclu(tcg_ctx, cc_op, tcg_ctx->cpu_env, t1, o->in2, t3); + tcg_temp_free_i32(tcg_ctx, t1); + tcg_temp_free_i32(tcg_ctx, t3); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_clm(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 m3 = tcg_const_i32(tcg_ctx, get_field(s, m3)); + TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_extrl_i64_i32(tcg_ctx, t1, o->in1); + gen_helper_clm(tcg_ctx, cc_op, tcg_ctx->cpu_env, t1, m3, o->in2); + set_cc_static(s); + tcg_temp_free_i32(tcg_ctx, t1); + tcg_temp_free_i32(tcg_ctx, m3); + return DISAS_NEXT; +} + +static DisasJumpType op_clst(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_clst(tcg_ctx, o->in1, tcg_ctx->cpu_env, regs[0], o->in1, o->in2); + set_cc_static(s); + return_low128(tcg_ctx, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_cps(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); + tcg_gen_andi_i64(tcg_ctx, t, o->in1, 0x8000000000000000ull); + tcg_gen_andi_i64(tcg_ctx, o->out, o->in2, 0x7fffffffffffffffull); + tcg_gen_or_i64(tcg_ctx, o->out, o->out, t); + tcg_temp_free_i64(tcg_ctx, t); + return DISAS_NEXT; +} + +static DisasJumpType op_cs(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int d2 = get_field(s, d2); + int b2 = get_field(s, b2); + TCGv_i64 addr, cc; + + /* Note that in1 = R3 (new value) and + in2 = (zero-extended) R1 (expected value). */ + + addr = get_address(s, 0, b2, d2); + tcg_gen_atomic_cmpxchg_i64(tcg_ctx, o->out, addr, o->in2, o->in1, + get_mem_index(s), s->insn->data | MO_ALIGN); + tcg_temp_free_i64(tcg_ctx, addr); + + /* Are the memory and expected values (un)equal? Note that this setcond + produces the output CC value, thus the NE sense of the test. */ + cc = tcg_temp_new_i64(tcg_ctx); + tcg_gen_setcond_i64(tcg_ctx, TCG_COND_NE, cc, o->in2, o->out); + tcg_gen_extrl_i64_i32(tcg_ctx, cc_op, cc); + tcg_temp_free_i64(tcg_ctx, cc); + set_cc_static(s); + + return DISAS_NEXT; +} + +static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int r1 = get_field(s, r1); + int r3 = get_field(s, r3); + int d2 = get_field(s, d2); + int b2 = get_field(s, b2); + DisasJumpType ret = DISAS_NEXT; + TCGv_i64 addr; + TCGv_i32 t_r1, t_r3; + + /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. 
*/ + addr = get_address(s, 0, b2, d2); + t_r1 = tcg_const_i32(tcg_ctx, r1); + t_r3 = tcg_const_i32(tcg_ctx, r3); + if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) { + gen_helper_cdsg(tcg_ctx, tcg_ctx->cpu_env, addr, t_r1, t_r3); + } else if (HAVE_CMPXCHG128) { + gen_helper_cdsg_parallel(tcg_ctx, tcg_ctx->cpu_env, addr, t_r1, t_r3); + } else { + gen_helper_exit_atomic(tcg_ctx, tcg_ctx->cpu_env); + ret = DISAS_NORETURN; + } + tcg_temp_free_i64(tcg_ctx, addr); + tcg_temp_free_i32(tcg_ctx, t_r1); + tcg_temp_free_i32(tcg_ctx, t_r3); + + set_cc_static(s); + return ret; +} + +static DisasJumpType op_csst(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int r3 = get_field(s, r3); + TCGv_i32 t_r3 = tcg_const_i32(tcg_ctx, r3); + + if (tb_cflags(s->base.tb) & CF_PARALLEL) { + gen_helper_csst_parallel(tcg_ctx, cc_op, tcg_ctx->cpu_env, t_r3, o->addr1, o->in2); + } else { + gen_helper_csst(tcg_ctx, cc_op, tcg_ctx->cpu_env, t_r3, o->addr1, o->in2); + } + tcg_temp_free_i32(tcg_ctx, t_r3); + + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_csp(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + MemOp mop = s->insn->data; + TCGv_i64 addr, old, cc; + TCGLabel *lab = gen_new_label(tcg_ctx); + + /* Note that in1 = R1 (zero-extended expected value), + out = R1 (original reg), out2 = R1+1 (new value). */ + + addr = tcg_temp_new_i64(tcg_ctx); + old = tcg_temp_new_i64(tcg_ctx); + tcg_gen_andi_i64(tcg_ctx, addr, o->in2, -1ULL << (mop & MO_SIZE)); + tcg_gen_atomic_cmpxchg_i64(tcg_ctx, old, addr, o->in1, o->out2, + get_mem_index(s), mop | MO_ALIGN); + tcg_temp_free_i64(tcg_ctx, addr); + + /* Are the memory and expected values (un)equal? */ + cc = tcg_temp_new_i64(tcg_ctx); + tcg_gen_setcond_i64(tcg_ctx, TCG_COND_NE, cc, o->in1, old); + tcg_gen_extrl_i64_i32(tcg_ctx, cc_op, cc); + + /* Write back the output now, so that it happens before the + following branch, so that we don't need local temps. */ + if ((mop & MO_SIZE) == MO_32) { + tcg_gen_deposit_i64(tcg_ctx, o->out, o->out, old, 0, 32); + } else { + tcg_gen_mov_i64(tcg_ctx, o->out, old); + } + tcg_temp_free_i64(tcg_ctx, old); + + /* If the comparison was equal, and the LSB of R2 was set, + then we need to flush the TLB (for all cpus). */ + tcg_gen_xori_i64(tcg_ctx, cc, cc, 1); + tcg_gen_and_i64(tcg_ctx, cc, cc, o->in2); + tcg_gen_brcondi_i64(tcg_ctx, TCG_COND_EQ, cc, 0, lab); + tcg_temp_free_i64(tcg_ctx, cc); + + gen_helper_purge(tcg_ctx, tcg_ctx->cpu_env); + gen_set_label(tcg_ctx, lab); + + return DISAS_NEXT; +} + +static DisasJumpType op_cvd(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_extrl_i64_i32(tcg_ctx, t2, o->in1); + gen_helper_cvd(tcg_ctx, t1, t2); + tcg_temp_free_i32(tcg_ctx, t2); + tcg_gen_qemu_st64(tcg_ctx, t1, o->in2, get_mem_index(s)); + tcg_temp_free_i64(tcg_ctx, t1); + return DISAS_NEXT; +} + +static DisasJumpType op_ct(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int m3 = get_field(s, m3); + TCGLabel *lab = gen_new_label(tcg_ctx); + TCGCond c; + + c = tcg_invert_cond(ltgt_cond[m3]); + if (s->insn->data) { + c = tcg_unsigned_cond(c); + } + tcg_gen_brcond_i64(tcg_ctx, c, o->in1, o->in2, lab); + + /* Trap. 
*/ + gen_trap(s); + + gen_set_label(tcg_ctx, lab); + return DISAS_NEXT; +} + +static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int m3 = get_field(s, m3); + int r1 = get_field(s, r1); + int r2 = get_field(s, r2); + TCGv_i32 tr1, tr2, chk; + + /* R1 and R2 must both be even. */ + if ((r1 | r2) & 1) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + if (!s390_has_feat(s->uc, S390_FEAT_ETF3_ENH)) { + m3 = 0; + } + + tr1 = tcg_const_i32(tcg_ctx, r1); + tr2 = tcg_const_i32(tcg_ctx, r2); + chk = tcg_const_i32(tcg_ctx, m3); + + switch (s->insn->data) { + case 12: + gen_helper_cu12(tcg_ctx, cc_op, tcg_ctx->cpu_env, tr1, tr2, chk); + break; + case 14: + gen_helper_cu14(tcg_ctx, cc_op, tcg_ctx->cpu_env, tr1, tr2, chk); + break; + case 21: + gen_helper_cu21(tcg_ctx, cc_op, tcg_ctx->cpu_env, tr1, tr2, chk); + break; + case 24: + gen_helper_cu24(tcg_ctx, cc_op, tcg_ctx->cpu_env, tr1, tr2, chk); + break; + case 41: + gen_helper_cu41(tcg_ctx, cc_op, tcg_ctx->cpu_env, tr1, tr2, chk); + break; + case 42: + gen_helper_cu42(tcg_ctx, cc_op, tcg_ctx->cpu_env, tr1, tr2, chk); + break; + default: + //g_assert_not_reached(); + break; + } + + tcg_temp_free_i32(tcg_ctx, tr1); + tcg_temp_free_i32(tcg_ctx, tr2); + tcg_temp_free_i32(tcg_ctx, chk); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_diag(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 r1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); + TCGv_i32 r3 = tcg_const_i32(tcg_ctx, get_field(s, r3)); + TCGv_i32 func_code = tcg_const_i32(tcg_ctx, get_field(s, i2)); + + gen_helper_diag(tcg_ctx, tcg_ctx->cpu_env, r1, r3, func_code); + + tcg_temp_free_i32(tcg_ctx, func_code); + tcg_temp_free_i32(tcg_ctx, r3); + tcg_temp_free_i32(tcg_ctx, r1); + return DISAS_NEXT; +} + +static DisasJumpType op_divs32(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_divs32(tcg_ctx, o->out2, tcg_ctx->cpu_env, o->in1, o->in2); + return_low128(tcg_ctx, o->out); + return DISAS_NEXT; +} + +static DisasJumpType op_divu32(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_divu32(tcg_ctx, o->out2, tcg_ctx->cpu_env, o->in1, o->in2); + return_low128(tcg_ctx, o->out); + return DISAS_NEXT; +} + +static DisasJumpType op_divs64(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_divs64(tcg_ctx, o->out2, tcg_ctx->cpu_env, o->in1, o->in2); + return_low128(tcg_ctx, o->out); + return DISAS_NEXT; +} + +static DisasJumpType op_divu64(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_divu64(tcg_ctx, o->out2, tcg_ctx->cpu_env, o->out, o->out2, o->in2); + return_low128(tcg_ctx, o->out); + return DISAS_NEXT; +} + +static DisasJumpType op_deb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_deb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_ddb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_ddb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_dxb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_dxb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->out, o->out2, o->in1, o->in2); + return_low128(tcg_ctx, o->out2); + return DISAS_NEXT; +} + +static DisasJumpType op_ear(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = 
s->uc->tcg_ctx; + int r2 = get_field(s, r2); + tcg_gen_ld32u_i64(tcg_ctx, o->out, tcg_ctx->cpu_env, offsetof(CPUS390XState, aregs[r2])); + return DISAS_NEXT; +} + +static DisasJumpType op_ecag(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* No cache information provided. */ + tcg_gen_movi_i64(tcg_ctx, o->out, -1); + return DISAS_NEXT; +} + +static DisasJumpType op_efpc(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_ld32u_i64(tcg_ctx, o->out, tcg_ctx->cpu_env, offsetof(CPUS390XState, fpc)); + return DISAS_NEXT; +} + +static DisasJumpType op_epsw(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int r1 = get_field(s, r1); + int r2 = get_field(s, r2); + TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); + + /* Note the "subsequently" in the PoO, which implies a defined result + if r1 == r2. Thus we cannot defer these writes to an output hook. */ + tcg_gen_shri_i64(tcg_ctx, t, psw_mask, 32); + store_reg32_i64(tcg_ctx, r1, t); + if (r2 != 0) { + store_reg32_i64(tcg_ctx, r2, psw_mask); + } + + tcg_temp_free_i64(tcg_ctx, t); + return DISAS_NEXT; +} + +static DisasJumpType op_ex(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int r1 = get_field(s, r1); + TCGv_i32 ilen; + TCGv_i64 v1; + + /* Nested EXECUTE is not allowed. */ + if (unlikely(s->ex_value)) { + gen_program_exception(s, PGM_EXECUTE); + return DISAS_NORETURN; + } + + update_psw_addr(s); + update_cc_op(s); + + if (r1 == 0) { + v1 = tcg_const_i64(tcg_ctx, 0); + } else { + v1 = regs[r1]; + } + + ilen = tcg_const_i32(tcg_ctx, s->ilen); + gen_helper_ex(tcg_ctx, tcg_ctx->cpu_env, ilen, v1, o->in2); + tcg_temp_free_i32(tcg_ctx, ilen); + + if (r1 == 0) { + tcg_temp_free_i64(tcg_ctx, v1); + } + + return DISAS_PC_CC_UPDATED; +} + +static DisasJumpType op_fieb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 m34 = fpinst_extract_m34(s, false, true); + + if (!m34) { + return DISAS_NORETURN; + } + gen_helper_fieb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2, m34); + tcg_temp_free_i32(tcg_ctx, m34); + return DISAS_NEXT; +} + +static DisasJumpType op_fidb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 m34 = fpinst_extract_m34(s, false, true); + + if (!m34) { + return DISAS_NORETURN; + } + gen_helper_fidb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2, m34); + tcg_temp_free_i32(tcg_ctx, m34); + return DISAS_NEXT; +} + +static DisasJumpType op_fixb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 m34 = fpinst_extract_m34(s, false, true); + + if (!m34) { + return DISAS_NORETURN; + } + gen_helper_fixb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2, m34); + return_low128(tcg_ctx, o->out2); + tcg_temp_free_i32(tcg_ctx, m34); + return DISAS_NEXT; +} + +static DisasJumpType op_flogr(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* We'll use the original input for cc computation, since we get to + compare that against 0, which ought to be better than comparing + the real output against 64. It also lets cc_dst be a convenient + temporary during our computation. */ + gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2); + + /* R1 = IN ? CLZ(IN) : 64. */ + tcg_gen_clzi_i64(tcg_ctx, o->out, o->in2, 64); + + /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this + value by 64, which is undefined. But since the shift is 64 iff the + input is zero, we still get the correct result after and'ing. 
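As a worked example, IN = 0x0040000000000000 gives CLZ = 9; the shift + below produces 0x8000000000000000 >> 9 = 0x0040000000000000, and + cc_dst & ~that clears the found bit. For IN = 0 the shift amount is 64, + but cc_dst is 0, so the and'ing still yields the required 0. 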
*/ + tcg_gen_movi_i64(tcg_ctx, o->out2, 0x8000000000000000ull); + tcg_gen_shr_i64(tcg_ctx, o->out2, o->out2, o->out); + tcg_gen_andc_i64(tcg_ctx, o->out2, cc_dst, o->out2); + return DISAS_NEXT; +} + +static DisasJumpType op_icm(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int m3 = get_field(s, m3); + int pos, len, base = s->insn->data; + TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); + uint64_t ccm; + + switch (m3) { + case 0xf: + /* Effectively a 32-bit load. */ + tcg_gen_qemu_ld32u(tcg_ctx, tmp, o->in2, get_mem_index(s)); + len = 32; + goto one_insert; + + case 0xc: + case 0x6: + case 0x3: + /* Effectively a 16-bit load. */ + tcg_gen_qemu_ld16u(tcg_ctx, tmp, o->in2, get_mem_index(s)); + len = 16; + goto one_insert; + + case 0x8: + case 0x4: + case 0x2: + case 0x1: + /* Effectively an 8-bit load. */ + tcg_gen_qemu_ld8u(tcg_ctx, tmp, o->in2, get_mem_index(s)); + len = 8; + goto one_insert; + + one_insert: + pos = base + ctz32(m3) * 8; + tcg_gen_deposit_i64(tcg_ctx, o->out, o->out, tmp, pos, len); + ccm = ((1ull << len) - 1) << pos; + break; + + default: + /* This is going to be a sequence of loads and inserts. */ + pos = base + 32 - 8; + ccm = 0; + while (m3) { + if (m3 & 0x8) { + tcg_gen_qemu_ld8u(tcg_ctx, tmp, o->in2, get_mem_index(s)); + tcg_gen_addi_i64(tcg_ctx, o->in2, o->in2, 1); + tcg_gen_deposit_i64(tcg_ctx, o->out, o->out, tmp, pos, 8); + /* Use a 64-bit constant: for ICMH, pos can reach 56, and + shifting a plain int that far is undefined behavior. */ + ccm |= 0xffull << pos; + } + m3 = (m3 << 1) & 0xf; + pos -= 8; + } + break; + } + + tcg_gen_movi_i64(tcg_ctx, tmp, ccm); + gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out); + tcg_temp_free_i64(tcg_ctx, tmp); + return DISAS_NEXT; +} + +static DisasJumpType op_insi(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int shift = s->insn->data & 0xff; + int size = s->insn->data >> 8; + tcg_gen_deposit_i64(tcg_ctx, o->out, o->in1, o->in2, shift, size); + return DISAS_NEXT; +} + +static DisasJumpType op_ipm(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 t1, t2; + + gen_op_calc_cc(s); + t1 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_extract_i64(tcg_ctx, t1, psw_mask, 40, 4); + t2 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_extu_i32_i64(tcg_ctx, t2, cc_op); + tcg_gen_deposit_i64(tcg_ctx, t1, t1, t2, 4, 60); + tcg_gen_deposit_i64(tcg_ctx, o->out, o->out, t1, 24, 8); + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, t2); + return DISAS_NEXT; +} + +static DisasJumpType op_idte(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 m4; + + if (s390_has_feat(s->uc, S390_FEAT_LOCAL_TLB_CLEARING)) { + m4 = tcg_const_i32(tcg_ctx, get_field(s, m4)); + } else { + m4 = tcg_const_i32(tcg_ctx, 0); + } + gen_helper_idte(tcg_ctx, tcg_ctx->cpu_env, o->in1, o->in2, m4); + tcg_temp_free_i32(tcg_ctx, m4); + return DISAS_NEXT; +} + +static DisasJumpType op_ipte(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 m4; + + if (s390_has_feat(s->uc, S390_FEAT_LOCAL_TLB_CLEARING)) { + m4 = tcg_const_i32(tcg_ctx, get_field(s, m4)); + } else { + m4 = tcg_const_i32(tcg_ctx, 0); + } + gen_helper_ipte(tcg_ctx, tcg_ctx->cpu_env, o->in1, o->in2, m4); + tcg_temp_free_i32(tcg_ctx, m4); + return DISAS_NEXT; +} + +static DisasJumpType op_iske(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_iske(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_msa(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int r1 = have_field(s, r1) ? 
get_field(s, r1) : 0; + int r2 = have_field(s, r2) ? get_field(s, r2) : 0; + int r3 = have_field(s, r3) ? get_field(s, r3) : 0; + TCGv_i32 t_r1, t_r2, t_r3, type; + + switch (s->insn->data) { + case S390_FEAT_TYPE_KMCTR: + if (r3 & 1 || !r3) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + /* FALL THROUGH */ + case S390_FEAT_TYPE_PPNO: + case S390_FEAT_TYPE_KMF: + case S390_FEAT_TYPE_KMC: + case S390_FEAT_TYPE_KMO: + case S390_FEAT_TYPE_KM: + if (r1 & 1 || !r1) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + /* FALL THROUGH */ + case S390_FEAT_TYPE_KMAC: + case S390_FEAT_TYPE_KIMD: + case S390_FEAT_TYPE_KLMD: + if (r2 & 1 || !r2) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + /* FALL THROUGH */ + case S390_FEAT_TYPE_PCKMO: + case S390_FEAT_TYPE_PCC: + break; + default: + // g_assert_not_reached(); + break; + }; + + t_r1 = tcg_const_i32(tcg_ctx, r1); + t_r2 = tcg_const_i32(tcg_ctx, r2); + t_r3 = tcg_const_i32(tcg_ctx, r3); + type = tcg_const_i32(tcg_ctx, s->insn->data); + gen_helper_msa(tcg_ctx, cc_op, tcg_ctx->cpu_env, t_r1, t_r2, t_r3, type); + set_cc_static(s); + tcg_temp_free_i32(tcg_ctx, t_r1); + tcg_temp_free_i32(tcg_ctx, t_r2); + tcg_temp_free_i32(tcg_ctx, t_r3); + tcg_temp_free_i32(tcg_ctx, type); + return DISAS_NEXT; +} + +static DisasJumpType op_keb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_keb(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->in1, o->in2); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_kdb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_kdb(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->in1, o->in2); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_kxb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_kxb(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->out, o->out2, o->in1, o->in2); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_laa(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* The real output is indeed the original value in memory; + recompute the addition for the computation of CC. */ + tcg_gen_atomic_fetch_add_i64(tcg_ctx, o->in2, o->in2, o->in1, get_mem_index(s), + s->insn->data | MO_ALIGN); + /* However, we need to recompute the addition for setting CC. */ + tcg_gen_add_i64(tcg_ctx, o->out, o->in1, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_lan(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* The real output is indeed the original value in memory; + recompute the addition for the computation of CC. */ + tcg_gen_atomic_fetch_and_i64(tcg_ctx, o->in2, o->in2, o->in1, get_mem_index(s), + s->insn->data | MO_ALIGN); + /* However, we need to recompute the operation for setting CC. */ + tcg_gen_and_i64(tcg_ctx, o->out, o->in1, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_lao(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* The real output is indeed the original value in memory; + recompute the addition for the computation of CC. */ + tcg_gen_atomic_fetch_or_i64(tcg_ctx, o->in2, o->in2, o->in1, get_mem_index(s), + s->insn->data | MO_ALIGN); + /* However, we need to recompute the operation for setting CC. 
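The atomic fetch-or left the old memory value in in2, so in1 | in2 + recreates exactly the value that was stored; the "addition" wording in + the comment above is inherited from LAA, while the operation here is an OR. 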
*/ + tcg_gen_or_i64(tcg_ctx, o->out, o->in1, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_lax(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* The real output is indeed the original value in memory; + recompute the addition for the computation of CC. */ + tcg_gen_atomic_fetch_xor_i64(tcg_ctx, o->in2, o->in2, o->in1, get_mem_index(s), + s->insn->data | MO_ALIGN); + /* However, we need to recompute the operation for setting CC. */ + tcg_gen_xor_i64(tcg_ctx, o->out, o->in1, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_ldeb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_ledb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 m34 = fpinst_extract_m34(s, true, true); + + if (!m34) { + return DISAS_NORETURN; + } + gen_helper_ledb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2, m34); + tcg_temp_free_i32(tcg_ctx, m34); + return DISAS_NEXT; +} + +static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 m34 = fpinst_extract_m34(s, true, true); + + if (!m34) { + return DISAS_NORETURN; + } + gen_helper_ldxb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2, m34); + tcg_temp_free_i32(tcg_ctx, m34); + return DISAS_NEXT; +} + +static DisasJumpType op_lexb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 m34 = fpinst_extract_m34(s, true, true); + + if (!m34) { + return DISAS_NORETURN; + } + gen_helper_lexb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2, m34); + tcg_temp_free_i32(tcg_ctx, m34); + return DISAS_NEXT; +} + +static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_lxdb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2); + return_low128(tcg_ctx, o->out2); + return DISAS_NEXT; +} + +static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_lxeb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2); + return_low128(tcg_ctx, o->out2); + return DISAS_NEXT; +} + +static DisasJumpType op_lde(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_shli_i64(tcg_ctx, o->out, o->in2, 32); + return DISAS_NEXT; +} + +static DisasJumpType op_llgt(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_andi_i64(tcg_ctx, o->out, o->in2, 0x7fffffff); + return DISAS_NEXT; +} + +static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_qemu_ld8s(tcg_ctx, o->out, o->in2, get_mem_index(s)); + return DISAS_NEXT; +} + +static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_qemu_ld8u(tcg_ctx, o->out, o->in2, get_mem_index(s)); + return DISAS_NEXT; +} + +static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_qemu_ld16s(tcg_ctx, o->out, o->in2, get_mem_index(s)); + return DISAS_NEXT; +} + +static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_qemu_ld16u(tcg_ctx, o->out, o->in2, get_mem_index(s)); + return DISAS_NEXT; +} + +static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_qemu_ld32s(tcg_ctx, o->out, o->in2, get_mem_index(s)); + return DISAS_NEXT; +} + +static DisasJumpType 
op_ld32u(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_qemu_ld32u(tcg_ctx, o->out, o->in2, get_mem_index(s)); + return DISAS_NEXT; +} + +static DisasJumpType op_ld64(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_qemu_ld64(tcg_ctx, o->out, o->in2, get_mem_index(s)); + return DISAS_NEXT; +} + +static DisasJumpType op_lat(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGLabel *lab = gen_new_label(tcg_ctx); + store_reg32_i64(tcg_ctx, get_field(s, r1), o->in2); + /* The value is stored even in case of trap. */ + tcg_gen_brcondi_i64(tcg_ctx, TCG_COND_NE, o->in2, 0, lab); + gen_trap(s); + gen_set_label(tcg_ctx, lab); + return DISAS_NEXT; +} + +static DisasJumpType op_lgat(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGLabel *lab = gen_new_label(tcg_ctx); + tcg_gen_qemu_ld64(tcg_ctx, o->out, o->in2, get_mem_index(s)); + /* The value is stored even in case of trap. */ + tcg_gen_brcondi_i64(tcg_ctx, TCG_COND_NE, o->out, 0, lab); + gen_trap(s); + gen_set_label(tcg_ctx, lab); + return DISAS_NEXT; +} + +static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGLabel *lab = gen_new_label(tcg_ctx); + store_reg32h_i64(tcg_ctx, get_field(s, r1), o->in2); + /* The value is stored even in case of trap. */ + tcg_gen_brcondi_i64(tcg_ctx, TCG_COND_NE, o->in2, 0, lab); + gen_trap(s); + gen_set_label(tcg_ctx, lab); + return DISAS_NEXT; +} + +static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGLabel *lab = gen_new_label(tcg_ctx); + tcg_gen_qemu_ld32u(tcg_ctx, o->out, o->in2, get_mem_index(s)); + /* The value is stored even in case of trap. */ + tcg_gen_brcondi_i64(tcg_ctx, TCG_COND_NE, o->out, 0, lab); + gen_trap(s); + gen_set_label(tcg_ctx, lab); + return DISAS_NEXT; +} + +static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGLabel *lab = gen_new_label(tcg_ctx); + tcg_gen_andi_i64(tcg_ctx, o->out, o->in2, 0x7fffffff); + /* The value is stored even in case of trap. */ + tcg_gen_brcondi_i64(tcg_ctx, TCG_COND_NE, o->out, 0, lab); + gen_trap(s); + gen_set_label(tcg_ctx, lab); + return DISAS_NEXT; +} + +static DisasJumpType op_loc(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + DisasCompare c; + + disas_jcc(s, &c, get_field(s, m3)); + + if (c.is_64) { + tcg_gen_movcond_i64(tcg_ctx, c.cond, o->out, c.u.s64.a, c.u.s64.b, + o->in2, o->in1); + free_compare(tcg_ctx, &c); + } else { + TCGv_i32 t32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 t, z; + + tcg_gen_setcond_i32(tcg_ctx, c.cond, t32, c.u.s32.a, c.u.s32.b); + free_compare(tcg_ctx, &c); + + t = tcg_temp_new_i64(tcg_ctx); + tcg_gen_extu_i32_i64(tcg_ctx, t, t32); + tcg_temp_free_i32(tcg_ctx, t32); + + z = tcg_const_i64(tcg_ctx, 0); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_NE, o->out, t, z, o->in2, o->in1); + tcg_temp_free_i64(tcg_ctx, t); + tcg_temp_free_i64(tcg_ctx, z); + } + + return DISAS_NEXT; +} + +static DisasJumpType op_lctl(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 r1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); + TCGv_i32 r3 = tcg_const_i32(tcg_ctx, get_field(s, r3)); + gen_helper_lctl(tcg_ctx, tcg_ctx->cpu_env, r1, o->in2, r3); + tcg_temp_free_i32(tcg_ctx, r1); + tcg_temp_free_i32(tcg_ctx, r3); + /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. 
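Loading control registers may unmask pending interrupts, and those are + only recognized on the way around the main loop, so the TB must neither + continue nor chain. 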
*/ + return DISAS_PC_STALE_NOCHAIN; +} + +static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 r1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); + TCGv_i32 r3 = tcg_const_i32(tcg_ctx, get_field(s, r3)); + gen_helper_lctlg(tcg_ctx, tcg_ctx->cpu_env, r1, o->in2, r3); + tcg_temp_free_i32(tcg_ctx, r1); + tcg_temp_free_i32(tcg_ctx, r3); + /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */ + return DISAS_PC_STALE_NOCHAIN; +} + +static DisasJumpType op_lra(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_lra(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_lpp(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_st_i64(tcg_ctx, o->in2, tcg_ctx->cpu_env, offsetof(CPUS390XState, pp)); + return DISAS_NEXT; +} + +static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 t1, t2; + + per_breaking_event(s); + + t1 = tcg_temp_new_i64(tcg_ctx); + t2 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_qemu_ld_i64(tcg_ctx, t1, o->in2, get_mem_index(s), + MO_TEUL | MO_ALIGN_8); + tcg_gen_addi_i64(tcg_ctx, o->in2, o->in2, 4); + tcg_gen_qemu_ld32u(tcg_ctx, t2, o->in2, get_mem_index(s)); + /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */ + tcg_gen_shli_i64(tcg_ctx, t1, t1, 32); + gen_helper_load_psw(tcg_ctx, tcg_ctx->cpu_env, t1, t2); + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, t2); + return DISAS_NORETURN; +} + +static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 t1, t2; + + per_breaking_event(s); + + t1 = tcg_temp_new_i64(tcg_ctx); + t2 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_qemu_ld_i64(tcg_ctx, t1, o->in2, get_mem_index(s), + MO_TEQ | MO_ALIGN_8); + tcg_gen_addi_i64(tcg_ctx, o->in2, o->in2, 8); + tcg_gen_qemu_ld64(tcg_ctx, t2, o->in2, get_mem_index(s)); + gen_helper_load_psw(tcg_ctx, tcg_ctx->cpu_env, t1, t2); + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, t2); + return DISAS_NORETURN; +} + +static DisasJumpType op_lam(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 r1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); + TCGv_i32 r3 = tcg_const_i32(tcg_ctx, get_field(s, r3)); + gen_helper_lam(tcg_ctx, tcg_ctx->cpu_env, r1, o->in2, r3); + tcg_temp_free_i32(tcg_ctx, r1); + tcg_temp_free_i32(tcg_ctx, r3); + return DISAS_NEXT; +} + +static DisasJumpType op_lm32(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int r1 = get_field(s, r1); + int r3 = get_field(s, r3); + TCGv_i64 t1, t2; + + /* Only one register to read. */ + t1 = tcg_temp_new_i64(tcg_ctx); + if (unlikely(r1 == r3)) { + tcg_gen_qemu_ld32u(tcg_ctx, t1, o->in2, get_mem_index(s)); + store_reg32_i64(tcg_ctx, r1, t1); + tcg_temp_free(tcg_ctx, t1); + return DISAS_NEXT; + } + + /* First load the values of the first and last registers to trigger + possible page faults. */ + t2 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_qemu_ld32u(tcg_ctx, t1, o->in2, get_mem_index(s)); + tcg_gen_addi_i64(tcg_ctx, t2, o->in2, 4 * ((r3 - r1) & 15)); + tcg_gen_qemu_ld32u(tcg_ctx, t2, t2, get_mem_index(s)); + store_reg32_i64(tcg_ctx, r1, t1); + store_reg32_i64(tcg_ctx, r3, t2); + + /* Only two registers to read. */ + if (((r1 + 1) & 15) == r3) { + tcg_temp_free(tcg_ctx, t2); + tcg_temp_free(tcg_ctx, t1); + return DISAS_NEXT; + } + + /* Then load the remaining registers. 
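The two accesses above touched the first and last words of the operand, + validating every page it can span. 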
Page fault can't occur. */ + r3 = (r3 - 1) & 15; + tcg_gen_movi_i64(tcg_ctx, t2, 4); + while (r1 != r3) { + r1 = (r1 + 1) & 15; + tcg_gen_add_i64(tcg_ctx, o->in2, o->in2, t2); + tcg_gen_qemu_ld32u(tcg_ctx, t1, o->in2, get_mem_index(s)); + store_reg32_i64(tcg_ctx, r1, t1); + } + tcg_temp_free(tcg_ctx, t2); + tcg_temp_free(tcg_ctx, t1); + + return DISAS_NEXT; +} + +static DisasJumpType op_lmh(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int r1 = get_field(s, r1); + int r3 = get_field(s, r3); + TCGv_i64 t1, t2; + + /* Only one register to read. */ + t1 = tcg_temp_new_i64(tcg_ctx); + if (unlikely(r1 == r3)) { + tcg_gen_qemu_ld32u(tcg_ctx, t1, o->in2, get_mem_index(s)); + store_reg32h_i64(tcg_ctx, r1, t1); + tcg_temp_free(tcg_ctx, t1); + return DISAS_NEXT; + } + + /* First load the values of the first and last registers to trigger + possible page faults. */ + t2 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_qemu_ld32u(tcg_ctx, t1, o->in2, get_mem_index(s)); + tcg_gen_addi_i64(tcg_ctx, t2, o->in2, 4 * ((r3 - r1) & 15)); + tcg_gen_qemu_ld32u(tcg_ctx, t2, t2, get_mem_index(s)); + store_reg32h_i64(tcg_ctx, r1, t1); + store_reg32h_i64(tcg_ctx, r3, t2); + + /* Only two registers to read. */ + if (((r1 + 1) & 15) == r3) { + tcg_temp_free(tcg_ctx, t2); + tcg_temp_free(tcg_ctx, t1); + return DISAS_NEXT; + } + + /* Then load the remaining registers. Page fault can't occur. */ + r3 = (r3 - 1) & 15; + tcg_gen_movi_i64(tcg_ctx, t2, 4); + while (r1 != r3) { + r1 = (r1 + 1) & 15; + tcg_gen_add_i64(tcg_ctx, o->in2, o->in2, t2); + tcg_gen_qemu_ld32u(tcg_ctx, t1, o->in2, get_mem_index(s)); + store_reg32h_i64(tcg_ctx, r1, t1); + } + tcg_temp_free(tcg_ctx, t2); + tcg_temp_free(tcg_ctx, t1); + + return DISAS_NEXT; +} + +static DisasJumpType op_lm64(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int r1 = get_field(s, r1); + int r3 = get_field(s, r3); + TCGv_i64 t1, t2; + + /* Only one register to read. */ + if (unlikely(r1 == r3)) { + tcg_gen_qemu_ld64(tcg_ctx, regs[r1], o->in2, get_mem_index(s)); + return DISAS_NEXT; + } + + /* First load the values of the first and last registers to trigger + possible page faults. */ + t1 = tcg_temp_new_i64(tcg_ctx); + t2 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_qemu_ld64(tcg_ctx, t1, o->in2, get_mem_index(s)); + tcg_gen_addi_i64(tcg_ctx, t2, o->in2, 8 * ((r3 - r1) & 15)); + tcg_gen_qemu_ld64(tcg_ctx, regs[r3], t2, get_mem_index(s)); + tcg_gen_mov_i64(tcg_ctx, regs[r1], t1); + tcg_temp_free(tcg_ctx, t2); + + /* Only two registers to read. */ + if (((r1 + 1) & 15) == r3) { + tcg_temp_free(tcg_ctx, t1); + return DISAS_NEXT; + } + + /* Then load the remaining registers. Page fault can't occur. */ + r3 = (r3 - 1) & 15; + tcg_gen_movi_i64(tcg_ctx, t1, 8); + while (r1 != r3) { + r1 = (r1 + 1) & 15; + tcg_gen_add_i64(tcg_ctx, o->in2, o->in2, t1); + tcg_gen_qemu_ld64(tcg_ctx, regs[r1], o->in2, get_mem_index(s)); + } + tcg_temp_free(tcg_ctx, t1); + + return DISAS_NEXT; +} + +static DisasJumpType op_lpd(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 a1, a2; + MemOp mop = s->insn->data; + + /* In a parallel context, stop the world and single step. */ + if (tb_cflags(s->base.tb) & CF_PARALLEL) { + update_psw_addr(s); + update_cc_op(s); + gen_exception(tcg_ctx, EXCP_ATOMIC); + return DISAS_NORETURN; + } + + /* In a serial context, perform the two loads ... 
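(nothing else runs concurrently in a serial context, so two ordinary + loads already appear interlocked) 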
*/ + a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1)); + a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2)); + tcg_gen_qemu_ld_i64(tcg_ctx, o->out, a1, get_mem_index(s), mop | MO_ALIGN); + tcg_gen_qemu_ld_i64(tcg_ctx, o->out2, a2, get_mem_index(s), mop | MO_ALIGN); + tcg_temp_free_i64(tcg_ctx, a1); + tcg_temp_free_i64(tcg_ctx, a2); + + /* ... and indicate that we performed them while interlocked. */ + gen_op_movi_cc(s, 0); + return DISAS_NEXT; +} + +static DisasJumpType op_lpq(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) { + gen_helper_lpq(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2); + } else if (HAVE_ATOMIC128) { + gen_helper_lpq_parallel(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2); + } else { + gen_helper_exit_atomic(tcg_ctx, tcg_ctx->cpu_env); + return DISAS_NORETURN; + } + return_low128(tcg_ctx, o->out2); + return DISAS_NEXT; +} + +static DisasJumpType op_lura(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->addr1 = get_address(s, 0, get_field(s, r2), 0); + tcg_gen_qemu_ld_tl(tcg_ctx, o->out, o->addr1, MMU_REAL_IDX, s->insn->data); + return DISAS_NEXT; +} + +static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_andi_i64(tcg_ctx, o->out, o->in2, -256); + return DISAS_NEXT; +} + +static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const int64_t block_size = (1ull << (get_field(s, m3) + 6)); + + if (get_field(s, m3) > 6) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + tcg_gen_ori_i64(tcg_ctx, o->addr1, o->addr1, -block_size); + tcg_gen_neg_i64(tcg_ctx, o->addr1, o->addr1); + tcg_gen_movi_i64(tcg_ctx, o->out, 16); + tcg_gen_umin_i64(tcg_ctx, o->out, o->out, o->addr1); + gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out); + return DISAS_NEXT; +} + +static DisasJumpType op_mov2(DisasContext *s, DisasOps *o) +{ + o->out = o->in2; + o->g_out = o->g_in2; + o->in2 = NULL; + o->g_in2 = false; + return DISAS_NEXT; +} + +static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int b2 = get_field(s, b2); + TCGv ar1 = tcg_temp_new_i64(tcg_ctx); + + o->out = o->in2; + o->g_out = o->g_in2; + o->in2 = NULL; + o->g_in2 = false; + + switch (s->base.tb->flags & FLAG_MASK_ASC) { + case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT: + tcg_gen_movi_i64(tcg_ctx, ar1, 0); + break; + case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT: + tcg_gen_movi_i64(tcg_ctx, ar1, 1); + break; + case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT: + if (b2) { + tcg_gen_ld32u_i64(tcg_ctx, ar1, tcg_ctx->cpu_env, offsetof(CPUS390XState, aregs[b2])); + } else { + tcg_gen_movi_i64(tcg_ctx, ar1, 0); + } + break; + case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT: + tcg_gen_movi_i64(tcg_ctx, ar1, 2); + break; + } + + tcg_gen_st32_i64(tcg_ctx, ar1, tcg_ctx->cpu_env, offsetof(CPUS390XState, aregs[1])); + tcg_temp_free_i64(tcg_ctx, ar1); + + return DISAS_NEXT; +} + +static DisasJumpType op_movx(DisasContext *s, DisasOps *o) +{ + o->out = o->in1; + o->out2 = o->in2; + o->g_out = o->g_in1; + o->g_out2 = o->g_in2; + o->in1 = NULL; + o->in2 = NULL; + o->g_in1 = o->g_in2 = false; + return DISAS_NEXT; +} + +static DisasJumpType op_mvc(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 l = tcg_const_i32(tcg_ctx, get_field(s, l1)); + gen_helper_mvc(tcg_ctx, tcg_ctx->cpu_env, l, o->addr1, o->in2); + tcg_temp_free_i32(tcg_ctx, l); + return 
DISAS_NEXT; +} + +static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 l = tcg_const_i32(tcg_ctx, get_field(s, l1)); + gen_helper_mvcin(tcg_ctx, tcg_ctx->cpu_env, l, o->addr1, o->in2); + tcg_temp_free_i32(tcg_ctx, l); + return DISAS_NEXT; +} + +static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int r1 = get_field(s, r1); + int r2 = get_field(s, r2); + TCGv_i32 t1, t2; + + /* r1 and r2 must be even. */ + if (r1 & 1 || r2 & 1) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + t1 = tcg_const_i32(tcg_ctx, r1); + t2 = tcg_const_i32(tcg_ctx, r2); + gen_helper_mvcl(tcg_ctx, cc_op, tcg_ctx->cpu_env, t1, t2); + tcg_temp_free_i32(tcg_ctx, t1); + tcg_temp_free_i32(tcg_ctx, t2); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int r1 = get_field(s, r1); + int r3 = get_field(s, r3); + TCGv_i32 t1, t3; + + /* r1 and r3 must be even. */ + if (r1 & 1 || r3 & 1) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + t1 = tcg_const_i32(tcg_ctx, r1); + t3 = tcg_const_i32(tcg_ctx, r3); + gen_helper_mvcle(tcg_ctx, cc_op, tcg_ctx->cpu_env, t1, o->in2, t3); + tcg_temp_free_i32(tcg_ctx, t1); + tcg_temp_free_i32(tcg_ctx, t3); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int r1 = get_field(s, r1); + int r3 = get_field(s, r3); + TCGv_i32 t1, t3; + + /* r1 and r3 must be even. */ + if (r1 & 1 || r3 & 1) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + t1 = tcg_const_i32(tcg_ctx, r1); + t3 = tcg_const_i32(tcg_ctx, r3); + gen_helper_mvclu(tcg_ctx, cc_op, tcg_ctx->cpu_env, t1, o->in2, t3); + tcg_temp_free_i32(tcg_ctx, t1); + tcg_temp_free_i32(tcg_ctx, t3); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int r3 = get_field(s, r3); + gen_helper_mvcos(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->addr1, o->in2, regs[r3]); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int r1 = get_field(s, l1); + gen_helper_mvcp(tcg_ctx, cc_op, tcg_ctx->cpu_env, regs[r1], o->addr1, o->in2); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int r1 = get_field(s, l1); + gen_helper_mvcs(tcg_ctx, cc_op, tcg_ctx->cpu_env, regs[r1], o->addr1, o->in2); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_mvn(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 l = tcg_const_i32(tcg_ctx, get_field(s, l1)); + gen_helper_mvn(tcg_ctx, tcg_ctx->cpu_env, l, o->addr1, o->in2); + tcg_temp_free_i32(tcg_ctx, l); + return DISAS_NEXT; +} + +static DisasJumpType op_mvo(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 l = tcg_const_i32(tcg_ctx, get_field(s, l1)); + gen_helper_mvo(tcg_ctx, tcg_ctx->cpu_env, l, o->addr1, o->in2); + tcg_temp_free_i32(tcg_ctx, l); + return DISAS_NEXT; +} + +static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_mvpg(tcg_ctx, cc_op, tcg_ctx->cpu_env, regs[0], o->in1, o->in2); + 
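/* The helper computes MVPG's condition code (0 on success; nonzero when + a page-translation failure is reported via the CCO bit in r0 instead + of faulting); latch it below. */ +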
set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_mvst(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 t1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); + TCGv_i32 t2 = tcg_const_i32(tcg_ctx, get_field(s, r2)); + + gen_helper_mvst(tcg_ctx, cc_op, tcg_ctx->cpu_env, t1, t2); + tcg_temp_free_i32(tcg_ctx, t1); + tcg_temp_free_i32(tcg_ctx, t2); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_mvz(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 l = tcg_const_i32(tcg_ctx, get_field(s, l1)); + gen_helper_mvz(tcg_ctx, tcg_ctx->cpu_env, l, o->addr1, o->in2); + tcg_temp_free_i32(tcg_ctx, l); + return DISAS_NEXT; +} + +static DisasJumpType op_mul(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_mul_i64(tcg_ctx, o->out, o->in1, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_mul128(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_mulu2_i64(tcg_ctx, o->out2, o->out, o->in1, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_meeb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_meeb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_mdeb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_mdb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_mdb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_mxb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_mxb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->out, o->out2, o->in1, o->in2); + return_low128(tcg_ctx, o->out2); + return DISAS_NEXT; +} + +static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_mxdb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->out, o->out2, o->in2); + return_low128(tcg_ctx, o->out2); + return DISAS_NEXT; +} + +static DisasJumpType op_maeb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 r3 = load_freg32_i64(tcg_ctx, get_field(s, r3)); + gen_helper_maeb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2, r3); + tcg_temp_free_i64(tcg_ctx, r3); + return DISAS_NEXT; +} + +static DisasJumpType op_madb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 r3 = load_freg(tcg_ctx, get_field(s, r3)); + gen_helper_madb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2, r3); + tcg_temp_free_i64(tcg_ctx, r3); + return DISAS_NEXT; +} + +static DisasJumpType op_mseb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 r3 = load_freg32_i64(tcg_ctx, get_field(s, r3)); + gen_helper_mseb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2, r3); + tcg_temp_free_i64(tcg_ctx, r3); + return DISAS_NEXT; +} + +static DisasJumpType op_msdb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 r3 = load_freg(tcg_ctx, get_field(s, r3)); + gen_helper_msdb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2, r3); + tcg_temp_free_i64(tcg_ctx, r3); + return DISAS_NEXT; +} + +static DisasJumpType op_nabs(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 z, n; + z = tcg_const_i64(tcg_ctx, 0); + n = tcg_temp_new_i64(tcg_ctx); + 
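/* LOAD NEGATIVE: out = -|in2|. Compute n = -in2 here, then select n + when in2 >= 0 and in2 itself when it is already negative. */ +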
tcg_gen_neg_i64(tcg_ctx, n, o->in2); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_GE, o->out, o->in2, z, n, o->in2); + tcg_temp_free_i64(tcg_ctx, n); + tcg_temp_free_i64(tcg_ctx, z); + return DISAS_NEXT; +} + +static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_ori_i64(tcg_ctx, o->out, o->in2, 0x80000000ull); + return DISAS_NEXT; +} + +static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_ori_i64(tcg_ctx, o->out, o->in2, 0x8000000000000000ull); + return DISAS_NEXT; +} + +static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_ori_i64(tcg_ctx, o->out, o->in1, 0x8000000000000000ull); + tcg_gen_mov_i64(tcg_ctx, o->out2, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_nc(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 l = tcg_const_i32(tcg_ctx, get_field(s, l1)); + gen_helper_nc(tcg_ctx, cc_op, tcg_ctx->cpu_env, l, o->addr1, o->in2); + tcg_temp_free_i32(tcg_ctx, l); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_neg(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_neg_i64(tcg_ctx, o->out, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_negf32(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_xori_i64(tcg_ctx, o->out, o->in2, 0x80000000ull); + return DISAS_NEXT; +} + +static DisasJumpType op_negf64(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_xori_i64(tcg_ctx, o->out, o->in2, 0x8000000000000000ull); + return DISAS_NEXT; +} + +static DisasJumpType op_negf128(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_xori_i64(tcg_ctx, o->out, o->in1, 0x8000000000000000ull); + tcg_gen_mov_i64(tcg_ctx, o->out2, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_oc(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 l = tcg_const_i32(tcg_ctx, get_field(s, l1)); + gen_helper_oc(tcg_ctx, cc_op, tcg_ctx->cpu_env, l, o->addr1, o->in2); + tcg_temp_free_i32(tcg_ctx, l); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_or(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_or_i64(tcg_ctx, o->out, o->in1, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_ori(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int shift = s->insn->data & 0xff; + int size = s->insn->data >> 8; + uint64_t mask = ((1ull << size) - 1) << shift; + + assert(!o->g_in2); + tcg_gen_shli_i64(tcg_ctx, o->in2, o->in2, shift); + tcg_gen_or_i64(tcg_ctx, o->out, o->in1, o->in2); + + /* Produce the CC from only the bits manipulated. */ + tcg_gen_andi_i64(tcg_ctx, cc_dst, o->out, mask); + set_cc_nz_u64(s, cc_dst); + return DISAS_NEXT; +} + +static DisasJumpType op_oi(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in1 = tcg_temp_new_i64(tcg_ctx); + + if (!s390_has_feat(s->uc, S390_FEAT_INTERLOCKED_ACCESS_2)) { + tcg_gen_qemu_ld_tl(tcg_ctx, o->in1, o->addr1, get_mem_index(s), s->insn->data); + } else { + /* Perform the atomic operation in memory. */ + tcg_gen_atomic_fetch_or_i64(tcg_ctx, o->in1, o->addr1, o->in2, get_mem_index(s), + s->insn->data); + } + + /* Recompute also for atomic case: needed for setting CC. 
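The atomic fetch-or already stored the result but returned only the old + value in in1, so the OR is redone purely to recover the stored value. 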
*/ + tcg_gen_or_i64(tcg_ctx, o->out, o->in1, o->in2); + + if (!s390_has_feat(s->uc, S390_FEAT_INTERLOCKED_ACCESS_2)) { + tcg_gen_qemu_st_tl(tcg_ctx, o->out, o->addr1, get_mem_index(s), s->insn->data); + } + return DISAS_NEXT; +} + +static DisasJumpType op_pack(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 l = tcg_const_i32(tcg_ctx, get_field(s, l1)); + gen_helper_pack(tcg_ctx, tcg_ctx->cpu_env, l, o->addr1, o->in2); + tcg_temp_free_i32(tcg_ctx, l); + return DISAS_NEXT; +} + +static DisasJumpType op_pka(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int l2 = get_field(s, l2) + 1; + TCGv_i32 l; + + /* The length must not exceed 32 bytes. */ + if (l2 > 32) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + l = tcg_const_i32(tcg_ctx, l2); + gen_helper_pka(tcg_ctx, tcg_ctx->cpu_env, o->addr1, o->in2, l); + tcg_temp_free_i32(tcg_ctx, l); + return DISAS_NEXT; +} + +static DisasJumpType op_pku(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int l2 = get_field(s, l2) + 1; + TCGv_i32 l; + + /* The length must be even and should not exceed 64 bytes. */ + if ((l2 & 1) || (l2 > 64)) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + l = tcg_const_i32(tcg_ctx, l2); + gen_helper_pku(tcg_ctx, tcg_ctx->cpu_env, o->addr1, o->in2, l); + tcg_temp_free_i32(tcg_ctx, l); + return DISAS_NEXT; +} + +static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_popcnt(tcg_ctx, o->out, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_ptlb(tcg_ctx, tcg_ctx->cpu_env); + return DISAS_NEXT; +} + +static DisasJumpType op_risbg(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int i3 = get_field(s, i3); + int i4 = get_field(s, i4); + int i5 = get_field(s, i5); + int do_zero = i4 & 0x80; + uint64_t mask, imask, pmask; + int pos, len, rot; + + /* Adjust the arguments for the specific insn. */ + switch (s->fields.op2) { + case 0x55: /* risbg */ + case 0x59: /* risbgn */ + i3 &= 63; + i4 &= 63; + pmask = ~0; + break; + case 0x5d: /* risbhg */ + i3 &= 31; + i4 &= 31; + pmask = 0xffffffff00000000ull; + break; + case 0x51: /* risblg */ + i3 &= 31; + i4 &= 31; + pmask = 0x00000000ffffffffull; + break; + default: + // g_assert_not_reached(); + break; + } + + /* MASK is the set of bits to be inserted from R2. + Take care for I3/I4 wraparound. */ + mask = pmask >> i3; + if (i3 <= i4) { + mask ^= pmask >> i4 >> 1; + } else { + mask |= ~(pmask >> i4 >> 1); + } + mask &= pmask; + + /* IMASK is the set of bits to be kept from R1. In the case of the high/low + insns, we need to keep the other half of the register. */ + imask = ~mask | ~pmask; + if (do_zero) { + imask = ~pmask; + } + + len = i4 - i3 + 1; + pos = 63 - i4; + rot = i5 & 63; + if (s->fields.op2 == 0x5d) { + pos += 32; + } + + /* In some cases we can implement this with extract. */ + if (imask == 0 && pos == 0 && len > 0 && len <= rot) { + tcg_gen_extract_i64(tcg_ctx, o->out, o->in2, 64 - rot, len); + return DISAS_NEXT; + } + + /* In some cases we can implement this with deposit. */ + if (len > 0 && (imask == 0 || ~mask == imask)) { + /* Note that we rotate the bits to be inserted to the lsb, not to + the position as described in the PoO. */ + rot = (rot - pos) & 63; + } else { + pos = -1; + } + + /* Rotate the input as necessary. 
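For the deposit paths, rot was rebiased above so the selected field lands + at bit 0 of the rotated value; for the mask-based fallback the rotation + moves the bits directly to their final positions. 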
*/ + tcg_gen_rotli_i64(tcg_ctx, o->in2, o->in2, rot); + + /* Insert the selected bits into the output. */ + if (pos >= 0) { + if (imask == 0) { + tcg_gen_deposit_z_i64(tcg_ctx, o->out, o->in2, pos, len); + } else { + tcg_gen_deposit_i64(tcg_ctx, o->out, o->out, o->in2, pos, len); + } + } else if (imask == 0) { + tcg_gen_andi_i64(tcg_ctx, o->out, o->in2, mask); + } else { + tcg_gen_andi_i64(tcg_ctx, o->in2, o->in2, mask); + tcg_gen_andi_i64(tcg_ctx, o->out, o->out, imask); + tcg_gen_or_i64(tcg_ctx, o->out, o->out, o->in2); + } + return DISAS_NEXT; +} + +static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int i3 = get_field(s, i3); + int i4 = get_field(s, i4); + int i5 = get_field(s, i5); + uint64_t mask; + + /* If this is a test-only form, arrange to discard the result. */ + if (i3 & 0x80) { + o->out = tcg_temp_new_i64(tcg_ctx); + o->g_out = false; + } + + i3 &= 63; + i4 &= 63; + i5 &= 63; + + /* MASK is the set of bits to be operated on from R2. + Take care for I3/I4 wraparound. */ + mask = ~0ull >> i3; + if (i3 <= i4) { + mask ^= ~0ull >> i4 >> 1; + } else { + mask |= ~(~0ull >> i4 >> 1); + } + + /* Rotate the input as necessary. */ + tcg_gen_rotli_i64(tcg_ctx, o->in2, o->in2, i5); + + /* Operate. */ + switch (s->fields.op2) { + case 0x54: /* AND */ + tcg_gen_ori_i64(tcg_ctx, o->in2, o->in2, ~mask); + tcg_gen_and_i64(tcg_ctx, o->out, o->out, o->in2); + break; + case 0x56: /* OR */ + tcg_gen_andi_i64(tcg_ctx, o->in2, o->in2, mask); + tcg_gen_or_i64(tcg_ctx, o->out, o->out, o->in2); + break; + case 0x57: /* XOR */ + tcg_gen_andi_i64(tcg_ctx, o->in2, o->in2, mask); + tcg_gen_xor_i64(tcg_ctx, o->out, o->out, o->in2); + break; + default: + abort(); + } + + /* Set the CC. */ + tcg_gen_andi_i64(tcg_ctx, cc_dst, o->out, mask); + set_cc_nz_u64(s, cc_dst); + return DISAS_NEXT; +} + +static DisasJumpType op_rev16(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_bswap16_i64(tcg_ctx, o->out, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_rev32(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_bswap32_i64(tcg_ctx, o->out, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_rev64(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_bswap64_i64(tcg_ctx, o->out, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_rll32(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 to = tcg_temp_new_i32(tcg_ctx); + tcg_gen_extrl_i64_i32(tcg_ctx, t1, o->in1); + tcg_gen_extrl_i64_i32(tcg_ctx, t2, o->in2); + tcg_gen_rotl_i32(tcg_ctx, to, t1, t2); + tcg_gen_extu_i32_i64(tcg_ctx, o->out, to); + tcg_temp_free_i32(tcg_ctx, t1); + tcg_temp_free_i32(tcg_ctx, t2); + tcg_temp_free_i32(tcg_ctx, to); + return DISAS_NEXT; +} + +static DisasJumpType op_rll64(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_rotl_i64(tcg_ctx, o->out, o->in1, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_rrbe(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->in2); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_sacf(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_sacf(tcg_ctx, tcg_ctx->cpu_env, o->in2); + /* Addressing mode has changed, so end the block. 
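Ending with DISAS_PC_STALE forces a fresh TB lookup that picks up the + new address-space-control bits from the TB flags. 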
*/ + return DISAS_PC_STALE; +} + +static DisasJumpType op_sam(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int sam = s->insn->data; + TCGv_i64 tsam; + uint64_t mask; + + switch (sam) { + case 0: + mask = 0xffffff; + break; + case 1: + mask = 0x7fffffff; + break; + default: + mask = -1; + break; + } + + /* Bizarre but true, we check the address of the current insn for the + specification exception, not the next to be executed. Thus the PoO + documents that Bad Things Happen two bytes before the end. */ + if (s->base.pc_next & ~mask) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + s->pc_tmp &= mask; + + tsam = tcg_const_i64(tcg_ctx, sam); + tcg_gen_deposit_i64(tcg_ctx, psw_mask, psw_mask, tsam, 31, 2); + tcg_temp_free_i64(tcg_ctx, tsam); + + /* Always exit the TB, since we (may have) changed execution mode. */ + return DISAS_PC_STALE; +} + +static DisasJumpType op_sar(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int r1 = get_field(s, r1); + tcg_gen_st32_i64(tcg_ctx, o->in2, tcg_ctx->cpu_env, offsetof(CPUS390XState, aregs[r1])); + return DISAS_NEXT; +} + +static DisasJumpType op_seb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_seb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_sdb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_sdb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_sxb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_sxb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->out, o->out2, o->in1, o->in2); + return_low128(tcg_ctx, o->out2); + return DISAS_NEXT; +} + +static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_sqeb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_sqdb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_sqxb(tcg_ctx, o->out, tcg_ctx->cpu_env, o->in1, o->in2); + return_low128(tcg_ctx, o->out2); + return DISAS_NEXT; +} + +static DisasJumpType op_servc(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_servc(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->in2, o->in1); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_sigp(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 r1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); + TCGv_i32 r3 = tcg_const_i32(tcg_ctx, get_field(s, r3)); + gen_helper_sigp(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->in2, r1, r3); + set_cc_static(s); + tcg_temp_free_i32(tcg_ctx, r1); + tcg_temp_free_i32(tcg_ctx, r3); + return DISAS_NEXT; +} + +static DisasJumpType op_soc(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + DisasCompare c; + TCGv_i64 a, h; + TCGLabel *lab; + int r1; + + disas_jcc(s, &c, get_field(s, m3)); + + /* We want to store when the condition is fulfilled, so branch + out when it's not */ + c.cond = tcg_invert_cond(c.cond); + + lab = gen_new_label(tcg_ctx); + if (c.is_64) { + tcg_gen_brcond_i64(tcg_ctx, c.cond, c.u.s64.a, c.u.s64.b, lab); + } else { + tcg_gen_brcond_i32(tcg_ctx, c.cond, c.u.s32.a, 
c.u.s32.b, lab); + } + free_compare(tcg_ctx, &c); + + r1 = get_field(s, r1); + a = get_address(s, 0, get_field(s, b2), get_field(s, d2)); + switch (s->insn->data) { + case 1: /* STOCG */ + tcg_gen_qemu_st64(tcg_ctx, regs[r1], a, get_mem_index(s)); + break; + case 0: /* STOC */ + tcg_gen_qemu_st32(tcg_ctx, regs[r1], a, get_mem_index(s)); + break; + case 2: /* STOCFH */ + h = tcg_temp_new_i64(tcg_ctx); + tcg_gen_shri_i64(tcg_ctx, h, regs[r1], 32); + tcg_gen_qemu_st32(tcg_ctx, h, a, get_mem_index(s)); + tcg_temp_free_i64(tcg_ctx, h); + break; + default: + // g_assert_not_reached(); + break; + } + tcg_temp_free_i64(tcg_ctx, a); + + gen_set_label(tcg_ctx, lab); + return DISAS_NEXT; +} + +static DisasJumpType op_sla(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint64_t sign = 1ull << s->insn->data; + enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64; + gen_op_update2_cc_i64(s, cco, o->in1, o->in2); + tcg_gen_shl_i64(tcg_ctx, o->out, o->in1, o->in2); + /* The arithmetic left shift is curious in that it does not affect + the sign bit. Copy that over from the source unchanged. */ + tcg_gen_andi_i64(tcg_ctx, o->out, o->out, ~sign); + tcg_gen_andi_i64(tcg_ctx, o->in1, o->in1, sign); + tcg_gen_or_i64(tcg_ctx, o->out, o->out, o->in1); + return DISAS_NEXT; +} + +static DisasJumpType op_sll(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_shl_i64(tcg_ctx, o->out, o->in1, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_sra(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_sar_i64(tcg_ctx, o->out, o->in1, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_srl(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_shr_i64(tcg_ctx, o->out, o->in1, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_sfpc(tcg_ctx, tcg_ctx->cpu_env, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_sfas(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_sfas(tcg_ctx, tcg_ctx->cpu_env, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_srnm(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */ + tcg_gen_andi_i64(tcg_ctx, o->addr1, o->addr1, 0x3ull); + gen_helper_srnm(tcg_ctx, tcg_ctx->cpu_env, o->addr1); + return DISAS_NEXT; +} + +static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* Bits 0-55 are ignored. */ + tcg_gen_andi_i64(tcg_ctx, o->addr1, o->addr1, 0xffull); + gen_helper_srnm(tcg_ctx, tcg_ctx->cpu_env, o->addr1); + return DISAS_NEXT; +} + +static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); + + /* Bits other than 61-63 are ignored. 
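SRNMT sets the DFP rounding mode, the 3-bit FPC field written by the + deposit at bit offset 4 below. 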
*/ + tcg_gen_andi_i64(tcg_ctx, o->addr1, o->addr1, 0x7ull); + + /* No need to call a helper, we don't implement dfp */ + tcg_gen_ld32u_i64(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUS390XState, fpc)); + tcg_gen_deposit_i64(tcg_ctx, tmp, tmp, o->addr1, 4, 3); + tcg_gen_st32_i64(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUS390XState, fpc)); + + tcg_temp_free_i64(tcg_ctx, tmp); + return DISAS_NEXT; +} + +static DisasJumpType op_spm(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_extrl_i64_i32(tcg_ctx, cc_op, o->in1); + tcg_gen_extract_i32(tcg_ctx, cc_op, cc_op, 28, 2); + set_cc_static(s); + + tcg_gen_shri_i64(tcg_ctx, o->in1, o->in1, 24); + tcg_gen_deposit_i64(tcg_ctx, psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4); + return DISAS_NEXT; +} + +static DisasJumpType op_ectg(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int b1 = get_field(s, b1); + int d1 = get_field(s, d1); + int b2 = get_field(s, b2); + int d2 = get_field(s, d2); + int r3 = get_field(s, r3); + TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); + + /* fetch all operands first */ + o->in1 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_addi_i64(tcg_ctx, o->in1, regs[b1], d1); + o->in2 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_addi_i64(tcg_ctx, o->in2, regs[b2], d2); + o->addr1 = get_address(s, 0, r3, 0); + + /* load the third operand into r3 before modifying anything */ + tcg_gen_qemu_ld64(tcg_ctx, regs[r3], o->addr1, get_mem_index(s)); + + /* subtract CPU timer from first operand and store in GR0 */ + gen_helper_stpt(tcg_ctx, tmp, tcg_ctx->cpu_env); + tcg_gen_sub_i64(tcg_ctx, regs[0], o->in1, tmp); + + /* store second operand in GR1 */ + tcg_gen_mov_i64(tcg_ctx, regs[1], o->in2); + + tcg_temp_free_i64(tcg_ctx, tmp); + return DISAS_NEXT; +} + +static DisasJumpType op_spka(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_shri_i64(tcg_ctx, o->in2, o->in2, 4); + tcg_gen_deposit_i64(tcg_ctx, psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4); + return DISAS_NEXT; +} + +static DisasJumpType op_sske(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_sske(tcg_ctx, tcg_ctx->cpu_env, o->in1, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_ssm(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_deposit_i64(tcg_ctx, psw_mask, psw_mask, o->in2, 56, 8); + /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */ + return DISAS_PC_STALE_NOCHAIN; +} + +static DisasJumpType op_stap(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_ld32u_i64(tcg_ctx, o->out, tcg_ctx->cpu_env, offsetof(CPUS390XState, core_id)); + return DISAS_NEXT; +} + +static DisasJumpType op_stck(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_stck(tcg_ctx, o->out, tcg_ctx->cpu_env); + /* ??? We don't implement clock states. */ + gen_op_movi_cc(s, 0); + return DISAS_NEXT; +} + +static DisasJumpType op_stcke(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 c1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 c2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 todpr = tcg_temp_new_i64(tcg_ctx); + gen_helper_stck(tcg_ctx, c1, tcg_ctx->cpu_env); + /* 16-bit value stored in a uint32_t (only valid bits set) */ + tcg_gen_ld32u_i64(tcg_ctx, todpr, tcg_ctx->cpu_env, offsetof(CPUS390XState, todpr)); + /* Shift the 64-bit value into its place as a zero-extended + 104-bit value. 
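The extended format is a zero epoch-index byte, the 64-bit clock, and + the programmable field, split across the two doublewords stored below. 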
Note that "bit positions 64-103 are always + non-zero so that they compare differently to STCK"; we set + the least significant bit to 1. */ + tcg_gen_shli_i64(tcg_ctx, c2, c1, 56); + tcg_gen_shri_i64(tcg_ctx, c1, c1, 8); + tcg_gen_ori_i64(tcg_ctx, c2, c2, 0x10000); + tcg_gen_or_i64(tcg_ctx, c2, c2, todpr); + tcg_gen_qemu_st64(tcg_ctx, c1, o->in2, get_mem_index(s)); + tcg_gen_addi_i64(tcg_ctx, o->in2, o->in2, 8); + tcg_gen_qemu_st64(tcg_ctx, c2, o->in2, get_mem_index(s)); + tcg_temp_free_i64(tcg_ctx, c1); + tcg_temp_free_i64(tcg_ctx, c2); + tcg_temp_free_i64(tcg_ctx, todpr); + /* ??? We don't implement clock states. */ + gen_op_movi_cc(s, 0); + return DISAS_NEXT; +} + +static DisasJumpType op_sck(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_qemu_ld_i64(tcg_ctx, o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN); + gen_helper_sck(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->in1); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_sckc(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_sckc(tcg_ctx, tcg_ctx->cpu_env, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_sckpf(tcg_ctx, tcg_ctx->cpu_env, regs[0]); + return DISAS_NEXT; +} + +static DisasJumpType op_stckc(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_stckc(tcg_ctx, o->out, tcg_ctx->cpu_env); + return DISAS_NEXT; +} + +static DisasJumpType op_stctg(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 r1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); + TCGv_i32 r3 = tcg_const_i32(tcg_ctx, get_field(s, r3)); + gen_helper_stctg(tcg_ctx, tcg_ctx->cpu_env, r1, o->in2, r3); + tcg_temp_free_i32(tcg_ctx, r1); + tcg_temp_free_i32(tcg_ctx, r3); + return DISAS_NEXT; +} + +static DisasJumpType op_stctl(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 r1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); + TCGv_i32 r3 = tcg_const_i32(tcg_ctx, get_field(s, r3)); + gen_helper_stctl(tcg_ctx, tcg_ctx->cpu_env, r1, o->in2, r3); + tcg_temp_free_i32(tcg_ctx, r1); + tcg_temp_free_i32(tcg_ctx, r3); + return DISAS_NEXT; +} + +static DisasJumpType op_stidp(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_ld_i64(tcg_ctx, o->out, tcg_ctx->cpu_env, offsetof(CPUS390XState, cpuid)); + return DISAS_NEXT; +} + +static DisasJumpType op_spt(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_spt(tcg_ctx, tcg_ctx->cpu_env, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_stfl(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_stfl(tcg_ctx, tcg_ctx->cpu_env); + return DISAS_NEXT; +} + +static DisasJumpType op_stpt(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_stpt(tcg_ctx, o->out, tcg_ctx->cpu_env); + return DISAS_NEXT; +} + +static DisasJumpType op_stsi(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_stsi(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->in2, regs[0], regs[1]); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_spx(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_spx(tcg_ctx, tcg_ctx->cpu_env, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_xsch(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + 
gen_helper_xsch(tcg_ctx, tcg_ctx->cpu_env, regs[1]); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_csch(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_csch(tcg_ctx, tcg_ctx->cpu_env, regs[1]); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_hsch(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_hsch(tcg_ctx, tcg_ctx->cpu_env, regs[1]); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_msch(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_msch(tcg_ctx, tcg_ctx->cpu_env, regs[1], o->in2); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_rchp(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_rchp(tcg_ctx, tcg_ctx->cpu_env, regs[1]); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_rsch(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_rsch(tcg_ctx, tcg_ctx->cpu_env, regs[1]); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_sal(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_sal(tcg_ctx, tcg_ctx->cpu_env, regs[1]); + return DISAS_NEXT; +} + +static DisasJumpType op_schm(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_schm(tcg_ctx, tcg_ctx->cpu_env, regs[1], regs[2], o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_siga(DisasContext *s, DisasOps *o) +{ + /* From KVM code: Not provided, set CC = 3 for subchannel not operational */ + gen_op_movi_cc(s, 3); + return DISAS_NEXT; +} + +static DisasJumpType op_stcps(DisasContext *s, DisasOps *o) +{ + /* The instruction is suppressed if not provided. 
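+       We implement no channel-path status, so there is nothing to
+       store here.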
*/ + return DISAS_NEXT; +} + +static DisasJumpType op_ssch(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_ssch(tcg_ctx, tcg_ctx->cpu_env, regs[1], o->in2); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_stsch(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_stsch(tcg_ctx, tcg_ctx->cpu_env, regs[1], o->in2); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_stcrw(tcg_ctx, tcg_ctx->cpu_env, o->in2); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_tpi(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_tpi(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->addr1); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_tsch(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_tsch(tcg_ctx, tcg_ctx->cpu_env, regs[1], o->in2); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_chsc(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_chsc(tcg_ctx, tcg_ctx->cpu_env, o->in2); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_stpx(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_ld_i64(tcg_ctx, o->out, tcg_ctx->cpu_env, offsetof(CPUS390XState, psa)); + tcg_gen_andi_i64(tcg_ctx, o->out, o->out, 0x7fffe000); + return DISAS_NEXT; +} + +static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint64_t i2 = get_field(s, i2); + TCGv_i64 t; + + /* It is important to do what the instruction name says: STORE THEN. + If we let the output hook perform the store then if we fault and + restart, we'll have the wrong SYSTEM MASK in place. */ + t = tcg_temp_new_i64(tcg_ctx); + tcg_gen_shri_i64(tcg_ctx, t, psw_mask, 56); + tcg_gen_qemu_st8(tcg_ctx, t, o->addr1, get_mem_index(s)); + tcg_temp_free_i64(tcg_ctx, t); + + if (s->fields.op == 0xac) { + tcg_gen_andi_i64(tcg_ctx, psw_mask, psw_mask, + (i2 << 56) | 0x00ffffffffffffffull); + } else { + tcg_gen_ori_i64(tcg_ctx, psw_mask, psw_mask, i2 << 56); + } + + /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. 
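+       The system mask just changed, which can unmask pending
+       interrupts, so the TB must not be chained.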
*/ + return DISAS_PC_STALE_NOCHAIN; +} + +static DisasJumpType op_stura(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->addr1 = get_address(s, 0, get_field(s, r2), 0); + tcg_gen_qemu_st_tl(tcg_ctx, o->in1, o->addr1, MMU_REAL_IDX, s->insn->data); + + if (s->base.tb->flags & FLAG_MASK_PER) { + update_psw_addr(s); + gen_helper_per_store_real(tcg_ctx, tcg_ctx->cpu_env); + } + return DISAS_NEXT; +} + +static DisasJumpType op_stfle(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_stfle(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->in2); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_st8(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_qemu_st8(tcg_ctx, o->in1, o->in2, get_mem_index(s)); + return DISAS_NEXT; +} + +static DisasJumpType op_st16(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_qemu_st16(tcg_ctx, o->in1, o->in2, get_mem_index(s)); + return DISAS_NEXT; +} + +static DisasJumpType op_st32(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_qemu_st32(tcg_ctx, o->in1, o->in2, get_mem_index(s)); + return DISAS_NEXT; +} + +static DisasJumpType op_st64(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_qemu_st64(tcg_ctx, o->in1, o->in2, get_mem_index(s)); + return DISAS_NEXT; +} + +static DisasJumpType op_stam(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 r1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); + TCGv_i32 r3 = tcg_const_i32(tcg_ctx, get_field(s, r3)); + gen_helper_stam(tcg_ctx, tcg_ctx->cpu_env, r1, o->in2, r3); + tcg_temp_free_i32(tcg_ctx, r1); + tcg_temp_free_i32(tcg_ctx, r3); + return DISAS_NEXT; +} + +static DisasJumpType op_stcm(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int m3 = get_field(s, m3); + int pos, base = s->insn->data; + TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); + + pos = base + ctz32(m3) * 8; + switch (m3) { + case 0xf: + /* Effectively a 32-bit store. */ + tcg_gen_shri_i64(tcg_ctx, tmp, o->in1, pos); + tcg_gen_qemu_st32(tcg_ctx, tmp, o->in2, get_mem_index(s)); + break; + + case 0xc: + case 0x6: + case 0x3: + /* Effectively a 16-bit store. */ + tcg_gen_shri_i64(tcg_ctx, tmp, o->in1, pos); + tcg_gen_qemu_st16(tcg_ctx, tmp, o->in2, get_mem_index(s)); + break; + + case 0x8: + case 0x4: + case 0x2: + case 0x1: + /* Effectively an 8-bit store. */ + tcg_gen_shri_i64(tcg_ctx, tmp, o->in1, pos); + tcg_gen_qemu_st8(tcg_ctx, tmp, o->in2, get_mem_index(s)); + break; + + default: + /* This is going to be a sequence of shifts and stores. 
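+           For example, with the word variant (base 0) and m3 == 0xa,
+           this stores the byte at bits 31-24, advances the address,
+           then stores the byte at bits 15-8 into the next byte.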
*/ + pos = base + 32 - 8; + while (m3) { + if (m3 & 0x8) { + tcg_gen_shri_i64(tcg_ctx, tmp, o->in1, pos); + tcg_gen_qemu_st8(tcg_ctx, tmp, o->in2, get_mem_index(s)); + tcg_gen_addi_i64(tcg_ctx, o->in2, o->in2, 1); + } + m3 = (m3 << 1) & 0xf; + pos -= 8; + } + break; + } + tcg_temp_free_i64(tcg_ctx, tmp); + return DISAS_NEXT; +} + +static DisasJumpType op_stm(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int r1 = get_field(s, r1); + int r3 = get_field(s, r3); + int size = s->insn->data; + TCGv_i64 tsize = tcg_const_i64(tcg_ctx, size); + + while (1) { + if (size == 8) { + tcg_gen_qemu_st64(tcg_ctx, regs[r1], o->in2, get_mem_index(s)); + } else { + tcg_gen_qemu_st32(tcg_ctx, regs[r1], o->in2, get_mem_index(s)); + } + if (r1 == r3) { + break; + } + tcg_gen_add_i64(tcg_ctx, o->in2, o->in2, tsize); + r1 = (r1 + 1) & 15; + } + + tcg_temp_free_i64(tcg_ctx, tsize); + return DISAS_NEXT; +} + +static DisasJumpType op_stmh(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int r1 = get_field(s, r1); + int r3 = get_field(s, r3); + TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t4 = tcg_const_i64(tcg_ctx, 4); + TCGv_i64 t32 = tcg_const_i64(tcg_ctx, 32); + + while (1) { + tcg_gen_shl_i64(tcg_ctx, t, regs[r1], t32); + tcg_gen_qemu_st32(tcg_ctx, t, o->in2, get_mem_index(s)); + if (r1 == r3) { + break; + } + tcg_gen_add_i64(tcg_ctx, o->in2, o->in2, t4); + r1 = (r1 + 1) & 15; + } + + tcg_temp_free_i64(tcg_ctx, t); + tcg_temp_free_i64(tcg_ctx, t4); + tcg_temp_free_i64(tcg_ctx, t32); + return DISAS_NEXT; +} + +static DisasJumpType op_stpq(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) { + gen_helper_stpq(tcg_ctx, tcg_ctx->cpu_env, o->in2, o->out2, o->out); + } else if (HAVE_ATOMIC128) { + gen_helper_stpq_parallel(tcg_ctx, tcg_ctx->cpu_env, o->in2, o->out2, o->out); + } else { + gen_helper_exit_atomic(tcg_ctx, tcg_ctx->cpu_env); + return DISAS_NORETURN; + } + return DISAS_NEXT; +} + +static DisasJumpType op_srst(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 r1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); + TCGv_i32 r2 = tcg_const_i32(tcg_ctx, get_field(s, r2)); + + gen_helper_srst(tcg_ctx, tcg_ctx->cpu_env, r1, r2); + + tcg_temp_free_i32(tcg_ctx, r1); + tcg_temp_free_i32(tcg_ctx, r2); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_srstu(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 r1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); + TCGv_i32 r2 = tcg_const_i32(tcg_ctx, get_field(s, r2)); + + gen_helper_srstu(tcg_ctx, tcg_ctx->cpu_env, r1, r2); + + tcg_temp_free_i32(tcg_ctx, r1); + tcg_temp_free_i32(tcg_ctx, r2); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_sub(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_sub_i64(tcg_ctx, o->out, o->in1, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_subb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + DisasCompare cmp; + TCGv_i64 borrow; + + tcg_gen_sub_i64(tcg_ctx, o->out, o->in1, o->in2); + + /* The !borrow flag is the msb of CC. Since we want the inverse of + that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. 
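+       (The mask follows BRANCH ON CONDITION: bit 8 selects CC 0,
+       4 selects CC 1, 2 selects CC 2, and 1 selects CC 3.)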
*/ + disas_jcc(s, &cmp, 8 | 4); + borrow = tcg_temp_new_i64(tcg_ctx); + if (cmp.is_64) { + tcg_gen_setcond_i64(tcg_ctx, cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b); + } else { + TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); + tcg_gen_setcond_i32(tcg_ctx, cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b); + tcg_gen_extu_i32_i64(tcg_ctx, borrow, t); + tcg_temp_free_i32(tcg_ctx, t); + } + free_compare(tcg_ctx, &cmp); + + tcg_gen_sub_i64(tcg_ctx, o->out, o->out, borrow); + tcg_temp_free_i64(tcg_ctx, borrow); + return DISAS_NEXT; +} + +static DisasJumpType op_svc(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 t; + + update_psw_addr(s); + update_cc_op(s); + + t = tcg_const_i32(tcg_ctx, get_field(s, i1) & 0xff); + tcg_gen_st_i32(tcg_ctx, t, tcg_ctx->cpu_env, offsetof(CPUS390XState, int_svc_code)); + tcg_temp_free_i32(tcg_ctx, t); + + t = tcg_const_i32(tcg_ctx, s->ilen); + tcg_gen_st_i32(tcg_ctx, t, tcg_ctx->cpu_env, offsetof(CPUS390XState, int_svc_ilen)); + tcg_temp_free_i32(tcg_ctx, t); + + gen_exception(tcg_ctx, EXCP_SVC); + return DISAS_NORETURN; +} + +static DisasJumpType op_tam(DisasContext *s, DisasOps *o) +{ + int cc = 0; + + cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0; + cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0; + gen_op_movi_cc(s, cc); + return DISAS_NEXT; +} + +static DisasJumpType op_tceb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_tceb(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->in1, o->in2); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_tcdb(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->in1, o->in2); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_tcxb(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->out, o->out2, o->in2); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_testblock(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_testblock(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->in2); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_tprot(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_tprot(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->addr1, o->in2); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_tp(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 l1 = tcg_const_i32(tcg_ctx, get_field(s, l1) + 1); + gen_helper_tp(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->addr1, l1); + tcg_temp_free_i32(tcg_ctx, l1); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_tr(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 l = tcg_const_i32(tcg_ctx, get_field(s, l1)); + gen_helper_tr(tcg_ctx, tcg_ctx->cpu_env, l, o->addr1, o->in2); + tcg_temp_free_i32(tcg_ctx, l); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_tre(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_tre(tcg_ctx, o->out, tcg_ctx->cpu_env, o->out, o->out2, o->in2); + return_low128(tcg_ctx, o->out2); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_trt(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 l = tcg_const_i32(tcg_ctx, get_field(s, l1)); + gen_helper_trt(tcg_ctx, cc_op, tcg_ctx->cpu_env, l, o->addr1, o->in2); + tcg_temp_free_i32(tcg_ctx, 
l); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_trtr(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 l = tcg_const_i32(tcg_ctx, get_field(s, l1)); + gen_helper_trtr(tcg_ctx, cc_op, tcg_ctx->cpu_env, l, o->addr1, o->in2); + tcg_temp_free_i32(tcg_ctx, l); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_trXX(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 r1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); + TCGv_i32 r2 = tcg_const_i32(tcg_ctx, get_field(s, r2)); + TCGv_i32 sizes = tcg_const_i32(tcg_ctx, s->insn->opc & 3); + TCGv_i32 tst = tcg_temp_new_i32(tcg_ctx); + int m3 = get_field(s, m3); + + if (!s390_has_feat(s->uc, S390_FEAT_ETF2_ENH)) { + m3 = 0; + } + if (m3 & 1) { + tcg_gen_movi_i32(tcg_ctx, tst, -1); + } else { + tcg_gen_extrl_i64_i32(tcg_ctx, tst, regs[0]); + if (s->insn->opc & 3) { + tcg_gen_ext8u_i32(tcg_ctx, tst, tst); + } else { + tcg_gen_ext16u_i32(tcg_ctx, tst, tst); + } + } + gen_helper_trXX(tcg_ctx, cc_op, tcg_ctx->cpu_env, r1, r2, tst, sizes); + + tcg_temp_free_i32(tcg_ctx, r1); + tcg_temp_free_i32(tcg_ctx, r2); + tcg_temp_free_i32(tcg_ctx, sizes); + tcg_temp_free_i32(tcg_ctx, tst); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_ts(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 t1 = tcg_const_i32(tcg_ctx, 0xff); + tcg_gen_atomic_xchg_i32(tcg_ctx, t1, o->in2, t1, get_mem_index(s), MO_UB); + tcg_gen_extract_i32(tcg_ctx, cc_op, t1, 7, 1); + tcg_temp_free_i32(tcg_ctx, t1); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_unpk(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 l = tcg_const_i32(tcg_ctx, get_field(s, l1)); + gen_helper_unpk(tcg_ctx, tcg_ctx->cpu_env, l, o->addr1, o->in2); + tcg_temp_free_i32(tcg_ctx, l); + return DISAS_NEXT; +} + +static DisasJumpType op_unpka(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int l1 = get_field(s, l1) + 1; + TCGv_i32 l; + + /* The length must not exceed 32 bytes. */ + if (l1 > 32) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + l = tcg_const_i32(tcg_ctx, l1); + gen_helper_unpka(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->addr1, l, o->in2); + tcg_temp_free_i32(tcg_ctx, l); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_unpku(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int l1 = get_field(s, l1) + 1; + TCGv_i32 l; + + /* The length must be even and should not exceed 64 bytes. */ + if ((l1 & 1) || (l1 > 64)) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + l = tcg_const_i32(tcg_ctx, l1); + gen_helper_unpku(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->addr1, l, o->in2); + tcg_temp_free_i32(tcg_ctx, l); + set_cc_static(s); + return DISAS_NEXT; +} + + +static DisasJumpType op_xc(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int d1 = get_field(s, d1); + int d2 = get_field(s, d2); + int b1 = get_field(s, b1); + int b2 = get_field(s, b2); + int l = get_field(s, l1); + TCGv_i32 t32; + + o->addr1 = get_address(s, 0, b1, d1); + + /* If the addresses are identical, this is a store/memset of zero. 
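+       (XC of a field with itself is the classic way to clear storage
+       on s390x, so this fast path is worth having.)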
*/ + if (b1 == b2 && d1 == d2 && (l + 1) <= 32) { + o->in2 = tcg_const_i64(tcg_ctx, 0); + + l++; + while (l >= 8) { + tcg_gen_qemu_st64(tcg_ctx, o->in2, o->addr1, get_mem_index(s)); + l -= 8; + if (l > 0) { + tcg_gen_addi_i64(tcg_ctx, o->addr1, o->addr1, 8); + } + } + if (l >= 4) { + tcg_gen_qemu_st32(tcg_ctx, o->in2, o->addr1, get_mem_index(s)); + l -= 4; + if (l > 0) { + tcg_gen_addi_i64(tcg_ctx, o->addr1, o->addr1, 4); + } + } + if (l >= 2) { + tcg_gen_qemu_st16(tcg_ctx, o->in2, o->addr1, get_mem_index(s)); + l -= 2; + if (l > 0) { + tcg_gen_addi_i64(tcg_ctx, o->addr1, o->addr1, 2); + } + } + if (l) { + tcg_gen_qemu_st8(tcg_ctx, o->in2, o->addr1, get_mem_index(s)); + } + gen_op_movi_cc(s, 0); + return DISAS_NEXT; + } + + /* But in general we'll defer to a helper. */ + o->in2 = get_address(s, 0, b2, d2); + t32 = tcg_const_i32(tcg_ctx, l); + gen_helper_xc(tcg_ctx, cc_op, tcg_ctx->cpu_env, t32, o->addr1, o->in2); + tcg_temp_free_i32(tcg_ctx, t32); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_xor(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_xor_i64(tcg_ctx, o->out, o->in1, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_xori(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int shift = s->insn->data & 0xff; + int size = s->insn->data >> 8; + uint64_t mask = ((1ull << size) - 1) << shift; + + assert(!o->g_in2); + tcg_gen_shli_i64(tcg_ctx, o->in2, o->in2, shift); + tcg_gen_xor_i64(tcg_ctx, o->out, o->in1, o->in2); + + /* Produce the CC from only the bits manipulated. */ + tcg_gen_andi_i64(tcg_ctx, cc_dst, o->out, mask); + set_cc_nz_u64(s, cc_dst); + return DISAS_NEXT; +} + +static DisasJumpType op_xi(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in1 = tcg_temp_new_i64(tcg_ctx); + + if (!s390_has_feat(s->uc, S390_FEAT_INTERLOCKED_ACCESS_2)) { + tcg_gen_qemu_ld_tl(tcg_ctx, o->in1, o->addr1, get_mem_index(s), s->insn->data); + } else { + /* Perform the atomic operation in memory. */ + tcg_gen_atomic_fetch_xor_i64(tcg_ctx, o->in1, o->addr1, o->in2, get_mem_index(s), + s->insn->data); + } + + /* Recompute also for atomic case: needed for setting CC. 
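+       The atomic op returned the old memory value in in1, so XORing
+       it with in2 again reproduces the value that was stored.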
*/ + tcg_gen_xor_i64(tcg_ctx, o->out, o->in1, o->in2); + + if (!s390_has_feat(s->uc, S390_FEAT_INTERLOCKED_ACCESS_2)) { + tcg_gen_qemu_st_tl(tcg_ctx, o->out, o->addr1, get_mem_index(s), s->insn->data); + } + return DISAS_NEXT; +} + +static DisasJumpType op_zero(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->out = tcg_const_i64(tcg_ctx, 0); + return DISAS_NEXT; +} + +static DisasJumpType op_zero2(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->out = tcg_const_i64(tcg_ctx, 0); + o->out2 = o->out; + o->g_out2 = true; + return DISAS_NEXT; +} + +static DisasJumpType op_clp(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 r2 = tcg_const_i32(tcg_ctx, get_field(s, r2)); + + gen_helper_clp(tcg_ctx, tcg_ctx->cpu_env, r2); + tcg_temp_free_i32(tcg_ctx, r2); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 r1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); + TCGv_i32 r2 = tcg_const_i32(tcg_ctx, get_field(s, r2)); + + gen_helper_pcilg(tcg_ctx, tcg_ctx->cpu_env, r1, r2); + tcg_temp_free_i32(tcg_ctx, r1); + tcg_temp_free_i32(tcg_ctx, r2); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 r1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); + TCGv_i32 r2 = tcg_const_i32(tcg_ctx, get_field(s, r2)); + + gen_helper_pcistg(tcg_ctx, tcg_ctx->cpu_env, r1, r2); + tcg_temp_free_i32(tcg_ctx, r1); + tcg_temp_free_i32(tcg_ctx, r2); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 r1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); + TCGv_i32 ar = tcg_const_i32(tcg_ctx, get_field(s, b2)); + + gen_helper_stpcifc(tcg_ctx, tcg_ctx->cpu_env, r1, o->addr1, ar); + tcg_temp_free_i32(tcg_ctx, ar); + tcg_temp_free_i32(tcg_ctx, r1); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_sic(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_helper_sic(tcg_ctx, tcg_ctx->cpu_env, o->in1, o->in2); + return DISAS_NEXT; +} + +static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 r1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); + TCGv_i32 r2 = tcg_const_i32(tcg_ctx, get_field(s, r2)); + + gen_helper_rpcit(tcg_ctx, tcg_ctx->cpu_env, r1, r2); + tcg_temp_free_i32(tcg_ctx, r1); + tcg_temp_free_i32(tcg_ctx, r2); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 r1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); + TCGv_i32 r3 = tcg_const_i32(tcg_ctx, get_field(s, r3)); + TCGv_i32 ar = tcg_const_i32(tcg_ctx, get_field(s, b2)); + + gen_helper_pcistb(tcg_ctx, tcg_ctx->cpu_env, r1, r3, o->addr1, ar); + tcg_temp_free_i32(tcg_ctx, ar); + tcg_temp_free_i32(tcg_ctx, r1); + tcg_temp_free_i32(tcg_ctx, r3); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 r1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); + TCGv_i32 ar = tcg_const_i32(tcg_ctx, get_field(s, b2)); + + gen_helper_mpcifc(tcg_ctx, tcg_ctx->cpu_env, r1, o->addr1, ar); + tcg_temp_free_i32(tcg_ctx, ar); + tcg_temp_free_i32(tcg_ctx, r1); + set_cc_static(s); + return 
DISAS_NEXT; +} + +#include "translate_vx.inc.c" + +/* ====================================================================== */ +/* The "Cc OUTput" generators. Given the generated output (and in some cases + the original inputs), update the various cc data structures in order to + be able to compute the new condition code. */ + +static void cout_abs32(DisasContext *s, DisasOps *o) +{ + gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out); +} + +static void cout_abs64(DisasContext *s, DisasOps *o) +{ + gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out); +} + +static void cout_adds32(DisasContext *s, DisasOps *o) +{ + gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out); +} + +static void cout_adds64(DisasContext *s, DisasOps *o) +{ + gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out); +} + +static void cout_addu32(DisasContext *s, DisasOps *o) +{ + gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out); +} + +static void cout_addu64(DisasContext *s, DisasOps *o) +{ + gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out); +} + +static void cout_addc32(DisasContext *s, DisasOps *o) +{ + gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out); +} + +static void cout_addc64(DisasContext *s, DisasOps *o) +{ + gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out); +} + +static void cout_cmps32(DisasContext *s, DisasOps *o) +{ + gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2); +} + +static void cout_cmps64(DisasContext *s, DisasOps *o) +{ + gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2); +} + +static void cout_cmpu32(DisasContext *s, DisasOps *o) +{ + gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2); +} + +static void cout_cmpu64(DisasContext *s, DisasOps *o) +{ + gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2); +} + +static void cout_f32(DisasContext *s, DisasOps *o) +{ + gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out); +} + +static void cout_f64(DisasContext *s, DisasOps *o) +{ + gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out); +} + +static void cout_f128(DisasContext *s, DisasOps *o) +{ + gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2); +} + +static void cout_nabs32(DisasContext *s, DisasOps *o) +{ + gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out); +} + +static void cout_nabs64(DisasContext *s, DisasOps *o) +{ + gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out); +} + +static void cout_neg32(DisasContext *s, DisasOps *o) +{ + gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out); +} + +static void cout_neg64(DisasContext *s, DisasOps *o) +{ + gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out); +} + +static void cout_nz32(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_ext32u_i64(tcg_ctx, cc_dst, o->out); + gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst); +} + +static void cout_nz64(DisasContext *s, DisasOps *o) +{ + gen_op_update1_cc_i64(s, CC_OP_NZ, o->out); +} + +static void cout_s32(DisasContext *s, DisasOps *o) +{ + gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out); +} + +static void cout_s64(DisasContext *s, DisasOps *o) +{ + gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out); +} + +static void cout_subs32(DisasContext *s, DisasOps *o) +{ + gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out); +} + +static void cout_subs64(DisasContext *s, DisasOps *o) +{ + gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out); +} + +static void cout_subu32(DisasContext *s, DisasOps *o) +{ + gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out); +} + +static void 
cout_subu64(DisasContext *s, DisasOps *o) +{ + gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out); +} + +static void cout_subb32(DisasContext *s, DisasOps *o) +{ + gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out); +} + +static void cout_subb64(DisasContext *s, DisasOps *o) +{ + gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out); +} + +static void cout_tm32(DisasContext *s, DisasOps *o) +{ + gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2); +} + +static void cout_tm64(DisasContext *s, DisasOps *o) +{ + gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2); +} + +/* ====================================================================== */ +/* The "PREParation" generators. These initialize the DisasOps.OUT fields + with the TCG register to which we will write. Used in combination with + the "wout" generators, in some cases we need a new temporary, and in + some cases we can write to a TCG global. */ + +static void prep_new(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->out = tcg_temp_new_i64(tcg_ctx); +} +#define SPEC_prep_new 0 + +static void prep_new_P(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->out = tcg_temp_new_i64(tcg_ctx); + o->out2 = tcg_temp_new_i64(tcg_ctx); +} +#define SPEC_prep_new_P 0 + +static void prep_r1(DisasContext *s, DisasOps *o) +{ + o->out = regs[get_field(s, r1)]; + o->g_out = true; +} +#define SPEC_prep_r1 0 + +static void prep_r1_P(DisasContext *s, DisasOps *o) +{ + int r1 = get_field(s, r1); + o->out = regs[r1]; + o->out2 = regs[r1 + 1]; + o->g_out = o->g_out2 = true; +} +#define SPEC_prep_r1_P SPEC_r1_even + +/* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */ +static void prep_x1(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->out = load_freg(tcg_ctx, get_field(s, r1)); + o->out2 = load_freg(tcg_ctx, get_field(s, r1) + 2); +} +#define SPEC_prep_x1 SPEC_r1_f128 + +/* ====================================================================== */ +/* The "Write OUTput" generators. These generally perform some non-trivial + copy of data to TCG globals, or to main memory. The trivial cases are + generally handled by having a "prep" generator install the TCG global + as the destination of the operation. 
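+   Each wout_* pairs with a SPEC_wout_* mask so that operand
+   constraints (e.g. the even register pairs required by the _P
+   variants) are checked before the insn is accepted.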
*/ + +static void wout_r1(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + store_reg(tcg_ctx, get_field(s, r1), o->out); +} +#define SPEC_wout_r1 0 + +static void wout_r1_8(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int r1 = get_field(s, r1); + tcg_gen_deposit_i64(tcg_ctx, regs[r1], regs[r1], o->out, 0, 8); +} +#define SPEC_wout_r1_8 0 + +static void wout_r1_16(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int r1 = get_field(s, r1); + tcg_gen_deposit_i64(tcg_ctx, regs[r1], regs[r1], o->out, 0, 16); +} +#define SPEC_wout_r1_16 0 + +static void wout_r1_32(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + store_reg32_i64(tcg_ctx, get_field(s, r1), o->out); +} +#define SPEC_wout_r1_32 0 + +static void wout_r1_32h(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + store_reg32h_i64(tcg_ctx, get_field(s, r1), o->out); +} +#define SPEC_wout_r1_32h 0 + +static void wout_r1_P32(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int r1 = get_field(s, r1); + store_reg32_i64(tcg_ctx, r1, o->out); + store_reg32_i64(tcg_ctx, r1 + 1, o->out2); +} +#define SPEC_wout_r1_P32 SPEC_r1_even + +static void wout_r1_D32(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int r1 = get_field(s, r1); + store_reg32_i64(tcg_ctx, r1 + 1, o->out); + tcg_gen_shri_i64(tcg_ctx, o->out, o->out, 32); + store_reg32_i64(tcg_ctx, r1, o->out); +} +#define SPEC_wout_r1_D32 SPEC_r1_even + +static void wout_r3_P32(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int r3 = get_field(s, r3); + store_reg32_i64(tcg_ctx, r3, o->out); + store_reg32_i64(tcg_ctx, r3 + 1, o->out2); +} +#define SPEC_wout_r3_P32 SPEC_r3_even + +static void wout_r3_P64(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int r3 = get_field(s, r3); + store_reg(tcg_ctx, r3, o->out); + store_reg(tcg_ctx, r3 + 1, o->out2); +} +#define SPEC_wout_r3_P64 SPEC_r3_even + +static void wout_e1(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + store_freg32_i64(tcg_ctx, get_field(s, r1), o->out); +} +#define SPEC_wout_e1 0 + +static void wout_f1(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + store_freg(tcg_ctx, get_field(s, r1), o->out); +} +#define SPEC_wout_f1 0 + +static void wout_x1(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int f1 = get_field(s, r1); + store_freg(tcg_ctx, f1, o->out); + store_freg(tcg_ctx, f1 + 2, o->out2); +} +#define SPEC_wout_x1 SPEC_r1_f128 + +static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o) +{ + if (get_field(s, r1) != get_field(s, r2)) { + TCGContext *tcg_ctx = s->uc->tcg_ctx; + store_reg32_i64(tcg_ctx, get_field(s, r1), o->out); + } +} +#define SPEC_wout_cond_r1r2_32 0 + +static void wout_cond_e1e2(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (get_field(s, r1) != get_field(s, r2)) { + store_freg32_i64(tcg_ctx, get_field(s, r1), o->out); + } +} +#define SPEC_wout_cond_e1e2 0 + +static void wout_m1_8(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_qemu_st8(tcg_ctx, o->out, o->addr1, get_mem_index(s)); +} +#define SPEC_wout_m1_8 0 + +static void wout_m1_16(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_qemu_st16(tcg_ctx, o->out, o->addr1, get_mem_index(s)); +} +#define SPEC_wout_m1_16 0 + +static void wout_m1_16a(DisasContext 
*s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_qemu_st_tl(tcg_ctx, o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN); +} +#define SPEC_wout_m1_16a 0 + +static void wout_m1_32(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_qemu_st32(tcg_ctx, o->out, o->addr1, get_mem_index(s)); +} +#define SPEC_wout_m1_32 0 + +static void wout_m1_32a(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_qemu_st_tl(tcg_ctx, o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN); +} +#define SPEC_wout_m1_32a 0 + +static void wout_m1_64(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_qemu_st64(tcg_ctx, o->out, o->addr1, get_mem_index(s)); +} +#define SPEC_wout_m1_64 0 + +static void wout_m1_64a(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_qemu_st_i64(tcg_ctx, o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN); +} +#define SPEC_wout_m1_64a 0 + +static void wout_m2_32(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_qemu_st32(tcg_ctx, o->out, o->in2, get_mem_index(s)); +} +#define SPEC_wout_m2_32 0 + +static void wout_in2_r1(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + store_reg(tcg_ctx, get_field(s, r1), o->in2); +} +#define SPEC_wout_in2_r1 0 + +static void wout_in2_r1_32(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + store_reg32_i64(tcg_ctx, get_field(s, r1), o->in2); +} +#define SPEC_wout_in2_r1_32 0 + +/* ====================================================================== */ +/* The "INput 1" generators. These load the first operand to an insn. */ + +static void in1_r1(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in1 = load_reg(tcg_ctx, get_field(s, r1)); +} +#define SPEC_in1_r1 0 + +static void in1_r1_o(DisasContext *s, DisasOps *o) +{ + o->in1 = regs[get_field(s, r1)]; + o->g_in1 = true; +} +#define SPEC_in1_r1_o 0 + +static void in1_r1_32s(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in1 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ext32s_i64(tcg_ctx, o->in1, regs[get_field(s, r1)]); +} +#define SPEC_in1_r1_32s 0 + +static void in1_r1_32u(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in1 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ext32u_i64(tcg_ctx, o->in1, regs[get_field(s, r1)]); +} +#define SPEC_in1_r1_32u 0 + +static void in1_r1_sr32(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in1 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_shri_i64(tcg_ctx, o->in1, regs[get_field(s, r1)], 32); +} +#define SPEC_in1_r1_sr32 0 + +static void in1_r1p1(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in1 = load_reg(tcg_ctx, get_field(s, r1) + 1); +} +#define SPEC_in1_r1p1 SPEC_r1_even + +static void in1_r1p1_32s(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in1 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ext32s_i64(tcg_ctx, o->in1, regs[get_field(s, r1) + 1]); +} +#define SPEC_in1_r1p1_32s SPEC_r1_even + +static void in1_r1p1_32u(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in1 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ext32u_i64(tcg_ctx, o->in1, regs[get_field(s, r1) + 1]); +} +#define SPEC_in1_r1p1_32u SPEC_r1_even + +static void in1_r1_D32(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int r1 = get_field(s, r1); + o->in1 = 
tcg_temp_new_i64(tcg_ctx); + tcg_gen_concat32_i64(tcg_ctx, o->in1, regs[r1 + 1], regs[r1]); +} +#define SPEC_in1_r1_D32 SPEC_r1_even + +static void in1_r2(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in1 = load_reg(tcg_ctx, get_field(s, r2)); +} +#define SPEC_in1_r2 0 + +static void in1_r2_sr32(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in1 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_shri_i64(tcg_ctx, o->in1, regs[get_field(s, r2)], 32); +} +#define SPEC_in1_r2_sr32 0 + +static void in1_r3(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in1 = load_reg(tcg_ctx, get_field(s, r3)); +} +#define SPEC_in1_r3 0 + +static void in1_r3_o(DisasContext *s, DisasOps *o) +{ + o->in1 = regs[get_field(s, r3)]; + o->g_in1 = true; +} +#define SPEC_in1_r3_o 0 + +static void in1_r3_32s(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in1 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ext32s_i64(tcg_ctx, o->in1, regs[get_field(s, r3)]); +} +#define SPEC_in1_r3_32s 0 + +static void in1_r3_32u(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in1 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ext32u_i64(tcg_ctx, o->in1, regs[get_field(s, r3)]); +} +#define SPEC_in1_r3_32u 0 + +static void in1_r3_D32(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int r3 = get_field(s, r3); + o->in1 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_concat32_i64(tcg_ctx, o->in1, regs[r3 + 1], regs[r3]); +} +#define SPEC_in1_r3_D32 SPEC_r3_even + +static void in1_e1(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in1 = load_freg32_i64(tcg_ctx, get_field(s, r1)); +} +#define SPEC_in1_e1 0 + +static void in1_f1(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in1 = load_freg(tcg_ctx, get_field(s, r1)); +} +#define SPEC_in1_f1 0 + +/* Load the high double word of an extended (128-bit) format FP number */ +static void in1_x2h(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in1 = load_freg(tcg_ctx, get_field(s, r2)); +} +#define SPEC_in1_x2h SPEC_r2_f128 + +static void in1_f3(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in1 = load_freg(tcg_ctx, get_field(s, r3)); +} +#define SPEC_in1_f3 0 + +static void in1_la1(DisasContext *s, DisasOps *o) +{ + o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1)); +} +#define SPEC_in1_la1 0 + +static void in1_la2(DisasContext *s, DisasOps *o) +{ + int x2 = have_field(s, x2) ? 
get_field(s, x2) : 0; + o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2)); +} +#define SPEC_in1_la2 0 + +static void in1_m1_8u(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + in1_la1(s, o); + o->in1 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_qemu_ld8u(tcg_ctx, o->in1, o->addr1, get_mem_index(s)); +} +#define SPEC_in1_m1_8u 0 + +static void in1_m1_16s(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + in1_la1(s, o); + o->in1 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_qemu_ld16s(tcg_ctx, o->in1, o->addr1, get_mem_index(s)); +} +#define SPEC_in1_m1_16s 0 + +static void in1_m1_16u(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + in1_la1(s, o); + o->in1 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_qemu_ld16u(tcg_ctx, o->in1, o->addr1, get_mem_index(s)); +} +#define SPEC_in1_m1_16u 0 + +static void in1_m1_32s(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + in1_la1(s, o); + o->in1 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_qemu_ld32s(tcg_ctx, o->in1, o->addr1, get_mem_index(s)); +} +#define SPEC_in1_m1_32s 0 + +static void in1_m1_32u(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + in1_la1(s, o); + o->in1 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_qemu_ld32u(tcg_ctx, o->in1, o->addr1, get_mem_index(s)); +} +#define SPEC_in1_m1_32u 0 + +static void in1_m1_64(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + in1_la1(s, o); + o->in1 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_qemu_ld64(tcg_ctx, o->in1, o->addr1, get_mem_index(s)); +} +#define SPEC_in1_m1_64 0 + +/* ====================================================================== */ +/* The "INput 2" generators. These load the second operand to an insn. */ + +static void in2_r1_o(DisasContext *s, DisasOps *o) +{ + o->in2 = regs[get_field(s, r1)]; + o->g_in2 = true; +} +#define SPEC_in2_r1_o 0 + +static void in2_r1_16u(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in2 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ext16u_i64(tcg_ctx, o->in2, regs[get_field(s, r1)]); +} +#define SPEC_in2_r1_16u 0 + +static void in2_r1_32u(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in2 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ext32u_i64(tcg_ctx, o->in2, regs[get_field(s, r1)]); +} +#define SPEC_in2_r1_32u 0 + +static void in2_r1_D32(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int r1 = get_field(s, r1); + o->in2 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_concat32_i64(tcg_ctx, o->in2, regs[r1 + 1], regs[r1]); +} +#define SPEC_in2_r1_D32 SPEC_r1_even + +static void in2_r2(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in2 = load_reg(tcg_ctx, get_field(s, r2)); +} +#define SPEC_in2_r2 0 + +static void in2_r2_o(DisasContext *s, DisasOps *o) +{ + o->in2 = regs[get_field(s, r2)]; + o->g_in2 = true; +} +#define SPEC_in2_r2_o 0 + +static void in2_r2_nz(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int r2 = get_field(s, r2); + if (r2 != 0) { + o->in2 = load_reg(tcg_ctx, r2); + } +} +#define SPEC_in2_r2_nz 0 + +static void in2_r2_8s(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in2 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ext8s_i64(tcg_ctx, o->in2, regs[get_field(s, r2)]); +} +#define SPEC_in2_r2_8s 0 + +static void in2_r2_8u(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in2 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ext8u_i64(tcg_ctx, 
o->in2, regs[get_field(s, r2)]); +} +#define SPEC_in2_r2_8u 0 + +static void in2_r2_16s(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in2 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ext16s_i64(tcg_ctx, o->in2, regs[get_field(s, r2)]); +} +#define SPEC_in2_r2_16s 0 + +static void in2_r2_16u(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in2 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ext16u_i64(tcg_ctx, o->in2, regs[get_field(s, r2)]); +} +#define SPEC_in2_r2_16u 0 + +static void in2_r3(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in2 = load_reg(tcg_ctx, get_field(s, r3)); +} +#define SPEC_in2_r3 0 + +static void in2_r3_sr32(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in2 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_shri_i64(tcg_ctx, o->in2, regs[get_field(s, r3)], 32); +} +#define SPEC_in2_r3_sr32 0 + +static void in2_r3_32u(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in2 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ext32u_i64(tcg_ctx, o->in2, regs[get_field(s, r3)]); +} +#define SPEC_in2_r3_32u 0 + +static void in2_r2_32s(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in2 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ext32s_i64(tcg_ctx, o->in2, regs[get_field(s, r2)]); +} +#define SPEC_in2_r2_32s 0 + +static void in2_r2_32u(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in2 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ext32u_i64(tcg_ctx, o->in2, regs[get_field(s, r2)]); +} +#define SPEC_in2_r2_32u 0 + +static void in2_r2_sr32(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in2 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_shri_i64(tcg_ctx, o->in2, regs[get_field(s, r2)], 32); +} +#define SPEC_in2_r2_sr32 0 + +static void in2_e2(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in2 = load_freg32_i64(tcg_ctx, get_field(s, r2)); +} +#define SPEC_in2_e2 0 + +static void in2_f2(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in2 = load_freg(tcg_ctx, get_field(s, r2)); +} +#define SPEC_in2_f2 0 + +/* Load the low double word of an extended (128-bit) format FP number */ +static void in2_x2l(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in2 = load_freg(tcg_ctx, get_field(s, r2) + 2); +} +#define SPEC_in2_x2l SPEC_r2_f128 + +static void in2_ra2(DisasContext *s, DisasOps *o) +{ + o->in2 = get_address(s, 0, get_field(s, r2), 0); +} +#define SPEC_in2_ra2 0 + +static void in2_a2(DisasContext *s, DisasOps *o) +{ + int x2 = have_field(s, x2) ? 
get_field(s, x2) : 0; + o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2)); +} +#define SPEC_in2_a2 0 + +static void in2_ri2(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in2 = tcg_const_i64(tcg_ctx, s->base.pc_next + (int64_t)get_field(s, i2) * 2); +} +#define SPEC_in2_ri2 0 + +static void in2_sh32(DisasContext *s, DisasOps *o) +{ + help_l2_shift(s, o, 31); +} +#define SPEC_in2_sh32 0 + +static void in2_sh64(DisasContext *s, DisasOps *o) +{ + help_l2_shift(s, o, 63); +} +#define SPEC_in2_sh64 0 + +static void in2_m2_8u(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + in2_a2(s, o); + tcg_gen_qemu_ld8u(tcg_ctx, o->in2, o->in2, get_mem_index(s)); +} +#define SPEC_in2_m2_8u 0 + +static void in2_m2_16s(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + in2_a2(s, o); + tcg_gen_qemu_ld16s(tcg_ctx, o->in2, o->in2, get_mem_index(s)); +} +#define SPEC_in2_m2_16s 0 + +static void in2_m2_16u(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + in2_a2(s, o); + tcg_gen_qemu_ld16u(tcg_ctx, o->in2, o->in2, get_mem_index(s)); +} +#define SPEC_in2_m2_16u 0 + +static void in2_m2_32s(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + in2_a2(s, o); + tcg_gen_qemu_ld32s(tcg_ctx, o->in2, o->in2, get_mem_index(s)); +} +#define SPEC_in2_m2_32s 0 + +static void in2_m2_32u(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + in2_a2(s, o); + tcg_gen_qemu_ld32u(tcg_ctx, o->in2, o->in2, get_mem_index(s)); +} +#define SPEC_in2_m2_32u 0 + +static void in2_m2_32ua(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + in2_a2(s, o); + tcg_gen_qemu_ld_tl(tcg_ctx, o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN); +} +#define SPEC_in2_m2_32ua 0 + +static void in2_m2_64(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + in2_a2(s, o); + tcg_gen_qemu_ld64(tcg_ctx, o->in2, o->in2, get_mem_index(s)); +} +#define SPEC_in2_m2_64 0 + +static void in2_m2_64a(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + in2_a2(s, o); + tcg_gen_qemu_ld_i64(tcg_ctx, o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN); +} +#define SPEC_in2_m2_64a 0 + +static void in2_mri2_16u(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + in2_ri2(s, o); + tcg_gen_qemu_ld16u(tcg_ctx, o->in2, o->in2, get_mem_index(s)); +} +#define SPEC_in2_mri2_16u 0 + +static void in2_mri2_32s(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + in2_ri2(s, o); + tcg_gen_qemu_ld32s(tcg_ctx, o->in2, o->in2, get_mem_index(s)); +} +#define SPEC_in2_mri2_32s 0 + +static void in2_mri2_32u(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + in2_ri2(s, o); + tcg_gen_qemu_ld32u(tcg_ctx, o->in2, o->in2, get_mem_index(s)); +} +#define SPEC_in2_mri2_32u 0 + +static void in2_mri2_64(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + in2_ri2(s, o); + tcg_gen_qemu_ld64(tcg_ctx, o->in2, o->in2, get_mem_index(s)); +} +#define SPEC_in2_mri2_64 0 + +static void in2_i2(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in2 = tcg_const_i64(tcg_ctx, get_field(s, i2)); +} +#define SPEC_in2_i2 0 + +static void in2_i2_8u(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in2 = tcg_const_i64(tcg_ctx, (uint8_t)get_field(s, i2)); +} +#define SPEC_in2_i2_8u 0 + +static void in2_i2_16u(DisasContext *s, DisasOps *o) +{ + TCGContext 
*tcg_ctx = s->uc->tcg_ctx; + o->in2 = tcg_const_i64(tcg_ctx, (uint16_t)get_field(s, i2)); +} +#define SPEC_in2_i2_16u 0 + +static void in2_i2_32u(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in2 = tcg_const_i64(tcg_ctx, (uint32_t)get_field(s, i2)); +} +#define SPEC_in2_i2_32u 0 + +static void in2_i2_16u_shl(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint64_t i2 = (uint16_t)get_field(s, i2); + o->in2 = tcg_const_i64(tcg_ctx, i2 << s->insn->data); +} +#define SPEC_in2_i2_16u_shl 0 + +static void in2_i2_32u_shl(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint64_t i2 = (uint32_t)get_field(s, i2); + o->in2 = tcg_const_i64(tcg_ctx, i2 << s->insn->data); +} +#define SPEC_in2_i2_32u_shl 0 + +static void in2_insn(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in2 = tcg_const_i64(tcg_ctx, s->fields.raw_insn); +} +#define SPEC_in2_insn 0 + +/* ====================================================================== */ + +/* Find opc within the table of insns. This is formulated as a switch + statement so that (1) we get compile-time notice of cut-paste errors + for duplicated opcodes, and (2) the compiler generates the binary + search tree, rather than us having to post-process the table. */ + +#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \ + E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0) + +#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \ + E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0) + +#define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \ + E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL) + +#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM, + +enum DisasInsnEnum { +#include "insn-data.def" +}; + +#undef E +#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) { \ + .opc = OPC, \ + .flags = FL, \ + .fmt = FMT_##FT, \ + .fac = FAC_##FC, \ + .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \ + .name = #NM, \ + .help_in1 = in1_##I1, \ + .help_in2 = in2_##I2, \ + .help_prep = prep_##P, \ + .help_wout = wout_##W, \ + .help_cout = cout_##CC, \ + .help_op = op_##OP, \ + .data = D \ + }, + +/* Allow 0 to be used for NULL in the table below. */ +#define in1_0 NULL +#define in2_0 NULL +#define prep_0 NULL +#define wout_0 NULL +#define cout_0 NULL +#define op_0 NULL + +#define SPEC_in1_0 0 +#define SPEC_in2_0 0 +#define SPEC_prep_0 0 +#define SPEC_wout_0 0 + +/* Give smaller names to the various facilities. 
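+   Several of the short names (FAC_DFPR, FAC_FPSSH, FAC_FPRGR,
+   FAC_IEEEE_SIM) alias the same feature bit; the trailing comments
+   record which aspect of the facility each user relies on.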
*/
+#define FAC_Z           S390_FEAT_ZARCH
+#define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
+#define FAC_DFP         S390_FEAT_DFP
+#define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
+#define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
+#define FAC_EE          S390_FEAT_EXECUTE_EXT
+#define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
+#define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
+#define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
+#define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
+#define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
+#define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
+#define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
+#define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
+#define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
+#define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
+#define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
+#define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
+#define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
+#define FAC_PC          S390_FEAT_STFLE_45 /* population count */
+#define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
+#define FAC_SFLE        S390_FEAT_STFLE
+#define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
+#define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
+#define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
+#define FAC_DAT_ENH     S390_FEAT_DAT_ENH
+#define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
+#define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
+#define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
+#define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
+#define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
+#define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
+#define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
+#define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
+#define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
+#define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
+#define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
+#define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
+#define FAC_V           S390_FEAT_VECTOR /* vector facility */
+#define FAC_VE          S390_FEAT_VECTOR_ENH /* vector enhancements facility 1 */
+
+static const DisasInsn insn_info[] = {
+#include "insn-data.def"
+};
+
+#undef E
+#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
+    case OPC: return &insn_info[insn_ ## NM];
+
+static const DisasInsn *lookup_opc(uint16_t opc)
+{
+    switch (opc) {
+#include "insn-data.def"
+    default:
+        return NULL;
+    }
+}
+
+#undef F
+#undef E
+#undef D
+#undef C
+
+/* Extract a field from the insn. The INSN should be left-aligned in
+   the uint64_t so that we can more easily utilize the big-bit-endian
+   definitions we extract from the Principles of Operation. */
+
+static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
+{
+    uint32_t r, m;
+
+    if (f->size == 0) {
+        return;
+    }
+
+    /* Zero extract the field from the insn. */
+    r = (insn << f->beg) >> (64 - f->size);
+
+    /* Sign-extend, or un-swap the field as necessary. */
+    switch (f->type) {
+    case 0: /* unsigned */
+        break;
+    case 1: /* signed */
+        assert(f->size <= 32);
+        m = 1u << (f->size - 1);
+        r = (r ^ m) - m;
+        break;
+    case 2: /* dl+dh split, signed 20 bit.
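+       The raw field has DH in its low 8 bits and DL above it; the
+       cast sign-extends DH and the shifts reassemble the signed
+       20-bit displacement as DH:DL.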
*/
+        r = ((int8_t)r << 12) | (r >> 8);
+        break;
+    case 3: /* MSB stored in RXB */
+        g_assert(f->size == 4);
+        switch (f->beg) {
+        case 8:
+            r |= extract64(insn, 63 - 36, 1) << 4;
+            break;
+        case 12:
+            r |= extract64(insn, 63 - 37, 1) << 4;
+            break;
+        case 16:
+            r |= extract64(insn, 63 - 38, 1) << 4;
+            break;
+        case 32:
+            r |= extract64(insn, 63 - 39, 1) << 4;
+            break;
+        default:
+            // g_assert_not_reached();
+            break;
+        }
+        break;
+    default:
+        abort();
+    }
+
+    /* Validate that the "compressed" encoding we selected above is valid.
+       I.e. we haven't made two different original fields overlap. */
+    assert(((o->presentC >> f->indexC) & 1) == 0);
+    o->presentC |= 1 << f->indexC;
+    o->presentO |= 1 << f->indexO;
+
+    o->c[f->indexC] = r;
+}
+
+/* Look up the insn at the current PC, extracting the operands into O and
+   returning the info struct for the insn. Returns NULL for invalid insn. */
+
+static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    uint64_t insn, pc = s->base.pc_next;
+    int op, op2, ilen;
+    const DisasInsn *info;
+
+    if (unlikely(s->ex_value)) {
+        /* Drop the EX data now, so that it's clear on exception paths. */
+        TCGv_i64 zero = tcg_const_i64(tcg_ctx, 0);
+        tcg_gen_st_i64(tcg_ctx, zero, tcg_ctx->cpu_env, offsetof(CPUS390XState, ex_value));
+        tcg_temp_free_i64(tcg_ctx, zero);
+
+        /* Extract the values saved by EXECUTE. */
+        insn = s->ex_value & 0xffffffffffff0000ull;
+        ilen = s->ex_value & 0xf;
+        op = insn >> 56;
+    } else {
+        insn = ld_code2(env, pc);
+        op = (insn >> 8) & 0xff;
+        ilen = get_ilen(op);
+        switch (ilen) {
+        case 2:
+            insn = insn << 48;
+            break;
+        case 4:
+            insn = ld_code4(env, pc) << 32;
+            break;
+        case 6:
+            insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
+            break;
+        default:
+            // g_assert_not_reached();
+            break;
+        }
+    }
+    s->pc_tmp = s->base.pc_next + ilen;
+    s->ilen = ilen;
+
+    /* We can't actually determine the insn format until we've looked up
+       the full insn opcode, which we can't do without locating the
+       secondary opcode. Assume by default that OP2 is at bit 40; for
+       those smaller insns that don't actually have a secondary opcode
+       this will correctly result in OP2 = 0. */
+    switch (op) {
+    case 0x01: /* E */
+    case 0x80: /* S */
+    case 0x82: /* S */
+    case 0x93: /* S */
+    case 0xb2: /* S, RRF, RRE, IE */
+    case 0xb3: /* RRE, RRD, RRF */
+    case 0xb9: /* RRE, RRF */
+    case 0xe5: /* SSE, SIL */
+        op2 = (insn << 8) >> 56;
+        break;
+    case 0xa5: /* RI */
+    case 0xa7: /* RI */
+    case 0xc0: /* RIL */
+    case 0xc2: /* RIL */
+    case 0xc4: /* RIL */
+    case 0xc6: /* RIL */
+    case 0xc8: /* SSF */
+    case 0xcc: /* RIL */
+        op2 = (insn << 12) >> 60;
+        break;
+    case 0xc5: /* MII */
+    case 0xc7: /* SMI */
+    case 0xd0 ... 0xdf: /* SS */
+    case 0xe1: /* SS */
+    case 0xe2: /* SS */
+    case 0xe8: /* SS */
+    case 0xe9: /* SS */
+    case 0xea: /* SS */
+    case 0xee ... 0xf3: /* SS */
+    case 0xf8 ... 0xfd: /* SS */
+        op2 = 0;
+        break;
+    default:
+        op2 = (insn << 40) >> 56;
+        break;
+    }
+
+    memset(&s->fields, 0, sizeof(s->fields));
+    s->fields.raw_insn = insn;
+    s->fields.op = op;
+    s->fields.op2 = op2;
+
+    /* Look up the instruction. */
+    info = lookup_opc(op << 8 | op2);
+    s->insn = info;
+
+    /* If we found it, extract the operands.
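+       Every format describes NUM_C_FIELD operand slots; slots a
+       format does not use have size 0 and are skipped by
+       extract_field.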
*/ + if (info != NULL) { + DisasFormat fmt = info->fmt; + int i; + + for (i = 0; i < NUM_C_FIELD; ++i) { + extract_field(&s->fields, &format_info[fmt].op[i], insn); + } + } + return info; +} + +static bool is_afp_reg(int reg) +{ + return reg % 2 || reg > 6; +} + +static bool is_fp_pair(int reg) +{ + /* 0,1,4,5,8,9,12,13: to exclude the others, check for single bit */ + return !(reg & 0x2); +} + +static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const DisasInsn *insn; + DisasJumpType ret = DISAS_NEXT; + DisasOps o = {}; + + /* Search for the insn in the table. */ + insn = extract_insn(env, s); + + // Unicorn: trace this instruction on request + if (HOOK_EXISTS_BOUNDED(s->uc, UC_HOOK_CODE, s->base.pc_next)) { + gen_uc_tracecode(tcg_ctx, s->ilen, UC_HOOK_CODE_IDX, s->uc, s->base.pc_next); + // the callback might want to stop emulation immediately + check_exit_request(tcg_ctx); + } + + /* Emit insn_start now that we know the ILEN. */ + tcg_gen_insn_start(tcg_ctx, s->base.pc_next, s->cc_op, s->ilen); + + /* Not found means unimplemented/illegal opcode. */ + if (insn == NULL) { + // qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n", + // s->fields.op, s->fields.op2); + gen_illegal_opcode(s); + return DISAS_NORETURN; + } + + if (s->base.tb->flags & FLAG_MASK_PER) { + TCGv_i64 addr = tcg_const_i64(tcg_ctx, s->base.pc_next); + gen_helper_per_ifetch(tcg_ctx, tcg_ctx->cpu_env, addr); + tcg_temp_free_i64(tcg_ctx, addr); + } + + /* process flags */ + if (insn->flags) { + /* privileged instruction */ + if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) { + gen_program_exception(s, PGM_PRIVILEGED); + return DISAS_NORETURN; + } + + /* if AFP is not enabled, instructions and registers are forbidden */ + if (!(s->base.tb->flags & FLAG_MASK_AFP)) { + uint8_t dxc = 0; + + if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) { + dxc = 1; + } + if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) { + dxc = 1; + } + if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) { + dxc = 1; + } + if (insn->flags & IF_BFP) { + dxc = 2; + } + if (insn->flags & IF_DFP) { + dxc = 3; + } + if (insn->flags & IF_VEC) { + dxc = 0xfe; + } + if (dxc) { + gen_data_exception(tcg_ctx, dxc); + return DISAS_NORETURN; + } + } + + /* if vector instructions not enabled, executing them is forbidden */ + if (insn->flags & IF_VEC) { + if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) { + gen_data_exception(tcg_ctx, 0xfe); + return DISAS_NORETURN; + } + } + } + + /* Check for insn specification exceptions. */ + if (insn->spec) { + if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) || + (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) || + (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) || + (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) || + (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + } + + /* Implement the instruction. */ + if (insn->help_in1) { + insn->help_in1(s, &o); + } + if (insn->help_in2) { + insn->help_in2(s, &o); + } + if (insn->help_prep) { + insn->help_prep(s, &o); + } + if (insn->help_op) { + ret = insn->help_op(s, &o); + } + if (ret != DISAS_NORETURN) { + if (insn->help_wout) { + insn->help_wout(s, &o); + } + if (insn->help_cout) { + insn->help_cout(s, &o); + } + } + + /* Free any temporaries created by the helpers. 
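+       Operands flagged g_in1/g_in2/g_out/g_out2 alias globals (such as
+       architectural registers) rather than temporaries and must not be
+       freed here.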
*/ + if (o.out && !o.g_out) { + tcg_temp_free_i64(tcg_ctx, o.out); + } + if (o.out2 && !o.g_out2) { + tcg_temp_free_i64(tcg_ctx, o.out2); + } + if (o.in1 && !o.g_in1) { + tcg_temp_free_i64(tcg_ctx, o.in1); + } + if (o.in2 && !o.g_in2) { + tcg_temp_free_i64(tcg_ctx, o.in2); + } + if (o.addr1) { + tcg_temp_free_i64(tcg_ctx, o.addr1); + } + + if (s->base.tb->flags & FLAG_MASK_PER) { + /* An exception might be triggered, save PSW if not already done. */ + if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) { + tcg_gen_movi_i64(tcg_ctx, psw_addr, s->pc_tmp); + } + + /* Call the helper to check for a possible PER exception. */ + gen_helper_per_check_exception(tcg_ctx, tcg_ctx->cpu_env); + } + + /* Advance to the next instruction. */ + s->base.pc_next = s->pc_tmp; + return ret; +} + +static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) +{ + DisasContext *dc = container_of(dcbase, DisasContext, base); + struct uc_struct *uc = cs->uc; + + // unicorn handle + dc->uc = uc; + + /* 31-bit mode */ + if (!(dc->base.tb->flags & FLAG_MASK_64)) { + dc->base.pc_first &= 0x7fffffff; + dc->base.pc_next = dc->base.pc_first; + } + + dc->cc_op = CC_OP_DYNAMIC; + dc->ex_value = dc->base.tb->cs_base; + dc->do_debug = dc->base.singlestep_enabled; +} + +static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs) +{ +} + +static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs) +{ +} + +static bool s390x_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs, + const CPUBreakpoint *bp) +{ + DisasContext *dc = container_of(dcbase, DisasContext, base); + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + + /* + * Emit an insn_start to accompany the breakpoint exception. + * The ILEN value is a dummy, since this does not result in + * an s390x exception, but an internal qemu exception which + * brings us back to interact with the gdbstub. + */ + tcg_gen_insn_start(tcg_ctx, dc->base.pc_next, dc->cc_op, 2); + + dc->base.is_jmp = DISAS_PC_STALE; + dc->do_debug = true; + /* The address covered by the breakpoint must be included in + [tb->pc, tb->pc + tb->size) in order to for it to be + properly cleared -- thus we increment the PC here so that + the logic setting tb->size does the right thing. 
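+       Advancing by 2, the minimum s390x instruction length, is enough
+       to make the breakpoint address fall inside the TB without
+       decoding the real insn.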
*/
+    dc->base.pc_next += 2;
+    return true;
+}
+
+static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
+{
+    CPUS390XState *env = cs->env_ptr;
+    DisasContext *dc = container_of(dcbase, DisasContext, base);
+
+    // Unicorn: end address tells us to stop emulation
+    if (dcbase->pc_next == dc->uc->addr_end) {
+        // imitate PGM exception to halt emulation
+        dcbase->is_jmp = DISAS_UNICORN_HALT;
+    } else {
+        dc->base.is_jmp = translate_one(env, dc);
+        if (dc->base.is_jmp == DISAS_NEXT) {
+            uint64_t page_start;
+
+            page_start = dc->base.pc_first & TARGET_PAGE_MASK;
+            if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) {
+                dc->base.is_jmp = DISAS_TOO_MANY;
+            }
+        }
+    }
+}
+
+static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
+{
+    DisasContext *dc = container_of(dcbase, DisasContext, base);
+    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
+
+    switch (dc->base.is_jmp) {
+    case DISAS_UNICORN_HALT:
+        gen_exception(tcg_ctx, EXCP_PGM);
+        break;
+    case DISAS_GOTO_TB:
+    case DISAS_NORETURN:
+        break;
+    case DISAS_TOO_MANY:
+    case DISAS_PC_STALE:
+    case DISAS_PC_STALE_NOCHAIN:
+        update_psw_addr(dc);
+        /* FALLTHRU */
+    case DISAS_PC_UPDATED:
+        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
+           cc op type is in env */
+        update_cc_op(dc);
+        /* FALLTHRU */
+    case DISAS_PC_CC_UPDATED:
+        /* Exit the TB, either by raising a debug exception or by return. */
+        if (dc->do_debug) {
+            gen_exception(tcg_ctx, EXCP_DEBUG);
+        } else if (use_exit_tb(dc) ||
+                   dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
+            tcg_gen_exit_tb(tcg_ctx, NULL, 0);
+        } else {
+            tcg_gen_lookup_and_goto_ptr(tcg_ctx);
+        }
+        break;
+    default:
+        // g_assert_not_reached();
+        break;
+    }
+}
+
+static const TranslatorOps s390x_tr_ops = {
+    .init_disas_context = s390x_tr_init_disas_context,
+    .tb_start = s390x_tr_tb_start,
+    .insn_start = s390x_tr_insn_start,
+    .breakpoint_check = s390x_tr_breakpoint_check,
+    .translate_insn = s390x_tr_translate_insn,
+    .tb_stop = s390x_tr_tb_stop,
+};
+
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
+{
+    DisasContext dc;
+
+    translator_loop(&s390x_tr_ops, &dc.base, cs, tb, max_insns);
+}
+
+void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
+                          target_ulong *data)
+{
+    int cc_op = data[1];
+
+    env->psw.addr = data[0];
+
+    /* Update the CC opcode if it is not already up-to-date. */
+    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
+        env->cc_op = cc_op;
+    }
+
+    /* Record ILEN. */
+    env->int_pgm_ilen = data[2];
+}
diff --git a/qemu/target/s390x/translate_vx.inc.c b/qemu/target/s390x/translate_vx.inc.c
new file mode 100644
index 00000000..568b6a2a
--- /dev/null
+++ b/qemu/target/s390x/translate_vx.inc.c
@@ -0,0 +1,2882 @@
+/*
+ * QEMU TCG support -- s390x vector instruction translation functions
+ *
+ * Copyright (C) 2019 Red Hat Inc
+ *
+ * Authors:
+ *   David Hildenbrand
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+/*
+ * For most instructions that use the same element size for reads and
+ * writes, we can use real gvec vector expansion, which potentially uses
+ * real host vector instructions. As they only work up to 64 bit elements,
+ * 128 bit elements (vector is a single element) have to be handled
+ * differently. Operations that are too complicated to encode via TCG ops
+ * are handled via gvec ool (out-of-line) handlers.
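+ *
+ * (A concrete example: VECTOR ADD with 64-bit elements goes through
+ * gen_gvec_fn_3(add, ...), i.e. tcg_gen_gvec_add(), and may end up as a
+ * single host SIMD instruction, while the 128-bit element variant is
+ * composed of two i64 halves via tcg_gen_add2_i64() -- see op_va() and
+ * gen_gvec128_3_i64() below.)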
+ * + * As soon as instructions use different element sizes for reads and writes + * or access elements "out of their element scope" we expand them manually + * in fancy loops, as gvec expansion does not deal with actual element + * numbers and does also not support access to other elements. + * + * 128 bit elements: + * As we only have i32/i64, such elements have to be loaded into two + * i64 values and can then be processed e.g. by tcg_gen_add2_i64. + * + * Sizes: + * On s390x, the operand size (oprsz) and the maximum size (maxsz) are + * always 16 (128 bit). What gvec code calls "vece", s390x calls "es", + * a.k.a. "element size". These values nicely map to MO_8 ... MO_64. Only + * 128 bit element size has to be treated in a special way (MO_64 + 1). + * We will use ES_* instead of MO_* for this reason in this file. + * + * CC handling: + * As gvec ool-helpers can currently not return values (besides via + * pointers like vectors or cpu_env), whenever we have to set the CC and + * can't conclude the value from the result vector, we will directly + * set it in "env->cc_op" and mark it as static via set_cc_static()". + * Whenever this is done, the helper writes globals (cc_op). + */ + +#define NUM_VEC_ELEMENT_BYTES(es) (1 << (es)) +#define NUM_VEC_ELEMENTS(es) (16 / NUM_VEC_ELEMENT_BYTES(es)) +#define NUM_VEC_ELEMENT_BITS(es) (NUM_VEC_ELEMENT_BYTES(es) * BITS_PER_BYTE) + +#define ES_8 MO_8 +#define ES_16 MO_16 +#define ES_32 MO_32 +#define ES_64 MO_64 +#define ES_128 4 + +/* Floating-Point Format */ +#define FPF_SHORT 2 +#define FPF_LONG 3 +#define FPF_EXT 4 + +static inline bool valid_vec_element(uint8_t enr, MemOp es) +{ + return !(enr & ~(NUM_VEC_ELEMENTS(es) - 1)); +} + +static void read_vec_element_i64(TCGContext *tcg_ctx, TCGv_i64 dst, uint8_t reg, uint8_t enr, + MemOp memop) +{ + const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE); + + switch (memop) { + case ES_8: + tcg_gen_ld8u_i64(tcg_ctx, dst, tcg_ctx->cpu_env, offs); + break; + case ES_16: + tcg_gen_ld16u_i64(tcg_ctx, dst, tcg_ctx->cpu_env, offs); + break; + case ES_32: + tcg_gen_ld32u_i64(tcg_ctx, dst, tcg_ctx->cpu_env, offs); + break; + case ES_8 | MO_SIGN: + tcg_gen_ld8s_i64(tcg_ctx, dst, tcg_ctx->cpu_env, offs); + break; + case ES_16 | MO_SIGN: + tcg_gen_ld16s_i64(tcg_ctx, dst, tcg_ctx->cpu_env, offs); + break; + case ES_32 | MO_SIGN: + tcg_gen_ld32s_i64(tcg_ctx, dst, tcg_ctx->cpu_env, offs); + break; + case ES_64: + case ES_64 | MO_SIGN: + tcg_gen_ld_i64(tcg_ctx, dst, tcg_ctx->cpu_env, offs); + break; + default: + g_assert_not_reached(); + } +} + +static void read_vec_element_i32(TCGContext *tcg_ctx, TCGv_i32 dst, uint8_t reg, uint8_t enr, + MemOp memop) +{ + const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE); + + switch (memop) { + case ES_8: + tcg_gen_ld8u_i32(tcg_ctx, dst, tcg_ctx->cpu_env, offs); + break; + case ES_16: + tcg_gen_ld16u_i32(tcg_ctx, dst, tcg_ctx->cpu_env, offs); + break; + case ES_8 | MO_SIGN: + tcg_gen_ld8s_i32(tcg_ctx, dst, tcg_ctx->cpu_env, offs); + break; + case ES_16 | MO_SIGN: + tcg_gen_ld16s_i32(tcg_ctx, dst, tcg_ctx->cpu_env, offs); + break; + case ES_32: + case ES_32 | MO_SIGN: + tcg_gen_ld_i32(tcg_ctx, dst, tcg_ctx->cpu_env, offs); + break; + default: + // g_assert_not_reached(); + break; + } +} + +static void write_vec_element_i64(TCGContext *tcg_ctx, TCGv_i64 src, int reg, uint8_t enr, + MemOp memop) +{ + const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE); + + switch (memop) { + case ES_8: + tcg_gen_st8_i64(tcg_ctx, src, tcg_ctx->cpu_env, offs); + break; + case ES_16: + 
tcg_gen_st16_i64(tcg_ctx, src, tcg_ctx->cpu_env, offs); + break; + case ES_32: + tcg_gen_st32_i64(tcg_ctx, src, tcg_ctx->cpu_env, offs); + break; + case ES_64: + tcg_gen_st_i64(tcg_ctx, src, tcg_ctx->cpu_env, offs); + break; + default: + g_assert_not_reached(); + } +} + +static void write_vec_element_i32(TCGContext *tcg_ctx, TCGv_i32 src, int reg, uint8_t enr, + MemOp memop) +{ + const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE); + + switch (memop) { + case ES_8: + tcg_gen_st8_i32(tcg_ctx, src, tcg_ctx->cpu_env, offs); + break; + case ES_16: + tcg_gen_st16_i32(tcg_ctx, src, tcg_ctx->cpu_env, offs); + break; + case ES_32: + tcg_gen_st_i32(tcg_ctx, src, tcg_ctx->cpu_env, offs); + break; + default: + // g_assert_not_reached(); + break; + } +} + +static void get_vec_element_ptr_i64(TCGContext *tcg_ctx, TCGv_ptr ptr, uint8_t reg, TCGv_i64 enr, + uint8_t es) +{ + TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); + + /* mask off invalid parts from the element nr */ + tcg_gen_andi_i64(tcg_ctx, tmp, enr, NUM_VEC_ELEMENTS(es) - 1); + + /* convert it to an element offset relative to cpu_env (vec_reg_offset() */ + tcg_gen_shli_i64(tcg_ctx, tmp, tmp, es); +#ifndef HOST_WORDS_BIGENDIAN + tcg_gen_xori_i64(tcg_ctx, tmp, tmp, 8 - NUM_VEC_ELEMENT_BYTES(es)); +#endif + tcg_gen_addi_i64(tcg_ctx, tmp, tmp, vec_full_reg_offset(reg)); + + /* generate the final ptr by adding cpu_env */ + tcg_gen_trunc_i64_ptr(tcg_ctx, ptr, tmp); + tcg_gen_add_ptr(tcg_ctx, ptr, ptr, tcg_ctx->cpu_env); + + tcg_temp_free_i64(tcg_ctx, tmp); +} + +#define gen_gvec_2(tcg_ctx, v1, v2, gen) \ + tcg_gen_gvec_2(tcg_ctx, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ + 16, 16, gen) +#define gen_gvec_2s(tcg_ctx, v1, v2, c, gen) \ + tcg_gen_gvec_2s(tcg_ctx, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ + 16, 16, c, gen) +#define gen_gvec_2_ool(tcg_ctx, v1, v2, data, fn) \ + tcg_gen_gvec_2_ool(tcg_ctx, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ + 16, 16, data, fn) +#define gen_gvec_2i_ool(tcg_ctx, v1, v2, c, data, fn) \ + tcg_gen_gvec_2i_ool(tcg_ctx, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ + c, 16, 16, data, fn) +#define gen_gvec_2_ptr(tcg_ctx, v1, v2, ptr, data, fn) \ + tcg_gen_gvec_2_ptr(tcg_ctx, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ + ptr, 16, 16, data, fn) +#define gen_gvec_3(tcg_ctx, v1, v2, v3, gen) \ + tcg_gen_gvec_3(tcg_ctx, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ + vec_full_reg_offset(v3), 16, 16, gen) +#define gen_gvec_3_ool(tcg_ctx, v1, v2, v3, data, fn) \ + tcg_gen_gvec_3_ool(tcg_ctx, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ + vec_full_reg_offset(v3), 16, 16, data, fn) +#define gen_gvec_3_ptr(tcg_ctx, v1, v2, v3, ptr, data, fn) \ + tcg_gen_gvec_3_ptr(tcg_ctx, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ + vec_full_reg_offset(v3), ptr, 16, 16, data, fn) +#define gen_gvec_3i(tcg_ctx, v1, v2, v3, c, gen) \ + tcg_gen_gvec_3i(tcg_ctx, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ + vec_full_reg_offset(v3), 16, 16, c, gen) +#define gen_gvec_4(tcg_ctx, v1, v2, v3, v4, gen) \ + tcg_gen_gvec_4(tcg_ctx, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ + vec_full_reg_offset(v3), vec_full_reg_offset(v4), \ + 16, 16, gen) +#define gen_gvec_4_ool(tcg_ctx, v1, v2, v3, v4, data, fn) \ + tcg_gen_gvec_4_ool(tcg_ctx, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ + vec_full_reg_offset(v3), vec_full_reg_offset(v4), \ + 16, 16, data, fn) +#define gen_gvec_4_ptr(tcg_ctx, v1, v2, v3, v4, ptr, data, fn) \ + tcg_gen_gvec_4_ptr(tcg_ctx, vec_full_reg_offset(v1), 
vec_full_reg_offset(v2), \ + vec_full_reg_offset(v3), vec_full_reg_offset(v4), \ + ptr, 16, 16, data, fn) +#define gen_gvec_dup_i64(tcg_ctx, es, v1, c) \ + tcg_gen_gvec_dup_i64(tcg_ctx, es, vec_full_reg_offset(v1), 16, 16, c) +#define gen_gvec_mov(tcg_ctx, v1, v2) \ + tcg_gen_gvec_mov(tcg_ctx, 0, vec_full_reg_offset(v1), vec_full_reg_offset(v2), 16, \ + 16) +#define gen_gvec_dup64i(tcg_ctx, v1, c) \ + tcg_gen_gvec_dup64i(tcg_ctx, vec_full_reg_offset(v1), 16, 16, c) +#define gen_gvec_fn_2(tcg_ctx, fn, es, v1, v2) \ + tcg_gen_gvec_##fn(tcg_ctx, es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ + 16, 16) +#define gen_gvec_fn_2i(tcg_ctx, fn, es, v1, v2, c) \ + tcg_gen_gvec_##fn(tcg_ctx, es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ + c, 16, 16) +#define gen_gvec_fn_2s(tcg_ctx, fn, es, v1, v2, s) \ + tcg_gen_gvec_##fn(tcg_ctx, es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ + s, 16, 16) +#define gen_gvec_fn_3(tcg_ctx, fn, es, v1, v2, v3) \ + tcg_gen_gvec_##fn(tcg_ctx, es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ + vec_full_reg_offset(v3), 16, 16) +#define gen_gvec_fn_4(tcg_ctx, fn, es, v1, v2, v3, v4) \ + tcg_gen_gvec_##fn(tcg_ctx, es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ + vec_full_reg_offset(v3), vec_full_reg_offset(v4), 16, 16) + +/* + * Helper to carry out a 128 bit vector computation using 2 i64 values per + * vector. + */ +typedef void (*gen_gvec128_3_i64_fn)(TCGContext *tcg_ctx, TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, + TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh); +static void gen_gvec128_3_i64(TCGContext *tcg_ctx, gen_gvec128_3_i64_fn fn, uint8_t d, uint8_t a, + uint8_t b) +{ + TCGv_i64 dh = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 dl = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 ah = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 al = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 bh = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 bl = tcg_temp_new_i64(tcg_ctx); + + read_vec_element_i64(tcg_ctx, ah, a, 0, ES_64); + read_vec_element_i64(tcg_ctx, al, a, 1, ES_64); + read_vec_element_i64(tcg_ctx, bh, b, 0, ES_64); + read_vec_element_i64(tcg_ctx, bl, b, 1, ES_64); + fn(tcg_ctx, dl, dh, al, ah, bl, bh); + write_vec_element_i64(tcg_ctx, dh, d, 0, ES_64); + write_vec_element_i64(tcg_ctx, dl, d, 1, ES_64); + + tcg_temp_free_i64(tcg_ctx, dh); + tcg_temp_free_i64(tcg_ctx, dl); + tcg_temp_free_i64(tcg_ctx, ah); + tcg_temp_free_i64(tcg_ctx, al); + tcg_temp_free_i64(tcg_ctx, bh); + tcg_temp_free_i64(tcg_ctx, bl); +} + +typedef void (*gen_gvec128_4_i64_fn)(TCGContext *tcg_ctx, TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, + TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh, + TCGv_i64 cl, TCGv_i64 ch); +static void gen_gvec128_4_i64(TCGContext *tcg_ctx, gen_gvec128_4_i64_fn fn, uint8_t d, uint8_t a, + uint8_t b, uint8_t c) +{ + TCGv_i64 dh = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 dl = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 ah = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 al = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 bh = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 bl = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 ch = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 cl = tcg_temp_new_i64(tcg_ctx); + + read_vec_element_i64(tcg_ctx, ah, a, 0, ES_64); + read_vec_element_i64(tcg_ctx, al, a, 1, ES_64); + read_vec_element_i64(tcg_ctx, bh, b, 0, ES_64); + read_vec_element_i64(tcg_ctx, bl, b, 1, ES_64); + read_vec_element_i64(tcg_ctx, ch, c, 0, ES_64); + read_vec_element_i64(tcg_ctx, cl, c, 1, ES_64); + fn(tcg_ctx, dl, dh, al, ah, bl, bh, cl, ch); + write_vec_element_i64(tcg_ctx, dh, d, 0, ES_64); + write_vec_element_i64(tcg_ctx, dl, d, 1, ES_64); + + 
tcg_temp_free_i64(tcg_ctx, dh); + tcg_temp_free_i64(tcg_ctx, dl); + tcg_temp_free_i64(tcg_ctx, ah); + tcg_temp_free_i64(tcg_ctx, al); + tcg_temp_free_i64(tcg_ctx, bh); + tcg_temp_free_i64(tcg_ctx, bl); + tcg_temp_free_i64(tcg_ctx, ch); + tcg_temp_free_i64(tcg_ctx, cl); +} + +static void gen_gvec_dupi(TCGContext *tcg_ctx, uint8_t es, uint8_t reg, uint64_t c) +{ + switch (es) { + case ES_8: + tcg_gen_gvec_dup8i(tcg_ctx, vec_full_reg_offset(reg), 16, 16, c); + break; + case ES_16: + tcg_gen_gvec_dup16i(tcg_ctx, vec_full_reg_offset(reg), 16, 16, c); + break; + case ES_32: + tcg_gen_gvec_dup32i(tcg_ctx, vec_full_reg_offset(reg), 16, 16, c); + break; + case ES_64: + gen_gvec_dup64i(tcg_ctx, reg, c); + break; + default: + g_assert_not_reached(); + } +} + +static void zero_vec(TCGContext *tcg_ctx, uint8_t reg) +{ + tcg_gen_gvec_dup8i(tcg_ctx, vec_full_reg_offset(reg), 16, 16, 0); +} + +static void gen_addi2_i64(TCGContext *tcg_ctx, TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, + uint64_t b) +{ + TCGv_i64 bl = tcg_const_i64(tcg_ctx, b); + TCGv_i64 bh = tcg_const_i64(tcg_ctx, 0); + + tcg_gen_add2_i64(tcg_ctx, dl, dh, al, ah, bl, bh); + tcg_temp_free_i64(tcg_ctx, bl); + tcg_temp_free_i64(tcg_ctx, bh); +} + +static DisasJumpType op_vge(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t es = s->insn->data; + const uint8_t enr = get_field(s, m3); + TCGv_i64 tmp; + + if (!valid_vec_element(enr, es)) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + tmp = tcg_temp_new_i64(tcg_ctx); + read_vec_element_i64(tcg_ctx, tmp, get_field(s, v2), enr, es); + tcg_gen_add_i64(tcg_ctx, o->addr1, o->addr1, tmp); + gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 0); + + tcg_gen_qemu_ld_i64(tcg_ctx, tmp, o->addr1, get_mem_index(s), MO_TE | es); + write_vec_element_i64(tcg_ctx, tmp, get_field(s, v1), enr, es); + tcg_temp_free_i64(tcg_ctx, tmp); + return DISAS_NEXT; +} + +static uint64_t generate_byte_mask(uint8_t mask) +{ + uint64_t r = 0; + int i; + + for (i = 0; i < 8; i++) { + if ((mask >> i) & 1) { + r |= 0xffull << (i * 8); + } + } + return r; +} + +static DisasJumpType op_vgbm(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint16_t i2 = get_field(s, i2); + + if (i2 == (i2 & 0xff) * 0x0101) { + /* + * Masks for both 64 bit elements of the vector are the same. + * Trust tcg to produce a good constant loading. 
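+         * (The check i2 == (i2 & 0xff) * 0x0101 holds exactly when the
+         * high and low mask bytes are equal, e.g. i2 = 0x3c3c, so a
+         * single 64-bit dup covers the whole vector.)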
+ */ + gen_gvec_dup64i(tcg_ctx, get_field(s, v1), + generate_byte_mask(i2 & 0xff)); + } else { + TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_movi_i64(tcg_ctx, t, generate_byte_mask(i2 >> 8)); + write_vec_element_i64(tcg_ctx, t, get_field(s, v1), 0, ES_64); + tcg_gen_movi_i64(tcg_ctx, t, generate_byte_mask(i2)); + write_vec_element_i64(tcg_ctx, t, get_field(s, v1), 1, ES_64); + tcg_temp_free_i64(tcg_ctx, t); + } + return DISAS_NEXT; +} + +static DisasJumpType op_vgm(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t es = get_field(s, m4); + const uint8_t bits = NUM_VEC_ELEMENT_BITS(es); + const uint8_t i2 = get_field(s, i2) & (bits - 1); + const uint8_t i3 = get_field(s, i3) & (bits - 1); + uint64_t mask = 0; + int i; + + if (es > ES_64) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + /* generate the mask - take care of wrapping */ + for (i = i2; ; i = (i + 1) % bits) { + mask |= 1ull << (bits - i - 1); + if (i == i3) { + break; + } + } + + gen_gvec_dupi(tcg_ctx, es, get_field(s, v1), mask); + return DISAS_NEXT; +} + +static DisasJumpType op_vl(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_qemu_ld_i64(tcg_ctx, t0, o->addr1, get_mem_index(s), MO_TEQ); + gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); + tcg_gen_qemu_ld_i64(tcg_ctx, t1, o->addr1, get_mem_index(s), MO_TEQ); + write_vec_element_i64(tcg_ctx, t0, get_field(s, v1), 0, ES_64); + write_vec_element_i64(tcg_ctx, t1, get_field(s, v1), 1, ES_64); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + return DISAS_NEXT; +} + +static DisasJumpType op_vlr(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_gvec_mov(tcg_ctx, get_field(s, v1), get_field(s, v2)); + return DISAS_NEXT; +} + +static DisasJumpType op_vlrep(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t es = get_field(s, m3); + TCGv_i64 tmp; + + if (es > ES_64) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + tmp = tcg_temp_new_i64(tcg_ctx); + tcg_gen_qemu_ld_i64(tcg_ctx, tmp, o->addr1, get_mem_index(s), MO_TE | es); + gen_gvec_dup_i64(tcg_ctx, es, get_field(s, v1), tmp); + tcg_temp_free_i64(tcg_ctx, tmp); + return DISAS_NEXT; +} + +static DisasJumpType op_vle(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t es = s->insn->data; + const uint8_t enr = get_field(s, m3); + TCGv_i64 tmp; + + if (!valid_vec_element(enr, es)) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + tmp = tcg_temp_new_i64(tcg_ctx); + tcg_gen_qemu_ld_i64(tcg_ctx, tmp, o->addr1, get_mem_index(s), MO_TE | es); + write_vec_element_i64(tcg_ctx, tmp, get_field(s, v1), enr, es); + tcg_temp_free_i64(tcg_ctx, tmp); + return DISAS_NEXT; +} + +static DisasJumpType op_vlei(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t es = s->insn->data; + const uint8_t enr = get_field(s, m3); + TCGv_i64 tmp; + + if (!valid_vec_element(enr, es)) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + tmp = tcg_const_i64(tcg_ctx, (int16_t)get_field(s, i2)); + write_vec_element_i64(tcg_ctx, tmp, get_field(s, v1), enr, es); + tcg_temp_free_i64(tcg_ctx, tmp); + return DISAS_NEXT; +} + +static DisasJumpType op_vlgv(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const 
uint8_t es = get_field(s, m4); + TCGv_ptr ptr; + + if (es > ES_64) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + /* fast path if we don't need the register content */ + if (!get_field(s, b2)) { + uint8_t enr = get_field(s, d2) & (NUM_VEC_ELEMENTS(es) - 1); + + read_vec_element_i64(tcg_ctx, o->out, get_field(s, v3), enr, es); + return DISAS_NEXT; + } + + ptr = tcg_temp_new_ptr(tcg_ctx); + get_vec_element_ptr_i64(tcg_ctx, ptr, get_field(s, v3), o->addr1, es); + switch (es) { + case ES_8: + tcg_gen_ld8u_i64(tcg_ctx, o->out, ptr, 0); + break; + case ES_16: + tcg_gen_ld16u_i64(tcg_ctx, o->out, ptr, 0); + break; + case ES_32: + tcg_gen_ld32u_i64(tcg_ctx, o->out, ptr, 0); + break; + case ES_64: + tcg_gen_ld_i64(tcg_ctx, o->out, ptr, 0); + break; + default: + // g_assert_not_reached(); + break; + } + tcg_temp_free_ptr(tcg_ctx, ptr); + + return DISAS_NEXT; +} + +static DisasJumpType op_vllez(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint8_t es = get_field(s, m3); + uint8_t enr; + TCGv_i64 t; + + switch (es) { + /* rightmost sub-element of leftmost doubleword */ + case ES_8: + enr = 7; + break; + case ES_16: + enr = 3; + break; + case ES_32: + enr = 1; + break; + case ES_64: + enr = 0; + break; + /* leftmost sub-element of leftmost doubleword */ + case 6: + if (s390_has_feat(s->uc, S390_FEAT_VECTOR_ENH)) { + es = ES_32; + enr = 0; + break; + } + /* fallthrough */ + default: + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + t = tcg_temp_new_i64(tcg_ctx); + tcg_gen_qemu_ld_i64(tcg_ctx, t, o->addr1, get_mem_index(s), MO_TE | es); + zero_vec(tcg_ctx, get_field(s, v1)); + write_vec_element_i64(tcg_ctx, t, get_field(s, v1), enr, es); + tcg_temp_free_i64(tcg_ctx, t); + return DISAS_NEXT; +} + +static DisasJumpType op_vlm(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t v3 = get_field(s, v3); + uint8_t v1 = get_field(s, v1); + TCGv_i64 t0, t1; + + if (v3 < v1 || (v3 - v1 + 1) > 16) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + /* + * Check for possible access exceptions by trying to load the last + * element. The first element will be checked first next. 
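+     * Since at most 16 registers (256 bytes) are loaded, the range can
+     * cross at most one page boundary, so probing both ends checks every
+     * element before any vector register has been modified.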
+ */ + t0 = tcg_temp_new_i64(tcg_ctx); + t1 = tcg_temp_new_i64(tcg_ctx); + gen_addi_and_wrap_i64(s, t0, o->addr1, (v3 - v1) * 16 + 8); + tcg_gen_qemu_ld_i64(tcg_ctx, t0, t0, get_mem_index(s), MO_TEQ); + + for (;; v1++) { + tcg_gen_qemu_ld_i64(tcg_ctx, t1, o->addr1, get_mem_index(s), MO_TEQ); + write_vec_element_i64(tcg_ctx, t1, v1, 0, ES_64); + if (v1 == v3) { + break; + } + gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); + tcg_gen_qemu_ld_i64(tcg_ctx, t1, o->addr1, get_mem_index(s), MO_TEQ); + write_vec_element_i64(tcg_ctx, t1, v1, 1, ES_64); + gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); + } + + /* Store the last element, loaded first */ + write_vec_element_i64(tcg_ctx, t0, v1, 1, ES_64); + + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); + return DISAS_NEXT; +} + +static DisasJumpType op_vlbb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const int64_t block_size = (1ull << (get_field(s, m3) + 6)); + const int v1_offs = vec_full_reg_offset(get_field(s, v1)); + TCGv_ptr a0; + TCGv_i64 bytes; + + if (get_field(s, m3) > 6) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + bytes = tcg_temp_new_i64(tcg_ctx); + a0 = tcg_temp_new_ptr(tcg_ctx); + /* calculate the number of bytes until the next block boundary */ + tcg_gen_ori_i64(tcg_ctx, bytes, o->addr1, -block_size); + tcg_gen_neg_i64(tcg_ctx, bytes, bytes); + + tcg_gen_addi_ptr(tcg_ctx, a0, tcg_ctx->cpu_env, v1_offs); + gen_helper_vll(tcg_ctx, tcg_ctx->cpu_env, a0, o->addr1, bytes); + tcg_temp_free_i64(tcg_ctx, bytes); + tcg_temp_free_ptr(tcg_ctx, a0); + return DISAS_NEXT; +} + +static DisasJumpType op_vlvg(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t es = get_field(s, m4); + TCGv_ptr ptr; + + if (es > ES_64) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + /* fast path if we don't need the register content */ + if (!get_field(s, b2)) { + uint8_t enr = get_field(s, d2) & (NUM_VEC_ELEMENTS(es) - 1); + + write_vec_element_i64(tcg_ctx, o->in2, get_field(s, v1), enr, es); + return DISAS_NEXT; + } + + ptr = tcg_temp_new_ptr(tcg_ctx); + get_vec_element_ptr_i64(tcg_ctx, ptr, get_field(s, v1), o->addr1, es); + switch (es) { + case ES_8: + tcg_gen_st8_i64(tcg_ctx, o->in2, ptr, 0); + break; + case ES_16: + tcg_gen_st16_i64(tcg_ctx, o->in2, ptr, 0); + break; + case ES_32: + tcg_gen_st32_i64(tcg_ctx, o->in2, ptr, 0); + break; + case ES_64: + tcg_gen_st_i64(tcg_ctx, o->in2, ptr, 0); + break; + default: + // g_assert_not_reached(); + break; + } + tcg_temp_free_ptr(tcg_ctx, ptr); + + return DISAS_NEXT; +} + +static DisasJumpType op_vlvgp(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + write_vec_element_i64(tcg_ctx, o->in1, get_field(s, v1), 0, ES_64); + write_vec_element_i64(tcg_ctx, o->in2, get_field(s, v1), 1, ES_64); + return DISAS_NEXT; +} + +static DisasJumpType op_vll(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const int v1_offs = vec_full_reg_offset(get_field(s, v1)); + TCGv_ptr a0 = tcg_temp_new_ptr(tcg_ctx); + + /* convert highest index into an actual length */ + tcg_gen_addi_i64(tcg_ctx, o->in2, o->in2, 1); + tcg_gen_addi_ptr(tcg_ctx, a0, tcg_ctx->cpu_env, v1_offs); + gen_helper_vll(tcg_ctx, tcg_ctx->cpu_env, a0, o->addr1, o->in2); + tcg_temp_free_ptr(tcg_ctx, a0); + return DISAS_NEXT; +} + +static DisasJumpType op_vmr(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t v1 = get_field(s, v1); + 
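/* VMRH (op2 0x61) interleaves elements taken from the high halves of
+       v2 and v3 into v1; VMRL takes them from the low halves. */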
const uint8_t v2 = get_field(s, v2); + const uint8_t v3 = get_field(s, v3); + const uint8_t es = get_field(s, m4); + int dst_idx, src_idx; + TCGv_i64 tmp; + + if (es > ES_64) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + tmp = tcg_temp_new_i64(tcg_ctx); + if (s->fields.op2 == 0x61) { + /* iterate backwards to avoid overwriting data we might need later */ + for (dst_idx = NUM_VEC_ELEMENTS(es) - 1; dst_idx >= 0; dst_idx--) { + src_idx = dst_idx / 2; + if (dst_idx % 2 == 0) { + read_vec_element_i64(tcg_ctx, tmp, v2, src_idx, es); + } else { + read_vec_element_i64(tcg_ctx, tmp, v3, src_idx, es); + } + write_vec_element_i64(tcg_ctx, tmp, v1, dst_idx, es); + } + } else { + /* iterate forward to avoid overwriting data we might need later */ + for (dst_idx = 0; dst_idx < NUM_VEC_ELEMENTS(es); dst_idx++) { + src_idx = (dst_idx + NUM_VEC_ELEMENTS(es)) / 2; + if (dst_idx % 2 == 0) { + read_vec_element_i64(tcg_ctx, tmp, v2, src_idx, es); + } else { + read_vec_element_i64(tcg_ctx, tmp, v3, src_idx, es); + } + write_vec_element_i64(tcg_ctx, tmp, v1, dst_idx, es); + } + } + tcg_temp_free_i64(tcg_ctx, tmp); + return DISAS_NEXT; +} + +static DisasJumpType op_vpk(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t v1 = get_field(s, v1); + const uint8_t v2 = get_field(s, v2); + const uint8_t v3 = get_field(s, v3); + const uint8_t es = get_field(s, m4); + static gen_helper_gvec_3 * const vpk[3] = { + gen_helper_gvec_vpk16, + gen_helper_gvec_vpk32, + gen_helper_gvec_vpk64, + }; + static gen_helper_gvec_3 * const vpks[3] = { + gen_helper_gvec_vpks16, + gen_helper_gvec_vpks32, + gen_helper_gvec_vpks64, + }; + static gen_helper_gvec_3_ptr * const vpks_cc[3] = { + gen_helper_gvec_vpks_cc16, + gen_helper_gvec_vpks_cc32, + gen_helper_gvec_vpks_cc64, + }; + static gen_helper_gvec_3 * const vpkls[3] = { + gen_helper_gvec_vpkls16, + gen_helper_gvec_vpkls32, + gen_helper_gvec_vpkls64, + }; + static gen_helper_gvec_3_ptr * const vpkls_cc[3] = { + gen_helper_gvec_vpkls_cc16, + gen_helper_gvec_vpkls_cc32, + gen_helper_gvec_vpkls_cc64, + }; + + if (es == ES_8 || es > ES_64) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + switch (s->fields.op2) { + case 0x97: + if (get_field(s, m5) & 0x1) { + gen_gvec_3_ptr(tcg_ctx, v1, v2, v3, tcg_ctx->cpu_env, 0, vpks_cc[es - 1]); + set_cc_static(s); + } else { + gen_gvec_3_ool(tcg_ctx, v1, v2, v3, 0, vpks[es - 1]); + } + break; + case 0x95: + if (get_field(s, m5) & 0x1) { + gen_gvec_3_ptr(tcg_ctx, v1, v2, v3, tcg_ctx->cpu_env, 0, vpkls_cc[es - 1]); + set_cc_static(s); + } else { + gen_gvec_3_ool(tcg_ctx, v1, v2, v3, 0, vpkls[es - 1]); + } + break; + case 0x94: + /* If sources and destination dont't overlap -> fast path */ + if (v1 != v2 && v1 != v3) { + const uint8_t src_es = get_field(s, m4); + const uint8_t dst_es = src_es - 1; + TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); + int dst_idx, src_idx; + + for (dst_idx = 0; dst_idx < NUM_VEC_ELEMENTS(dst_es); dst_idx++) { + src_idx = dst_idx; + if (src_idx < NUM_VEC_ELEMENTS(src_es)) { + read_vec_element_i64(tcg_ctx, tmp, v2, src_idx, src_es); + } else { + src_idx -= NUM_VEC_ELEMENTS(src_es); + read_vec_element_i64(tcg_ctx, tmp, v3, src_idx, src_es); + } + write_vec_element_i64(tcg_ctx, tmp, v1, dst_idx, dst_es); + } + tcg_temp_free_i64(tcg_ctx, tmp); + } else { + gen_gvec_3_ool(tcg_ctx, v1, v2, v3, 0, vpk[es - 1]); + } + break; + default: + g_assert_not_reached(); + } + return DISAS_NEXT; +} + +static DisasJumpType op_vperm(DisasContext 
*s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_gvec_4_ool(tcg_ctx, get_field(s, v1), get_field(s, v2), + get_field(s, v3), get_field(s, v4), + 0, gen_helper_gvec_vperm); + return DISAS_NEXT; +} + +static DisasJumpType op_vpdi(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t i2 = extract32(get_field(s, m4), 2, 1); + const uint8_t i3 = extract32(get_field(s, m4), 0, 1); + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + + read_vec_element_i64(tcg_ctx, t0, get_field(s, v2), i2, ES_64); + read_vec_element_i64(tcg_ctx, t1, get_field(s, v3), i3, ES_64); + write_vec_element_i64(tcg_ctx, t0, get_field(s, v1), 0, ES_64); + write_vec_element_i64(tcg_ctx, t1, get_field(s, v1), 1, ES_64); + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); + return DISAS_NEXT; +} + +static DisasJumpType op_vrep(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t enr = get_field(s, i2); + const uint8_t es = get_field(s, m4); + + if (es > ES_64 || !valid_vec_element(enr, es)) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + tcg_gen_gvec_dup_mem(tcg_ctx, es, vec_full_reg_offset(get_field(s, v1)), + vec_reg_offset(get_field(s, v3), enr, es), + 16, 16); + return DISAS_NEXT; +} + +static DisasJumpType op_vrepi(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const int64_t data = (int16_t)get_field(s, i2); + const uint8_t es = get_field(s, m3); + + if (es > ES_64) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + gen_gvec_dupi(tcg_ctx, es, get_field(s, v1), data); + return DISAS_NEXT; +} + +static DisasJumpType op_vsce(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t es = s->insn->data; + const uint8_t enr = get_field(s, m3); + TCGv_i64 tmp; + + if (!valid_vec_element(enr, es)) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + tmp = tcg_temp_new_i64(tcg_ctx); + read_vec_element_i64(tcg_ctx, tmp, get_field(s, v2), enr, es); + tcg_gen_add_i64(tcg_ctx, o->addr1, o->addr1, tmp); + gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 0); + + read_vec_element_i64(tcg_ctx, tmp, get_field(s, v1), enr, es); + tcg_gen_qemu_st_i64(tcg_ctx, tmp, o->addr1, get_mem_index(s), MO_TE | es); + tcg_temp_free_i64(tcg_ctx, tmp); + return DISAS_NEXT; +} + +static DisasJumpType op_vsel(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_gvec_fn_4(tcg_ctx, bitsel, ES_8, get_field(s, v1), + get_field(s, v4), get_field(s, v2), + get_field(s, v3)); + return DISAS_NEXT; +} + +static DisasJumpType op_vseg(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t es = get_field(s, m3); + int idx1, idx2; + TCGv_i64 tmp; + + switch (es) { + case ES_8: + idx1 = 7; + idx2 = 15; + break; + case ES_16: + idx1 = 3; + idx2 = 7; + break; + case ES_32: + idx1 = 1; + idx2 = 3; + break; + default: + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + tmp = tcg_temp_new_i64(tcg_ctx); + read_vec_element_i64(tcg_ctx, tmp, get_field(s, v2), idx1, es | MO_SIGN); + write_vec_element_i64(tcg_ctx, tmp, get_field(s, v1), 0, ES_64); + read_vec_element_i64(tcg_ctx, tmp, get_field(s, v2), idx2, es | MO_SIGN); + write_vec_element_i64(tcg_ctx, tmp, get_field(s, v1), 1, ES_64); + tcg_temp_free_i64(tcg_ctx, tmp); + return DISAS_NEXT; +} + +static DisasJumpType op_vst(DisasContext *s, DisasOps *o) +{ + 
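/* VST: probe write access for all 16 bytes up front so that a faulting
+       store cannot leave a partially updated operand in memory. */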
TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    TCGv_i64 tmp = tcg_const_i64(tcg_ctx, 16);
+
+    /* Probe write access before actually modifying memory */
+    gen_helper_probe_write_access(tcg_ctx, tcg_ctx->cpu_env, o->addr1, tmp);
+
+    read_vec_element_i64(tcg_ctx, tmp, get_field(s, v1), 0, ES_64);
+    tcg_gen_qemu_st_i64(tcg_ctx, tmp, o->addr1, get_mem_index(s), MO_TEQ);
+    gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
+    read_vec_element_i64(tcg_ctx, tmp, get_field(s, v1), 1, ES_64);
+    tcg_gen_qemu_st_i64(tcg_ctx, tmp, o->addr1, get_mem_index(s), MO_TEQ);
+    tcg_temp_free_i64(tcg_ctx, tmp);
+    return DISAS_NEXT;
+}
+
+static DisasJumpType op_vste(DisasContext *s, DisasOps *o)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    const uint8_t es = s->insn->data;
+    const uint8_t enr = get_field(s, m3);
+    TCGv_i64 tmp;
+
+    if (!valid_vec_element(enr, es)) {
+        gen_program_exception(s, PGM_SPECIFICATION);
+        return DISAS_NORETURN;
+    }
+
+    tmp = tcg_temp_new_i64(tcg_ctx);
+    read_vec_element_i64(tcg_ctx, tmp, get_field(s, v1), enr, es);
+    tcg_gen_qemu_st_i64(tcg_ctx, tmp, o->addr1, get_mem_index(s), MO_TE | es);
+    tcg_temp_free_i64(tcg_ctx, tmp);
+    return DISAS_NEXT;
+}
+
+static DisasJumpType op_vstm(DisasContext *s, DisasOps *o)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    const uint8_t v3 = get_field(s, v3);
+    uint8_t v1 = get_field(s, v1);
+    TCGv_i64 tmp;
+
+    if (v3 < v1 || (v3 - v1 + 1) > 16) {
+        gen_program_exception(s, PGM_SPECIFICATION);
+        return DISAS_NORETURN;
+    }
+
+    /* Probe write access before actually modifying memory */
+    tmp = tcg_const_i64(tcg_ctx, (v3 - v1 + 1) * 16);
+    gen_helper_probe_write_access(tcg_ctx, tcg_ctx->cpu_env, o->addr1, tmp);
+
+    for (;; v1++) {
+        read_vec_element_i64(tcg_ctx, tmp, v1, 0, ES_64);
+        tcg_gen_qemu_st_i64(tcg_ctx, tmp, o->addr1, get_mem_index(s), MO_TEQ);
+        gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
+        read_vec_element_i64(tcg_ctx, tmp, v1, 1, ES_64);
+        tcg_gen_qemu_st_i64(tcg_ctx, tmp, o->addr1, get_mem_index(s), MO_TEQ);
+        if (v1 == v3) {
+            break;
+        }
+        gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
+    }
+    tcg_temp_free_i64(tcg_ctx, tmp);
+    return DISAS_NEXT;
+}
+
+static DisasJumpType op_vstl(DisasContext *s, DisasOps *o)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    const int v1_offs = vec_full_reg_offset(get_field(s, v1));
+    TCGv_ptr a0 = tcg_temp_new_ptr(tcg_ctx);
+
+    /* convert highest index into an actual length */
+    tcg_gen_addi_i64(tcg_ctx, o->in2, o->in2, 1);
+    tcg_gen_addi_ptr(tcg_ctx, a0, tcg_ctx->cpu_env, v1_offs);
+    gen_helper_vstl(tcg_ctx, tcg_ctx->cpu_env, a0, o->addr1, o->in2);
+    tcg_temp_free_ptr(tcg_ctx, a0);
+    return DISAS_NEXT;
+}
+
+static DisasJumpType op_vup(DisasContext *s, DisasOps *o)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    const bool logical = s->fields.op2 == 0xd4 || s->fields.op2 == 0xd5;
+    const uint8_t v1 = get_field(s, v1);
+    const uint8_t v2 = get_field(s, v2);
+    const uint8_t src_es = get_field(s, m3);
+    const uint8_t dst_es = src_es + 1;
+    int dst_idx, src_idx;
+    TCGv_i64 tmp;
+
+    if (src_es > ES_32) {
+        gen_program_exception(s, PGM_SPECIFICATION);
+        return DISAS_NORETURN;
+    }
+
+    tmp = tcg_temp_new_i64(tcg_ctx);
+    if (s->fields.op2 == 0xd7 || s->fields.op2 == 0xd5) {
+        /* iterate backwards to avoid overwriting data we might need later */
+        for (dst_idx = NUM_VEC_ELEMENTS(dst_es) - 1; dst_idx >= 0; dst_idx--) {
+            src_idx = dst_idx;
+            read_vec_element_i64(tcg_ctx, tmp, v2, src_idx,
+                                 src_es | (logical ?
0 : MO_SIGN)); + write_vec_element_i64(tcg_ctx, tmp, v1, dst_idx, dst_es); + } + + } else { + /* iterate forward to avoid overwriting data we might need later */ + for (dst_idx = 0; dst_idx < NUM_VEC_ELEMENTS(dst_es); dst_idx++) { + src_idx = dst_idx + NUM_VEC_ELEMENTS(src_es) / 2; + read_vec_element_i64(tcg_ctx, tmp, v2, src_idx, + src_es | (logical ? 0 : MO_SIGN)); + write_vec_element_i64(tcg_ctx, tmp, v1, dst_idx, dst_es); + } + } + tcg_temp_free_i64(tcg_ctx, tmp); + return DISAS_NEXT; +} + +static DisasJumpType op_va(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t es = get_field(s, m4); + + if (es > ES_128) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } else if (es == ES_128) { + gen_gvec128_3_i64(tcg_ctx, tcg_gen_add2_i64, get_field(s, v1), + get_field(s, v2), get_field(s, v3)); + return DISAS_NEXT; + } + gen_gvec_fn_3(tcg_ctx, add, es, get_field(s, v1), get_field(s, v2), + get_field(s, v3)); + return DISAS_NEXT; +} + +static void gen_acc(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, uint8_t es) +{ + const uint8_t msb_bit_nr = NUM_VEC_ELEMENT_BITS(es) - 1; + TCGv_i64 msb_mask = tcg_const_i64(tcg_ctx, dup_const(es, 1ull << msb_bit_nr)); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); + + /* Calculate the carry into the MSB, ignoring the old MSBs */ + tcg_gen_andc_i64(tcg_ctx, t1, a, msb_mask); + tcg_gen_andc_i64(tcg_ctx, t2, b, msb_mask); + tcg_gen_add_i64(tcg_ctx, t1, t1, t2); + /* Calculate the MSB without any carry into it */ + tcg_gen_xor_i64(tcg_ctx, t3, a, b); + /* Calculate the carry out of the MSB in the MSB bit position */ + tcg_gen_and_i64(tcg_ctx, d, a, b); + tcg_gen_and_i64(tcg_ctx, t1, t1, t3); + tcg_gen_or_i64(tcg_ctx, d, d, t1); + /* Isolate and shift the carry into position */ + tcg_gen_and_i64(tcg_ctx, d, d, msb_mask); + tcg_gen_shri_i64(tcg_ctx, d, d, msb_bit_nr); + + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, t2); + tcg_temp_free_i64(tcg_ctx, t3); +} + +static void gen_acc8_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) +{ + gen_acc(tcg_ctx, d, a, b, ES_8); +} + +static void gen_acc16_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) +{ + gen_acc(tcg_ctx, d, a, b, ES_16); +} + +static void gen_acc_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_add_i32(tcg_ctx, t, a, b); + tcg_gen_setcond_i32(tcg_ctx, TCG_COND_LTU, d, t, b); + tcg_temp_free_i32(tcg_ctx, t); +} + +static void gen_acc_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_add_i64(tcg_ctx, t, a, b); + tcg_gen_setcond_i64(tcg_ctx, TCG_COND_LTU, d, t, b); + tcg_temp_free_i64(tcg_ctx, t); +} + +static void gen_acc2_i64(TCGContext *tcg_ctx, TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, + TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh) +{ + TCGv_i64 th = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 tl = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 zero = tcg_const_i64(tcg_ctx, 0); + + tcg_gen_add2_i64(tcg_ctx, tl, th, al, zero, bl, zero); + tcg_gen_add2_i64(tcg_ctx, tl, th, th, zero, ah, zero); + tcg_gen_add2_i64(tcg_ctx, tl, dl, tl, th, bh, zero); + tcg_gen_mov_i64(tcg_ctx, dh, zero); + + tcg_temp_free_i64(tcg_ctx, th); + tcg_temp_free_i64(tcg_ctx, tl); + tcg_temp_free_i64(tcg_ctx, zero); +} + +static DisasJumpType op_vacc(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const 
uint8_t es = get_field(s, m4); + static const GVecGen3 g[4] = { + { .fni8 = gen_acc8_i64, }, + { .fni8 = gen_acc16_i64, }, + { .fni4 = gen_acc_i32, }, + { .fni8 = gen_acc_i64, }, + }; + + if (es > ES_128) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } else if (es == ES_128) { + gen_gvec128_3_i64(tcg_ctx, gen_acc2_i64, get_field(s, v1), + get_field(s, v2), get_field(s, v3)); + return DISAS_NEXT; + } + gen_gvec_3(tcg_ctx, get_field(s, v1), get_field(s, v2), + get_field(s, v3), &g[es]); + return DISAS_NEXT; +} + +static void gen_ac2_i64(TCGContext *tcg_ctx, TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, + TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch) +{ + TCGv_i64 tl = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 th = tcg_const_i64(tcg_ctx, 0); + + /* extract the carry only */ + tcg_gen_extract_i64(tcg_ctx, tl, cl, 0, 1); + tcg_gen_add2_i64(tcg_ctx, dl, dh, al, ah, bl, bh); + tcg_gen_add2_i64(tcg_ctx, dl, dh, dl, dh, tl, th); + + tcg_temp_free_i64(tcg_ctx, tl); + tcg_temp_free_i64(tcg_ctx, th); +} + +static DisasJumpType op_vac(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (get_field(s, m5) != ES_128) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + gen_gvec128_4_i64(tcg_ctx, gen_ac2_i64, get_field(s, v1), + get_field(s, v2), get_field(s, v3), + get_field(s, v4)); + return DISAS_NEXT; +} + +static void gen_accc2_i64(TCGContext *tcg_ctx, TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, + TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch) +{ + TCGv_i64 tl = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 th = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 zero = tcg_const_i64(tcg_ctx, 0); + + tcg_gen_andi_i64(tcg_ctx, tl, cl, 1); + tcg_gen_add2_i64(tcg_ctx, tl, th, tl, zero, al, zero); + tcg_gen_add2_i64(tcg_ctx, tl, th, tl, th, bl, zero); + tcg_gen_add2_i64(tcg_ctx, tl, th, th, zero, ah, zero); + tcg_gen_add2_i64(tcg_ctx, tl, dl, tl, th, bh, zero); + tcg_gen_mov_i64(tcg_ctx, dh, zero); + + tcg_temp_free_i64(tcg_ctx, tl); + tcg_temp_free_i64(tcg_ctx, th); + tcg_temp_free_i64(tcg_ctx, zero); +} + +static DisasJumpType op_vaccc(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (get_field(s, m5) != ES_128) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + gen_gvec128_4_i64(tcg_ctx, gen_accc2_i64, get_field(s, v1), + get_field(s, v2), get_field(s, v3), + get_field(s, v4)); + return DISAS_NEXT; +} + +static DisasJumpType op_vn(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_gvec_fn_3(tcg_ctx, and, ES_8, get_field(s, v1), get_field(s, v2), + get_field(s, v3)); + return DISAS_NEXT; +} + +static DisasJumpType op_vnc(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_gvec_fn_3(tcg_ctx, andc, ES_8, get_field(s, v1), + get_field(s, v2), get_field(s, v3)); + return DISAS_NEXT; +} + +static void gen_avg_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_ext_i32_i64(tcg_ctx, t0, a); + tcg_gen_ext_i32_i64(tcg_ctx, t1, b); + tcg_gen_add_i64(tcg_ctx, t0, t0, t1); + tcg_gen_addi_i64(tcg_ctx, t0, t0, 1); + tcg_gen_shri_i64(tcg_ctx, t0, t0, 1); + tcg_gen_extrl_i64_i32(tcg_ctx, d, t0); + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} + +static void gen_avg_i64(TCGContext *tcg_ctx, TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl) +{ + TCGv_i64 dh = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 ah = 
tcg_temp_new_i64(tcg_ctx); + TCGv_i64 bh = tcg_temp_new_i64(tcg_ctx); + + /* extending the sign by one bit is sufficient */ + tcg_gen_extract_i64(tcg_ctx, ah, al, 63, 1); + tcg_gen_extract_i64(tcg_ctx, bh, bl, 63, 1); + tcg_gen_add2_i64(tcg_ctx, dl, dh, al, ah, bl, bh); + gen_addi2_i64(tcg_ctx, dl, dh, dl, dh, 1); + tcg_gen_extract2_i64(tcg_ctx, dl, dl, dh, 1); + + tcg_temp_free_i64(tcg_ctx, dh); + tcg_temp_free_i64(tcg_ctx, ah); + tcg_temp_free_i64(tcg_ctx, bh); +} + +static DisasJumpType op_vavg(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t es = get_field(s, m4); + static const GVecGen3 g[4] = { + { .fno = gen_helper_gvec_vavg8, }, + { .fno = gen_helper_gvec_vavg16, }, + { .fni4 = gen_avg_i32, }, + { .fni8 = gen_avg_i64, }, + }; + + if (es > ES_64) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + gen_gvec_3(tcg_ctx, get_field(s, v1), get_field(s, v2), + get_field(s, v3), &g[es]); + return DISAS_NEXT; +} + +static void gen_avgl_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_extu_i32_i64(tcg_ctx, t0, a); + tcg_gen_extu_i32_i64(tcg_ctx, t1, b); + tcg_gen_add_i64(tcg_ctx, t0, t0, t1); + tcg_gen_addi_i64(tcg_ctx, t0, t0, 1); + tcg_gen_shri_i64(tcg_ctx, t0, t0, 1); + tcg_gen_extrl_i64_i32(tcg_ctx, d, t0); + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} + +static void gen_avgl_i64(TCGContext *tcg_ctx, TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl) +{ + TCGv_i64 dh = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 zero = tcg_const_i64(tcg_ctx, 0); + + tcg_gen_add2_i64(tcg_ctx, dl, dh, al, zero, bl, zero); + gen_addi2_i64(tcg_ctx, dl, dh, dl, dh, 1); + tcg_gen_extract2_i64(tcg_ctx, dl, dl, dh, 1); + + tcg_temp_free_i64(tcg_ctx, dh); + tcg_temp_free_i64(tcg_ctx, zero); +} + +static DisasJumpType op_vavgl(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t es = get_field(s, m4); + static const GVecGen3 g[4] = { + { .fno = gen_helper_gvec_vavgl8, }, + { .fno = gen_helper_gvec_vavgl16, }, + { .fni4 = gen_avgl_i32, }, + { .fni8 = gen_avgl_i64, }, + }; + + if (es > ES_64) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + gen_gvec_3(tcg_ctx, get_field(s, v1), get_field(s, v2), + get_field(s, v3), &g[es]); + return DISAS_NEXT; +} + +static DisasJumpType op_vcksm(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 sum = tcg_temp_new_i32(tcg_ctx); + int i; + + read_vec_element_i32(tcg_ctx, sum, get_field(s, v3), 1, ES_32); + for (i = 0; i < 4; i++) { + read_vec_element_i32(tcg_ctx, tmp, get_field(s, v2), i, ES_32); + tcg_gen_add2_i32(tcg_ctx, tmp, sum, sum, sum, tmp, tmp); + } + zero_vec(tcg_ctx, get_field(s, v1)); + write_vec_element_i32(tcg_ctx, sum, get_field(s, v1), 1, ES_32); + + tcg_temp_free_i32(tcg_ctx, tmp); + tcg_temp_free_i32(tcg_ctx, sum); + return DISAS_NEXT; +} + +static DisasJumpType op_vec(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint8_t es = get_field(s, m3); + const uint8_t enr = NUM_VEC_ELEMENTS(es) / 2 - 1; + + if (es > ES_64) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + if (s->fields.op2 == 0xdb) { + es |= MO_SIGN; + } + + o->in1 = tcg_temp_new_i64(tcg_ctx); + o->in2 = tcg_temp_new_i64(tcg_ctx); + read_vec_element_i64(tcg_ctx, o->in1, get_field(s, v1), enr, es); + read_vec_element_i64(tcg_ctx, 
o->in2, get_field(s, v2), enr, es); + return DISAS_NEXT; +} + +static DisasJumpType op_vc(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t es = get_field(s, m4); + TCGCond cond = s->insn->data; + + if (es > ES_64) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + tcg_gen_gvec_cmp(tcg_ctx, cond, es, + vec_full_reg_offset(get_field(s, v1)), + vec_full_reg_offset(get_field(s, v2)), + vec_full_reg_offset(get_field(s, v3)), 16, 16); + if (get_field(s, m5) & 0x1) { + TCGv_i64 low = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 high = tcg_temp_new_i64(tcg_ctx); + + read_vec_element_i64(tcg_ctx, high, get_field(s, v1), 0, ES_64); + read_vec_element_i64(tcg_ctx, low, get_field(s, v1), 1, ES_64); + gen_op_update2_cc_i64(s, CC_OP_VC, low, high); + + tcg_temp_free_i64(tcg_ctx, low); + tcg_temp_free_i64(tcg_ctx, high); + } + return DISAS_NEXT; +} + +static void gen_clz_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a) +{ + tcg_gen_clzi_i32(tcg_ctx, d, a, 32); +} + +static void gen_clz_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a) +{ + tcg_gen_clzi_i64(tcg_ctx, d, a, 64); +} + +static DisasJumpType op_vclz(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t es = get_field(s, m3); + static const GVecGen2 g[4] = { + { .fno = gen_helper_gvec_vclz8, }, + { .fno = gen_helper_gvec_vclz16, }, + { .fni4 = gen_clz_i32, }, + { .fni8 = gen_clz_i64, }, + }; + + if (es > ES_64) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + gen_gvec_2(tcg_ctx, get_field(s, v1), get_field(s, v2), &g[es]); + return DISAS_NEXT; +} + +static void gen_ctz_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a) +{ + tcg_gen_ctzi_i32(tcg_ctx, d, a, 32); +} + +static void gen_ctz_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a) +{ + tcg_gen_ctzi_i64(tcg_ctx, d, a, 64); +} + +static DisasJumpType op_vctz(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t es = get_field(s, m3); + static const GVecGen2 g[4] = { + { .fno = gen_helper_gvec_vctz8, }, + { .fno = gen_helper_gvec_vctz16, }, + { .fni4 = gen_ctz_i32, }, + { .fni8 = gen_ctz_i64, }, + }; + + if (es > ES_64) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + gen_gvec_2(tcg_ctx, get_field(s, v1), get_field(s, v2), &g[es]); + return DISAS_NEXT; +} + +static DisasJumpType op_vx(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_gvec_fn_3(tcg_ctx, xor, ES_8, get_field(s, v1), get_field(s, v2), + get_field(s, v3)); + return DISAS_NEXT; +} + +static DisasJumpType op_vgfm(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t es = get_field(s, m4); + static const GVecGen3 g[4] = { + { .fno = gen_helper_gvec_vgfm8, }, + { .fno = gen_helper_gvec_vgfm16, }, + { .fno = gen_helper_gvec_vgfm32, }, + { .fno = gen_helper_gvec_vgfm64, }, + }; + + if (es > ES_64) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + gen_gvec_3(tcg_ctx, get_field(s, v1), get_field(s, v2), + get_field(s, v3), &g[es]); + return DISAS_NEXT; +} + +static DisasJumpType op_vgfma(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t es = get_field(s, m5); + static const GVecGen4 g[4] = { + { .fno = gen_helper_gvec_vgfma8, }, + { .fno = gen_helper_gvec_vgfma16, }, + { .fno = gen_helper_gvec_vgfma32, }, + { .fno = gen_helper_gvec_vgfma64, }, + }; + + if (es > ES_64) { + gen_program_exception(s, PGM_SPECIFICATION); + 
return DISAS_NORETURN; + } + gen_gvec_4(tcg_ctx, get_field(s, v1), get_field(s, v2), + get_field(s, v3), get_field(s, v4), &g[es]); + return DISAS_NEXT; +} + +static DisasJumpType op_vlc(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t es = get_field(s, m3); + + if (es > ES_64) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + gen_gvec_fn_2(tcg_ctx, neg, es, get_field(s, v1), get_field(s, v2)); + return DISAS_NEXT; +} + +static DisasJumpType op_vlp(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t es = get_field(s, m3); + + if (es > ES_64) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + gen_gvec_fn_2(tcg_ctx, abs, es, get_field(s, v1), get_field(s, v2)); + return DISAS_NEXT; +} + +static DisasJumpType op_vmx(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t v1 = get_field(s, v1); + const uint8_t v2 = get_field(s, v2); + const uint8_t v3 = get_field(s, v3); + const uint8_t es = get_field(s, m4); + + if (es > ES_64) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + switch (s->fields.op2) { + case 0xff: + gen_gvec_fn_3(tcg_ctx, smax, es, v1, v2, v3); + break; + case 0xfd: + gen_gvec_fn_3(tcg_ctx, umax, es, v1, v2, v3); + break; + case 0xfe: + gen_gvec_fn_3(tcg_ctx, smin, es, v1, v2, v3); + break; + case 0xfc: + gen_gvec_fn_3(tcg_ctx, umin, es, v1, v2, v3); + break; + default: + g_assert_not_reached(); + } + return DISAS_NEXT; +} + +static void gen_mal_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c) +{ + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_mul_i32(tcg_ctx, t0, a, b); + tcg_gen_add_i32(tcg_ctx, d, t0, c); + + tcg_temp_free_i32(tcg_ctx, t0); +} + +static void gen_mah_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c) +{ + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_ext_i32_i64(tcg_ctx, t0, a); + tcg_gen_ext_i32_i64(tcg_ctx, t1, b); + tcg_gen_ext_i32_i64(tcg_ctx, t2, c); + tcg_gen_mul_i64(tcg_ctx, t0, t0, t1); + tcg_gen_add_i64(tcg_ctx, t0, t0, t2); + tcg_gen_extrh_i64_i32(tcg_ctx, d, t0); + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t2); +} + +static void gen_malh_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c) +{ + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_extu_i32_i64(tcg_ctx, t0, a); + tcg_gen_extu_i32_i64(tcg_ctx, t1, b); + tcg_gen_extu_i32_i64(tcg_ctx, t2, c); + tcg_gen_mul_i64(tcg_ctx, t0, t0, t1); + tcg_gen_add_i64(tcg_ctx, t0, t0, t2); + tcg_gen_extrh_i64_i32(tcg_ctx, d, t0); + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t2); +} + +static DisasJumpType op_vma(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t es = get_field(s, m5); + static const GVecGen4 g_vmal[3] = { + { .fno = gen_helper_gvec_vmal8, }, + { .fno = gen_helper_gvec_vmal16, }, + { .fni4 = gen_mal_i32, }, + }; + static const GVecGen4 g_vmah[3] = { + { .fno = gen_helper_gvec_vmah8, }, + { .fno = gen_helper_gvec_vmah16, }, + { .fni4 = gen_mah_i32, }, + }; + static const GVecGen4 g_vmalh[3] = { + { .fno = gen_helper_gvec_vmalh8, }, + { .fno = gen_helper_gvec_vmalh16, }, + { .fni4 = gen_malh_i32, }, + }; + static const 
GVecGen4 g_vmae[3] = { + { .fno = gen_helper_gvec_vmae8, }, + { .fno = gen_helper_gvec_vmae16, }, + { .fno = gen_helper_gvec_vmae32, }, + }; + static const GVecGen4 g_vmale[3] = { + { .fno = gen_helper_gvec_vmale8, }, + { .fno = gen_helper_gvec_vmale16, }, + { .fno = gen_helper_gvec_vmale32, }, + }; + static const GVecGen4 g_vmao[3] = { + { .fno = gen_helper_gvec_vmao8, }, + { .fno = gen_helper_gvec_vmao16, }, + { .fno = gen_helper_gvec_vmao32, }, + }; + static const GVecGen4 g_vmalo[3] = { + { .fno = gen_helper_gvec_vmalo8, }, + { .fno = gen_helper_gvec_vmalo16, }, + { .fno = gen_helper_gvec_vmalo32, }, + }; + const GVecGen4 *fn; + + if (es > ES_32) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + switch (s->fields.op2) { + case 0xaa: + fn = &g_vmal[es]; + break; + case 0xab: + fn = &g_vmah[es]; + break; + case 0xa9: + fn = &g_vmalh[es]; + break; + case 0xae: + fn = &g_vmae[es]; + break; + case 0xac: + fn = &g_vmale[es]; + break; + case 0xaf: + fn = &g_vmao[es]; + break; + case 0xad: + fn = &g_vmalo[es]; + break; + default: + g_assert_not_reached(); + } + + gen_gvec_4(tcg_ctx, get_field(s, v1), get_field(s, v2), + get_field(s, v3), get_field(s, v4), fn); + return DISAS_NEXT; +} + +static void gen_mh_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_muls2_i32(tcg_ctx, t, d, a, b); + tcg_temp_free_i32(tcg_ctx, t); +} + +static void gen_mlh_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_mulu2_i32(tcg_ctx, t, d, a, b); + tcg_temp_free_i32(tcg_ctx, t); +} + +static DisasJumpType op_vm(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t es = get_field(s, m4); + static const GVecGen3 g_vmh[3] = { + { .fno = gen_helper_gvec_vmh8, }, + { .fno = gen_helper_gvec_vmh16, }, + { .fni4 = gen_mh_i32, }, + }; + static const GVecGen3 g_vmlh[3] = { + { .fno = gen_helper_gvec_vmlh8, }, + { .fno = gen_helper_gvec_vmlh16, }, + { .fni4 = gen_mlh_i32, }, + }; + static const GVecGen3 g_vme[3] = { + { .fno = gen_helper_gvec_vme8, }, + { .fno = gen_helper_gvec_vme16, }, + { .fno = gen_helper_gvec_vme32, }, + }; + static const GVecGen3 g_vmle[3] = { + { .fno = gen_helper_gvec_vmle8, }, + { .fno = gen_helper_gvec_vmle16, }, + { .fno = gen_helper_gvec_vmle32, }, + }; + static const GVecGen3 g_vmo[3] = { + { .fno = gen_helper_gvec_vmo8, }, + { .fno = gen_helper_gvec_vmo16, }, + { .fno = gen_helper_gvec_vmo32, }, + }; + static const GVecGen3 g_vmlo[3] = { + { .fno = gen_helper_gvec_vmlo8, }, + { .fno = gen_helper_gvec_vmlo16, }, + { .fno = gen_helper_gvec_vmlo32, }, + }; + const GVecGen3 *fn; + + if (es > ES_32) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + switch (s->fields.op2) { + case 0xa2: + gen_gvec_fn_3(tcg_ctx, mul, es, get_field(s, v1), + get_field(s, v2), get_field(s, v3)); + return DISAS_NEXT; + case 0xa3: + fn = &g_vmh[es]; + break; + case 0xa1: + fn = &g_vmlh[es]; + break; + case 0xa6: + fn = &g_vme[es]; + break; + case 0xa4: + fn = &g_vmle[es]; + break; + case 0xa7: + fn = &g_vmo[es]; + break; + case 0xa5: + fn = &g_vmlo[es]; + break; + default: + g_assert_not_reached(); + } + + gen_gvec_3(tcg_ctx, get_field(s, v1), get_field(s, v2), + get_field(s, v3), fn); + return DISAS_NEXT; +} + +static DisasJumpType op_vnn(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_gvec_fn_3(tcg_ctx, nand, ES_8, get_field(s, v1), + get_field(s, v2), 
get_field(s, v3)); + return DISAS_NEXT; +} + +static DisasJumpType op_vno(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_gvec_fn_3(tcg_ctx, nor, ES_8, get_field(s, v1), get_field(s, v2), + get_field(s, v3)); + return DISAS_NEXT; +} + +static DisasJumpType op_vnx(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_gvec_fn_3(tcg_ctx, eqv, ES_8, get_field(s, v1), get_field(s, v2), + get_field(s, v3)); + return DISAS_NEXT; +} + +static DisasJumpType op_vo(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_gvec_fn_3(tcg_ctx, or, ES_8, get_field(s, v1), get_field(s, v2), + get_field(s, v3)); + return DISAS_NEXT; +} + +static DisasJumpType op_voc(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_gvec_fn_3(tcg_ctx, orc, ES_8, get_field(s, v1), get_field(s, v2), + get_field(s, v3)); + return DISAS_NEXT; +} + +static DisasJumpType op_vpopct(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t es = get_field(s, m3); + static const GVecGen2 g[4] = { + { .fno = gen_helper_gvec_vpopct8, }, + { .fno = gen_helper_gvec_vpopct16, }, + { .fni4 = tcg_gen_ctpop_i32, }, + { .fni8 = tcg_gen_ctpop_i64, }, + }; + + if (es > ES_64 || (es != ES_8 && !s390_has_feat(s->uc, S390_FEAT_VECTOR_ENH))) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + gen_gvec_2(tcg_ctx, get_field(s, v1), get_field(s, v2), &g[es]); + return DISAS_NEXT; +} + +static void gen_rll_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) +{ + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_andi_i32(tcg_ctx, t0, b, 31); + tcg_gen_rotl_i32(tcg_ctx, d, a, t0); + tcg_temp_free_i32(tcg_ctx, t0); +} + +static void gen_rll_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) +{ + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_andi_i64(tcg_ctx, t0, b, 63); + tcg_gen_rotl_i64(tcg_ctx, d, a, t0); + tcg_temp_free_i64(tcg_ctx, t0); +} + +static DisasJumpType op_verllv(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t es = get_field(s, m4); + static const GVecGen3 g[4] = { + { .fno = gen_helper_gvec_verllv8, }, + { .fno = gen_helper_gvec_verllv16, }, + { .fni4 = gen_rll_i32, }, + { .fni8 = gen_rll_i64, }, + }; + + if (es > ES_64) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + gen_gvec_3(tcg_ctx, get_field(s, v1), get_field(s, v2), + get_field(s, v3), &g[es]); + return DISAS_NEXT; +} + +static DisasJumpType op_verll(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t es = get_field(s, m4); + static const GVecGen2s g[4] = { + { .fno = gen_helper_gvec_verll8, }, + { .fno = gen_helper_gvec_verll16, }, + { .fni4 = gen_rll_i32, }, + { .fni8 = gen_rll_i64, }, + }; + + if (es > ES_64) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + gen_gvec_2s(tcg_ctx, get_field(s, v1), get_field(s, v3), o->addr1, + &g[es]); + return DISAS_NEXT; +} + +static void gen_rim_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, int32_t c) +{ + TCGv_i32 t = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_rotli_i32(tcg_ctx, t, a, c & 31); + tcg_gen_and_i32(tcg_ctx, t, t, b); + tcg_gen_andc_i32(tcg_ctx, d, d, b); + tcg_gen_or_i32(tcg_ctx, d, d, t); + + tcg_temp_free_i32(tcg_ctx, t); +} + +static void gen_rim_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, int64_t c) +{ + TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); + + 
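    /* VERIM: d = (rotl(a, c) & b) | (d & ~b), i.e. rotate a, then merge it into d under mask b */
+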
tcg_gen_rotli_i64(tcg_ctx, t, a, c & 63); + tcg_gen_and_i64(tcg_ctx, t, t, b); + tcg_gen_andc_i64(tcg_ctx, d, d, b); + tcg_gen_or_i64(tcg_ctx, d, d, t); + + tcg_temp_free_i64(tcg_ctx, t); +} + +static DisasJumpType op_verim(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t es = get_field(s, m5); + const uint8_t i4 = get_field(s, i4) & + (NUM_VEC_ELEMENT_BITS(es) - 1); + static const GVecGen3i g[4] = { + { .fno = gen_helper_gvec_verim8, }, + { .fno = gen_helper_gvec_verim16, }, + { .fni4 = gen_rim_i32, + .load_dest = true, }, + { .fni8 = gen_rim_i64, + .load_dest = true, }, + }; + + if (es > ES_64) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + gen_gvec_3i(tcg_ctx, get_field(s, v1), get_field(s, v2), + get_field(s, v3), i4, &g[es]); + return DISAS_NEXT; +} + +static DisasJumpType op_vesv(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t es = get_field(s, m4); + const uint8_t v1 = get_field(s, v1); + const uint8_t v2 = get_field(s, v2); + const uint8_t v3 = get_field(s, v3); + + if (es > ES_64) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + switch (s->fields.op2) { + case 0x70: + gen_gvec_fn_3(tcg_ctx, shlv, es, v1, v2, v3); + break; + case 0x7a: + gen_gvec_fn_3(tcg_ctx, sarv, es, v1, v2, v3); + break; + case 0x78: + gen_gvec_fn_3(tcg_ctx, shrv, es, v1, v2, v3); + break; + default: + g_assert_not_reached(); + } + return DISAS_NEXT; +} + +static DisasJumpType op_ves(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t es = get_field(s, m4); + const uint8_t d2 = get_field(s, d2) & + (NUM_VEC_ELEMENT_BITS(es) - 1); + const uint8_t v1 = get_field(s, v1); + const uint8_t v3 = get_field(s, v3); + TCGv_i32 shift; + + if (es > ES_64) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + if (likely(!get_field(s, b2))) { + switch (s->fields.op2) { + case 0x30: + gen_gvec_fn_2i(tcg_ctx, shli, es, v1, v3, d2); + break; + case 0x3a: + gen_gvec_fn_2i(tcg_ctx, sari, es, v1, v3, d2); + break; + case 0x38: + gen_gvec_fn_2i(tcg_ctx, shri, es, v1, v3, d2); + break; + default: + g_assert_not_reached(); + } + } else { + shift = tcg_temp_new_i32(tcg_ctx); + tcg_gen_extrl_i64_i32(tcg_ctx, shift, o->addr1); + tcg_gen_andi_i32(tcg_ctx, shift, shift, NUM_VEC_ELEMENT_BITS(es) - 1); + switch (s->fields.op2) { + case 0x30: + gen_gvec_fn_2s(tcg_ctx, shls, es, v1, v3, shift); + break; + case 0x3a: + gen_gvec_fn_2s(tcg_ctx, sars, es, v1, v3, shift); + break; + case 0x38: + gen_gvec_fn_2s(tcg_ctx, shrs, es, v1, v3, shift); + break; + default: + g_assert_not_reached(); + } + tcg_temp_free_i32(tcg_ctx, shift); + } + return DISAS_NEXT; +} + +static DisasJumpType op_vsl(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 shift = tcg_temp_new_i64(tcg_ctx); + + read_vec_element_i64(tcg_ctx, shift, get_field(s, v3), 7, ES_8); + if (s->fields.op2 == 0x74) { + tcg_gen_andi_i64(tcg_ctx, shift, shift, 0x7); + } else { + tcg_gen_andi_i64(tcg_ctx, shift, shift, 0x78); + } + + gen_gvec_2i_ool(tcg_ctx, get_field(s, v1), get_field(s, v2), + shift, 0, gen_helper_gvec_vsl); + tcg_temp_free_i64(tcg_ctx, shift); + return DISAS_NEXT; +} + +static DisasJumpType op_vsldb(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t i4 = get_field(s, i4) & 0xf; + const int left_shift = (i4 & 7) * 8; + const int right_shift = 64 - left_shift; + TCGv_i64 t0 = 
tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + + if ((i4 & 8) == 0) { + read_vec_element_i64(tcg_ctx, t0, get_field(s, v2), 0, ES_64); + read_vec_element_i64(tcg_ctx, t1, get_field(s, v2), 1, ES_64); + read_vec_element_i64(tcg_ctx, t2, get_field(s, v3), 0, ES_64); + } else { + read_vec_element_i64(tcg_ctx, t0, get_field(s, v2), 1, ES_64); + read_vec_element_i64(tcg_ctx, t1, get_field(s, v3), 0, ES_64); + read_vec_element_i64(tcg_ctx, t2, get_field(s, v3), 1, ES_64); + } + tcg_gen_extract2_i64(tcg_ctx, t0, t1, t0, right_shift); + tcg_gen_extract2_i64(tcg_ctx, t1, t2, t1, right_shift); + write_vec_element_i64(tcg_ctx, t0, get_field(s, v1), 0, ES_64); + write_vec_element_i64(tcg_ctx, t1, get_field(s, v1), 1, ES_64); + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t2); + return DISAS_NEXT; +} + +static DisasJumpType op_vsra(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 shift = tcg_temp_new_i64(tcg_ctx); + + read_vec_element_i64(tcg_ctx, shift, get_field(s, v3), 7, ES_8); + if (s->fields.op2 == 0x7e) { + tcg_gen_andi_i64(tcg_ctx, shift, shift, 0x7); + } else { + tcg_gen_andi_i64(tcg_ctx, shift, shift, 0x78); + } + + gen_gvec_2i_ool(tcg_ctx, get_field(s, v1), get_field(s, v2), + shift, 0, gen_helper_gvec_vsra); + tcg_temp_free_i64(tcg_ctx, shift); + return DISAS_NEXT; +} + +static DisasJumpType op_vsrl(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 shift = tcg_temp_new_i64(tcg_ctx); + + read_vec_element_i64(tcg_ctx, shift, get_field(s, v3), 7, ES_8); + if (s->fields.op2 == 0x7c) { + tcg_gen_andi_i64(tcg_ctx, shift, shift, 0x7); + } else { + tcg_gen_andi_i64(tcg_ctx, shift, shift, 0x78); + } + + gen_gvec_2i_ool(tcg_ctx, get_field(s, v1), get_field(s, v2), + shift, 0, gen_helper_gvec_vsrl); + tcg_temp_free_i64(tcg_ctx, shift); + return DISAS_NEXT; +} + +static DisasJumpType op_vs(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t es = get_field(s, m4); + + if (es > ES_128) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } else if (es == ES_128) { + gen_gvec128_3_i64(tcg_ctx, tcg_gen_sub2_i64, get_field(s, v1), + get_field(s, v2), get_field(s, v3)); + return DISAS_NEXT; + } + gen_gvec_fn_3(tcg_ctx, sub, es, get_field(s, v1), get_field(s, v2), + get_field(s, v3)); + return DISAS_NEXT; +} + +static void gen_scbi_i32(TCGContext *tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) +{ + tcg_gen_setcond_i32(tcg_ctx, TCG_COND_GEU, d, a, b); +} + +static void gen_scbi_i64(TCGContext *tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) +{ + tcg_gen_setcond_i64(tcg_ctx, TCG_COND_GEU, d, a, b); +} + +static void gen_scbi2_i64(TCGContext *tcg_ctx, TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, + TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh) +{ + TCGv_i64 th = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 tl = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 zero = tcg_const_i64(tcg_ctx, 0); + + tcg_gen_sub2_i64(tcg_ctx, tl, th, al, zero, bl, zero); + tcg_gen_andi_i64(tcg_ctx, th, th, 1); + tcg_gen_sub2_i64(tcg_ctx, tl, th, ah, zero, th, zero); + tcg_gen_sub2_i64(tcg_ctx, tl, th, tl, th, bh, zero); + /* "invert" the result: -1 -> 0; 0 -> 1 */ + tcg_gen_addi_i64(tcg_ctx, dl, th, 1); + tcg_gen_mov_i64(tcg_ctx, dh, zero); + + tcg_temp_free_i64(tcg_ctx, th); + tcg_temp_free_i64(tcg_ctx, tl); + tcg_temp_free_i64(tcg_ctx, zero); +} + +static DisasJumpType op_vscbi(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = 
s->uc->tcg_ctx; + const uint8_t es = get_field(s, m4); + static const GVecGen3 g[4] = { + { .fno = gen_helper_gvec_vscbi8, }, + { .fno = gen_helper_gvec_vscbi16, }, + { .fni4 = gen_scbi_i32, }, + { .fni8 = gen_scbi_i64, }, + }; + + if (es > ES_128) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } else if (es == ES_128) { + gen_gvec128_3_i64(tcg_ctx, gen_scbi2_i64, get_field(s, v1), + get_field(s, v2), get_field(s, v3)); + return DISAS_NEXT; + } + gen_gvec_3(tcg_ctx, get_field(s, v1), get_field(s, v2), + get_field(s, v3), &g[es]); + return DISAS_NEXT; +} + +static void gen_sbi2_i64(TCGContext *tcg_ctx, TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, + TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch) +{ + TCGv_i64 tl = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 th = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_not_i64(tcg_ctx, tl, bl); + tcg_gen_not_i64(tcg_ctx, th, bh); + gen_ac2_i64(tcg_ctx, dl, dh, al, ah, tl, th, cl, ch); + tcg_temp_free_i64(tcg_ctx, tl); + tcg_temp_free_i64(tcg_ctx, th); +} + +static DisasJumpType op_vsbi(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (get_field(s, m5) != ES_128) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + gen_gvec128_4_i64(tcg_ctx, gen_sbi2_i64, get_field(s, v1), + get_field(s, v2), get_field(s, v3), + get_field(s, v4)); + return DISAS_NEXT; +} + +static void gen_sbcbi2_i64(TCGContext *tcg_ctx, TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, + TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch) +{ + TCGv_i64 th = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 tl = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_not_i64(tcg_ctx, tl, bl); + tcg_gen_not_i64(tcg_ctx, th, bh); + gen_accc2_i64(tcg_ctx, dl, dh, al, ah, tl, th, cl, ch); + + tcg_temp_free_i64(tcg_ctx, tl); + tcg_temp_free_i64(tcg_ctx, th); +} + +static DisasJumpType op_vsbcbi(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (get_field(s, m5) != ES_128) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + gen_gvec128_4_i64(tcg_ctx, gen_sbcbi2_i64, get_field(s, v1), + get_field(s, v2), get_field(s, v3), + get_field(s, v4)); + return DISAS_NEXT; +} + +static DisasJumpType op_vsumg(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t es = get_field(s, m4); + TCGv_i64 sum, tmp; + uint8_t dst_idx; + + if (es == ES_8 || es > ES_32) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + sum = tcg_temp_new_i64(tcg_ctx); + tmp = tcg_temp_new_i64(tcg_ctx); + for (dst_idx = 0; dst_idx < 2; dst_idx++) { + uint8_t idx = dst_idx * NUM_VEC_ELEMENTS(es) / 2; + const uint8_t max_idx = idx + NUM_VEC_ELEMENTS(es) / 2 - 1; + + read_vec_element_i64(tcg_ctx, sum, get_field(s, v3), max_idx, es); + for (; idx <= max_idx; idx++) { + read_vec_element_i64(tcg_ctx, tmp, get_field(s, v2), idx, es); + tcg_gen_add_i64(tcg_ctx, sum, sum, tmp); + } + write_vec_element_i64(tcg_ctx, sum, get_field(s, v1), dst_idx, ES_64); + } + tcg_temp_free_i64(tcg_ctx, sum); + tcg_temp_free_i64(tcg_ctx, tmp); + return DISAS_NEXT; +} + +static DisasJumpType op_vsumq(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t es = get_field(s, m4); + const uint8_t max_idx = NUM_VEC_ELEMENTS(es) - 1; + TCGv_i64 sumh, suml, zero, tmpl; + uint8_t idx; + + if (es < ES_32 || es > ES_64) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + sumh = tcg_const_i64(tcg_ctx, 0); + suml = 
tcg_temp_new_i64(tcg_ctx); + zero = tcg_const_i64(tcg_ctx, 0); + tmpl = tcg_temp_new_i64(tcg_ctx); + + read_vec_element_i64(tcg_ctx, suml, get_field(s, v3), max_idx, es); + for (idx = 0; idx <= max_idx; idx++) { + read_vec_element_i64(tcg_ctx, tmpl, get_field(s, v2), idx, es); + tcg_gen_add2_i64(tcg_ctx, suml, sumh, suml, sumh, tmpl, zero); + } + write_vec_element_i64(tcg_ctx, sumh, get_field(s, v1), 0, ES_64); + write_vec_element_i64(tcg_ctx, suml, get_field(s, v1), 1, ES_64); + + tcg_temp_free_i64(tcg_ctx, sumh); + tcg_temp_free_i64(tcg_ctx, suml); + tcg_temp_free_i64(tcg_ctx, zero); + tcg_temp_free_i64(tcg_ctx, tmpl); + return DISAS_NEXT; +} + +static DisasJumpType op_vsum(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t es = get_field(s, m4); + TCGv_i32 sum, tmp; + uint8_t dst_idx; + + if (es > ES_16) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + sum = tcg_temp_new_i32(tcg_ctx); + tmp = tcg_temp_new_i32(tcg_ctx); + for (dst_idx = 0; dst_idx < 4; dst_idx++) { + uint8_t idx = dst_idx * NUM_VEC_ELEMENTS(es) / 4; + const uint8_t max_idx = idx + NUM_VEC_ELEMENTS(es) / 4 - 1; + + read_vec_element_i32(tcg_ctx, sum, get_field(s, v3), max_idx, es); + for (; idx <= max_idx; idx++) { + read_vec_element_i32(tcg_ctx, tmp, get_field(s, v2), idx, es); + tcg_gen_add_i32(tcg_ctx, sum, sum, tmp); + } + write_vec_element_i32(tcg_ctx, sum, get_field(s, v1), dst_idx, ES_32); + } + tcg_temp_free_i32(tcg_ctx, sum); + tcg_temp_free_i32(tcg_ctx, tmp); + return DISAS_NEXT; +} + +static DisasJumpType op_vtm(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_gvec_2_ptr(tcg_ctx, get_field(s, v1), get_field(s, v2), + tcg_ctx->cpu_env, 0, gen_helper_gvec_vtm); + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_vfae(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t es = get_field(s, m4); + const uint8_t m5 = get_field(s, m5); + static gen_helper_gvec_3 * const g[3] = { + gen_helper_gvec_vfae8, + gen_helper_gvec_vfae16, + gen_helper_gvec_vfae32, + }; + static gen_helper_gvec_3_ptr * const g_cc[3] = { + gen_helper_gvec_vfae_cc8, + gen_helper_gvec_vfae_cc16, + gen_helper_gvec_vfae_cc32, + }; + if (es > ES_32) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + if (extract32(m5, 0, 1)) { + gen_gvec_3_ptr(tcg_ctx, get_field(s, v1), get_field(s, v2), + get_field(s, v3), tcg_ctx->cpu_env, m5, g_cc[es]); + set_cc_static(s); + } else { + gen_gvec_3_ool(tcg_ctx, get_field(s, v1), get_field(s, v2), + get_field(s, v3), m5, g[es]); + } + return DISAS_NEXT; +} + +static DisasJumpType op_vfee(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t es = get_field(s, m4); + const uint8_t m5 = get_field(s, m5); + static gen_helper_gvec_3 * const g[3] = { + gen_helper_gvec_vfee8, + gen_helper_gvec_vfee16, + gen_helper_gvec_vfee32, + }; + static gen_helper_gvec_3_ptr * const g_cc[3] = { + gen_helper_gvec_vfee_cc8, + gen_helper_gvec_vfee_cc16, + gen_helper_gvec_vfee_cc32, + }; + + if (es > ES_32 || m5 & ~0x3) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + if (extract32(m5, 0, 1)) { + gen_gvec_3_ptr(tcg_ctx, get_field(s, v1), get_field(s, v2), + get_field(s, v3), tcg_ctx->cpu_env, m5, g_cc[es]); + set_cc_static(s); + } else { + gen_gvec_3_ool(tcg_ctx, get_field(s, v1), get_field(s, v2), + get_field(s, v3), m5, g[es]); + } + return DISAS_NEXT; +} + +static DisasJumpType 
op_vfene(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t es = get_field(s, m4); + const uint8_t m5 = get_field(s, m5); + static gen_helper_gvec_3 * const g[3] = { + gen_helper_gvec_vfene8, + gen_helper_gvec_vfene16, + gen_helper_gvec_vfene32, + }; + static gen_helper_gvec_3_ptr * const g_cc[3] = { + gen_helper_gvec_vfene_cc8, + gen_helper_gvec_vfene_cc16, + gen_helper_gvec_vfene_cc32, + }; + + if (es > ES_32 || m5 & ~0x3) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + if (extract32(m5, 0, 1)) { + gen_gvec_3_ptr(tcg_ctx, get_field(s, v1), get_field(s, v2), + get_field(s, v3), tcg_ctx->cpu_env, m5, g_cc[es]); + set_cc_static(s); + } else { + gen_gvec_3_ool(tcg_ctx, get_field(s, v1), get_field(s, v2), + get_field(s, v3), m5, g[es]); + } + return DISAS_NEXT; +} + +static DisasJumpType op_vistr(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t es = get_field(s, m4); + const uint8_t m5 = get_field(s, m5); + static gen_helper_gvec_2 * const g[3] = { + gen_helper_gvec_vistr8, + gen_helper_gvec_vistr16, + gen_helper_gvec_vistr32, + }; + static gen_helper_gvec_2_ptr * const g_cc[3] = { + gen_helper_gvec_vistr_cc8, + gen_helper_gvec_vistr_cc16, + gen_helper_gvec_vistr_cc32, + }; + + if (es > ES_32 || m5 & ~0x1) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + if (extract32(m5, 0, 1)) { + gen_gvec_2_ptr(tcg_ctx, get_field(s, v1), get_field(s, v2), + tcg_ctx->cpu_env, 0, g_cc[es]); + set_cc_static(s); + } else { + gen_gvec_2_ool(tcg_ctx, get_field(s, v1), get_field(s, v2), 0, + g[es]); + } + return DISAS_NEXT; +} + +static DisasJumpType op_vstrc(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t es = get_field(s, m5); + const uint8_t m6 = get_field(s, m6); + static gen_helper_gvec_4 * const g[3] = { + gen_helper_gvec_vstrc8, + gen_helper_gvec_vstrc16, + gen_helper_gvec_vstrc32, + }; + static gen_helper_gvec_4 * const g_rt[3] = { + gen_helper_gvec_vstrc_rt8, + gen_helper_gvec_vstrc_rt16, + gen_helper_gvec_vstrc_rt32, + }; + static gen_helper_gvec_4_ptr * const g_cc[3] = { + gen_helper_gvec_vstrc_cc8, + gen_helper_gvec_vstrc_cc16, + gen_helper_gvec_vstrc_cc32, + }; + static gen_helper_gvec_4_ptr * const g_cc_rt[3] = { + gen_helper_gvec_vstrc_cc_rt8, + gen_helper_gvec_vstrc_cc_rt16, + gen_helper_gvec_vstrc_cc_rt32, + }; + + if (es > ES_32) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + if (extract32(m6, 0, 1)) { + if (extract32(m6, 2, 1)) { + gen_gvec_4_ptr(tcg_ctx, get_field(s, v1), get_field(s, v2), + get_field(s, v3), get_field(s, v4), + tcg_ctx->cpu_env, m6, g_cc_rt[es]); + } else { + gen_gvec_4_ptr(tcg_ctx, get_field(s, v1), get_field(s, v2), + get_field(s, v3), get_field(s, v4), + tcg_ctx->cpu_env, m6, g_cc[es]); + } + set_cc_static(s); + } else { + if (extract32(m6, 2, 1)) { + gen_gvec_4_ool(tcg_ctx, get_field(s, v1), get_field(s, v2), + get_field(s, v3), get_field(s, v4), + m6, g_rt[es]); + } else { + gen_gvec_4_ool(tcg_ctx, get_field(s, v1), get_field(s, v2), + get_field(s, v3), get_field(s, v4), + m6, g[es]); + } + } + return DISAS_NEXT; +} + +static DisasJumpType op_vfa(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t fpf = get_field(s, m4); + const uint8_t m5 = get_field(s, m5); + const bool se = extract32(m5, 3, 1); + gen_helper_gvec_3_ptr *fn; + + if (fpf != FPF_LONG || extract32(m5, 0, 3)) { + gen_program_exception(s, 
PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + switch (s->fields.op2) { + case 0xe3: + fn = se ? gen_helper_gvec_vfa64s : gen_helper_gvec_vfa64; + break; + case 0xe5: + fn = se ? gen_helper_gvec_vfd64s : gen_helper_gvec_vfd64; + break; + case 0xe7: + fn = se ? gen_helper_gvec_vfm64s : gen_helper_gvec_vfm64; + break; + case 0xe2: + fn = se ? gen_helper_gvec_vfs64s : gen_helper_gvec_vfs64; + break; + default: + g_assert_not_reached(); + } + gen_gvec_3_ptr(tcg_ctx, get_field(s, v1), get_field(s, v2), + get_field(s, v3), tcg_ctx->cpu_env, 0, fn); + return DISAS_NEXT; +} + +static DisasJumpType op_wfc(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t fpf = get_field(s, m3); + const uint8_t m4 = get_field(s, m4); + + if (fpf != FPF_LONG || m4) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + if (s->fields.op2 == 0xcb) { + gen_gvec_2_ptr(tcg_ctx, get_field(s, v1), get_field(s, v2), + tcg_ctx->cpu_env, 0, gen_helper_gvec_wfc64); + } else { + gen_gvec_2_ptr(tcg_ctx, get_field(s, v1), get_field(s, v2), + tcg_ctx->cpu_env, 0, gen_helper_gvec_wfk64); + } + set_cc_static(s); + return DISAS_NEXT; +} + +static DisasJumpType op_vfc(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t fpf = get_field(s, m4); + const uint8_t m5 = get_field(s, m5); + const uint8_t m6 = get_field(s, m6); + const bool se = extract32(m5, 3, 1); + const bool cs = extract32(m6, 0, 1); + gen_helper_gvec_3_ptr *fn; + + if (fpf != FPF_LONG || extract32(m5, 0, 3) || extract32(m6, 1, 3)) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + if (cs) { + switch (s->fields.op2) { + case 0xe8: + fn = se ? gen_helper_gvec_vfce64s_cc : gen_helper_gvec_vfce64_cc; + break; + case 0xeb: + fn = se ? gen_helper_gvec_vfch64s_cc : gen_helper_gvec_vfch64_cc; + break; + case 0xea: + fn = se ? gen_helper_gvec_vfche64s_cc : gen_helper_gvec_vfche64_cc; + break; + default: + g_assert_not_reached(); + } + } else { + switch (s->fields.op2) { + case 0xe8: + fn = se ? gen_helper_gvec_vfce64s : gen_helper_gvec_vfce64; + break; + case 0xeb: + fn = se ? gen_helper_gvec_vfch64s : gen_helper_gvec_vfch64; + break; + case 0xea: + fn = se ? gen_helper_gvec_vfche64s : gen_helper_gvec_vfche64; + break; + default: + g_assert_not_reached(); + } + } + gen_gvec_3_ptr(tcg_ctx, get_field(s, v1), get_field(s, v2), + get_field(s, v3), tcg_ctx->cpu_env, 0, fn); + if (cs) { + set_cc_static(s); + } + return DISAS_NEXT; +} + +static DisasJumpType op_vcdg(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t fpf = get_field(s, m3); + const uint8_t m4 = get_field(s, m4); + const uint8_t erm = get_field(s, m5); + const bool se = extract32(m4, 3, 1); + gen_helper_gvec_2_ptr *fn; + + if (fpf != FPF_LONG || extract32(m4, 0, 2) || erm > 7 || erm == 2) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + switch (s->fields.op2) { + case 0xc3: + fn = se ? gen_helper_gvec_vcdg64s : gen_helper_gvec_vcdg64; + break; + case 0xc1: + fn = se ? gen_helper_gvec_vcdlg64s : gen_helper_gvec_vcdlg64; + break; + case 0xc2: + fn = se ? gen_helper_gvec_vcgd64s : gen_helper_gvec_vcgd64; + break; + case 0xc0: + fn = se ? gen_helper_gvec_vclgd64s : gen_helper_gvec_vclgd64; + break; + case 0xc7: + fn = se ? gen_helper_gvec_vfi64s : gen_helper_gvec_vfi64; + break; + case 0xc5: + fn = se ? 
gen_helper_gvec_vflr64s : gen_helper_gvec_vflr64; + break; + default: + g_assert_not_reached(); + } + gen_gvec_2_ptr(tcg_ctx, get_field(s, v1), get_field(s, v2), tcg_ctx->cpu_env, + deposit32(m4, 4, 4, erm), fn); + return DISAS_NEXT; +} + +static DisasJumpType op_vfll(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t fpf = get_field(s, m3); + const uint8_t m4 = get_field(s, m4); + gen_helper_gvec_2_ptr *fn = gen_helper_gvec_vfll32; + + if (fpf != FPF_SHORT || extract32(m4, 0, 3)) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + if (extract32(m4, 3, 1)) { + fn = gen_helper_gvec_vfll32s; + } + gen_gvec_2_ptr(tcg_ctx, get_field(s, v1), get_field(s, v2), tcg_ctx->cpu_env, + 0, fn); + return DISAS_NEXT; +} + +static DisasJumpType op_vfma(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t m5 = get_field(s, m5); + const uint8_t fpf = get_field(s, m6); + const bool se = extract32(m5, 3, 1); + gen_helper_gvec_4_ptr *fn; + + if (fpf != FPF_LONG || extract32(m5, 0, 3)) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + if (s->fields.op2 == 0x8f) { + fn = se ? gen_helper_gvec_vfma64s : gen_helper_gvec_vfma64; + } else { + fn = se ? gen_helper_gvec_vfms64s : gen_helper_gvec_vfms64; + } + gen_gvec_4_ptr(tcg_ctx, get_field(s, v1), get_field(s, v2), + get_field(s, v3), get_field(s, v4), tcg_ctx->cpu_env, + 0, fn); + return DISAS_NEXT; +} + +static DisasJumpType op_vfpso(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t v1 = get_field(s, v1); + const uint8_t v2 = get_field(s, v2); + const uint8_t fpf = get_field(s, m3); + const uint8_t m4 = get_field(s, m4); + const uint8_t m5 = get_field(s, m5); + TCGv_i64 tmp; + + if (fpf != FPF_LONG || extract32(m4, 0, 3) || m5 > 2) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + if (extract32(m4, 3, 1)) { + tmp = tcg_temp_new_i64(tcg_ctx); + read_vec_element_i64(tcg_ctx, tmp, v2, 0, ES_64); + switch (m5) { + case 0: + /* sign bit is inverted (complement) */ + tcg_gen_xori_i64(tcg_ctx, tmp, tmp, 1ull << 63); + break; + case 1: + /* sign bit is set to one (negative) */ + tcg_gen_ori_i64(tcg_ctx, tmp, tmp, 1ull << 63); + break; + case 2: + /* sign bit is set to zero (positive) */ + tcg_gen_andi_i64(tcg_ctx, tmp, tmp, (1ull << 63) - 1); + break; + } + write_vec_element_i64(tcg_ctx, tmp, v1, 0, ES_64); + tcg_temp_free_i64(tcg_ctx, tmp); + } else { + switch (m5) { + case 0: + /* sign bit is inverted (complement) */ + gen_gvec_fn_2i(tcg_ctx, xori, ES_64, v1, v2, 1ull << 63); + break; + case 1: + /* sign bit is set to one (negative) */ + gen_gvec_fn_2i(tcg_ctx, ori, ES_64, v1, v2, 1ull << 63); + break; + case 2: + /* sign bit is set to zero (positive) */ + gen_gvec_fn_2i(tcg_ctx, andi, ES_64, v1, v2, (1ull << 63) - 1); + break; + } + } + return DISAS_NEXT; +} + +static DisasJumpType op_vfsq(DisasContext *s, DisasOps *o) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + const uint8_t fpf = get_field(s, m3); + const uint8_t m4 = get_field(s, m4); + gen_helper_gvec_2_ptr *fn = gen_helper_gvec_vfsq64; + + if (fpf != FPF_LONG || extract32(m4, 0, 3)) { + gen_program_exception(s, PGM_SPECIFICATION); + return DISAS_NORETURN; + } + + if (extract32(m4, 3, 1)) { + fn = gen_helper_gvec_vfsq64s; + } + gen_gvec_2_ptr(tcg_ctx, get_field(s, v1), get_field(s, v2), tcg_ctx->cpu_env, + 0, fn); + return DISAS_NEXT; +} + +static DisasJumpType op_vftci(DisasContext *s, DisasOps *o) +{ + 
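    /* VECTOR FP TEST DATA CLASS IMMEDIATE: i3 is the data-class mask
+       tested against each element; the helper reports the result in
+       the CC, latched below via set_cc_static(). */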
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    const uint16_t i3 = get_field(s, i3);
+    const uint8_t fpf = get_field(s, m4);
+    const uint8_t m5 = get_field(s, m5);
+    gen_helper_gvec_2_ptr *fn = gen_helper_gvec_vftci64;
+
+    if (fpf != FPF_LONG || extract32(m5, 0, 3)) {
+        gen_program_exception(s, PGM_SPECIFICATION);
+        return DISAS_NORETURN;
+    }
+
+    if (extract32(m5, 3, 1)) {
+        fn = gen_helper_gvec_vftci64s;
+    }
+    gen_gvec_2_ptr(tcg_ctx, get_field(s, v1), get_field(s, v2), tcg_ctx->cpu_env, i3, fn);
+    set_cc_static(s);
+    return DISAS_NEXT;
+}
diff --git a/qemu/target/s390x/unicorn.c b/qemu/target/s390x/unicorn.c
new file mode 100644
index 00000000..d5eb4689
--- /dev/null
+++ b/qemu/target/s390x/unicorn.c
@@ -0,0 +1,173 @@
+/* Unicorn Emulator Engine */
+/* By Nguyen Anh Quynh, 2015-2021 */
+
+#include "sysemu/cpus.h"
+#include "cpu.h"
+#include "unicorn_common.h"
+#include "uc_priv.h"
+#include "unicorn.h"
+
+S390CPU *cpu_s390_init(struct uc_struct *uc, const char *cpu_model);
+
+static void s390_set_pc(struct uc_struct *uc, uint64_t address)
+{
+    // ((CPUS390XState *)uc->cpu->env_ptr)->psw.addr = address;
+}
+
+static void s390_release(void* ctx)
+{
+#if 0
+    int i;
+    TCGContext *tcg_ctx = (TCGContext *)ctx;
+    S390CPU *cpu = (S390CPU *)tcg_ctx->uc->cpu;
+    CPUTLBDesc *d = cpu->neg.tlb.d;
+    CPUTLBDescFast *f = cpu->neg.tlb.f;
+    CPUTLBDesc *desc;
+    CPUTLBDescFast *fast;
+
+    release_common(ctx);
+    for (i = 0; i < NB_MMU_MODES; i++) {
+        desc = &(d[i]);
+        fast = &(f[i]);
+        g_free(desc->iotlb);
+        g_free(fast->table);
+    }
+#endif
+}
+
+void s390_reg_reset(struct uc_struct *uc)
+{
+    CPUArchState *env = uc->cpu->env_ptr;
+
+    memset(env->regs, 0, sizeof(env->regs));
+    memset(env->aregs, 0, sizeof(env->aregs));
+
+    env->psw.addr = 0;
+}
+
+static void reg_read(CPUS390XState *env, unsigned int regid, void *value)
+{
+    if (regid >= UC_S390X_REG_R0 && regid <= UC_S390X_REG_R15) {
+        *(uint64_t *)value = env->regs[regid - UC_S390X_REG_R0];
+        return;
+    }
+
+    if (regid >= UC_S390X_REG_A0 && regid <= UC_S390X_REG_A15) {
+        *(uint32_t *)value = env->aregs[regid - UC_S390X_REG_A0];
+        return;
+    }
+
+    switch (regid) {
+    default:
+        break;
+    case UC_S390X_REG_PC:
+        *(uint64_t *)value = env->psw.addr;
+        break;
+    }
+}
+
+static void reg_write(CPUS390XState *env, unsigned int regid, const void *value)
+{
+    if (regid >= UC_S390X_REG_R0 && regid <= UC_S390X_REG_R15) {
+        env->regs[regid - UC_S390X_REG_R0] = *(uint64_t *)value;
+        return;
+    }
+
+    if (regid >= UC_S390X_REG_A0 && regid <= UC_S390X_REG_A15) {
+        env->aregs[regid - UC_S390X_REG_A0] = *(uint32_t *)value;
+        return;
+    }
+
+    switch (regid) {
+    default:
+        break;
+    case UC_S390X_REG_PC:
+        env->psw.addr = *(uint64_t *)value;
+        break;
+    }
+}
+
+static int s390_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count)
+{
+    CPUS390XState *env = &(S390_CPU(uc->cpu)->env);
+    int i;
+
+    for (i = 0; i < count; i++) {
+        unsigned int regid = regs[i];
+        void *value = vals[i];
+        reg_read(env, regid, value);
+    }
+
+    return 0;
+}
+
+static int s390_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count)
+{
+    CPUS390XState *env = &(S390_CPU(uc->cpu)->env);
+    int i;
+
+    for (i = 0; i < count; i++) {
+        unsigned int regid = regs[i];
+        const void *value = vals[i];
+        reg_write(env, regid, value);
+        if (regid == UC_S390X_REG_PC) {
+            // force to quit execution and flush TB
+            uc->quit_request = true;
+            uc_emu_stop(uc);
+        }
+    }
+
+    return 0;
+}
+
+DEFAULT_VISIBILITY
+int s390_context_reg_read(struct uc_context *ctx, unsigned int *regs, void **vals, int count)
+{
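+    /* Unlike s390_reg_read()/s390_reg_write() above, the context variants
+       operate on a snapshot saved by uc_context_save(), not on the live
+       CPU, so writing UC_S390X_REG_PC here does not stop emulation. */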
+    CPUS390XState *env = (CPUS390XState *)ctx->data;
+    int i;
+
+    for (i = 0; i < count; i++) {
+        unsigned int regid = regs[i];
+        void *value = vals[i];
+        reg_read(env, regid, value);
+    }
+
+    return 0;
+}
+
+DEFAULT_VISIBILITY
+int s390_context_reg_write(struct uc_context *ctx, unsigned int *regs, void *const *vals, int count)
+{
+    CPUS390XState *env = (CPUS390XState *)ctx->data;
+    int i;
+
+    for (i = 0; i < count; i++) {
+        unsigned int regid = regs[i];
+        const void *value = vals[i];
+        reg_write(env, regid, value);
+    }
+
+    return 0;
+}
+
+static int s390_cpus_init(struct uc_struct *uc, const char *cpu_model)
+{
+    S390CPU *cpu;
+
+    cpu = cpu_s390_init(uc, cpu_model);
+    if (cpu == NULL) {
+        return -1;
+    }
+    return 0;
+}
+
+DEFAULT_VISIBILITY
+void s390_uc_init(struct uc_struct* uc)
+{
+    uc->release = s390_release;
+    uc->reg_read = s390_reg_read;
+    uc->reg_write = s390_reg_write;
+    uc->reg_reset = s390_reg_reset;
+    uc->set_pc = s390_set_pc;
+    uc->cpus_init = s390_cpus_init;
+    uc->cpu_context_size = offsetof(CPUS390XState, end_reset_fields);
+    uc_common_init(uc);
+}
diff --git a/qemu/target/s390x/unicorn.h b/qemu/target/s390x/unicorn.h
new file mode 100644
index 00000000..44858e67
--- /dev/null
+++ b/qemu/target/s390x/unicorn.h
@@ -0,0 +1,16 @@
+/* Unicorn Emulator Engine */
+/* By Nguyen Anh Quynh, 2015-2021 */
+
+#ifndef UC_QEMU_TARGET_S390X_H
+#define UC_QEMU_TARGET_S390X_H
+
+// functions to read & write registers
+//int s390_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count);
+//int s390_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count);
+int s390_context_reg_read(struct uc_context *ctx, unsigned int *regs, void **vals, int count);
+int s390_context_reg_write(struct uc_context *ctx, unsigned int *regs, void *const *vals, int count);
+
+void s390_reg_reset(struct uc_struct *uc);
+
+void s390_uc_init(struct uc_struct* uc);
+#endif
diff --git a/qemu/target/s390x/vec.h b/qemu/target/s390x/vec.h
new file mode 100644
index 00000000..a6e36186
--- /dev/null
+++ b/qemu/target/s390x/vec.h
@@ -0,0 +1,141 @@
+/*
+ * QEMU TCG support -- s390x vector utilities
+ *
+ * Copyright (C) 2019 Red Hat Inc
+ *
+ * Authors:
+ *   David Hildenbrand
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef S390X_VEC_H
+#define S390X_VEC_H
+
+#include "tcg/tcg.h"
+
+typedef union S390Vector {
+    uint64_t doubleword[2];
+    uint32_t word[4];
+    uint16_t halfword[8];
+    uint8_t byte[16];
+} S390Vector;
+
+/*
+ * Each vector is stored as two 64bit host values. So when talking about
+ * byte/halfword/word numbers, we have to take care of proper translation
+ * between element numbers.
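+ * For example, byte element 0 is the most significant byte of
+ * doubleword[0]; on a little-endian host it sits at host byte offset 7,
+ * which is what H1(0) == (0 ^ 7) computes below.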
+ * + * Big Endian (target/possible host) + * B: [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15] + * HW: [ 0][ 1][ 2][ 3] - [ 4][ 5][ 6][ 7] + * W: [ 0][ 1] - [ 2][ 3] + * DW: [ 0] - [ 1] + * + * Little Endian (possible host) + * B: [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8] + * HW: [ 3][ 2][ 1][ 0] - [ 7][ 6][ 5][ 4] + * W: [ 1][ 0] - [ 3][ 2] + * DW: [ 0] - [ 1] + */ +#ifndef HOST_WORDS_BIGENDIAN +#define H1(x) ((x) ^ 7) +#define H2(x) ((x) ^ 3) +#define H4(x) ((x) ^ 1) +#else +#define H1(x) (x) +#define H2(x) (x) +#define H4(x) (x) +#endif + +static inline uint8_t s390_vec_read_element8(const S390Vector *v, uint8_t enr) +{ + g_assert(enr < 16); + return v->byte[H1(enr)]; +} + +static inline uint16_t s390_vec_read_element16(const S390Vector *v, uint8_t enr) +{ + g_assert(enr < 8); + return v->halfword[H2(enr)]; +} + +static inline uint32_t s390_vec_read_element32(const S390Vector *v, uint8_t enr) +{ + g_assert(enr < 4); + return v->word[H4(enr)]; +} + +static inline uint64_t s390_vec_read_element64(const S390Vector *v, uint8_t enr) +{ + g_assert(enr < 2); + return v->doubleword[enr]; +} + +static inline uint64_t s390_vec_read_element(const S390Vector *v, uint8_t enr, + uint8_t es) +{ + switch (es) { + case MO_8: + return s390_vec_read_element8(v, enr); + case MO_16: + return s390_vec_read_element16(v, enr); + case MO_32: + return s390_vec_read_element32(v, enr); + case MO_64: + return s390_vec_read_element64(v, enr); + default: + g_assert_not_reached(); + } +} + +static inline void s390_vec_write_element8(S390Vector *v, uint8_t enr, + uint8_t data) +{ + g_assert(enr < 16); + v->byte[H1(enr)] = data; +} + +static inline void s390_vec_write_element16(S390Vector *v, uint8_t enr, + uint16_t data) +{ + g_assert(enr < 8); + v->halfword[H2(enr)] = data; +} + +static inline void s390_vec_write_element32(S390Vector *v, uint8_t enr, + uint32_t data) +{ + g_assert(enr < 4); + v->word[H4(enr)] = data; +} + +static inline void s390_vec_write_element64(S390Vector *v, uint8_t enr, + uint64_t data) +{ + g_assert(enr < 2); + v->doubleword[enr] = data; +} + +static inline void s390_vec_write_element(S390Vector *v, uint8_t enr, + uint8_t es, uint64_t data) +{ + switch (es) { + case MO_8: + s390_vec_write_element8(v, enr, data); + break; + case MO_16: + s390_vec_write_element16(v, enr, data); + break; + case MO_32: + s390_vec_write_element32(v, enr, data); + break; + case MO_64: + s390_vec_write_element64(v, enr, data); + break; + default: + g_assert_not_reached(); + } +} + +#endif /* S390X_VEC_H */ diff --git a/qemu/target/s390x/vec_fpu_helper.c b/qemu/target/s390x/vec_fpu_helper.c new file mode 100644 index 00000000..a48bd704 --- /dev/null +++ b/qemu/target/s390x/vec_fpu_helper.c @@ -0,0 +1,625 @@ +/* + * QEMU TCG support -- s390x vector floating point instruction support + * + * Copyright (C) 2019 Red Hat Inc + * + * Authors: + * David Hildenbrand + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "cpu.h" +#include "internal.h" +#include "vec.h" +#include "tcg_s390x.h" +#include "tcg/tcg-gvec-desc.h" +#include "exec/exec-all.h" +#include "exec/helper-proto.h" +#include "fpu/softfloat.h" + +#define VIC_INVALID 0x1 +#define VIC_DIVBYZERO 0x2 +#define VIC_OVERFLOW 0x3 +#define VIC_UNDERFLOW 0x4 +#define VIC_INEXACT 0x5 + +/* returns the VEX. 
If the VEX is 0, there is no trap */ +static uint8_t check_ieee_exc(CPUS390XState *env, uint8_t enr, bool XxC, + uint8_t *vec_exc) +{ + uint8_t vece_exc = 0, trap_exc; + unsigned qemu_exc; + + /* Retrieve and clear the softfloat exceptions */ + qemu_exc = env->fpu_status.float_exception_flags; + if (qemu_exc == 0) { + return 0; + } + env->fpu_status.float_exception_flags = 0; + + vece_exc = s390_softfloat_exc_to_ieee(qemu_exc); + + /* Add them to the vector-wide s390x exception bits */ + *vec_exc |= vece_exc; + + /* Check for traps and construct the VXC */ + trap_exc = vece_exc & env->fpc >> 24; + if (trap_exc) { + if (trap_exc & S390_IEEE_MASK_INVALID) { + return enr << 4 | VIC_INVALID; + } else if (trap_exc & S390_IEEE_MASK_DIVBYZERO) { + return enr << 4 | VIC_DIVBYZERO; + } else if (trap_exc & S390_IEEE_MASK_OVERFLOW) { + return enr << 4 | VIC_OVERFLOW; + } else if (trap_exc & S390_IEEE_MASK_UNDERFLOW) { + return enr << 4 | VIC_UNDERFLOW; + } else if (!XxC) { + g_assert(trap_exc & S390_IEEE_MASK_INEXACT); + /* inexact has lowest priority on traps */ + return enr << 4 | VIC_INEXACT; + } + } + return 0; +} + +static void handle_ieee_exc(CPUS390XState *env, uint8_t vxc, uint8_t vec_exc, + uintptr_t retaddr) +{ + if (vxc) { + /* on traps, the fpc flags are not updated, instruction is suppressed */ + tcg_s390_vector_exception(env, vxc, retaddr); + } + if (vec_exc) { + /* indicate exceptions for all elements combined */ + env->fpc |= vec_exc << 16; + } +} + +typedef uint64_t (*vop64_2_fn)(uint64_t a, float_status *s); +static void vop64_2(S390Vector *v1, const S390Vector *v2, CPUS390XState *env, + bool s, bool XxC, uint8_t erm, vop64_2_fn fn, + uintptr_t retaddr) +{ + uint8_t vxc, vec_exc = 0; + S390Vector tmp = {}; + int i, old_mode; + + old_mode = s390_swap_bfp_rounding_mode(env, erm); + for (i = 0; i < 2; i++) { + const uint64_t a = s390_vec_read_element64(v2, i); + + s390_vec_write_element64(&tmp, i, fn(a, &env->fpu_status)); + vxc = check_ieee_exc(env, i, XxC, &vec_exc); + if (s || vxc) { + break; + } + } + s390_restore_bfp_rounding_mode(env, old_mode); + handle_ieee_exc(env, vxc, vec_exc, retaddr); + *v1 = tmp; +} + +typedef uint64_t (*vop64_3_fn)(uint64_t a, uint64_t b, float_status *s); +static void vop64_3(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, + CPUS390XState *env, bool s, vop64_3_fn fn, + uintptr_t retaddr) +{ + uint8_t vxc, vec_exc = 0; + S390Vector tmp = {}; + int i; + + for (i = 0; i < 2; i++) { + const uint64_t a = s390_vec_read_element64(v2, i); + const uint64_t b = s390_vec_read_element64(v3, i); + + s390_vec_write_element64(&tmp, i, fn(a, b, &env->fpu_status)); + vxc = check_ieee_exc(env, i, false, &vec_exc); + if (s || vxc) { + break; + } + } + handle_ieee_exc(env, vxc, vec_exc, retaddr); + *v1 = tmp; +} + +static uint64_t vfa64(uint64_t a, uint64_t b, float_status *s) +{ + return float64_add(a, b, s); +} + +void HELPER(gvec_vfa64)(void *v1, const void *v2, const void *v3, + CPUS390XState *env, uint32_t desc) +{ + vop64_3(v1, v2, v3, env, false, vfa64, GETPC()); +} + +void HELPER(gvec_vfa64s)(void *v1, const void *v2, const void *v3, + CPUS390XState *env, uint32_t desc) +{ + vop64_3(v1, v2, v3, env, true, vfa64, GETPC()); +} + +static int wfc64(const S390Vector *v1, const S390Vector *v2, + CPUS390XState *env, bool signal, uintptr_t retaddr) +{ + /* only the zero-indexed elements are compared */ + const float64 a = s390_vec_read_element64(v1, 0); + const float64 b = s390_vec_read_element64(v2, 0); + uint8_t vxc, vec_exc = 0; + int cmp; + + if (signal) { 
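+        /* signaling compare (WFK): any NaN operand raises IEEE invalid,
+           not just a signaling NaN as in the quiet variant below */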
+ cmp = float64_compare(a, b, &env->fpu_status); + } else { + cmp = float64_compare_quiet(a, b, &env->fpu_status); + } + vxc = check_ieee_exc(env, 0, false, &vec_exc); + handle_ieee_exc(env, vxc, vec_exc, retaddr); + + return float_comp_to_cc(env, cmp); +} + +void HELPER(gvec_wfc64)(const void *v1, const void *v2, CPUS390XState *env, + uint32_t desc) +{ + env->cc_op = wfc64(v1, v2, env, false, GETPC()); +} + +void HELPER(gvec_wfk64)(const void *v1, const void *v2, CPUS390XState *env, + uint32_t desc) +{ + env->cc_op = wfc64(v1, v2, env, true, GETPC()); +} + +typedef int (*vfc64_fn)(float64 a, float64 b, float_status *status); +static int vfc64(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, + CPUS390XState *env, bool s, vfc64_fn fn, uintptr_t retaddr) +{ + uint8_t vxc, vec_exc = 0; + S390Vector tmp = {}; + int match = 0; + int i; + + for (i = 0; i < 2; i++) { + const float64 a = s390_vec_read_element64(v2, i); + const float64 b = s390_vec_read_element64(v3, i); + + /* swap the order of the parameters, so we can use existing functions */ + if (fn(b, a, &env->fpu_status)) { + match++; + s390_vec_write_element64(&tmp, i, -1ull); + } + vxc = check_ieee_exc(env, i, false, &vec_exc); + if (s || vxc) { + break; + } + } + + handle_ieee_exc(env, vxc, vec_exc, retaddr); + *v1 = tmp; + if (match) { + return s || match == 2 ? 0 : 1; + } + return 3; +} + +void HELPER(gvec_vfce64)(void *v1, const void *v2, const void *v3, + CPUS390XState *env, uint32_t desc) +{ + vfc64(v1, v2, v3, env, false, float64_eq_quiet, GETPC()); +} + +void HELPER(gvec_vfce64s)(void *v1, const void *v2, const void *v3, + CPUS390XState *env, uint32_t desc) +{ + vfc64(v1, v2, v3, env, true, float64_eq_quiet, GETPC()); +} + +void HELPER(gvec_vfce64_cc)(void *v1, const void *v2, const void *v3, + CPUS390XState *env, uint32_t desc) +{ + env->cc_op = vfc64(v1, v2, v3, env, false, float64_eq_quiet, GETPC()); +} + +void HELPER(gvec_vfce64s_cc)(void *v1, const void *v2, const void *v3, + CPUS390XState *env, uint32_t desc) +{ + env->cc_op = vfc64(v1, v2, v3, env, true, float64_eq_quiet, GETPC()); +} + +void HELPER(gvec_vfch64)(void *v1, const void *v2, const void *v3, + CPUS390XState *env, uint32_t desc) +{ + vfc64(v1, v2, v3, env, false, float64_lt_quiet, GETPC()); +} + +void HELPER(gvec_vfch64s)(void *v1, const void *v2, const void *v3, + CPUS390XState *env, uint32_t desc) +{ + vfc64(v1, v2, v3, env, true, float64_lt_quiet, GETPC()); +} + +void HELPER(gvec_vfch64_cc)(void *v1, const void *v2, const void *v3, + CPUS390XState *env, uint32_t desc) +{ + env->cc_op = vfc64(v1, v2, v3, env, false, float64_lt_quiet, GETPC()); +} + +void HELPER(gvec_vfch64s_cc)(void *v1, const void *v2, const void *v3, + CPUS390XState *env, uint32_t desc) +{ + env->cc_op = vfc64(v1, v2, v3, env, true, float64_lt_quiet, GETPC()); +} + +void HELPER(gvec_vfche64)(void *v1, const void *v2, const void *v3, + CPUS390XState *env, uint32_t desc) +{ + vfc64(v1, v2, v3, env, false, float64_le_quiet, GETPC()); +} + +void HELPER(gvec_vfche64s)(void *v1, const void *v2, const void *v3, + CPUS390XState *env, uint32_t desc) +{ + vfc64(v1, v2, v3, env, true, float64_le_quiet, GETPC()); +} + +void HELPER(gvec_vfche64_cc)(void *v1, const void *v2, const void *v3, + CPUS390XState *env, uint32_t desc) +{ + env->cc_op = vfc64(v1, v2, v3, env, false, float64_le_quiet, GETPC()); +} + +void HELPER(gvec_vfche64s_cc)(void *v1, const void *v2, const void *v3, + CPUS390XState *env, uint32_t desc) +{ + env->cc_op = vfc64(v1, v2, v3, env, true, float64_le_quiet, GETPC()); +} + 
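+/*
+ * The conversion helpers below (VCDG, VCDLG, VCGD, VCLGD) and the
+ * rounding helpers (VFI, VFLR) all decode the effective rounding mode
+ * (erm) and the inexact-suppression flag (XxC) from the simd_data()
+ * immediate, into which the translator packed them with
+ * deposit32(m4, 4, 4, erm).
+ */
+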
+static uint64_t vcdg64(uint64_t a, float_status *s) +{ + return int64_to_float64(a, s); +} + +void HELPER(gvec_vcdg64)(void *v1, const void *v2, CPUS390XState *env, + uint32_t desc) +{ + const uint8_t erm = extract32(simd_data(desc), 4, 4); + const bool XxC = extract32(simd_data(desc), 2, 1); + + vop64_2(v1, v2, env, false, XxC, erm, vcdg64, GETPC()); +} + +void HELPER(gvec_vcdg64s)(void *v1, const void *v2, CPUS390XState *env, + uint32_t desc) +{ + const uint8_t erm = extract32(simd_data(desc), 4, 4); + const bool XxC = extract32(simd_data(desc), 2, 1); + + vop64_2(v1, v2, env, true, XxC, erm, vcdg64, GETPC()); +} + +static uint64_t vcdlg64(uint64_t a, float_status *s) +{ + return uint64_to_float64(a, s); +} + +void HELPER(gvec_vcdlg64)(void *v1, const void *v2, CPUS390XState *env, + uint32_t desc) +{ + const uint8_t erm = extract32(simd_data(desc), 4, 4); + const bool XxC = extract32(simd_data(desc), 2, 1); + + vop64_2(v1, v2, env, false, XxC, erm, vcdlg64, GETPC()); +} + +void HELPER(gvec_vcdlg64s)(void *v1, const void *v2, CPUS390XState *env, + uint32_t desc) +{ + const uint8_t erm = extract32(simd_data(desc), 4, 4); + const bool XxC = extract32(simd_data(desc), 2, 1); + + vop64_2(v1, v2, env, true, XxC, erm, vcdlg64, GETPC()); +} + +static uint64_t vcgd64(uint64_t a, float_status *s) +{ + return float64_to_int64(a, s); +} + +void HELPER(gvec_vcgd64)(void *v1, const void *v2, CPUS390XState *env, + uint32_t desc) +{ + const uint8_t erm = extract32(simd_data(desc), 4, 4); + const bool XxC = extract32(simd_data(desc), 2, 1); + + vop64_2(v1, v2, env, false, XxC, erm, vcgd64, GETPC()); +} + +void HELPER(gvec_vcgd64s)(void *v1, const void *v2, CPUS390XState *env, + uint32_t desc) +{ + const uint8_t erm = extract32(simd_data(desc), 4, 4); + const bool XxC = extract32(simd_data(desc), 2, 1); + + vop64_2(v1, v2, env, true, XxC, erm, vcgd64, GETPC()); +} + +static uint64_t vclgd64(uint64_t a, float_status *s) +{ + return float64_to_uint64(a, s); +} + +void HELPER(gvec_vclgd64)(void *v1, const void *v2, CPUS390XState *env, + uint32_t desc) +{ + const uint8_t erm = extract32(simd_data(desc), 4, 4); + const bool XxC = extract32(simd_data(desc), 2, 1); + + vop64_2(v1, v2, env, false, XxC, erm, vclgd64, GETPC()); +} + +void HELPER(gvec_vclgd64s)(void *v1, const void *v2, CPUS390XState *env, + uint32_t desc) +{ + const uint8_t erm = extract32(simd_data(desc), 4, 4); + const bool XxC = extract32(simd_data(desc), 2, 1); + + vop64_2(v1, v2, env, true, XxC, erm, vclgd64, GETPC()); +} + +static uint64_t vfd64(uint64_t a, uint64_t b, float_status *s) +{ + return float64_div(a, b, s); +} + +void HELPER(gvec_vfd64)(void *v1, const void *v2, const void *v3, + CPUS390XState *env, uint32_t desc) +{ + vop64_3(v1, v2, v3, env, false, vfd64, GETPC()); +} + +void HELPER(gvec_vfd64s)(void *v1, const void *v2, const void *v3, + CPUS390XState *env, uint32_t desc) +{ + vop64_3(v1, v2, v3, env, true, vfd64, GETPC()); +} + +static uint64_t vfi64(uint64_t a, float_status *s) +{ + return float64_round_to_int(a, s); +} + +void HELPER(gvec_vfi64)(void *v1, const void *v2, CPUS390XState *env, + uint32_t desc) +{ + const uint8_t erm = extract32(simd_data(desc), 4, 4); + const bool XxC = extract32(simd_data(desc), 2, 1); + + vop64_2(v1, v2, env, false, XxC, erm, vfi64, GETPC()); +} + +void HELPER(gvec_vfi64s)(void *v1, const void *v2, CPUS390XState *env, + uint32_t desc) +{ + const uint8_t erm = extract32(simd_data(desc), 4, 4); + const bool XxC = extract32(simd_data(desc), 2, 1); + + vop64_2(v1, v2, env, true, XxC, erm, vfi64, 
GETPC()); +} + +static void vfll32(S390Vector *v1, const S390Vector *v2, CPUS390XState *env, + bool s, uintptr_t retaddr) +{ + uint8_t vxc, vec_exc = 0; + S390Vector tmp = {}; + int i; + + for (i = 0; i < 2; i++) { + /* load from even element */ + const float32 a = s390_vec_read_element32(v2, i * 2); + const uint64_t ret = float32_to_float64(a, &env->fpu_status); + + s390_vec_write_element64(&tmp, i, ret); + /* indicate the source element */ + vxc = check_ieee_exc(env, i * 2, false, &vec_exc); + if (s || vxc) { + break; + } + } + handle_ieee_exc(env, vxc, vec_exc, retaddr); + *v1 = tmp; +} + +void HELPER(gvec_vfll32)(void *v1, const void *v2, CPUS390XState *env, + uint32_t desc) +{ + vfll32(v1, v2, env, false, GETPC()); +} + +void HELPER(gvec_vfll32s)(void *v1, const void *v2, CPUS390XState *env, + uint32_t desc) +{ + vfll32(v1, v2, env, true, GETPC()); +} + +static void vflr64(S390Vector *v1, const S390Vector *v2, CPUS390XState *env, + bool s, bool XxC, uint8_t erm, uintptr_t retaddr) +{ + uint8_t vxc, vec_exc = 0; + S390Vector tmp = {}; + int i, old_mode; + + old_mode = s390_swap_bfp_rounding_mode(env, erm); + for (i = 0; i < 2; i++) { + float64 a = s390_vec_read_element64(v2, i); + uint32_t ret = float64_to_float32(a, &env->fpu_status); + + /* place at even element */ + s390_vec_write_element32(&tmp, i * 2, ret); + /* indicate the source element */ + vxc = check_ieee_exc(env, i, XxC, &vec_exc); + if (s || vxc) { + break; + } + } + s390_restore_bfp_rounding_mode(env, old_mode); + handle_ieee_exc(env, vxc, vec_exc, retaddr); + *v1 = tmp; +} + +void HELPER(gvec_vflr64)(void *v1, const void *v2, CPUS390XState *env, + uint32_t desc) +{ + const uint8_t erm = extract32(simd_data(desc), 4, 4); + const bool XxC = extract32(simd_data(desc), 2, 1); + + vflr64(v1, v2, env, false, XxC, erm, GETPC()); +} + +void HELPER(gvec_vflr64s)(void *v1, const void *v2, CPUS390XState *env, + uint32_t desc) +{ + const uint8_t erm = extract32(simd_data(desc), 4, 4); + const bool XxC = extract32(simd_data(desc), 2, 1); + + vflr64(v1, v2, env, true, XxC, erm, GETPC()); +} + +static uint64_t vfm64(uint64_t a, uint64_t b, float_status *s) +{ + return float64_mul(a, b, s); +} + +void HELPER(gvec_vfm64)(void *v1, const void *v2, const void *v3, + CPUS390XState *env, uint32_t desc) +{ + vop64_3(v1, v2, v3, env, false, vfm64, GETPC()); +} + +void HELPER(gvec_vfm64s)(void *v1, const void *v2, const void *v3, + CPUS390XState *env, uint32_t desc) +{ + vop64_3(v1, v2, v3, env, true, vfm64, GETPC()); +} + +static void vfma64(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, + const S390Vector *v4, CPUS390XState *env, bool s, int flags, + uintptr_t retaddr) +{ + uint8_t vxc, vec_exc = 0; + S390Vector tmp = {}; + int i; + + for (i = 0; i < 2; i++) { + const uint64_t a = s390_vec_read_element64(v2, i); + const uint64_t b = s390_vec_read_element64(v3, i); + const uint64_t c = s390_vec_read_element64(v4, i); + uint64_t ret = float64_muladd(a, b, c, flags, &env->fpu_status); + + s390_vec_write_element64(&tmp, i, ret); + vxc = check_ieee_exc(env, i, false, &vec_exc); + if (s || vxc) { + break; + } + } + handle_ieee_exc(env, vxc, vec_exc, retaddr); + *v1 = tmp; +} + +void HELPER(gvec_vfma64)(void *v1, const void *v2, const void *v3, + const void *v4, CPUS390XState *env, uint32_t desc) +{ + vfma64(v1, v2, v3, v4, env, false, 0, GETPC()); +} + +void HELPER(gvec_vfma64s)(void *v1, const void *v2, const void *v3, + const void *v4, CPUS390XState *env, uint32_t desc) +{ + vfma64(v1, v2, v3, v4, env, true, 0, GETPC()); +} + +void 
HELPER(gvec_vfms64)(void *v1, const void *v2, const void *v3, + const void *v4, CPUS390XState *env, uint32_t desc) +{ + vfma64(v1, v2, v3, v4, env, false, float_muladd_negate_c, GETPC()); +} + +void HELPER(gvec_vfms64s)(void *v1, const void *v2, const void *v3, + const void *v4, CPUS390XState *env, uint32_t desc) +{ + vfma64(v1, v2, v3, v4, env, true, float_muladd_negate_c, GETPC()); +} + +static uint64_t vfsq64(uint64_t a, float_status *s) +{ + return float64_sqrt(a, s); +} + +void HELPER(gvec_vfsq64)(void *v1, const void *v2, CPUS390XState *env, + uint32_t desc) +{ + vop64_2(v1, v2, env, false, false, 0, vfsq64, GETPC()); +} + +void HELPER(gvec_vfsq64s)(void *v1, const void *v2, CPUS390XState *env, + uint32_t desc) +{ + vop64_2(v1, v2, env, true, false, 0, vfsq64, GETPC()); +} + +static uint64_t vfs64(uint64_t a, uint64_t b, float_status *s) +{ + return float64_sub(a, b, s); +} + +void HELPER(gvec_vfs64)(void *v1, const void *v2, const void *v3, + CPUS390XState *env, uint32_t desc) +{ + vop64_3(v1, v2, v3, env, false, vfs64, GETPC()); +} + +void HELPER(gvec_vfs64s)(void *v1, const void *v2, const void *v3, + CPUS390XState *env, uint32_t desc) +{ + vop64_3(v1, v2, v3, env, true, vfs64, GETPC()); +} + +static int vftci64(S390Vector *v1, const S390Vector *v2, CPUS390XState *env, + bool s, uint16_t i3) +{ + int i, match = 0; + + for (i = 0; i < 2; i++) { + float64 a = s390_vec_read_element64(v2, i); + + if (float64_dcmask(env, a) & i3) { + match++; + s390_vec_write_element64(v1, i, -1ull); + } else { + s390_vec_write_element64(v1, i, 0); + } + if (s) { + break; + } + } + + if (match) { + return s || match == 2 ? 0 : 1; + } + return 3; +} + +void HELPER(gvec_vftci64)(void *v1, const void *v2, CPUS390XState *env, + uint32_t desc) +{ + env->cc_op = vftci64(v1, v2, env, false, simd_data(desc)); +} + +void HELPER(gvec_vftci64s)(void *v1, const void *v2, CPUS390XState *env, + uint32_t desc) +{ + env->cc_op = vftci64(v1, v2, env, true, simd_data(desc)); +} diff --git a/qemu/target/s390x/vec_helper.c b/qemu/target/s390x/vec_helper.c new file mode 100644 index 00000000..986e7cc8 --- /dev/null +++ b/qemu/target/s390x/vec_helper.c @@ -0,0 +1,192 @@ +/* + * QEMU TCG support -- s390x vector support instructions + * + * Copyright (C) 2019 Red Hat Inc + * + * Authors: + * David Hildenbrand + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ +#include "qemu/osdep.h" +#include "cpu.h" +#include "internal.h" +#include "vec.h" +#include "tcg/tcg.h" +#include "tcg/tcg-gvec-desc.h" +#include "exec/helper-proto.h" +#include "exec/cpu_ldst.h" +#include "exec/exec-all.h" + +void HELPER(vll)(CPUS390XState *env, void *v1, uint64_t addr, uint64_t bytes) +{ + if (likely(bytes >= 16)) { + uint64_t t0, t1; + + t0 = cpu_ldq_data_ra(env, addr, GETPC()); + addr = wrap_address(env, addr + 8); + t1 = cpu_ldq_data_ra(env, addr, GETPC()); + s390_vec_write_element64(v1, 0, t0); + s390_vec_write_element64(v1, 1, t1); + } else { + S390Vector tmp = {}; + int i; + + for (i = 0; i < bytes; i++) { + uint8_t byte = cpu_ldub_data_ra(env, addr, GETPC()); + + s390_vec_write_element8(&tmp, i, byte); + addr = wrap_address(env, addr + 1); + } + *(S390Vector *)v1 = tmp; + } +} + +#define DEF_VPK_HFN(BITS, TBITS) \ +typedef uint##TBITS##_t (*vpk##BITS##_fn)(uint##BITS##_t, int *); \ +static int vpk##BITS##_hfn(S390Vector *v1, const S390Vector *v2, \ + const S390Vector *v3, vpk##BITS##_fn fn) \ +{ \ + int i, saturated = 0; \ + S390Vector tmp; \ + \ + for (i = 0; i < (128 / TBITS); i++) { \ + uint##BITS##_t src; \ + \ + if (i < (128 / BITS)) { \ + src = s390_vec_read_element##BITS(v2, i); \ + } else { \ + src = s390_vec_read_element##BITS(v3, i - (128 / BITS)); \ + } \ + s390_vec_write_element##TBITS(&tmp, i, fn(src, &saturated)); \ + } \ + *v1 = tmp; \ + return saturated; \ +} +DEF_VPK_HFN(64, 32) +DEF_VPK_HFN(32, 16) +DEF_VPK_HFN(16, 8) + +#define DEF_VPK(BITS, TBITS) \ +static uint##TBITS##_t vpk##BITS##e(uint##BITS##_t src, int *saturated) \ +{ \ + return src; \ +} \ +void HELPER(gvec_vpk##BITS)(void *v1, const void *v2, const void *v3, \ + uint32_t desc) \ +{ \ + vpk##BITS##_hfn(v1, v2, v3, vpk##BITS##e); \ +} +DEF_VPK(64, 32) +DEF_VPK(32, 16) +DEF_VPK(16, 8) + +#define DEF_VPKS(BITS, TBITS) \ +static uint##TBITS##_t vpks##BITS##e(uint##BITS##_t src, int *saturated) \ +{ \ + if ((int##BITS##_t)src > INT##TBITS##_MAX) { \ + (*saturated)++; \ + return INT##TBITS##_MAX; \ + } else if ((int##BITS##_t)src < INT##TBITS##_MIN) { \ + (*saturated)++; \ + return INT##TBITS##_MIN; \ + } \ + return src; \ +} \ +void HELPER(gvec_vpks##BITS)(void *v1, const void *v2, const void *v3, \ + uint32_t desc) \ +{ \ + vpk##BITS##_hfn(v1, v2, v3, vpks##BITS##e); \ +} \ +void HELPER(gvec_vpks_cc##BITS)(void *v1, const void *v2, const void *v3, \ + CPUS390XState *env, uint32_t desc) \ +{ \ + int saturated = vpk##BITS##_hfn(v1, v2, v3, vpks##BITS##e); \ + \ + if (saturated == (128 / TBITS)) { \ + env->cc_op = 3; \ + } else if (saturated) { \ + env->cc_op = 1; \ + } else { \ + env->cc_op = 0; \ + } \ +} +DEF_VPKS(64, 32) +DEF_VPKS(32, 16) +DEF_VPKS(16, 8) + +#define DEF_VPKLS(BITS, TBITS) \ +static uint##TBITS##_t vpkls##BITS##e(uint##BITS##_t src, int *saturated) \ +{ \ + if (src > UINT##TBITS##_MAX) { \ + (*saturated)++; \ + return UINT##TBITS##_MAX; \ + } \ + return src; \ +} \ +void HELPER(gvec_vpkls##BITS)(void *v1, const void *v2, const void *v3, \ + uint32_t desc) \ +{ \ + vpk##BITS##_hfn(v1, v2, v3, vpkls##BITS##e); \ +} \ +void HELPER(gvec_vpkls_cc##BITS)(void *v1, const void *v2, const void *v3, \ + CPUS390XState *env, uint32_t desc) \ +{ \ + int saturated = vpk##BITS##_hfn(v1, v2, v3, vpkls##BITS##e); \ + \ + if (saturated == (128 / TBITS)) { \ + env->cc_op = 3; \ + } else if (saturated) { \ + env->cc_op = 1; \ + } else { \ + env->cc_op = 0; \ + } \ +} +DEF_VPKLS(64, 32) +DEF_VPKLS(32, 16) +DEF_VPKLS(16, 8) + +void HELPER(gvec_vperm)(void *v1, const void *v2, const void 
*v3, + const void *v4, uint32_t desc) +{ + S390Vector tmp; + int i; + + for (i = 0; i < 16; i++) { + const uint8_t selector = s390_vec_read_element8(v4, i) & 0x1f; + uint8_t byte; + + if (selector < 16) { + byte = s390_vec_read_element8(v2, selector); + } else { + byte = s390_vec_read_element8(v3, selector - 16); + } + s390_vec_write_element8(&tmp, i, byte); + } + *(S390Vector *)v1 = tmp; +} + +void HELPER(vstl)(CPUS390XState *env, const void *v1, uint64_t addr, + uint64_t bytes) +{ + /* Probe write access before actually modifying memory */ + probe_write_access(env, addr, bytes, GETPC()); + + if (likely(bytes >= 16)) { + cpu_stq_data_ra(env, addr, s390_vec_read_element64(v1, 0), GETPC()); + addr = wrap_address(env, addr + 8); + cpu_stq_data_ra(env, addr, s390_vec_read_element64(v1, 1), GETPC()); + } else { + S390Vector tmp = {}; + int i; + + for (i = 0; i < bytes; i++) { + uint8_t byte = s390_vec_read_element8(v1, i); + + cpu_stb_data_ra(env, addr, byte, GETPC()); + addr = wrap_address(env, addr + 1); + } + *(S390Vector *)v1 = tmp; + } +} diff --git a/qemu/target/s390x/vec_int_helper.c b/qemu/target/s390x/vec_int_helper.c new file mode 100644 index 00000000..0d6bc13d --- /dev/null +++ b/qemu/target/s390x/vec_int_helper.c @@ -0,0 +1,618 @@ +/* + * QEMU TCG support -- s390x vector integer instruction support + * + * Copyright (C) 2019 Red Hat Inc + * + * Authors: + * David Hildenbrand + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "cpu.h" +#include "vec.h" +#include "exec/helper-proto.h" +#include "tcg/tcg-gvec-desc.h" + +static bool s390_vec_is_zero(const S390Vector *v) +{ + return !v->doubleword[0] && !v->doubleword[1]; +} + +static void s390_vec_xor(S390Vector *res, const S390Vector *a, + const S390Vector *b) +{ + res->doubleword[0] = a->doubleword[0] ^ b->doubleword[0]; + res->doubleword[1] = a->doubleword[1] ^ b->doubleword[1]; +} + +static void s390_vec_and(S390Vector *res, const S390Vector *a, + const S390Vector *b) +{ + res->doubleword[0] = a->doubleword[0] & b->doubleword[0]; + res->doubleword[1] = a->doubleword[1] & b->doubleword[1]; +} + +static bool s390_vec_equal(const S390Vector *a, const S390Vector *b) +{ + return a->doubleword[0] == b->doubleword[0] && + a->doubleword[1] == b->doubleword[1]; +} + +static void s390_vec_shl(S390Vector *d, const S390Vector *a, uint64_t count) +{ + uint64_t tmp; + + g_assert(count < 128); + if (count == 0) { + d->doubleword[0] = a->doubleword[0]; + d->doubleword[1] = a->doubleword[1]; + } else if (count == 64) { + d->doubleword[0] = a->doubleword[1]; + d->doubleword[1] = 0; + } else if (count < 64) { + tmp = extract64(a->doubleword[1], 64 - count, count); + d->doubleword[1] = a->doubleword[1] << count; + d->doubleword[0] = (a->doubleword[0] << count) | tmp; + } else { + d->doubleword[0] = a->doubleword[1] << (count - 64); + d->doubleword[1] = 0; + } +} + +static void s390_vec_sar(S390Vector *d, const S390Vector *a, uint64_t count) +{ + uint64_t tmp; + + if (count == 0) { + d->doubleword[0] = a->doubleword[0]; + d->doubleword[1] = a->doubleword[1]; + } else if (count == 64) { + tmp = (int64_t)a->doubleword[0] >> 63; + d->doubleword[1] = a->doubleword[0]; + d->doubleword[0] = tmp; + } else if (count < 64) { + tmp = a->doubleword[1] >> count; + d->doubleword[1] = deposit64(tmp, 64 - count, count, a->doubleword[0]); + d->doubleword[0] = (int64_t)a->doubleword[0] >> count; + } else { + tmp = 
(int64_t)a->doubleword[0] >> 63; + d->doubleword[1] = (int64_t)a->doubleword[0] >> (count - 64); + d->doubleword[0] = tmp; + } +} + +static void s390_vec_shr(S390Vector *d, const S390Vector *a, uint64_t count) +{ + uint64_t tmp; + + g_assert(count < 128); + if (count == 0) { + d->doubleword[0] = a->doubleword[0]; + d->doubleword[1] = a->doubleword[1]; + } else if (count == 64) { + d->doubleword[1] = a->doubleword[0]; + d->doubleword[0] = 0; + } else if (count < 64) { + tmp = a->doubleword[1] >> count; + d->doubleword[1] = deposit64(tmp, 64 - count, count, a->doubleword[0]); + d->doubleword[0] = a->doubleword[0] >> count; + } else { + d->doubleword[1] = a->doubleword[0] >> (count - 64); + d->doubleword[0] = 0; + } +} +#define DEF_VAVG(BITS) \ +void HELPER(gvec_vavg##BITS)(void *v1, const void *v2, const void *v3, \ + uint32_t desc) \ +{ \ + int i; \ + \ + for (i = 0; i < (128 / BITS); i++) { \ + const int32_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, i); \ + const int32_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, i); \ + \ + s390_vec_write_element##BITS(v1, i, (a + b + 1) >> 1); \ + } \ +} +DEF_VAVG(8) +DEF_VAVG(16) + +#define DEF_VAVGL(BITS) \ +void HELPER(gvec_vavgl##BITS)(void *v1, const void *v2, const void *v3, \ + uint32_t desc) \ +{ \ + int i; \ + \ + for (i = 0; i < (128 / BITS); i++) { \ + const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \ + const uint##BITS##_t b = s390_vec_read_element##BITS(v3, i); \ + \ + s390_vec_write_element##BITS(v1, i, (a + b + 1) >> 1); \ + } \ +} +DEF_VAVGL(8) +DEF_VAVGL(16) + +#define DEF_VCLZ(BITS) \ +void HELPER(gvec_vclz##BITS)(void *v1, const void *v2, uint32_t desc) \ +{ \ + int i; \ + \ + for (i = 0; i < (128 / BITS); i++) { \ + const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \ + \ + s390_vec_write_element##BITS(v1, i, clz32(a) - 32 + BITS); \ + } \ +} +DEF_VCLZ(8) +DEF_VCLZ(16) + +#define DEF_VCTZ(BITS) \ +void HELPER(gvec_vctz##BITS)(void *v1, const void *v2, uint32_t desc) \ +{ \ + int i; \ + \ + for (i = 0; i < (128 / BITS); i++) { \ + const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \ + \ + s390_vec_write_element##BITS(v1, i, a ? 
ctz32(a) : BITS); \ + } \ +} +DEF_VCTZ(8) +DEF_VCTZ(16) + +/* like binary multiplication, but XOR instead of addition */ +#define DEF_GALOIS_MULTIPLY(BITS, TBITS) \ +static uint##TBITS##_t galois_multiply##BITS(uint##TBITS##_t a, \ + uint##TBITS##_t b) \ +{ \ + uint##TBITS##_t res = 0; \ + \ + while (b) { \ + if (b & 0x1) { \ + res = res ^ a; \ + } \ + a = a << 1; \ + b = b >> 1; \ + } \ + return res; \ +} +DEF_GALOIS_MULTIPLY(8, 16) +DEF_GALOIS_MULTIPLY(16, 32) +DEF_GALOIS_MULTIPLY(32, 64) + +static S390Vector galois_multiply64(uint64_t a, uint64_t b) +{ + S390Vector res = {}; + S390Vector va = { + .doubleword[1] = a, + }; + S390Vector vb = { + .doubleword[1] = b, + }; + + while (!s390_vec_is_zero(&vb)) { + if (vb.doubleword[1] & 0x1) { + s390_vec_xor(&res, &res, &va); + } + s390_vec_shl(&va, &va, 1); + s390_vec_shr(&vb, &vb, 1); + } + return res; +} + +#define DEF_VGFM(BITS, TBITS) \ +void HELPER(gvec_vgfm##BITS)(void *v1, const void *v2, const void *v3, \ + uint32_t desc) \ +{ \ + int i; \ + \ + for (i = 0; i < (128 / TBITS); i++) { \ + uint##BITS##_t a = s390_vec_read_element##BITS(v2, i * 2); \ + uint##BITS##_t b = s390_vec_read_element##BITS(v3, i * 2); \ + uint##TBITS##_t d = galois_multiply##BITS(a, b); \ + \ + a = s390_vec_read_element##BITS(v2, i * 2 + 1); \ + b = s390_vec_read_element##BITS(v3, i * 2 + 1); \ + d = d ^ galois_multiply32(a, b); \ + s390_vec_write_element##TBITS(v1, i, d); \ + } \ +} +DEF_VGFM(8, 16) +DEF_VGFM(16, 32) +DEF_VGFM(32, 64) + +void HELPER(gvec_vgfm64)(void *v1, const void *v2, const void *v3, + uint32_t desc) +{ + S390Vector tmp1, tmp2; + uint64_t a, b; + + a = s390_vec_read_element64(v2, 0); + b = s390_vec_read_element64(v3, 0); + tmp1 = galois_multiply64(a, b); + a = s390_vec_read_element64(v2, 1); + b = s390_vec_read_element64(v3, 1); + tmp2 = galois_multiply64(a, b); + s390_vec_xor(v1, &tmp1, &tmp2); +} + +#define DEF_VGFMA(BITS, TBITS) \ +void HELPER(gvec_vgfma##BITS)(void *v1, const void *v2, const void *v3, \ + const void *v4, uint32_t desc) \ +{ \ + int i; \ + \ + for (i = 0; i < (128 / TBITS); i++) { \ + uint##BITS##_t a = s390_vec_read_element##BITS(v2, i * 2); \ + uint##BITS##_t b = s390_vec_read_element##BITS(v3, i * 2); \ + uint##TBITS##_t d = galois_multiply##BITS(a, b); \ + \ + a = s390_vec_read_element##BITS(v2, i * 2 + 1); \ + b = s390_vec_read_element##BITS(v3, i * 2 + 1); \ + d = d ^ galois_multiply32(a, b); \ + d = d ^ s390_vec_read_element##TBITS(v4, i); \ + s390_vec_write_element##TBITS(v1, i, d); \ + } \ +} +DEF_VGFMA(8, 16) +DEF_VGFMA(16, 32) +DEF_VGFMA(32, 64) + +void HELPER(gvec_vgfma64)(void *v1, const void *v2, const void *v3, + const void *v4, uint32_t desc) +{ + S390Vector tmp1, tmp2; + uint64_t a, b; + + a = s390_vec_read_element64(v2, 0); + b = s390_vec_read_element64(v3, 0); + tmp1 = galois_multiply64(a, b); + a = s390_vec_read_element64(v2, 1); + b = s390_vec_read_element64(v3, 1); + tmp2 = galois_multiply64(a, b); + s390_vec_xor(&tmp1, &tmp1, &tmp2); + s390_vec_xor(v1, &tmp1, v4); +} + +#define DEF_VMAL(BITS) \ +void HELPER(gvec_vmal##BITS)(void *v1, const void *v2, const void *v3, \ + const void *v4, uint32_t desc) \ +{ \ + int i; \ + \ + for (i = 0; i < (128 / BITS); i++) { \ + const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \ + const uint##BITS##_t b = s390_vec_read_element##BITS(v3, i); \ + const uint##BITS##_t c = s390_vec_read_element##BITS(v4, i); \ + \ + s390_vec_write_element##BITS(v1, i, a * b + c); \ + } \ +} +DEF_VMAL(8) +DEF_VMAL(16) + +#define DEF_VMAH(BITS) \ +void 
HELPER(gvec_vmah##BITS)(void *v1, const void *v2, const void *v3, \ + const void *v4, uint32_t desc) \ +{ \ + int i; \ + \ + for (i = 0; i < (128 / BITS); i++) { \ + const int32_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, i); \ + const int32_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, i); \ + const int32_t c = (int##BITS##_t)s390_vec_read_element##BITS(v4, i); \ + \ + s390_vec_write_element##BITS(v1, i, (a * b + c) >> BITS); \ + } \ +} +DEF_VMAH(8) +DEF_VMAH(16) + +#define DEF_VMALH(BITS) \ +void HELPER(gvec_vmalh##BITS)(void *v1, const void *v2, const void *v3, \ + const void *v4, uint32_t desc) \ +{ \ + int i; \ + \ + for (i = 0; i < (128 / BITS); i++) { \ + const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \ + const uint##BITS##_t b = s390_vec_read_element##BITS(v3, i); \ + const uint##BITS##_t c = s390_vec_read_element##BITS(v4, i); \ + \ + s390_vec_write_element##BITS(v1, i, (a * b + c) >> BITS); \ + } \ +} +DEF_VMALH(8) +DEF_VMALH(16) + +#define DEF_VMAE(BITS, TBITS) \ +void HELPER(gvec_vmae##BITS)(void *v1, const void *v2, const void *v3, \ + const void *v4, uint32_t desc) \ +{ \ + int i, j; \ + \ + for (i = 0, j = 0; i < (128 / TBITS); i++, j += 2) { \ + int##TBITS##_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, j); \ + int##TBITS##_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, j); \ + int##TBITS##_t c = s390_vec_read_element##TBITS(v4, i); \ + \ + s390_vec_write_element##TBITS(v1, i, a * b + c); \ + } \ +} +DEF_VMAE(8, 16) +DEF_VMAE(16, 32) +DEF_VMAE(32, 64) + +#define DEF_VMALE(BITS, TBITS) \ +void HELPER(gvec_vmale##BITS)(void *v1, const void *v2, const void *v3, \ + const void *v4, uint32_t desc) \ +{ \ + int i, j; \ + \ + for (i = 0, j = 0; i < (128 / TBITS); i++, j += 2) { \ + uint##TBITS##_t a = s390_vec_read_element##BITS(v2, j); \ + uint##TBITS##_t b = s390_vec_read_element##BITS(v3, j); \ + uint##TBITS##_t c = s390_vec_read_element##TBITS(v4, i); \ + \ + s390_vec_write_element##TBITS(v1, i, a * b + c); \ + } \ +} +DEF_VMALE(8, 16) +DEF_VMALE(16, 32) +DEF_VMALE(32, 64) + +#define DEF_VMAO(BITS, TBITS) \ +void HELPER(gvec_vmao##BITS)(void *v1, const void *v2, const void *v3, \ + const void *v4, uint32_t desc) \ +{ \ + int i, j; \ + \ + for (i = 0, j = 1; i < (128 / TBITS); i++, j += 2) { \ + int##TBITS##_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, j); \ + int##TBITS##_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, j); \ + int##TBITS##_t c = s390_vec_read_element##TBITS(v4, i); \ + \ + s390_vec_write_element##TBITS(v1, i, a * b + c); \ + } \ +} +DEF_VMAO(8, 16) +DEF_VMAO(16, 32) +DEF_VMAO(32, 64) + +#define DEF_VMALO(BITS, TBITS) \ +void HELPER(gvec_vmalo##BITS)(void *v1, const void *v2, const void *v3, \ + const void *v4, uint32_t desc) \ +{ \ + int i, j; \ + \ + for (i = 0, j = 1; i < (128 / TBITS); i++, j += 2) { \ + uint##TBITS##_t a = s390_vec_read_element##BITS(v2, j); \ + uint##TBITS##_t b = s390_vec_read_element##BITS(v3, j); \ + uint##TBITS##_t c = s390_vec_read_element##TBITS(v4, i); \ + \ + s390_vec_write_element##TBITS(v1, i, a * b + c); \ + } \ +} +DEF_VMALO(8, 16) +DEF_VMALO(16, 32) +DEF_VMALO(32, 64) + +#define DEF_VMH(BITS) \ +void HELPER(gvec_vmh##BITS)(void *v1, const void *v2, const void *v3, \ + uint32_t desc) \ +{ \ + int i; \ + \ + for (i = 0; i < (128 / BITS); i++) { \ + const int32_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, i); \ + const int32_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, i); \ + \ + s390_vec_write_element##BITS(v1, i, (a * b) >> BITS); \ + } \ +} +DEF_VMH(8) 
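+/*
+ * A quick illustrative sketch (not part of the original patch): DEF_VMH
+ * implements the signed VECTOR MULTIPLY HIGH element operation.  Widening
+ * to int32_t is enough because the macro is only instantiated for 8- and
+ * 16-bit elements, so the full product always fits, and shifting right by
+ * BITS extracts its high half.  For BITS = 8 the per-element semantics are
+ * (vmh8_element is a hypothetical name used only for illustration):
+ *
+ *     int8_t vmh8_element(int8_t a, int8_t b)
+ *     {
+ *         return ((int32_t)a * (int32_t)b) >> 8;
+ *     }
+ *
+ * e.g. a = -128, b = 2: the product is -256 (0xFFFFFF00), and the
+ * arithmetic shift yields -1 (0xFF), the high byte of the 16-bit result.
+ */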
+DEF_VMH(16) + +#define DEF_VMLH(BITS) \ +void HELPER(gvec_vmlh##BITS)(void *v1, const void *v2, const void *v3, \ + uint32_t desc) \ +{ \ + int i; \ + \ + for (i = 0; i < (128 / BITS); i++) { \ + const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \ + const uint##BITS##_t b = s390_vec_read_element##BITS(v3, i); \ + \ + s390_vec_write_element##BITS(v1, i, (a * b) >> BITS); \ + } \ +} +DEF_VMLH(8) +DEF_VMLH(16) + +#define DEF_VME(BITS, TBITS) \ +void HELPER(gvec_vme##BITS)(void *v1, const void *v2, const void *v3, \ + uint32_t desc) \ +{ \ + int i, j; \ + \ + for (i = 0, j = 0; i < (128 / TBITS); i++, j += 2) { \ + int##TBITS##_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, j); \ + int##TBITS##_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, j); \ + \ + s390_vec_write_element##TBITS(v1, i, a * b); \ + } \ +} +DEF_VME(8, 16) +DEF_VME(16, 32) +DEF_VME(32, 64) + +#define DEF_VMLE(BITS, TBITS) \ +void HELPER(gvec_vmle##BITS)(void *v1, const void *v2, const void *v3, \ + uint32_t desc) \ +{ \ + int i, j; \ + \ + for (i = 0, j = 0; i < (128 / TBITS); i++, j += 2) { \ + const uint##TBITS##_t a = s390_vec_read_element##BITS(v2, j); \ + const uint##TBITS##_t b = s390_vec_read_element##BITS(v3, j); \ + \ + s390_vec_write_element##TBITS(v1, i, a * b); \ + } \ +} +DEF_VMLE(8, 16) +DEF_VMLE(16, 32) +DEF_VMLE(32, 64) + +#define DEF_VMO(BITS, TBITS) \ +void HELPER(gvec_vmo##BITS)(void *v1, const void *v2, const void *v3, \ + uint32_t desc) \ +{ \ + int i, j; \ + \ + for (i = 0, j = 1; i < (128 / TBITS); i++, j += 2) { \ + int##TBITS##_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, j); \ + int##TBITS##_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, j); \ + \ + s390_vec_write_element##TBITS(v1, i, a * b); \ + } \ +} +DEF_VMO(8, 16) +DEF_VMO(16, 32) +DEF_VMO(32, 64) + +#define DEF_VMLO(BITS, TBITS) \ +void HELPER(gvec_vmlo##BITS)(void *v1, const void *v2, const void *v3, \ + uint32_t desc) \ +{ \ + int i, j; \ + \ + for (i = 0, j = 1; i < (128 / TBITS); i++, j += 2) { \ + const uint##TBITS##_t a = s390_vec_read_element##BITS(v2, j); \ + const uint##TBITS##_t b = s390_vec_read_element##BITS(v3, j); \ + \ + s390_vec_write_element##TBITS(v1, i, a * b); \ + } \ +} +DEF_VMLO(8, 16) +DEF_VMLO(16, 32) +DEF_VMLO(32, 64) + +#define DEF_VPOPCT(BITS) \ +void HELPER(gvec_vpopct##BITS)(void *v1, const void *v2, uint32_t desc) \ +{ \ + int i; \ + \ + for (i = 0; i < (128 / BITS); i++) { \ + const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \ + \ + s390_vec_write_element##BITS(v1, i, ctpop32(a)); \ + } \ +} +DEF_VPOPCT(8) +DEF_VPOPCT(16) + +#define DEF_VERLLV(BITS) \ +void HELPER(gvec_verllv##BITS)(void *v1, const void *v2, const void *v3, \ + uint32_t desc) \ +{ \ + int i; \ + \ + for (i = 0; i < (128 / BITS); i++) { \ + const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \ + const uint##BITS##_t b = s390_vec_read_element##BITS(v3, i); \ + \ + s390_vec_write_element##BITS(v1, i, rol##BITS(a, b)); \ + } \ +} +DEF_VERLLV(8) +DEF_VERLLV(16) + +#define DEF_VERLL(BITS) \ +void HELPER(gvec_verll##BITS)(void *v1, const void *v2, uint64_t count, \ + uint32_t desc) \ +{ \ + int i; \ + \ + for (i = 0; i < (128 / BITS); i++) { \ + const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \ + \ + s390_vec_write_element##BITS(v1, i, rol##BITS(a, count)); \ + } \ +} +DEF_VERLL(8) +DEF_VERLL(16) + +#define DEF_VERIM(BITS) \ +void HELPER(gvec_verim##BITS)(void *v1, const void *v2, const void *v3, \ + uint32_t desc) \ +{ \ + const uint8_t count = simd_data(desc); \ + int i; \ + \ + 
for (i = 0; i < (128 / BITS); i++) { \ + const uint##BITS##_t a = s390_vec_read_element##BITS(v1, i); \ + const uint##BITS##_t b = s390_vec_read_element##BITS(v2, i); \ + const uint##BITS##_t mask = s390_vec_read_element##BITS(v3, i); \ + const uint##BITS##_t d = (a & ~mask) | (rol##BITS(b, count) & mask); \ + \ + s390_vec_write_element##BITS(v1, i, d); \ + } \ +} +DEF_VERIM(8) +DEF_VERIM(16) + +void HELPER(gvec_vsl)(void *v1, const void *v2, uint64_t count, + uint32_t desc) +{ + s390_vec_shl(v1, v2, count); +} + +void HELPER(gvec_vsra)(void *v1, const void *v2, uint64_t count, + uint32_t desc) +{ + s390_vec_sar(v1, v2, count); +} + +void HELPER(gvec_vsrl)(void *v1, const void *v2, uint64_t count, + uint32_t desc) +{ + s390_vec_shr(v1, v2, count); +} + +#define DEF_VSCBI(BITS) \ +void HELPER(gvec_vscbi##BITS)(void *v1, const void *v2, const void *v3, \ + uint32_t desc) \ +{ \ + int i; \ + \ + for (i = 0; i < (128 / BITS); i++) { \ + const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \ + const uint##BITS##_t b = s390_vec_read_element##BITS(v3, i); \ + \ + s390_vec_write_element##BITS(v1, i, a >= b); \ + } \ +} +DEF_VSCBI(8) +DEF_VSCBI(16) + +void HELPER(gvec_vtm)(void *v1, const void *v2, CPUS390XState *env, + uint32_t desc) +{ + S390Vector tmp; + + s390_vec_and(&tmp, v1, v2); + if (s390_vec_is_zero(&tmp)) { + /* Selected bits all zeros; or all mask bits zero */ + env->cc_op = 0; + } else if (s390_vec_equal(&tmp, v2)) { + /* Selected bits all ones */ + env->cc_op = 3; + } else { + /* Selected bits a mix of zeros and ones */ + env->cc_op = 1; + } +} diff --git a/qemu/target/s390x/vec_string_helper.c b/qemu/target/s390x/vec_string_helper.c new file mode 100644 index 00000000..c516c0ce --- /dev/null +++ b/qemu/target/s390x/vec_string_helper.c @@ -0,0 +1,473 @@ +/* + * QEMU TCG support -- s390x vector string instruction support + * + * Copyright (C) 2019 Red Hat Inc + * + * Authors: + * David Hildenbrand + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "cpu.h" +#include "internal.h" +#include "vec.h" +#include "tcg/tcg.h" +#include "tcg/tcg-gvec-desc.h" +#include "exec/helper-proto.h" + +/* + * Returns a bit set in the MSB of each element that is zero, + * as defined by the mask. + */ +static inline uint64_t zero_search(uint64_t a, uint64_t mask) +{ + return ~(((a & mask) + mask) | a | mask); +} + +/* + * Returns a bit set in the MSB of each element that is not zero, + * as defined by the mask. + */ +static inline uint64_t nonzero_search(uint64_t a, uint64_t mask) +{ + return (((a & mask) + mask) | a) & ~mask; +} + +/* + * Returns the byte offset for the first match, or 16 for no match. + */ +static inline int match_index(uint64_t c0, uint64_t c1) +{ + return (c0 ? clz64(c0) : clz64(c1) + 64) >> 3; +} + +/* + * Returns the number of bits composing one element. + */ +static uint8_t get_element_bits(uint8_t es) +{ + return (1 << es) * BITS_PER_BYTE; +} + +/* + * Returns the bitmask for a single element. + */ +static uint64_t get_single_element_mask(uint8_t es) +{ + return -1ull >> (64 - get_element_bits(es)); +} + +/* + * Returns the bitmask for a single element (excluding the MSB). + */ +static uint64_t get_single_element_lsbs_mask(uint8_t es) +{ + return -1ull >> (65 - get_element_bits(es)); +} + +/* + * Returns the bitmasks for multiple elements (excluding the MSBs). 
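+ *
+ * Worked example (an illustration, not from the original source): for
+ * es = MO_8 this mask is 0x7f7f7f7f7f7f7f7f.  In zero_search() above,
+ * (a & mask) + mask carries into an element's MSB iff the element's low
+ * bits are non-zero; OR-ing in a itself covers the case where only the
+ * MSB was set, and OR-ing in mask fills every non-MSB position, so the
+ * final complement leaves a bit set only in the MSB of each all-zero
+ * element.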
+ */ +static uint64_t get_element_lsbs_mask(uint8_t es) +{ + return dup_const(es, get_single_element_lsbs_mask(es)); +} + +static int vfae(void *v1, const void *v2, const void *v3, bool in, + bool rt, bool zs, uint8_t es) +{ + const uint64_t mask = get_element_lsbs_mask(es); + const int bits = get_element_bits(es); + uint64_t a0, a1, b0, b1, e0, e1, t0, t1, z0, z1; + uint64_t first_zero = 16; + uint64_t first_equal; + int i; + + a0 = s390_vec_read_element64(v2, 0); + a1 = s390_vec_read_element64(v2, 1); + b0 = s390_vec_read_element64(v3, 0); + b1 = s390_vec_read_element64(v3, 1); + e0 = 0; + e1 = 0; + /* compare against equality with every other element */ + for (i = 0; i < 64; i += bits) { + t0 = rol64(b0, i); + t1 = rol64(b1, i); + e0 |= zero_search(a0 ^ t0, mask); + e0 |= zero_search(a0 ^ t1, mask); + e1 |= zero_search(a1 ^ t0, mask); + e1 |= zero_search(a1 ^ t1, mask); + } + /* invert the result if requested - invert only the MSBs */ + if (in) { + e0 = ~e0 & ~mask; + e1 = ~e1 & ~mask; + } + first_equal = match_index(e0, e1); + + if (zs) { + z0 = zero_search(a0, mask); + z1 = zero_search(a1, mask); + first_zero = match_index(z0, z1); + } + + if (rt) { + e0 = (e0 >> (bits - 1)) * get_single_element_mask(es); + e1 = (e1 >> (bits - 1)) * get_single_element_mask(es); + s390_vec_write_element64(v1, 0, e0); + s390_vec_write_element64(v1, 1, e1); + } else { + s390_vec_write_element64(v1, 0, MIN(first_equal, first_zero)); + s390_vec_write_element64(v1, 1, 0); + } + + if (first_zero == 16 && first_equal == 16) { + return 3; /* no match */ + } else if (first_zero == 16) { + return 1; /* matching elements, no match for zero */ + } else if (first_equal < first_zero) { + return 2; /* matching elements before match for zero */ + } + return 0; /* match for zero */ +} + +#define DEF_VFAE_HELPER(BITS) \ +void HELPER(gvec_vfae##BITS)(void *v1, const void *v2, const void *v3, \ + uint32_t desc) \ +{ \ + const bool in = extract32(simd_data(desc), 3, 1); \ + const bool rt = extract32(simd_data(desc), 2, 1); \ + const bool zs = extract32(simd_data(desc), 1, 1); \ + \ + vfae(v1, v2, v3, in, rt, zs, MO_##BITS); \ +} +DEF_VFAE_HELPER(8) +DEF_VFAE_HELPER(16) +DEF_VFAE_HELPER(32) + +#define DEF_VFAE_CC_HELPER(BITS) \ +void HELPER(gvec_vfae_cc##BITS)(void *v1, const void *v2, const void *v3, \ + CPUS390XState *env, uint32_t desc) \ +{ \ + const bool in = extract32(simd_data(desc), 3, 1); \ + const bool rt = extract32(simd_data(desc), 2, 1); \ + const bool zs = extract32(simd_data(desc), 1, 1); \ + \ + env->cc_op = vfae(v1, v2, v3, in, rt, zs, MO_##BITS); \ +} +DEF_VFAE_CC_HELPER(8) +DEF_VFAE_CC_HELPER(16) +DEF_VFAE_CC_HELPER(32) + +static int vfee(void *v1, const void *v2, const void *v3, bool zs, uint8_t es) +{ + const uint64_t mask = get_element_lsbs_mask(es); + uint64_t a0, a1, b0, b1, e0, e1, z0, z1; + uint64_t first_zero = 16; + uint64_t first_equal; + + a0 = s390_vec_read_element64(v2, 0); + a1 = s390_vec_read_element64(v2, 1); + b0 = s390_vec_read_element64(v3, 0); + b1 = s390_vec_read_element64(v3, 1); + e0 = zero_search(a0 ^ b0, mask); + e1 = zero_search(a1 ^ b1, mask); + first_equal = match_index(e0, e1); + + if (zs) { + z0 = zero_search(a0, mask); + z1 = zero_search(a1, mask); + first_zero = match_index(z0, z1); + } + + s390_vec_write_element64(v1, 0, MIN(first_equal, first_zero)); + s390_vec_write_element64(v1, 1, 0); + if (first_zero == 16 && first_equal == 16) { + return 3; /* no match */ + } else if (first_zero == 16) { + return 1; /* matching elements, no match for zero */ + } else if (first_equal 
< first_zero) { + return 2; /* matching elements before match for zero */ + } + return 0; /* match for zero */ +} + +#define DEF_VFEE_HELPER(BITS) \ +void HELPER(gvec_vfee##BITS)(void *v1, const void *v2, const void *v3, \ + uint32_t desc) \ +{ \ + const bool zs = extract32(simd_data(desc), 1, 1); \ + \ + vfee(v1, v2, v3, zs, MO_##BITS); \ +} +DEF_VFEE_HELPER(8) +DEF_VFEE_HELPER(16) +DEF_VFEE_HELPER(32) + +#define DEF_VFEE_CC_HELPER(BITS) \ +void HELPER(gvec_vfee_cc##BITS)(void *v1, const void *v2, const void *v3, \ + CPUS390XState *env, uint32_t desc) \ +{ \ + const bool zs = extract32(simd_data(desc), 1, 1); \ + \ + env->cc_op = vfee(v1, v2, v3, zs, MO_##BITS); \ +} +DEF_VFEE_CC_HELPER(8) +DEF_VFEE_CC_HELPER(16) +DEF_VFEE_CC_HELPER(32) + +static int vfene(void *v1, const void *v2, const void *v3, bool zs, uint8_t es) +{ + const uint64_t mask = get_element_lsbs_mask(es); + uint64_t a0, a1, b0, b1, e0, e1, z0, z1; + uint64_t first_zero = 16; + uint64_t first_inequal; + bool smaller = false; + + a0 = s390_vec_read_element64(v2, 0); + a1 = s390_vec_read_element64(v2, 1); + b0 = s390_vec_read_element64(v3, 0); + b1 = s390_vec_read_element64(v3, 1); + e0 = nonzero_search(a0 ^ b0, mask); + e1 = nonzero_search(a1 ^ b1, mask); + first_inequal = match_index(e0, e1); + + /* identify the smaller element */ + if (first_inequal < 16) { + uint8_t enr = first_inequal / (1 << es); + uint32_t a = s390_vec_read_element(v2, enr, es); + uint32_t b = s390_vec_read_element(v3, enr, es); + + smaller = a < b; + } + + if (zs) { + z0 = zero_search(a0, mask); + z1 = zero_search(a1, mask); + first_zero = match_index(z0, z1); + } + + s390_vec_write_element64(v1, 0, MIN(first_inequal, first_zero)); + s390_vec_write_element64(v1, 1, 0); + if (first_zero == 16 && first_inequal == 16) { + return 3; + } else if (first_zero < first_inequal) { + return 0; + } + return smaller ? 
1 : 2; +} + +#define DEF_VFENE_HELPER(BITS) \ +void HELPER(gvec_vfene##BITS)(void *v1, const void *v2, const void *v3, \ + uint32_t desc) \ +{ \ + const bool zs = extract32(simd_data(desc), 1, 1); \ + \ + vfene(v1, v2, v3, zs, MO_##BITS); \ +} +DEF_VFENE_HELPER(8) +DEF_VFENE_HELPER(16) +DEF_VFENE_HELPER(32) + +#define DEF_VFENE_CC_HELPER(BITS) \ +void HELPER(gvec_vfene_cc##BITS)(void *v1, const void *v2, const void *v3, \ + CPUS390XState *env, uint32_t desc) \ +{ \ + const bool zs = extract32(simd_data(desc), 1, 1); \ + \ + env->cc_op = vfene(v1, v2, v3, zs, MO_##BITS); \ +} +DEF_VFENE_CC_HELPER(8) +DEF_VFENE_CC_HELPER(16) +DEF_VFENE_CC_HELPER(32) + +static int vistr(void *v1, const void *v2, uint8_t es) +{ + const uint64_t mask = get_element_lsbs_mask(es); + uint64_t a0 = s390_vec_read_element64(v2, 0); + uint64_t a1 = s390_vec_read_element64(v2, 1); + uint64_t z; + int cc = 3; + + z = zero_search(a0, mask); + if (z) { + a0 &= ~(-1ull >> clz64(z)); + a1 = 0; + cc = 0; + } else { + z = zero_search(a1, mask); + if (z) { + a1 &= ~(-1ull >> clz64(z)); + cc = 0; + } + } + + s390_vec_write_element64(v1, 0, a0); + s390_vec_write_element64(v1, 1, a1); + return cc; +} + +#define DEF_VISTR_HELPER(BITS) \ +void HELPER(gvec_vistr##BITS)(void *v1, const void *v2, uint32_t desc) \ +{ \ + vistr(v1, v2, MO_##BITS); \ +} +DEF_VISTR_HELPER(8) +DEF_VISTR_HELPER(16) +DEF_VISTR_HELPER(32) + +#define DEF_VISTR_CC_HELPER(BITS) \ +void HELPER(gvec_vistr_cc##BITS)(void *v1, const void *v2, CPUS390XState *env, \ + uint32_t desc) \ +{ \ + env->cc_op = vistr(v1, v2, MO_##BITS); \ +} +DEF_VISTR_CC_HELPER(8) +DEF_VISTR_CC_HELPER(16) +DEF_VISTR_CC_HELPER(32) + +static bool element_compare(uint32_t data, uint32_t l, uint8_t c) +{ + const bool equal = extract32(c, 7, 1); + const bool lower = extract32(c, 6, 1); + const bool higher = extract32(c, 5, 1); + + if (data < l) { + return lower; + } else if (data > l) { + return higher; + } + return equal; +} + +static int vstrc(void *v1, const void *v2, const void *v3, const void *v4, + bool in, bool rt, bool zs, uint8_t es) +{ + const uint64_t mask = get_element_lsbs_mask(es); + uint64_t a0 = s390_vec_read_element64(v2, 0); + uint64_t a1 = s390_vec_read_element64(v2, 1); + int first_zero = 16, first_match = 16; + S390Vector rt_result = {}; + uint64_t z0, z1; + int i, j; + + if (zs) { + z0 = zero_search(a0, mask); + z1 = zero_search(a1, mask); + first_zero = match_index(z0, z1); + } + + for (i = 0; i < 16 / (1 << es); i++) { + const uint32_t data = s390_vec_read_element(v2, i, es); + const int cur_byte = i * (1 << es); + bool any_match = false; + + /* if we don't need a bit vector, we can stop early */ + if (cur_byte == first_zero && !rt) { + break; + } + + for (j = 0; j < 16 / (1 << es); j += 2) { + const uint32_t l1 = s390_vec_read_element(v3, j, es); + const uint32_t l2 = s390_vec_read_element(v3, j + 1, es); + /* we are only interested in the highest byte of each element */ + const uint8_t c1 = s390_vec_read_element8(v4, j * (1 << es)); + const uint8_t c2 = s390_vec_read_element8(v4, (j + 1) * (1 << es)); + + if (element_compare(data, l1, c1) && + element_compare(data, l2, c2)) { + any_match = true; + break; + } + } + /* invert the result if requested */ + any_match = in ^ any_match; + + if (any_match) { + /* indicate bit vector if requested */ + if (rt) { + const uint64_t val = -1ull; + + first_match = MIN(cur_byte, first_match); + s390_vec_write_element(&rt_result, i, es, val); + } else { + /* stop on the first match */ + first_match = cur_byte; + break; + } + } + } + + 
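+    /*
+     * Summary sketch (inferred from the code above, not in the original
+     * comments): at this point first_match holds the byte offset of the
+     * first element accepted by the range comparison (16 if none matched)
+     * and first_zero the offset of the first zero element when zs is set.
+     * The cc returned below maps as:
+     *   0 - a zero element was found at or before the first match
+     *   1 - a match was found and no zero element
+     *   2 - a match was found before the first zero element
+     *   3 - neither a match nor a zero element
+     */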
if (rt) {
+        *(S390Vector *)v1 = rt_result;
+    } else {
+        s390_vec_write_element64(v1, 0, MIN(first_match, first_zero));
+        s390_vec_write_element64(v1, 1, 0);
+    }
+
+    if (first_zero == 16 && first_match == 16) {
+        return 3; /* no match */
+    } else if (first_zero == 16) {
+        return 1; /* matching elements, no match for zero */
+    } else if (first_match < first_zero) {
+        return 2; /* matching elements before match for zero */
+    }
+    return 0; /* match for zero */
+}
+
+#define DEF_VSTRC_HELPER(BITS) \
+void HELPER(gvec_vstrc##BITS)(void *v1, const void *v2, const void *v3, \
+                              const void *v4, uint32_t desc) \
+{ \
+    const bool in = extract32(simd_data(desc), 3, 1); \
+    const bool zs = extract32(simd_data(desc), 1, 1); \
+    \
+    vstrc(v1, v2, v3, v4, in, 0, zs, MO_##BITS); \
+}
+DEF_VSTRC_HELPER(8)
+DEF_VSTRC_HELPER(16)
+DEF_VSTRC_HELPER(32)
+
+#define DEF_VSTRC_RT_HELPER(BITS) \
+void HELPER(gvec_vstrc_rt##BITS)(void *v1, const void *v2, const void *v3, \
+                                 const void *v4, uint32_t desc) \
+{ \
+    const bool in = extract32(simd_data(desc), 3, 1); \
+    const bool zs = extract32(simd_data(desc), 1, 1); \
+    \
+    vstrc(v1, v2, v3, v4, in, 1, zs, MO_##BITS); \
+}
+DEF_VSTRC_RT_HELPER(8)
+DEF_VSTRC_RT_HELPER(16)
+DEF_VSTRC_RT_HELPER(32)
+
+#define DEF_VSTRC_CC_HELPER(BITS) \
+void HELPER(gvec_vstrc_cc##BITS)(void *v1, const void *v2, const void *v3, \
+                                 const void *v4, CPUS390XState *env, \
+                                 uint32_t desc) \
+{ \
+    const bool in = extract32(simd_data(desc), 3, 1); \
+    const bool zs = extract32(simd_data(desc), 1, 1); \
+    \
+    env->cc_op = vstrc(v1, v2, v3, v4, in, 0, zs, MO_##BITS); \
+}
+DEF_VSTRC_CC_HELPER(8)
+DEF_VSTRC_CC_HELPER(16)
+DEF_VSTRC_CC_HELPER(32)
+
+#define DEF_VSTRC_CC_RT_HELPER(BITS) \
+void HELPER(gvec_vstrc_cc_rt##BITS)(void *v1, const void *v2, const void *v3, \
+                                    const void *v4, CPUS390XState *env, \
+                                    uint32_t desc) \
+{ \
+    const bool in = extract32(simd_data(desc), 3, 1); \
+    const bool zs = extract32(simd_data(desc), 1, 1); \
+    \
+    env->cc_op = vstrc(v1, v2, v3, v4, in, 1, zs, MO_##BITS); \
+}
+DEF_VSTRC_CC_RT_HELPER(8)
+DEF_VSTRC_CC_RT_HELPER(16)
+DEF_VSTRC_CC_RT_HELPER(32)
diff --git a/samples/sample_s390x.c b/samples/sample_s390x.c
new file mode 100644
index 00000000..f1d813ab
--- /dev/null
+++ b/samples/sample_s390x.c
@@ -0,0 +1,83 @@
+/* Unicorn Emulator Engine */
+/* By Nguyen Anh Quynh, 2021 */
+
+/* Sample code to demonstrate how to emulate S390X code */
+
+#include <unicorn/unicorn.h>
+#include <string.h>
+
+
+// code to be emulated
+#define S390X_CODE "\x18\x23" // lr %r2, %r3
+
+// memory address where emulation starts
+#define ADDRESS 0x10000
+
+static void hook_block(uc_engine *uc, uint64_t address, uint32_t size, void *user_data)
+{
+    printf(">>> Tracing basic block at 0x%"PRIx64 ", block size = 0x%x\n", address, size);
+}
+
+static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data)
+{
+    printf(">>> Tracing instruction at 0x%"PRIx64 ", instruction size = 0x%x\n", address, size);
+}
+
+static void test_s390x(void)
+{
+    uc_engine *uc;
+    uc_hook trace1, trace2;
+    uc_err err;
+
+    uint64_t r2 = 2, r3 = 3;
+
+    printf("Emulate S390X code\n");
+
+    // Initialize emulator in S390X mode
+    err = uc_open(UC_ARCH_S390X, UC_MODE_BIG_ENDIAN, &uc);
+    if (err) {
+        printf("Failed on uc_open() with error returned: %u (%s)\n",
+               err, uc_strerror(err));
+        return;
+    }
+
+    // map 1MB memory for this emulation
+    uc_mem_map(uc, ADDRESS, 1024 * 1024, UC_PROT_ALL);
+
+    // write machine code to be emulated to memory
+    uc_mem_write(uc, ADDRESS, S390X_CODE, sizeof(S390X_CODE) - 1);
+
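+    /*
+     * A note on the test bytes (explanatory, not in the original sample):
+     * "\x18\x23" is an RR-format instruction, one opcode byte 0x18 (LR)
+     * plus one byte holding both register numbers, high nibble r1 = 2 and
+     * low nibble r2 = 3.  Emulating it copies r3 into r2, so the register
+     * dump below should print R2 = 0x3 and R3 = 0x3.
+     */
+
+    // initialize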
machine registers + uc_reg_write(uc, UC_S390X_REG_R2, &r2); + uc_reg_write(uc, UC_S390X_REG_R3, &r3); + + // tracing all basic blocks with customized callback + uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); + + // tracing all instruction + uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 1, 0); + + // emulate machine code in infinite time (last param = 0), or when + // finishing all the code. + err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(S390X_CODE) - 1, 0, 0); + if (err) { + printf("Failed on uc_emu_start() with error returned: %u\n", err); + } + + // now print out some registers + printf(">>> Emulation done. Below is the CPU context\n"); + + uc_reg_read(uc, UC_S390X_REG_R2, &r2); + uc_reg_read(uc, UC_S390X_REG_R3, &r3); + + printf(">>> R2 = 0x%"PRIx64 "\t\t>>> R3 = 0x%"PRIx64 "\n", r2, r3); + + uc_close(uc); +} + +int main(int argc, char **argv, char **envp) +{ + test_s390x(); + + return 0; +} diff --git a/tests/unit/test_s390x.c b/tests/unit/test_s390x.c new file mode 100644 index 00000000..7c31da6d --- /dev/null +++ b/tests/unit/test_s390x.c @@ -0,0 +1,8 @@ +#include "unicorn_test.h" + +const uint64_t code_start = 0x1000; +const uint64_t code_len = 0x4000; + +TEST_LIST = { + { NULL, NULL } +}; From eca359989cce95ec02b8e65f396634e1e189e6a1 Mon Sep 17 00:00:00 2001 From: Nguyen Anh Quynh Date: Mon, 6 Dec 2021 04:55:35 +0800 Subject: [PATCH 03/38] update TODO --- TODO-s390 | 1 + 1 file changed, 1 insertion(+) diff --git a/TODO-s390 b/TODO-s390 index a208265e..b9a64711 100644 --- a/TODO-s390 +++ b/TODO-s390 @@ -9,4 +9,5 @@ Todo: - enable building all arch to fix conflicts - remove all static vars in qemu/target/s390x/translate.c - support more registers in qemu/target/s390x/unicorn.c +- storage-keys needed? - find & fix potential memory leaking with valgrind From 7918a6e46203c36626f3c2c2c7dcab4313498bda Mon Sep 17 00:00:00 2001 From: Nguyen Anh Quynh Date: Tue, 7 Dec 2021 04:32:05 +0800 Subject: [PATCH 04/38] TODO --- TODO-s390 | 1 + 1 file changed, 1 insertion(+) diff --git a/TODO-s390 b/TODO-s390 index b9a64711..f40f67ec 100644 --- a/TODO-s390 +++ b/TODO-s390 @@ -11,3 +11,4 @@ Todo: - support more registers in qemu/target/s390x/unicorn.c - storage-keys needed? - find & fix potential memory leaking with valgrind +- sync with "dev" branch of github unicorn From 09b0c66f11f94f328cb97f35ee57a76173df0aa6 Mon Sep 17 00:00:00 2001 From: Nguyen Anh Quynh Date: Tue, 7 Dec 2021 04:53:32 +0800 Subject: [PATCH 05/38] move all static vars in translate.c to tcg.h --- TODO-s390 | 2 +- qemu/include/tcg/tcg.h | 13 + qemu/target/s390x/translate.c | 490 +++++++++++++++++----------------- 3 files changed, 256 insertions(+), 249 deletions(-) diff --git a/TODO-s390 b/TODO-s390 index f40f67ec..c532002d 100644 --- a/TODO-s390 +++ b/TODO-s390 @@ -7,7 +7,7 @@ Todo: - fix qemu/target/s390x/cpu.c, so sample_s390x works - enable building all arch to fix conflicts -- remove all static vars in qemu/target/s390x/translate.c +- remove all static vars in qemu/target/s390x/translate.c [DONE in branch "static-vars"] - support more registers in qemu/target/s390x/unicorn.c - storage-keys needed? - find & fix potential memory leaking with valgrind diff --git a/qemu/include/tcg/tcg.h b/qemu/include/tcg/tcg.h index f9b87452..6318909a 100644 --- a/qemu/include/tcg/tcg.h +++ b/qemu/include/tcg/tcg.h @@ -791,6 +791,19 @@ struct TCGContext { TCGv NULL_QREG; /* Used to distinguish stores from bad addressing modes. 
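       (The s390x fields below follow the same pattern: globals that
       upstream QEMU keeps at file scope in target/s390x/translate.c move
       into TCGContext, so each uc_engine instance owns its own translator
       state.  A minimal sketch of the resulting access pattern, assuming
       a tcg_ctx pointer is in scope:

           tcg_gen_movi_i64(tcg_ctx, tcg_ctx->psw_addr, dest);

       i.e. every former global is now reached through the per-instance
       context, as the translate.c hunks further down show.)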
*/ TCGv store_dummy; + + // target/s390x/translate.c + TCGv_i64 psw_addr; + TCGv_i64 psw_mask; + TCGv_i64 gbea; + + TCGv_i32 cc_op; + TCGv_i64 cc_src; + TCGv_i64 cc_dst; + TCGv_i64 cc_vr; + + char s390x_cpu_reg_names[16][4]; // renamed from original cpu_reg_names[][] to avoid name clash with m68k + TCGv_i64 regs[16]; }; static inline size_t temp_idx(TCGContext *tcg_ctx, TCGTemp *ts) diff --git a/qemu/target/s390x/translate.c b/qemu/target/s390x/translate.c index 4eed38c2..5ec72e3f 100644 --- a/qemu/target/s390x/translate.c +++ b/qemu/target/s390x/translate.c @@ -186,47 +186,35 @@ static void pc_to_link_info(TCGContext *tcg_ctx, TCGv_i64 out, DisasContext *s, tcg_temp_free_i64(tcg_ctx, tmp); } -static TCGv_i64 psw_addr; -static TCGv_i64 psw_mask; -static TCGv_i64 gbea; - -static TCGv_i32 cc_op; -static TCGv_i64 cc_src; -static TCGv_i64 cc_dst; -static TCGv_i64 cc_vr; - -static char cpu_reg_names[16][4]; -static TCGv_i64 regs[16]; - void s390x_translate_init(struct uc_struct *uc) { TCGContext *tcg_ctx = uc->tcg_ctx; int i; - psw_addr = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, + tcg_ctx->psw_addr = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUS390XState, psw.addr), "psw_addr"); - psw_mask = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, + tcg_ctx->psw_mask = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUS390XState, psw.mask), "psw_mask"); - gbea = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, + tcg_ctx->gbea = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUS390XState, gbea), "gbea"); - cc_op = tcg_global_mem_new_i32(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUS390XState, cc_op), + tcg_ctx->cc_op = tcg_global_mem_new_i32(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUS390XState, cc_op), "cc_op"); - cc_src = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUS390XState, cc_src), + tcg_ctx->cc_src = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUS390XState, cc_src), "cc_src"); - cc_dst = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUS390XState, cc_dst), + tcg_ctx->cc_dst = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUS390XState, cc_dst), "cc_dst"); - cc_vr = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUS390XState, cc_vr), + tcg_ctx->cc_vr = tcg_global_mem_new_i64(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUS390XState, cc_vr), "cc_vr"); for (i = 0; i < 16; i++) { - snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i); - regs[i] = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, + snprintf(tcg_ctx->s390x_cpu_reg_names[i], sizeof(tcg_ctx->s390x_cpu_reg_names[0]), "r%d", i); + tcg_ctx->regs[i] = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, offsetof(CPUS390XState, regs[i]), - cpu_reg_names[i]); + tcg_ctx->s390x_cpu_reg_names[i]); } } @@ -286,7 +274,7 @@ static inline int freg32_offset(uint8_t reg) static TCGv_i64 load_reg(TCGContext *tcg_ctx, int reg) { TCGv_i64 r = tcg_temp_new_i64(tcg_ctx); - tcg_gen_mov_i64(tcg_ctx, r, regs[reg]); + tcg_gen_mov_i64(tcg_ctx, r, tcg_ctx->regs[reg]); return r; } @@ -308,7 +296,7 @@ static TCGv_i64 load_freg32_i64(TCGContext *tcg_ctx, int reg) static void store_reg(TCGContext *tcg_ctx, int reg, TCGv_i64 v) { - tcg_gen_mov_i64(tcg_ctx, regs[reg], v); + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->regs[reg], v); } static void store_freg(TCGContext *tcg_ctx, int reg, TCGv_i64 v) @@ -319,12 +307,12 @@ static void store_freg(TCGContext *tcg_ctx, int reg, TCGv_i64 v) static void store_reg32_i64(TCGContext *tcg_ctx, int reg, TCGv_i64 v) { /* 32 bit register 
writes keep the upper half */ - tcg_gen_deposit_i64(tcg_ctx, regs[reg], regs[reg], v, 0, 32); + tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->regs[reg], tcg_ctx->regs[reg], v, 0, 32); } static void store_reg32h_i64(TCGContext *tcg_ctx, int reg, TCGv_i64 v) { - tcg_gen_deposit_i64(tcg_ctx, regs[reg], regs[reg], v, 32, 32); + tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->regs[reg], tcg_ctx->regs[reg], v, 32, 32); } static void store_freg32_i64(TCGContext *tcg_ctx, int reg, TCGv_i64 v) @@ -341,17 +329,17 @@ static void update_psw_addr(DisasContext *s) { /* psw.addr */ TCGContext *tcg_ctx = s->uc->tcg_ctx; - tcg_gen_movi_i64(tcg_ctx, psw_addr, s->base.pc_next); + tcg_gen_movi_i64(tcg_ctx, tcg_ctx->psw_addr, s->base.pc_next); } static void per_branch(DisasContext *s, bool to_next) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - tcg_gen_movi_i64(tcg_ctx, gbea, s->base.pc_next); + tcg_gen_movi_i64(tcg_ctx, tcg_ctx->gbea, s->base.pc_next); if (s->base.tb->flags & FLAG_MASK_PER) { - TCGv_i64 next_pc = to_next ? tcg_const_i64(tcg_ctx, s->pc_tmp) : psw_addr; - gen_helper_per_branch(tcg_ctx, tcg_ctx->cpu_env, gbea, next_pc); + TCGv_i64 next_pc = to_next ? tcg_const_i64(tcg_ctx, s->pc_tmp) : tcg_ctx->psw_addr; + gen_helper_per_branch(tcg_ctx, tcg_ctx->cpu_env, tcg_ctx->gbea, next_pc); if (to_next) { tcg_temp_free_i64(tcg_ctx, next_pc); } @@ -367,13 +355,13 @@ static void per_branch_cond(DisasContext *s, TCGCond cond, TCGLabel *lab = gen_new_label(tcg_ctx); tcg_gen_brcond_i64(tcg_ctx, tcg_invert_cond(cond), arg1, arg2, lab); - tcg_gen_movi_i64(tcg_ctx, gbea, s->base.pc_next); - gen_helper_per_branch(tcg_ctx, tcg_ctx->cpu_env, gbea, psw_addr); + tcg_gen_movi_i64(tcg_ctx, tcg_ctx->gbea, s->base.pc_next); + gen_helper_per_branch(tcg_ctx, tcg_ctx->cpu_env, tcg_ctx->gbea, tcg_ctx->psw_addr); gen_set_label(tcg_ctx, lab); } else { TCGv_i64 pc = tcg_const_i64(tcg_ctx, s->base.pc_next); - tcg_gen_movcond_i64(tcg_ctx, cond, gbea, arg1, arg2, gbea, pc); + tcg_gen_movcond_i64(tcg_ctx, cond, tcg_ctx->gbea, arg1, arg2, tcg_ctx->gbea, pc); tcg_temp_free_i64(tcg_ctx, pc); } } @@ -381,14 +369,14 @@ static void per_branch_cond(DisasContext *s, TCGCond cond, static void per_breaking_event(DisasContext *s) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - tcg_gen_movi_i64(tcg_ctx, gbea, s->base.pc_next); + tcg_gen_movi_i64(tcg_ctx, tcg_ctx->gbea, s->base.pc_next); } static void update_cc_op(DisasContext *s) { if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - tcg_gen_movi_i32(tcg_ctx, cc_op, s->cc_op); + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cc_op, s->cc_op); } } @@ -495,12 +483,12 @@ static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2) * displacements early we create larger immedate addends. 
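     * (Worked example, added for illustration: the effective address is
     * the architectural base + index + displacement sum.  With
     * regs[b2] = 0x7ffffff0, no index register and d2 = 0x20, the raw sum
     * is 0x80000010; in 31-bit addressing mode the wrapping applied below
     * keeps only the low 31 bits, giving 0x00000010, while in 64-bit mode
     * the sum is used unchanged.)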
*/ if (b2 && x2) { - tcg_gen_add_i64(tcg_ctx, tmp, regs[b2], regs[x2]); + tcg_gen_add_i64(tcg_ctx, tmp, tcg_ctx->regs[b2], tcg_ctx->regs[x2]); gen_addi_and_wrap_i64(s, tmp, tmp, d2); } else if (b2) { - gen_addi_and_wrap_i64(s, tmp, regs[b2], d2); + gen_addi_and_wrap_i64(s, tmp, tcg_ctx->regs[b2], d2); } else if (x2) { - gen_addi_and_wrap_i64(s, tmp, regs[x2], d2); + gen_addi_and_wrap_i64(s, tmp, tcg_ctx->regs[x2], d2); } else if (!(s->base.tb->flags & FLAG_MASK_64)) { if (s->base.tb->flags & FLAG_MASK_32) { tcg_gen_movi_i64(tcg_ctx, tmp, d2 & 0x7fffffff); @@ -526,9 +514,9 @@ static inline void gen_op_movi_cc(DisasContext *s, uint32_t val) if (live_cc_data(s)) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - tcg_gen_discard_i64(tcg_ctx, cc_src); - tcg_gen_discard_i64(tcg_ctx, cc_dst); - tcg_gen_discard_i64(tcg_ctx, cc_vr); + tcg_gen_discard_i64(tcg_ctx, tcg_ctx->cc_src); + tcg_gen_discard_i64(tcg_ctx, tcg_ctx->cc_dst); + tcg_gen_discard_i64(tcg_ctx, tcg_ctx->cc_vr); } s->cc_op = CC_OP_CONST0 + val; } @@ -538,10 +526,10 @@ static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst) TCGContext *tcg_ctx = s->uc->tcg_ctx; if (live_cc_data(s)) { - tcg_gen_discard_i64(tcg_ctx, cc_src); - tcg_gen_discard_i64(tcg_ctx, cc_vr); + tcg_gen_discard_i64(tcg_ctx, tcg_ctx->cc_src); + tcg_gen_discard_i64(tcg_ctx, tcg_ctx->cc_vr); } - tcg_gen_mov_i64(tcg_ctx, cc_dst, dst); + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cc_dst, dst); s->cc_op = op; } @@ -551,10 +539,10 @@ static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src, TCGContext *tcg_ctx = s->uc->tcg_ctx; if (live_cc_data(s)) { - tcg_gen_discard_i64(tcg_ctx, cc_vr); + tcg_gen_discard_i64(tcg_ctx, tcg_ctx->cc_vr); } - tcg_gen_mov_i64(tcg_ctx, cc_src, src); - tcg_gen_mov_i64(tcg_ctx, cc_dst, dst); + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cc_src, src); + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cc_dst, dst); s->cc_op = op; } @@ -563,9 +551,9 @@ static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src, { TCGContext *tcg_ctx = s->uc->tcg_ctx; - tcg_gen_mov_i64(tcg_ctx, cc_src, src); - tcg_gen_mov_i64(tcg_ctx, cc_dst, dst); - tcg_gen_mov_i64(tcg_ctx, cc_vr, vr); + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cc_src, src); + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cc_dst, dst); + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cc_vr, vr); s->cc_op = op; } @@ -595,9 +583,9 @@ static void set_cc_static(DisasContext *s) if (live_cc_data(s)) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - tcg_gen_discard_i64(tcg_ctx, cc_src); - tcg_gen_discard_i64(tcg_ctx, cc_dst); - tcg_gen_discard_i64(tcg_ctx, cc_vr); + tcg_gen_discard_i64(tcg_ctx, tcg_ctx->cc_src); + tcg_gen_discard_i64(tcg_ctx, tcg_ctx->cc_dst); + tcg_gen_discard_i64(tcg_ctx, tcg_ctx->cc_vr); } s->cc_op = CC_OP_STATIC; } @@ -642,7 +630,7 @@ static void gen_op_calc_cc(DisasContext *s) case CC_OP_CONST2: case CC_OP_CONST3: /* s->cc_op is the cc value */ - tcg_gen_movi_i32(tcg_ctx, cc_op, s->cc_op - CC_OP_CONST0); + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cc_op, s->cc_op - CC_OP_CONST0); break; case CC_OP_STATIC: /* env->cc_op already is the cc value */ @@ -661,7 +649,7 @@ static void gen_op_calc_cc(DisasContext *s) case CC_OP_FLOGR: case CC_OP_LCBB: /* 1 argument */ - gen_helper_calc_cc(tcg_ctx, cc_op, tcg_ctx->cpu_env, local_cc_op, dummy, cc_dst, dummy); + gen_helper_calc_cc(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, local_cc_op, dummy, tcg_ctx->cc_dst, dummy); break; case CC_OP_ICM: case CC_OP_LTGT_32: @@ -675,7 +663,7 @@ static void gen_op_calc_cc(DisasContext *s) case CC_OP_NZ_F128: case CC_OP_VC: /* 2 
arguments */ - gen_helper_calc_cc(tcg_ctx, cc_op, tcg_ctx->cpu_env, local_cc_op, cc_src, cc_dst, dummy); + gen_helper_calc_cc(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, local_cc_op, tcg_ctx->cc_src, tcg_ctx->cc_dst, dummy); break; case CC_OP_ADD_64: case CC_OP_ADDU_64: @@ -690,11 +678,11 @@ static void gen_op_calc_cc(DisasContext *s) case CC_OP_SUBU_32: case CC_OP_SUBB_32: /* 3 arguments */ - gen_helper_calc_cc(tcg_ctx, cc_op, tcg_ctx->cpu_env, local_cc_op, cc_src, cc_dst, cc_vr); + gen_helper_calc_cc(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, local_cc_op, tcg_ctx->cc_src, tcg_ctx->cc_dst, tcg_ctx->cc_vr); break; case CC_OP_DYNAMIC: /* unknown operation - assume 3 arguments and cc_op in env */ - gen_helper_calc_cc(tcg_ctx, cc_op, tcg_ctx->cpu_env, cc_op, cc_src, cc_dst, cc_vr); + gen_helper_calc_cc(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, tcg_ctx->cc_op, tcg_ctx->cc_src, tcg_ctx->cc_dst, tcg_ctx->cc_vr); break; default: tcg_abort(); @@ -777,8 +765,8 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask) if (mask == 15 || mask == 0) { c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER); - c->u.s32.a = cc_op; - c->u.s32.b = cc_op; + c->u.s32.a = tcg_ctx->cc_op; + c->u.s32.b = tcg_ctx->cc_op; c->g1 = c->g2 = true; c->is_64 = false; return; @@ -923,7 +911,7 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask) case CC_OP_LTGT0_32: c->is_64 = false; c->u.s32.a = tcg_temp_new_i32(tcg_ctx); - tcg_gen_extrl_i64_i32(tcg_ctx, c->u.s32.a, cc_dst); + tcg_gen_extrl_i64_i32(tcg_ctx, c->u.s32.a, tcg_ctx->cc_dst); c->u.s32.b = tcg_const_i32(tcg_ctx, 0); break; case CC_OP_LTGT_32: @@ -931,23 +919,23 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask) case CC_OP_SUBU_32: c->is_64 = false; c->u.s32.a = tcg_temp_new_i32(tcg_ctx); - tcg_gen_extrl_i64_i32(tcg_ctx, c->u.s32.a, cc_src); + tcg_gen_extrl_i64_i32(tcg_ctx, c->u.s32.a, tcg_ctx->cc_src); c->u.s32.b = tcg_temp_new_i32(tcg_ctx); - tcg_gen_extrl_i64_i32(tcg_ctx, c->u.s32.b, cc_dst); + tcg_gen_extrl_i64_i32(tcg_ctx, c->u.s32.b, tcg_ctx->cc_dst); break; case CC_OP_LTGT0_64: case CC_OP_NZ: case CC_OP_FLOGR: - c->u.s64.a = cc_dst; + c->u.s64.a = tcg_ctx->cc_dst; c->u.s64.b = tcg_const_i64(tcg_ctx, 0); c->g1 = true; break; case CC_OP_LTGT_64: case CC_OP_LTUGTU_64: case CC_OP_SUBU_64: - c->u.s64.a = cc_src; - c->u.s64.b = cc_dst; + c->u.s64.a = tcg_ctx->cc_src; + c->u.s64.b = tcg_ctx->cc_dst; c->g1 = c->g2 = true; break; @@ -956,35 +944,35 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask) case CC_OP_ICM: c->u.s64.a = tcg_temp_new_i64(tcg_ctx); c->u.s64.b = tcg_const_i64(tcg_ctx, 0); - tcg_gen_and_i64(tcg_ctx, c->u.s64.a, cc_src, cc_dst); + tcg_gen_and_i64(tcg_ctx, c->u.s64.a, tcg_ctx->cc_src, tcg_ctx->cc_dst); break; case CC_OP_ADDU_32: c->is_64 = false; c->u.s32.a = tcg_temp_new_i32(tcg_ctx); c->u.s32.b = tcg_temp_new_i32(tcg_ctx); - tcg_gen_extrl_i64_i32(tcg_ctx, c->u.s32.a, cc_vr); + tcg_gen_extrl_i64_i32(tcg_ctx, c->u.s32.a, tcg_ctx->cc_vr); if (cond == TCG_COND_EQ || cond == TCG_COND_NE) { tcg_gen_movi_i32(tcg_ctx, c->u.s32.b, 0); } else { - tcg_gen_extrl_i64_i32(tcg_ctx, c->u.s32.b, cc_src); + tcg_gen_extrl_i64_i32(tcg_ctx, c->u.s32.b, tcg_ctx->cc_src); } break; case CC_OP_ADDU_64: - c->u.s64.a = cc_vr; + c->u.s64.a = tcg_ctx->cc_vr; c->g1 = true; if (cond == TCG_COND_EQ || cond == TCG_COND_NE) { c->u.s64.b = tcg_const_i64(tcg_ctx, 0); } else { - c->u.s64.b = cc_src; + c->u.s64.b = tcg_ctx->cc_src; c->g2 = true; } break; case CC_OP_STATIC: c->is_64 = false; - c->u.s32.a 
= cc_op; + c->u.s32.a = tcg_ctx->cc_op; c->g1 = true; switch (mask) { case 0x8 | 0x4 | 0x2: /* cc != 3 */ @@ -1004,7 +992,7 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask) c->g1 = false; c->u.s32.a = tcg_temp_new_i32(tcg_ctx); c->u.s32.b = tcg_const_i32(tcg_ctx, 0); - tcg_gen_andi_i32(tcg_ctx, c->u.s32.a, cc_op, 1); + tcg_gen_andi_i32(tcg_ctx, c->u.s32.a, tcg_ctx->cc_op, 1); break; case 0x8 | 0x4: /* cc < 2 */ cond = TCG_COND_LTU; @@ -1023,7 +1011,7 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask) c->g1 = false; c->u.s32.a = tcg_temp_new_i32(tcg_ctx); c->u.s32.b = tcg_const_i32(tcg_ctx, 0); - tcg_gen_andi_i32(tcg_ctx, c->u.s32.a, cc_op, 1); + tcg_gen_andi_i32(tcg_ctx, c->u.s32.a, tcg_ctx->cc_op, 1); break; case 0x4: /* cc == 1 */ cond = TCG_COND_EQ; @@ -1047,7 +1035,7 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask) c->g1 = false; c->u.s32.a = tcg_const_i32(tcg_ctx, 8); c->u.s32.b = tcg_const_i32(tcg_ctx, 0); - tcg_gen_shr_i32(tcg_ctx, c->u.s32.a, c->u.s32.a, cc_op); + tcg_gen_shr_i32(tcg_ctx, c->u.s32.a, c->u.s32.a, tcg_ctx->cc_op); tcg_gen_andi_i32(tcg_ctx, c->u.s32.a, c->u.s32.a, mask); break; } @@ -1282,11 +1270,11 @@ static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest) update_cc_op(s); per_breaking_event(s); tcg_gen_goto_tb(tcg_ctx, 0); - tcg_gen_movi_i64(tcg_ctx, psw_addr, dest); + tcg_gen_movi_i64(tcg_ctx, tcg_ctx->psw_addr, dest); tcg_gen_exit_tb(tcg_ctx, s->base.tb, 0); return DISAS_GOTO_TB; } else { - tcg_gen_movi_i64(tcg_ctx, psw_addr, dest); + tcg_gen_movi_i64(tcg_ctx, tcg_ctx->psw_addr, dest); per_branch(s, false); return DISAS_PC_UPDATED; } @@ -1323,7 +1311,7 @@ static DisasJumpType help_branch(DisasContext *s, DisasCompare *c, goto egress; } if (c->cond == TCG_COND_ALWAYS) { - tcg_gen_mov_i64(tcg_ctx, psw_addr, cdest); + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->psw_addr, cdest); per_branch(s, false); ret = DISAS_PC_UPDATED; goto egress; @@ -1344,14 +1332,14 @@ static DisasJumpType help_branch(DisasContext *s, DisasCompare *c, /* Branch not taken. */ tcg_gen_goto_tb(tcg_ctx, 0); - tcg_gen_movi_i64(tcg_ctx, psw_addr, s->pc_tmp); + tcg_gen_movi_i64(tcg_ctx, tcg_ctx->psw_addr, s->pc_tmp); tcg_gen_exit_tb(tcg_ctx, s->base.tb, 0); /* Branch taken. */ gen_set_label(tcg_ctx, lab); per_breaking_event(s); tcg_gen_goto_tb(tcg_ctx, 1); - tcg_gen_movi_i64(tcg_ctx, psw_addr, dest); + tcg_gen_movi_i64(tcg_ctx, tcg_ctx->psw_addr, dest); tcg_gen_exit_tb(tcg_ctx, s->base.tb, 1); ret = DISAS_GOTO_TB; @@ -1361,7 +1349,7 @@ static DisasJumpType help_branch(DisasContext *s, DisasCompare *c, avoids having to allocate a new local temp to hold it. We'll overwrite this in the not taken case anyway. */ if (!is_imm) { - tcg_gen_mov_i64(tcg_ctx, psw_addr, cdest); + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->psw_addr, cdest); } lab = gen_new_label(tcg_ctx); @@ -1374,12 +1362,12 @@ static DisasJumpType help_branch(DisasContext *s, DisasCompare *c, /* Branch not taken. 
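           (In this half-direct case only the fall-through edge can be
           chained: goto_tb slot 0 plus exit_tb(tb, 0) lets the not-taken
           path later be patched to jump straight to the successor TB,
           while the taken path below just updates psw_addr and returns
           DISAS_PC_UPDATED, so the target TB is found by lookup instead.
           This reading is inferred from the surrounding code.)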
*/ update_cc_op(s); tcg_gen_goto_tb(tcg_ctx, 0); - tcg_gen_movi_i64(tcg_ctx, psw_addr, s->pc_tmp); + tcg_gen_movi_i64(tcg_ctx, tcg_ctx->psw_addr, s->pc_tmp); tcg_gen_exit_tb(tcg_ctx, s->base.tb, 0); gen_set_label(tcg_ctx, lab); if (is_imm) { - tcg_gen_movi_i64(tcg_ctx, psw_addr, dest); + tcg_gen_movi_i64(tcg_ctx, tcg_ctx->psw_addr, dest); } per_breaking_event(s); ret = DISAS_PC_UPDATED; @@ -1395,7 +1383,7 @@ static DisasJumpType help_branch(DisasContext *s, DisasCompare *c, } if (c->is_64) { - tcg_gen_movcond_i64(tcg_ctx, c->cond, psw_addr, c->u.s64.a, c->u.s64.b, + tcg_gen_movcond_i64(tcg_ctx, c->cond, tcg_ctx->psw_addr, c->u.s64.a, c->u.s64.b, cdest, next); per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b); } else { @@ -1405,7 +1393,7 @@ static DisasJumpType help_branch(DisasContext *s, DisasCompare *c, tcg_gen_setcond_i32(tcg_ctx, c->cond, t0, c->u.s32.a, c->u.s32.b); tcg_gen_extu_i32_i64(tcg_ctx, t1, t0); tcg_temp_free_i32(tcg_ctx, t0); - tcg_gen_movcond_i64(tcg_ctx, TCG_COND_NE, psw_addr, t1, z, cdest, next); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_NE, tcg_ctx->psw_addr, t1, z, cdest, next); per_branch_cond(s, TCG_COND_NE, t1, z); tcg_temp_free_i64(tcg_ctx, t1); tcg_temp_free_i64(tcg_ctx, z); @@ -1555,8 +1543,8 @@ static DisasJumpType op_andi(DisasContext *s, DisasOps *o) tcg_gen_and_i64(tcg_ctx, o->out, o->in1, o->in2); /* Produce the CC from only the bits manipulated. */ - tcg_gen_andi_i64(tcg_ctx, cc_dst, o->out, mask); - set_cc_nz_u64(s, cc_dst); + tcg_gen_andi_i64(tcg_ctx, tcg_ctx->cc_dst, o->out, mask); + set_cc_nz_u64(s, tcg_ctx->cc_dst); return DISAS_NEXT; } @@ -1587,7 +1575,7 @@ static DisasJumpType op_bas(DisasContext *s, DisasOps *o) TCGContext *tcg_ctx = s->uc->tcg_ctx; pc_to_link_info(tcg_ctx, o->out, s, s->pc_tmp); if (o->in2) { - tcg_gen_mov_i64(tcg_ctx, psw_addr, o->in2); + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->psw_addr, o->in2); per_branch(s, false); return DISAS_PC_UPDATED; } else { @@ -1608,10 +1596,10 @@ static void save_link_info(DisasContext *s, DisasOps *o) tcg_gen_andi_i64(tcg_ctx, o->out, o->out, 0xffffffff00000000ull); tcg_gen_ori_i64(tcg_ctx, o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp); t = tcg_temp_new_i64(tcg_ctx); - tcg_gen_shri_i64(tcg_ctx, t, psw_mask, 16); + tcg_gen_shri_i64(tcg_ctx, t, tcg_ctx->psw_mask, 16); tcg_gen_andi_i64(tcg_ctx, t, t, 0x0f000000); tcg_gen_or_i64(tcg_ctx, o->out, o->out, t); - tcg_gen_extu_i32_i64(tcg_ctx, t, cc_op); + tcg_gen_extu_i32_i64(tcg_ctx, t, tcg_ctx->cc_op); tcg_gen_shli_i64(tcg_ctx, t, t, 28); tcg_gen_or_i64(tcg_ctx, o->out, o->out, t); tcg_temp_free_i64(tcg_ctx, t); @@ -1622,7 +1610,7 @@ static DisasJumpType op_bal(DisasContext *s, DisasOps *o) TCGContext *tcg_ctx = s->uc->tcg_ctx; save_link_info(s, o); if (o->in2) { - tcg_gen_mov_i64(tcg_ctx, psw_addr, o->in2); + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->psw_addr, o->in2); per_branch(s, false); return DISAS_PC_UPDATED; } else { @@ -1679,7 +1667,7 @@ static DisasJumpType op_bct32(DisasContext *s, DisasOps *o) c.g2 = false; t = tcg_temp_new_i64(tcg_ctx); - tcg_gen_subi_i64(tcg_ctx, t, regs[r1], 1); + tcg_gen_subi_i64(tcg_ctx, t, tcg_ctx->regs[r1], 1); store_reg32_i64(tcg_ctx, r1, t); c.u.s32.a = tcg_temp_new_i32(tcg_ctx); c.u.s32.b = tcg_const_i32(tcg_ctx, 0); @@ -1703,7 +1691,7 @@ static DisasJumpType op_bcth(DisasContext *s, DisasOps *o) c.g2 = false; t = tcg_temp_new_i64(tcg_ctx); - tcg_gen_shri_i64(tcg_ctx, t, regs[r1], 32); + tcg_gen_shri_i64(tcg_ctx, t, tcg_ctx->regs[r1], 32); tcg_gen_subi_i64(tcg_ctx, t, t, 1); store_reg32h_i64(tcg_ctx, r1, t); c.u.s32.a = 
tcg_temp_new_i32(tcg_ctx); @@ -1727,8 +1715,8 @@ static DisasJumpType op_bct64(DisasContext *s, DisasOps *o) c.g1 = true; c.g2 = false; - tcg_gen_subi_i64(tcg_ctx, regs[r1], regs[r1], 1); - c.u.s64.a = regs[r1]; + tcg_gen_subi_i64(tcg_ctx, tcg_ctx->regs[r1], tcg_ctx->regs[r1], 1); + c.u.s64.a = tcg_ctx->regs[r1]; c.u.s64.b = tcg_const_i64(tcg_ctx, 0); return help_branch(s, &c, is_imm, imm, o->in2); @@ -1750,11 +1738,11 @@ static DisasJumpType op_bx32(DisasContext *s, DisasOps *o) c.g2 = false; t = tcg_temp_new_i64(tcg_ctx); - tcg_gen_add_i64(tcg_ctx, t, regs[r1], regs[r3]); + tcg_gen_add_i64(tcg_ctx, t, tcg_ctx->regs[r1], tcg_ctx->regs[r3]); c.u.s32.a = tcg_temp_new_i32(tcg_ctx); c.u.s32.b = tcg_temp_new_i32(tcg_ctx); tcg_gen_extrl_i64_i32(tcg_ctx, c.u.s32.a, t); - tcg_gen_extrl_i64_i32(tcg_ctx, c.u.s32.b, regs[r3 | 1]); + tcg_gen_extrl_i64_i32(tcg_ctx, c.u.s32.b, tcg_ctx->regs[r3 | 1]); store_reg32_i64(tcg_ctx, r1, t); tcg_temp_free_i64(tcg_ctx, t); @@ -1777,12 +1765,12 @@ static DisasJumpType op_bx64(DisasContext *s, DisasOps *o) c.u.s64.b = load_reg(tcg_ctx, r3 | 1); c.g2 = false; } else { - c.u.s64.b = regs[r3 | 1]; + c.u.s64.b = tcg_ctx->regs[r3 | 1]; c.g2 = true; } - tcg_gen_add_i64(tcg_ctx, regs[r1], regs[r1], regs[r3]); - c.u.s64.a = regs[r1]; + tcg_gen_add_i64(tcg_ctx, tcg_ctx->regs[r1], tcg_ctx->regs[r1], tcg_ctx->regs[r3]); + c.u.s64.a = tcg_ctx->regs[r1]; c.g1 = true; return help_branch(s, &c, is_imm, imm, o->in2); @@ -1817,7 +1805,7 @@ static DisasJumpType op_cj(DisasContext *s, DisasOps *o) static DisasJumpType op_ceb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - gen_helper_ceb(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->in1, o->in2); + gen_helper_ceb(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->in1, o->in2); set_cc_static(s); return DISAS_NEXT; } @@ -1825,7 +1813,7 @@ static DisasJumpType op_ceb(DisasContext *s, DisasOps *o) static DisasJumpType op_cdb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - gen_helper_cdb(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->in1, o->in2); + gen_helper_cdb(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->in1, o->in2); set_cc_static(s); return DISAS_NEXT; } @@ -1833,7 +1821,7 @@ static DisasJumpType op_cdb(DisasContext *s, DisasOps *o) static DisasJumpType op_cxb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - gen_helper_cxb(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->out, o->out2, o->in1, o->in2); + gen_helper_cxb(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->out, o->out2, o->in1, o->in2); set_cc_static(s); return DISAS_NEXT; } @@ -2118,12 +2106,12 @@ static DisasJumpType op_cksm(DisasContext *s, DisasOps *o) int r2 = get_field(s, r2); TCGv_i64 len = tcg_temp_new_i64(tcg_ctx); - gen_helper_cksm(tcg_ctx, len, tcg_ctx->cpu_env, o->in1, o->in2, regs[r2 + 1]); + gen_helper_cksm(tcg_ctx, len, tcg_ctx->cpu_env, o->in1, o->in2, tcg_ctx->regs[r2 + 1]); set_cc_static(s); return_low128(tcg_ctx, o->out); - tcg_gen_add_i64(tcg_ctx, regs[r2], regs[r2], len); - tcg_gen_sub_i64(tcg_ctx, regs[r2 + 1], regs[r2 + 1], len); + tcg_gen_add_i64(tcg_ctx, tcg_ctx->regs[r2], tcg_ctx->regs[r2], len); + tcg_gen_sub_i64(tcg_ctx, tcg_ctx->regs[r2 + 1], tcg_ctx->regs[r2 + 1], len); tcg_temp_free_i64(tcg_ctx, len); return DISAS_NEXT; @@ -2137,29 +2125,29 @@ static DisasJumpType op_clc(DisasContext *s, DisasOps *o) switch (l + 1) { case 1: - tcg_gen_qemu_ld8u(tcg_ctx, cc_src, o->addr1, get_mem_index(s)); - tcg_gen_qemu_ld8u(tcg_ctx, cc_dst, o->in2, get_mem_index(s)); + tcg_gen_qemu_ld8u(tcg_ctx, tcg_ctx->cc_src, o->addr1, 
get_mem_index(s)); + tcg_gen_qemu_ld8u(tcg_ctx, tcg_ctx->cc_dst, o->in2, get_mem_index(s)); break; case 2: - tcg_gen_qemu_ld16u(tcg_ctx, cc_src, o->addr1, get_mem_index(s)); - tcg_gen_qemu_ld16u(tcg_ctx, cc_dst, o->in2, get_mem_index(s)); + tcg_gen_qemu_ld16u(tcg_ctx, tcg_ctx->cc_src, o->addr1, get_mem_index(s)); + tcg_gen_qemu_ld16u(tcg_ctx, tcg_ctx->cc_dst, o->in2, get_mem_index(s)); break; case 4: - tcg_gen_qemu_ld32u(tcg_ctx, cc_src, o->addr1, get_mem_index(s)); - tcg_gen_qemu_ld32u(tcg_ctx, cc_dst, o->in2, get_mem_index(s)); + tcg_gen_qemu_ld32u(tcg_ctx, tcg_ctx->cc_src, o->addr1, get_mem_index(s)); + tcg_gen_qemu_ld32u(tcg_ctx, tcg_ctx->cc_dst, o->in2, get_mem_index(s)); break; case 8: - tcg_gen_qemu_ld64(tcg_ctx, cc_src, o->addr1, get_mem_index(s)); - tcg_gen_qemu_ld64(tcg_ctx, cc_dst, o->in2, get_mem_index(s)); + tcg_gen_qemu_ld64(tcg_ctx, tcg_ctx->cc_src, o->addr1, get_mem_index(s)); + tcg_gen_qemu_ld64(tcg_ctx, tcg_ctx->cc_dst, o->in2, get_mem_index(s)); break; default: vl = tcg_const_i32(tcg_ctx, l); - gen_helper_clc(tcg_ctx, cc_op, tcg_ctx->cpu_env, vl, o->addr1, o->in2); + gen_helper_clc(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, vl, o->addr1, o->in2); tcg_temp_free_i32(tcg_ctx, vl); set_cc_static(s); return DISAS_NEXT; } - gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst); + gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, tcg_ctx->cc_src, tcg_ctx->cc_dst); return DISAS_NEXT; } @@ -2178,7 +2166,7 @@ static DisasJumpType op_clcl(DisasContext *s, DisasOps *o) t1 = tcg_const_i32(tcg_ctx, r1); t2 = tcg_const_i32(tcg_ctx, r2); - gen_helper_clcl(tcg_ctx, cc_op, tcg_ctx->cpu_env, t1, t2); + gen_helper_clcl(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, t1, t2); tcg_temp_free_i32(tcg_ctx, t1); tcg_temp_free_i32(tcg_ctx, t2); set_cc_static(s); @@ -2200,7 +2188,7 @@ static DisasJumpType op_clcle(DisasContext *s, DisasOps *o) t1 = tcg_const_i32(tcg_ctx, r1); t3 = tcg_const_i32(tcg_ctx, r3); - gen_helper_clcle(tcg_ctx, cc_op, tcg_ctx->cpu_env, t1, o->in2, t3); + gen_helper_clcle(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, t1, o->in2, t3); tcg_temp_free_i32(tcg_ctx, t1); tcg_temp_free_i32(tcg_ctx, t3); set_cc_static(s); @@ -2222,7 +2210,7 @@ static DisasJumpType op_clclu(DisasContext *s, DisasOps *o) t1 = tcg_const_i32(tcg_ctx, r1); t3 = tcg_const_i32(tcg_ctx, r3); - gen_helper_clclu(tcg_ctx, cc_op, tcg_ctx->cpu_env, t1, o->in2, t3); + gen_helper_clclu(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, t1, o->in2, t3); tcg_temp_free_i32(tcg_ctx, t1); tcg_temp_free_i32(tcg_ctx, t3); set_cc_static(s); @@ -2235,7 +2223,7 @@ static DisasJumpType op_clm(DisasContext *s, DisasOps *o) TCGv_i32 m3 = tcg_const_i32(tcg_ctx, get_field(s, m3)); TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); tcg_gen_extrl_i64_i32(tcg_ctx, t1, o->in1); - gen_helper_clm(tcg_ctx, cc_op, tcg_ctx->cpu_env, t1, m3, o->in2); + gen_helper_clm(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, t1, m3, o->in2); set_cc_static(s); tcg_temp_free_i32(tcg_ctx, t1); tcg_temp_free_i32(tcg_ctx, m3); @@ -2245,7 +2233,7 @@ static DisasJumpType op_clm(DisasContext *s, DisasOps *o) static DisasJumpType op_clst(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - gen_helper_clst(tcg_ctx, o->in1, tcg_ctx->cpu_env, regs[0], o->in1, o->in2); + gen_helper_clst(tcg_ctx, o->in1, tcg_ctx->cpu_env, tcg_ctx->regs[0], o->in1, o->in2); set_cc_static(s); return_low128(tcg_ctx, o->in2); return DISAS_NEXT; @@ -2281,7 +2269,7 @@ static DisasJumpType op_cs(DisasContext *s, DisasOps *o) produces the output CC value, thus the NE sense of the test. 
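(COMPARE AND SWAP sets CC 0 on a successful exchange and CC 1 when the comparison fails, so one setcond with TCG_COND_NE against the expected value yields the architected CC directly.)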
*/ cc = tcg_temp_new_i64(tcg_ctx); tcg_gen_setcond_i64(tcg_ctx, TCG_COND_NE, cc, o->in2, o->out); - tcg_gen_extrl_i64_i32(tcg_ctx, cc_op, cc); + tcg_gen_extrl_i64_i32(tcg_ctx, tcg_ctx->cc_op, cc); tcg_temp_free_i64(tcg_ctx, cc); set_cc_static(s); @@ -2326,9 +2314,9 @@ static DisasJumpType op_csst(DisasContext *s, DisasOps *o) TCGv_i32 t_r3 = tcg_const_i32(tcg_ctx, r3); if (tb_cflags(s->base.tb) & CF_PARALLEL) { - gen_helper_csst_parallel(tcg_ctx, cc_op, tcg_ctx->cpu_env, t_r3, o->addr1, o->in2); + gen_helper_csst_parallel(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, t_r3, o->addr1, o->in2); } else { - gen_helper_csst(tcg_ctx, cc_op, tcg_ctx->cpu_env, t_r3, o->addr1, o->in2); + gen_helper_csst(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, t_r3, o->addr1, o->in2); } tcg_temp_free_i32(tcg_ctx, t_r3); @@ -2356,7 +2344,7 @@ static DisasJumpType op_csp(DisasContext *s, DisasOps *o) /* Are the memory and expected values (un)equal? */ cc = tcg_temp_new_i64(tcg_ctx); tcg_gen_setcond_i64(tcg_ctx, TCG_COND_NE, cc, o->in1, old); - tcg_gen_extrl_i64_i32(tcg_ctx, cc_op, cc); + tcg_gen_extrl_i64_i32(tcg_ctx, tcg_ctx->cc_op, cc); /* Write back the output now, so that it happens before the following branch, so that we don't need local temps. */ @@ -2436,22 +2424,22 @@ static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o) switch (s->insn->data) { case 12: - gen_helper_cu12(tcg_ctx, cc_op, tcg_ctx->cpu_env, tr1, tr2, chk); + gen_helper_cu12(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, tr1, tr2, chk); break; case 14: - gen_helper_cu14(tcg_ctx, cc_op, tcg_ctx->cpu_env, tr1, tr2, chk); + gen_helper_cu14(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, tr1, tr2, chk); break; case 21: - gen_helper_cu21(tcg_ctx, cc_op, tcg_ctx->cpu_env, tr1, tr2, chk); + gen_helper_cu21(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, tr1, tr2, chk); break; case 24: - gen_helper_cu24(tcg_ctx, cc_op, tcg_ctx->cpu_env, tr1, tr2, chk); + gen_helper_cu24(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, tr1, tr2, chk); break; case 41: - gen_helper_cu41(tcg_ctx, cc_op, tcg_ctx->cpu_env, tr1, tr2, chk); + gen_helper_cu41(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, tr1, tr2, chk); break; case 42: - gen_helper_cu42(tcg_ctx, cc_op, tcg_ctx->cpu_env, tr1, tr2, chk); + gen_helper_cu42(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, tr1, tr2, chk); break; default: //g_assert_not_reached(); @@ -2566,10 +2554,10 @@ static DisasJumpType op_epsw(DisasContext *s, DisasOps *o) /* Note the "subsequently" in the PoO, which implies a defined result if r1 == r2. Thus we cannot defer these writes to an output hook. */ - tcg_gen_shri_i64(tcg_ctx, t, psw_mask, 32); + tcg_gen_shri_i64(tcg_ctx, t, tcg_ctx->psw_mask, 32); store_reg32_i64(tcg_ctx, r1, t); if (r2 != 0) { - store_reg32_i64(tcg_ctx, r2, psw_mask); + store_reg32_i64(tcg_ctx, r2, tcg_ctx->psw_mask); } tcg_temp_free_i64(tcg_ctx, t); @@ -2595,7 +2583,7 @@ static DisasJumpType op_ex(DisasContext *s, DisasOps *o) if (r1 == 0) { v1 = tcg_const_i64(tcg_ctx, 0); } else { - v1 = regs[r1]; + v1 = tcg_ctx->regs[r1]; } ilen = tcg_const_i32(tcg_ctx, s->ilen); @@ -2666,7 +2654,7 @@ static DisasJumpType op_flogr(DisasContext *s, DisasOps *o) input is zero, we still get the correct result after and'ing. 
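(FLOGR leaves the count of leading zero bits in r1 and, in r1+1, the input with its leftmost one bit cleared; shifting the sign-bit mask right by that count isolates exactly the bit to clear.)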
*/ tcg_gen_movi_i64(tcg_ctx, o->out2, 0x8000000000000000ull); tcg_gen_shr_i64(tcg_ctx, o->out2, o->out2, o->out); - tcg_gen_andc_i64(tcg_ctx, o->out2, cc_dst, o->out2); + tcg_gen_andc_i64(tcg_ctx, o->out2, tcg_ctx->cc_dst, o->out2); return DISAS_NEXT; } @@ -2747,9 +2735,9 @@ static DisasJumpType op_ipm(DisasContext *s, DisasOps *o) gen_op_calc_cc(s); t1 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_extract_i64(tcg_ctx, t1, psw_mask, 40, 4); + tcg_gen_extract_i64(tcg_ctx, t1, tcg_ctx->psw_mask, 40, 4); t2 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_extu_i32_i64(tcg_ctx, t2, cc_op); + tcg_gen_extu_i32_i64(tcg_ctx, t2, tcg_ctx->cc_op); tcg_gen_deposit_i64(tcg_ctx, t1, t1, t2, 4, 60); tcg_gen_deposit_i64(tcg_ctx, o->out, o->out, t1, 24, 8); tcg_temp_free_i64(tcg_ctx, t1); @@ -2839,7 +2827,7 @@ static DisasJumpType op_msa(DisasContext *s, DisasOps *o) t_r2 = tcg_const_i32(tcg_ctx, r2); t_r3 = tcg_const_i32(tcg_ctx, r3); type = tcg_const_i32(tcg_ctx, s->insn->data); - gen_helper_msa(tcg_ctx, cc_op, tcg_ctx->cpu_env, t_r1, t_r2, t_r3, type); + gen_helper_msa(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, t_r1, t_r2, t_r3, type); set_cc_static(s); tcg_temp_free_i32(tcg_ctx, t_r1); tcg_temp_free_i32(tcg_ctx, t_r2); @@ -2851,7 +2839,7 @@ static DisasJumpType op_msa(DisasContext *s, DisasOps *o) static DisasJumpType op_keb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - gen_helper_keb(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->in1, o->in2); + gen_helper_keb(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->in1, o->in2); set_cc_static(s); return DISAS_NEXT; } @@ -2859,7 +2847,7 @@ static DisasJumpType op_keb(DisasContext *s, DisasOps *o) static DisasJumpType op_kdb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - gen_helper_kdb(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->in1, o->in2); + gen_helper_kdb(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->in1, o->in2); set_cc_static(s); return DISAS_NEXT; } @@ -2867,7 +2855,7 @@ static DisasJumpType op_kdb(DisasContext *s, DisasOps *o) static DisasJumpType op_kxb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - gen_helper_kxb(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->out, o->out2, o->in1, o->in2); + gen_helper_kxb(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->out, o->out2, o->in1, o->in2); set_cc_static(s); return DISAS_NEXT; } @@ -3329,7 +3317,7 @@ static DisasJumpType op_lm64(DisasContext *s, DisasOps *o) /* Only one register to read. */ if (unlikely(r1 == r3)) { - tcg_gen_qemu_ld64(tcg_ctx, regs[r1], o->in2, get_mem_index(s)); + tcg_gen_qemu_ld64(tcg_ctx, tcg_ctx->regs[r1], o->in2, get_mem_index(s)); return DISAS_NEXT; } @@ -3339,8 +3327,8 @@ static DisasJumpType op_lm64(DisasContext *s, DisasOps *o) t2 = tcg_temp_new_i64(tcg_ctx); tcg_gen_qemu_ld64(tcg_ctx, t1, o->in2, get_mem_index(s)); tcg_gen_addi_i64(tcg_ctx, t2, o->in2, 8 * ((r3 - r1) & 15)); - tcg_gen_qemu_ld64(tcg_ctx, regs[r3], t2, get_mem_index(s)); - tcg_gen_mov_i64(tcg_ctx, regs[r1], t1); + tcg_gen_qemu_ld64(tcg_ctx, tcg_ctx->regs[r3], t2, get_mem_index(s)); + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->regs[r1], t1); tcg_temp_free(tcg_ctx, t2); /* Only two registers to read. 
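(LMG register ranges wrap modulo 16 — r3 may be numerically smaller than r1 — which is why the offset computation and the loop below mask with & 15 instead of comparing magnitudes.)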
*/ @@ -3355,7 +3343,7 @@ static DisasJumpType op_lm64(DisasContext *s, DisasOps *o) while (r1 != r3) { r1 = (r1 + 1) & 15; tcg_gen_add_i64(tcg_ctx, o->in2, o->in2, t1); - tcg_gen_qemu_ld64(tcg_ctx, regs[r1], o->in2, get_mem_index(s)); + tcg_gen_qemu_ld64(tcg_ctx, tcg_ctx->regs[r1], o->in2, get_mem_index(s)); } tcg_temp_free(tcg_ctx, t1); @@ -3527,7 +3515,7 @@ static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o) t1 = tcg_const_i32(tcg_ctx, r1); t2 = tcg_const_i32(tcg_ctx, r2); - gen_helper_mvcl(tcg_ctx, cc_op, tcg_ctx->cpu_env, t1, t2); + gen_helper_mvcl(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, t1, t2); tcg_temp_free_i32(tcg_ctx, t1); tcg_temp_free_i32(tcg_ctx, t2); set_cc_static(s); @@ -3549,7 +3537,7 @@ static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o) t1 = tcg_const_i32(tcg_ctx, r1); t3 = tcg_const_i32(tcg_ctx, r3); - gen_helper_mvcle(tcg_ctx, cc_op, tcg_ctx->cpu_env, t1, o->in2, t3); + gen_helper_mvcle(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, t1, o->in2, t3); tcg_temp_free_i32(tcg_ctx, t1); tcg_temp_free_i32(tcg_ctx, t3); set_cc_static(s); @@ -3571,7 +3559,7 @@ static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o) t1 = tcg_const_i32(tcg_ctx, r1); t3 = tcg_const_i32(tcg_ctx, r3); - gen_helper_mvclu(tcg_ctx, cc_op, tcg_ctx->cpu_env, t1, o->in2, t3); + gen_helper_mvclu(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, t1, o->in2, t3); tcg_temp_free_i32(tcg_ctx, t1); tcg_temp_free_i32(tcg_ctx, t3); set_cc_static(s); @@ -3582,7 +3570,7 @@ static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r3 = get_field(s, r3); - gen_helper_mvcos(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->addr1, o->in2, regs[r3]); + gen_helper_mvcos(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->addr1, o->in2, tcg_ctx->regs[r3]); set_cc_static(s); return DISAS_NEXT; } @@ -3591,7 +3579,7 @@ static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = get_field(s, l1); - gen_helper_mvcp(tcg_ctx, cc_op, tcg_ctx->cpu_env, regs[r1], o->addr1, o->in2); + gen_helper_mvcp(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, tcg_ctx->regs[r1], o->addr1, o->in2); set_cc_static(s); return DISAS_NEXT; } @@ -3600,7 +3588,7 @@ static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = get_field(s, l1); - gen_helper_mvcs(tcg_ctx, cc_op, tcg_ctx->cpu_env, regs[r1], o->addr1, o->in2); + gen_helper_mvcs(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, tcg_ctx->regs[r1], o->addr1, o->in2); set_cc_static(s); return DISAS_NEXT; } @@ -3626,7 +3614,7 @@ static DisasJumpType op_mvo(DisasContext *s, DisasOps *o) static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - gen_helper_mvpg(tcg_ctx, cc_op, tcg_ctx->cpu_env, regs[0], o->in1, o->in2); + gen_helper_mvpg(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, tcg_ctx->regs[0], o->in1, o->in2); set_cc_static(s); return DISAS_NEXT; } @@ -3637,7 +3625,7 @@ static DisasJumpType op_mvst(DisasContext *s, DisasOps *o) TCGv_i32 t1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); TCGv_i32 t2 = tcg_const_i32(tcg_ctx, get_field(s, r2)); - gen_helper_mvst(tcg_ctx, cc_op, tcg_ctx->cpu_env, t1, t2); + gen_helper_mvst(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, t1, t2); tcg_temp_free_i32(tcg_ctx, t1); tcg_temp_free_i32(tcg_ctx, t2); set_cc_static(s); @@ -3779,7 +3767,7 @@ static DisasJumpType op_nc(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 l = tcg_const_i32(tcg_ctx, get_field(s, l1)); - 
gen_helper_nc(tcg_ctx, cc_op, tcg_ctx->cpu_env, l, o->addr1, o->in2); + gen_helper_nc(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, l, o->addr1, o->in2); tcg_temp_free_i32(tcg_ctx, l); set_cc_static(s); return DISAS_NEXT; @@ -3818,7 +3806,7 @@ static DisasJumpType op_oc(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 l = tcg_const_i32(tcg_ctx, get_field(s, l1)); - gen_helper_oc(tcg_ctx, cc_op, tcg_ctx->cpu_env, l, o->addr1, o->in2); + gen_helper_oc(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, l, o->addr1, o->in2); tcg_temp_free_i32(tcg_ctx, l); set_cc_static(s); return DISAS_NEXT; @@ -3843,8 +3831,8 @@ static DisasJumpType op_ori(DisasContext *s, DisasOps *o) tcg_gen_or_i64(tcg_ctx, o->out, o->in1, o->in2); /* Produce the CC from only the bits manipulated. */ - tcg_gen_andi_i64(tcg_ctx, cc_dst, o->out, mask); - set_cc_nz_u64(s, cc_dst); + tcg_gen_andi_i64(tcg_ctx, tcg_ctx->cc_dst, o->out, mask); + set_cc_nz_u64(s, tcg_ctx->cc_dst); return DISAS_NEXT; } @@ -4068,8 +4056,8 @@ static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o) } /* Set the CC. */ - tcg_gen_andi_i64(tcg_ctx, cc_dst, o->out, mask); - set_cc_nz_u64(s, cc_dst); + tcg_gen_andi_i64(tcg_ctx, tcg_ctx->cc_dst, o->out, mask); + set_cc_nz_u64(s, tcg_ctx->cc_dst); return DISAS_NEXT; } @@ -4120,7 +4108,7 @@ static DisasJumpType op_rll64(DisasContext *s, DisasOps *o) static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - gen_helper_rrbe(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->in2); + gen_helper_rrbe(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->in2); set_cc_static(s); return DISAS_NEXT; } @@ -4162,7 +4150,7 @@ static DisasJumpType op_sam(DisasContext *s, DisasOps *o) s->pc_tmp &= mask; tsam = tcg_const_i64(tcg_ctx, sam); - tcg_gen_deposit_i64(tcg_ctx, psw_mask, psw_mask, tsam, 31, 2); + tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->psw_mask, tcg_ctx->psw_mask, tsam, 31, 2); tcg_temp_free_i64(tcg_ctx, tsam); /* Always exit the TB, since we (may have) changed execution mode. 
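(SET ADDRESSING MODE rewrites the PSW addressing-mode bits, and translated code is specialized on that mode, so nothing compiled before this point may be chained to afterwards.)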
*/ @@ -4224,7 +4212,7 @@ static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o) static DisasJumpType op_servc(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - gen_helper_servc(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->in2, o->in1); + gen_helper_servc(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->in2, o->in1); set_cc_static(s); return DISAS_NEXT; } @@ -4234,7 +4222,7 @@ static DisasJumpType op_sigp(DisasContext *s, DisasOps *o) TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 r1 = tcg_const_i32(tcg_ctx, get_field(s, r1)); TCGv_i32 r3 = tcg_const_i32(tcg_ctx, get_field(s, r3)); - gen_helper_sigp(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->in2, r1, r3); + gen_helper_sigp(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->in2, r1, r3); set_cc_static(s); tcg_temp_free_i32(tcg_ctx, r1); tcg_temp_free_i32(tcg_ctx, r3); @@ -4267,14 +4255,14 @@ static DisasJumpType op_soc(DisasContext *s, DisasOps *o) a = get_address(s, 0, get_field(s, b2), get_field(s, d2)); switch (s->insn->data) { case 1: /* STOCG */ - tcg_gen_qemu_st64(tcg_ctx, regs[r1], a, get_mem_index(s)); + tcg_gen_qemu_st64(tcg_ctx, tcg_ctx->regs[r1], a, get_mem_index(s)); break; case 0: /* STOC */ - tcg_gen_qemu_st32(tcg_ctx, regs[r1], a, get_mem_index(s)); + tcg_gen_qemu_st32(tcg_ctx, tcg_ctx->regs[r1], a, get_mem_index(s)); break; case 2: /* STOCFH */ h = tcg_temp_new_i64(tcg_ctx); - tcg_gen_shri_i64(tcg_ctx, h, regs[r1], 32); + tcg_gen_shri_i64(tcg_ctx, h, tcg_ctx->regs[r1], 32); tcg_gen_qemu_st32(tcg_ctx, h, a, get_mem_index(s)); tcg_temp_free_i64(tcg_ctx, h); break; @@ -4376,12 +4364,12 @@ static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o) static DisasJumpType op_spm(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - tcg_gen_extrl_i64_i32(tcg_ctx, cc_op, o->in1); - tcg_gen_extract_i32(tcg_ctx, cc_op, cc_op, 28, 2); + tcg_gen_extrl_i64_i32(tcg_ctx, tcg_ctx->cc_op, o->in1); + tcg_gen_extract_i32(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cc_op, 28, 2); set_cc_static(s); tcg_gen_shri_i64(tcg_ctx, o->in1, o->in1, 24); - tcg_gen_deposit_i64(tcg_ctx, psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4); + tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->psw_mask, tcg_ctx->psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4); return DISAS_NEXT; } @@ -4397,20 +4385,20 @@ static DisasJumpType op_ectg(DisasContext *s, DisasOps *o) /* fetch all operands first */ o->in1 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_addi_i64(tcg_ctx, o->in1, regs[b1], d1); + tcg_gen_addi_i64(tcg_ctx, o->in1, tcg_ctx->regs[b1], d1); o->in2 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_addi_i64(tcg_ctx, o->in2, regs[b2], d2); + tcg_gen_addi_i64(tcg_ctx, o->in2, tcg_ctx->regs[b2], d2); o->addr1 = get_address(s, 0, r3, 0); /* load the third operand into r3 before modifying anything */ - tcg_gen_qemu_ld64(tcg_ctx, regs[r3], o->addr1, get_mem_index(s)); + tcg_gen_qemu_ld64(tcg_ctx, tcg_ctx->regs[r3], o->addr1, get_mem_index(s)); /* subtract CPU timer from first operand and store in GR0 */ gen_helper_stpt(tcg_ctx, tmp, tcg_ctx->cpu_env); - tcg_gen_sub_i64(tcg_ctx, regs[0], o->in1, tmp); + tcg_gen_sub_i64(tcg_ctx, tcg_ctx->regs[0], o->in1, tmp); /* store second operand in GR1 */ - tcg_gen_mov_i64(tcg_ctx, regs[1], o->in2); + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->regs[1], o->in2); tcg_temp_free_i64(tcg_ctx, tmp); return DISAS_NEXT; @@ -4420,7 +4408,7 @@ static DisasJumpType op_spka(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_shri_i64(tcg_ctx, o->in2, o->in2, 4); - tcg_gen_deposit_i64(tcg_ctx, psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4); + 
tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->psw_mask, tcg_ctx->psw_mask, o->in2, PSW_SHIFT_KEY, 4); return DISAS_NEXT; } @@ -4434,7 +4422,7 @@ static DisasJumpType op_sske(DisasContext *s, DisasOps *o) static DisasJumpType op_ssm(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - tcg_gen_deposit_i64(tcg_ctx, psw_mask, psw_mask, o->in2, 56, 8); + tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->psw_mask, tcg_ctx->psw_mask, o->in2, 56, 8); /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */ return DISAS_PC_STALE_NOCHAIN; } @@ -4487,7 +4475,7 @@ static DisasJumpType op_sck(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; tcg_gen_qemu_ld_i64(tcg_ctx, o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN); - gen_helper_sck(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->in1); + gen_helper_sck(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->in1); set_cc_static(s); return DISAS_NEXT; } @@ -4502,7 +4490,7 @@ static DisasJumpType op_sckc(DisasContext *s, DisasOps *o) static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - gen_helper_sckpf(tcg_ctx, tcg_ctx->cpu_env, regs[0]); + gen_helper_sckpf(tcg_ctx, tcg_ctx->cpu_env, tcg_ctx->regs[0]); return DISAS_NEXT; } @@ -4566,7 +4554,7 @@ static DisasJumpType op_stpt(DisasContext *s, DisasOps *o) static DisasJumpType op_stsi(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - gen_helper_stsi(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->in2, regs[0], regs[1]); + gen_helper_stsi(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->in2, tcg_ctx->regs[0], tcg_ctx->regs[1]); set_cc_static(s); return DISAS_NEXT; } @@ -4581,7 +4569,7 @@ static DisasJumpType op_spx(DisasContext *s, DisasOps *o) static DisasJumpType op_xsch(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - gen_helper_xsch(tcg_ctx, tcg_ctx->cpu_env, regs[1]); + gen_helper_xsch(tcg_ctx, tcg_ctx->cpu_env, tcg_ctx->regs[1]); set_cc_static(s); return DISAS_NEXT; } @@ -4589,7 +4577,7 @@ static DisasJumpType op_xsch(DisasContext *s, DisasOps *o) static DisasJumpType op_csch(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - gen_helper_csch(tcg_ctx, tcg_ctx->cpu_env, regs[1]); + gen_helper_csch(tcg_ctx, tcg_ctx->cpu_env, tcg_ctx->regs[1]); set_cc_static(s); return DISAS_NEXT; } @@ -4597,7 +4585,7 @@ static DisasJumpType op_csch(DisasContext *s, DisasOps *o) static DisasJumpType op_hsch(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - gen_helper_hsch(tcg_ctx, tcg_ctx->cpu_env, regs[1]); + gen_helper_hsch(tcg_ctx, tcg_ctx->cpu_env, tcg_ctx->regs[1]); set_cc_static(s); return DISAS_NEXT; } @@ -4605,7 +4593,7 @@ static DisasJumpType op_hsch(DisasContext *s, DisasOps *o) static DisasJumpType op_msch(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - gen_helper_msch(tcg_ctx, tcg_ctx->cpu_env, regs[1], o->in2); + gen_helper_msch(tcg_ctx, tcg_ctx->cpu_env, tcg_ctx->regs[1], o->in2); set_cc_static(s); return DISAS_NEXT; } @@ -4613,7 +4601,7 @@ static DisasJumpType op_msch(DisasContext *s, DisasOps *o) static DisasJumpType op_rchp(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - gen_helper_rchp(tcg_ctx, tcg_ctx->cpu_env, regs[1]); + gen_helper_rchp(tcg_ctx, tcg_ctx->cpu_env, tcg_ctx->regs[1]); set_cc_static(s); return DISAS_NEXT; } @@ -4621,7 +4609,7 @@ static DisasJumpType op_rchp(DisasContext *s, DisasOps *o) static DisasJumpType op_rsch(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - gen_helper_rsch(tcg_ctx, 
tcg_ctx->cpu_env, regs[1]); + gen_helper_rsch(tcg_ctx, tcg_ctx->cpu_env, tcg_ctx->regs[1]); set_cc_static(s); return DISAS_NEXT; } @@ -4629,14 +4617,14 @@ static DisasJumpType op_rsch(DisasContext *s, DisasOps *o) static DisasJumpType op_sal(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - gen_helper_sal(tcg_ctx, tcg_ctx->cpu_env, regs[1]); + gen_helper_sal(tcg_ctx, tcg_ctx->cpu_env, tcg_ctx->regs[1]); return DISAS_NEXT; } static DisasJumpType op_schm(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - gen_helper_schm(tcg_ctx, tcg_ctx->cpu_env, regs[1], regs[2], o->in2); + gen_helper_schm(tcg_ctx, tcg_ctx->cpu_env, tcg_ctx->regs[1], tcg_ctx->regs[2], o->in2); return DISAS_NEXT; } @@ -4656,7 +4644,7 @@ static DisasJumpType op_stcps(DisasContext *s, DisasOps *o) static DisasJumpType op_ssch(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - gen_helper_ssch(tcg_ctx, tcg_ctx->cpu_env, regs[1], o->in2); + gen_helper_ssch(tcg_ctx, tcg_ctx->cpu_env, tcg_ctx->regs[1], o->in2); set_cc_static(s); return DISAS_NEXT; } @@ -4664,7 +4652,7 @@ static DisasJumpType op_ssch(DisasContext *s, DisasOps *o) static DisasJumpType op_stsch(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - gen_helper_stsch(tcg_ctx, tcg_ctx->cpu_env, regs[1], o->in2); + gen_helper_stsch(tcg_ctx, tcg_ctx->cpu_env, tcg_ctx->regs[1], o->in2); set_cc_static(s); return DISAS_NEXT; } @@ -4680,7 +4668,7 @@ static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o) static DisasJumpType op_tpi(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - gen_helper_tpi(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->addr1); + gen_helper_tpi(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->addr1); set_cc_static(s); return DISAS_NEXT; } @@ -4688,7 +4676,7 @@ static DisasJumpType op_tpi(DisasContext *s, DisasOps *o) static DisasJumpType op_tsch(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - gen_helper_tsch(tcg_ctx, tcg_ctx->cpu_env, regs[1], o->in2); + gen_helper_tsch(tcg_ctx, tcg_ctx->cpu_env, tcg_ctx->regs[1], o->in2); set_cc_static(s); return DISAS_NEXT; } @@ -4719,15 +4707,15 @@ static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o) If we let the output hook perform the store then if we fault and restart, we'll have the wrong SYSTEM MASK in place. */ t = tcg_temp_new_i64(tcg_ctx); - tcg_gen_shri_i64(tcg_ctx, t, psw_mask, 56); + tcg_gen_shri_i64(tcg_ctx, t, tcg_ctx->psw_mask, 56); tcg_gen_qemu_st8(tcg_ctx, t, o->addr1, get_mem_index(s)); tcg_temp_free_i64(tcg_ctx, t); if (s->fields.op == 0xac) { - tcg_gen_andi_i64(tcg_ctx, psw_mask, psw_mask, + tcg_gen_andi_i64(tcg_ctx, tcg_ctx->psw_mask, tcg_ctx->psw_mask, (i2 << 56) | 0x00ffffffffffffffull); } else { - tcg_gen_ori_i64(tcg_ctx, psw_mask, psw_mask, i2 << 56); + tcg_gen_ori_i64(tcg_ctx, tcg_ctx->psw_mask, tcg_ctx->psw_mask, i2 << 56); } /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. 
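(STNSM/STOSM edit the system-mask byte and can unmask a pending interrupt, hence the no-chain exit, mirroring op_ssm above.)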
*/ @@ -4750,7 +4738,7 @@ static DisasJumpType op_stura(DisasContext *s, DisasOps *o) static DisasJumpType op_stfle(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - gen_helper_stfle(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->in2); + gen_helper_stfle(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->in2); set_cc_static(s); return DISAS_NEXT; } @@ -4854,9 +4842,9 @@ static DisasJumpType op_stm(DisasContext *s, DisasOps *o) while (1) { if (size == 8) { - tcg_gen_qemu_st64(tcg_ctx, regs[r1], o->in2, get_mem_index(s)); + tcg_gen_qemu_st64(tcg_ctx, tcg_ctx->regs[r1], o->in2, get_mem_index(s)); } else { - tcg_gen_qemu_st32(tcg_ctx, regs[r1], o->in2, get_mem_index(s)); + tcg_gen_qemu_st32(tcg_ctx, tcg_ctx->regs[r1], o->in2, get_mem_index(s)); } if (r1 == r3) { break; @@ -4879,7 +4867,7 @@ static DisasJumpType op_stmh(DisasContext *s, DisasOps *o) TCGv_i64 t32 = tcg_const_i64(tcg_ctx, 32); while (1) { - tcg_gen_shl_i64(tcg_ctx, t, regs[r1], t32); + tcg_gen_shl_i64(tcg_ctx, t, tcg_ctx->regs[r1], t32); tcg_gen_qemu_st32(tcg_ctx, t, o->in2, get_mem_index(s)); if (r1 == r3) { break; @@ -5003,7 +4991,7 @@ static DisasJumpType op_tam(DisasContext *s, DisasOps *o) static DisasJumpType op_tceb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - gen_helper_tceb(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->in1, o->in2); + gen_helper_tceb(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->in1, o->in2); set_cc_static(s); return DISAS_NEXT; } @@ -5011,7 +4999,7 @@ static DisasJumpType op_tceb(DisasContext *s, DisasOps *o) static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - gen_helper_tcdb(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->in1, o->in2); + gen_helper_tcdb(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->in1, o->in2); set_cc_static(s); return DISAS_NEXT; } @@ -5019,7 +5007,7 @@ static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o) static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - gen_helper_tcxb(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->out, o->out2, o->in2); + gen_helper_tcxb(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->out, o->out2, o->in2); set_cc_static(s); return DISAS_NEXT; } @@ -5027,7 +5015,7 @@ static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o) static DisasJumpType op_testblock(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - gen_helper_testblock(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->in2); + gen_helper_testblock(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->in2); set_cc_static(s); return DISAS_NEXT; } @@ -5035,7 +5023,7 @@ static DisasJumpType op_testblock(DisasContext *s, DisasOps *o) static DisasJumpType op_tprot(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - gen_helper_tprot(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->addr1, o->in2); + gen_helper_tprot(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->addr1, o->in2); set_cc_static(s); return DISAS_NEXT; } @@ -5044,7 +5032,7 @@ static DisasJumpType op_tp(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 l1 = tcg_const_i32(tcg_ctx, get_field(s, l1) + 1); - gen_helper_tp(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->addr1, l1); + gen_helper_tp(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->addr1, l1); tcg_temp_free_i32(tcg_ctx, l1); set_cc_static(s); return DISAS_NEXT; @@ -5073,7 +5061,7 @@ static DisasJumpType op_trt(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 l = tcg_const_i32(tcg_ctx, get_field(s, l1)); - gen_helper_trt(tcg_ctx, cc_op, 
tcg_ctx->cpu_env, l, o->addr1, o->in2); + gen_helper_trt(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, l, o->addr1, o->in2); tcg_temp_free_i32(tcg_ctx, l); set_cc_static(s); return DISAS_NEXT; @@ -5083,7 +5071,7 @@ static DisasJumpType op_trtr(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 l = tcg_const_i32(tcg_ctx, get_field(s, l1)); - gen_helper_trtr(tcg_ctx, cc_op, tcg_ctx->cpu_env, l, o->addr1, o->in2); + gen_helper_trtr(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, l, o->addr1, o->in2); tcg_temp_free_i32(tcg_ctx, l); set_cc_static(s); return DISAS_NEXT; @@ -5104,14 +5092,14 @@ static DisasJumpType op_trXX(DisasContext *s, DisasOps *o) if (m3 & 1) { tcg_gen_movi_i32(tcg_ctx, tst, -1); } else { - tcg_gen_extrl_i64_i32(tcg_ctx, tst, regs[0]); + tcg_gen_extrl_i64_i32(tcg_ctx, tst, tcg_ctx->regs[0]); if (s->insn->opc & 3) { tcg_gen_ext8u_i32(tcg_ctx, tst, tst); } else { tcg_gen_ext16u_i32(tcg_ctx, tst, tst); } } - gen_helper_trXX(tcg_ctx, cc_op, tcg_ctx->cpu_env, r1, r2, tst, sizes); + gen_helper_trXX(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, r1, r2, tst, sizes); tcg_temp_free_i32(tcg_ctx, r1); tcg_temp_free_i32(tcg_ctx, r2); @@ -5126,7 +5114,7 @@ static DisasJumpType op_ts(DisasContext *s, DisasOps *o) TCGContext *tcg_ctx = s->uc->tcg_ctx; TCGv_i32 t1 = tcg_const_i32(tcg_ctx, 0xff); tcg_gen_atomic_xchg_i32(tcg_ctx, t1, o->in2, t1, get_mem_index(s), MO_UB); - tcg_gen_extract_i32(tcg_ctx, cc_op, t1, 7, 1); + tcg_gen_extract_i32(tcg_ctx, tcg_ctx->cc_op, t1, 7, 1); tcg_temp_free_i32(tcg_ctx, t1); set_cc_static(s); return DISAS_NEXT; @@ -5153,7 +5141,7 @@ static DisasJumpType op_unpka(DisasContext *s, DisasOps *o) return DISAS_NORETURN; } l = tcg_const_i32(tcg_ctx, l1); - gen_helper_unpka(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->addr1, l, o->in2); + gen_helper_unpka(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->addr1, l, o->in2); tcg_temp_free_i32(tcg_ctx, l); set_cc_static(s); return DISAS_NEXT; @@ -5171,7 +5159,7 @@ static DisasJumpType op_unpku(DisasContext *s, DisasOps *o) return DISAS_NORETURN; } l = tcg_const_i32(tcg_ctx, l1); - gen_helper_unpku(tcg_ctx, cc_op, tcg_ctx->cpu_env, o->addr1, l, o->in2); + gen_helper_unpku(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, o->addr1, l, o->in2); tcg_temp_free_i32(tcg_ctx, l); set_cc_static(s); return DISAS_NEXT; @@ -5226,7 +5214,7 @@ static DisasJumpType op_xc(DisasContext *s, DisasOps *o) /* But in general we'll defer to a helper. */ o->in2 = get_address(s, 0, b2, d2); t32 = tcg_const_i32(tcg_ctx, l); - gen_helper_xc(tcg_ctx, cc_op, tcg_ctx->cpu_env, t32, o->addr1, o->in2); + gen_helper_xc(tcg_ctx, tcg_ctx->cc_op, tcg_ctx->cpu_env, t32, o->addr1, o->in2); tcg_temp_free_i32(tcg_ctx, t32); set_cc_static(s); return DISAS_NEXT; @@ -5251,8 +5239,8 @@ static DisasJumpType op_xori(DisasContext *s, DisasOps *o) tcg_gen_xor_i64(tcg_ctx, o->out, o->in1, o->in2); /* Produce the CC from only the bits manipulated. 
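(Same convention as the ANDI and ORI immediates earlier: only the masked field is deposited into cc_dst, so the resulting CC describes just the bits the instruction actually touched.)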
*/ - tcg_gen_andi_i64(tcg_ctx, cc_dst, o->out, mask); - set_cc_nz_u64(s, cc_dst); + tcg_gen_andi_i64(tcg_ctx, tcg_ctx->cc_dst, o->out, mask); + set_cc_nz_u64(s, tcg_ctx->cc_dst); return DISAS_NEXT; } @@ -5497,8 +5485,8 @@ static void cout_neg64(DisasContext *s, DisasOps *o) static void cout_nz32(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; - tcg_gen_ext32u_i64(tcg_ctx, cc_dst, o->out); - gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst); + tcg_gen_ext32u_i64(tcg_ctx, tcg_ctx->cc_dst, o->out); + gen_op_update1_cc_i64(s, CC_OP_NZ, tcg_ctx->cc_dst); } static void cout_nz64(DisasContext *s, DisasOps *o) @@ -5579,16 +5567,18 @@ static void prep_new_P(DisasContext *s, DisasOps *o) static void prep_r1(DisasContext *s, DisasOps *o) { - o->out = regs[get_field(s, r1)]; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->out = tcg_ctx->regs[get_field(s, r1)]; o->g_out = true; } #define SPEC_prep_r1 0 static void prep_r1_P(DisasContext *s, DisasOps *o) { + TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = get_field(s, r1); - o->out = regs[r1]; - o->out2 = regs[r1 + 1]; + o->out = tcg_ctx->regs[r1]; + o->out2 = tcg_ctx->regs[r1 + 1]; o->g_out = o->g_out2 = true; } #define SPEC_prep_r1_P SPEC_r1_even @@ -5619,7 +5609,7 @@ static void wout_r1_8(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = get_field(s, r1); - tcg_gen_deposit_i64(tcg_ctx, regs[r1], regs[r1], o->out, 0, 8); + tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->regs[r1], tcg_ctx->regs[r1], o->out, 0, 8); } #define SPEC_wout_r1_8 0 @@ -5627,7 +5617,7 @@ static void wout_r1_16(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = get_field(s, r1); - tcg_gen_deposit_i64(tcg_ctx, regs[r1], regs[r1], o->out, 0, 16); + tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->regs[r1], tcg_ctx->regs[r1], o->out, 0, 16); } #define SPEC_wout_r1_16 0 @@ -5805,7 +5795,8 @@ static void in1_r1(DisasContext *s, DisasOps *o) static void in1_r1_o(DisasContext *s, DisasOps *o) { - o->in1 = regs[get_field(s, r1)]; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in1 = tcg_ctx->regs[get_field(s, r1)]; o->g_in1 = true; } #define SPEC_in1_r1_o 0 @@ -5814,7 +5805,7 @@ static void in1_r1_32s(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in1 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_ext32s_i64(tcg_ctx, o->in1, regs[get_field(s, r1)]); + tcg_gen_ext32s_i64(tcg_ctx, o->in1, tcg_ctx->regs[get_field(s, r1)]); } #define SPEC_in1_r1_32s 0 @@ -5822,7 +5813,7 @@ static void in1_r1_32u(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in1 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_ext32u_i64(tcg_ctx, o->in1, regs[get_field(s, r1)]); + tcg_gen_ext32u_i64(tcg_ctx, o->in1, tcg_ctx->regs[get_field(s, r1)]); } #define SPEC_in1_r1_32u 0 @@ -5830,7 +5821,7 @@ static void in1_r1_sr32(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in1 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_shri_i64(tcg_ctx, o->in1, regs[get_field(s, r1)], 32); + tcg_gen_shri_i64(tcg_ctx, o->in1, tcg_ctx->regs[get_field(s, r1)], 32); } #define SPEC_in1_r1_sr32 0 @@ -5845,7 +5836,7 @@ static void in1_r1p1_32s(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in1 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_ext32s_i64(tcg_ctx, o->in1, regs[get_field(s, r1) + 1]); + tcg_gen_ext32s_i64(tcg_ctx, o->in1, tcg_ctx->regs[get_field(s, r1) + 1]); } #define SPEC_in1_r1p1_32s SPEC_r1_even @@ -5853,7 +5844,7 @@ static void in1_r1p1_32u(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in1 = 
tcg_temp_new_i64(tcg_ctx); - tcg_gen_ext32u_i64(tcg_ctx, o->in1, regs[get_field(s, r1) + 1]); + tcg_gen_ext32u_i64(tcg_ctx, o->in1, tcg_ctx->regs[get_field(s, r1) + 1]); } #define SPEC_in1_r1p1_32u SPEC_r1_even @@ -5862,7 +5853,7 @@ static void in1_r1_D32(DisasContext *s, DisasOps *o) TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = get_field(s, r1); o->in1 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_concat32_i64(tcg_ctx, o->in1, regs[r1 + 1], regs[r1]); + tcg_gen_concat32_i64(tcg_ctx, o->in1, tcg_ctx->regs[r1 + 1], tcg_ctx->regs[r1]); } #define SPEC_in1_r1_D32 SPEC_r1_even @@ -5877,7 +5868,7 @@ static void in1_r2_sr32(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in1 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_shri_i64(tcg_ctx, o->in1, regs[get_field(s, r2)], 32); + tcg_gen_shri_i64(tcg_ctx, o->in1, tcg_ctx->regs[get_field(s, r2)], 32); } #define SPEC_in1_r2_sr32 0 @@ -5890,7 +5881,8 @@ static void in1_r3(DisasContext *s, DisasOps *o) static void in1_r3_o(DisasContext *s, DisasOps *o) { - o->in1 = regs[get_field(s, r3)]; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in1 = tcg_ctx->regs[get_field(s, r3)]; o->g_in1 = true; } #define SPEC_in1_r3_o 0 @@ -5899,7 +5891,7 @@ static void in1_r3_32s(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in1 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_ext32s_i64(tcg_ctx, o->in1, regs[get_field(s, r3)]); + tcg_gen_ext32s_i64(tcg_ctx, o->in1, tcg_ctx->regs[get_field(s, r3)]); } #define SPEC_in1_r3_32s 0 @@ -5907,7 +5899,7 @@ static void in1_r3_32u(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in1 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_ext32u_i64(tcg_ctx, o->in1, regs[get_field(s, r3)]); + tcg_gen_ext32u_i64(tcg_ctx, o->in1, tcg_ctx->regs[get_field(s, r3)]); } #define SPEC_in1_r3_32u 0 @@ -5916,7 +5908,7 @@ static void in1_r3_D32(DisasContext *s, DisasOps *o) TCGContext *tcg_ctx = s->uc->tcg_ctx; int r3 = get_field(s, r3); o->in1 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_concat32_i64(tcg_ctx, o->in1, regs[r3 + 1], regs[r3]); + tcg_gen_concat32_i64(tcg_ctx, o->in1, tcg_ctx->regs[r3 + 1], tcg_ctx->regs[r3]); } #define SPEC_in1_r3_D32 SPEC_r3_even @@ -6021,7 +6013,8 @@ static void in1_m1_64(DisasContext *s, DisasOps *o) static void in2_r1_o(DisasContext *s, DisasOps *o) { - o->in2 = regs[get_field(s, r1)]; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in2 = tcg_ctx->regs[get_field(s, r1)]; o->g_in2 = true; } #define SPEC_in2_r1_o 0 @@ -6030,7 +6023,7 @@ static void in2_r1_16u(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in2 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_ext16u_i64(tcg_ctx, o->in2, regs[get_field(s, r1)]); + tcg_gen_ext16u_i64(tcg_ctx, o->in2, tcg_ctx->regs[get_field(s, r1)]); } #define SPEC_in2_r1_16u 0 @@ -6038,7 +6031,7 @@ static void in2_r1_32u(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in2 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_ext32u_i64(tcg_ctx, o->in2, regs[get_field(s, r1)]); + tcg_gen_ext32u_i64(tcg_ctx, o->in2, tcg_ctx->regs[get_field(s, r1)]); } #define SPEC_in2_r1_32u 0 @@ -6047,7 +6040,7 @@ static void in2_r1_D32(DisasContext *s, DisasOps *o) TCGContext *tcg_ctx = s->uc->tcg_ctx; int r1 = get_field(s, r1); o->in2 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_concat32_i64(tcg_ctx, o->in2, regs[r1 + 1], regs[r1]); + tcg_gen_concat32_i64(tcg_ctx, o->in2, tcg_ctx->regs[r1 + 1], tcg_ctx->regs[r1]); } #define SPEC_in2_r1_D32 SPEC_r1_even @@ -6060,7 +6053,8 @@ static void in2_r2(DisasContext *s, DisasOps *o) static void 
in2_r2_o(DisasContext *s, DisasOps *o) { - o->in2 = regs[get_field(s, r2)]; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + o->in2 = tcg_ctx->regs[get_field(s, r2)]; o->g_in2 = true; } #define SPEC_in2_r2_o 0 @@ -6079,7 +6073,7 @@ static void in2_r2_8s(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in2 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_ext8s_i64(tcg_ctx, o->in2, regs[get_field(s, r2)]); + tcg_gen_ext8s_i64(tcg_ctx, o->in2, tcg_ctx->regs[get_field(s, r2)]); } #define SPEC_in2_r2_8s 0 @@ -6087,7 +6081,7 @@ static void in2_r2_8u(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in2 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_ext8u_i64(tcg_ctx, o->in2, regs[get_field(s, r2)]); + tcg_gen_ext8u_i64(tcg_ctx, o->in2, tcg_ctx->regs[get_field(s, r2)]); } #define SPEC_in2_r2_8u 0 @@ -6095,7 +6089,7 @@ static void in2_r2_16s(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in2 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_ext16s_i64(tcg_ctx, o->in2, regs[get_field(s, r2)]); + tcg_gen_ext16s_i64(tcg_ctx, o->in2, tcg_ctx->regs[get_field(s, r2)]); } #define SPEC_in2_r2_16s 0 @@ -6103,7 +6097,7 @@ static void in2_r2_16u(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in2 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_ext16u_i64(tcg_ctx, o->in2, regs[get_field(s, r2)]); + tcg_gen_ext16u_i64(tcg_ctx, o->in2, tcg_ctx->regs[get_field(s, r2)]); } #define SPEC_in2_r2_16u 0 @@ -6118,7 +6112,7 @@ static void in2_r3_sr32(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in2 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_shri_i64(tcg_ctx, o->in2, regs[get_field(s, r3)], 32); + tcg_gen_shri_i64(tcg_ctx, o->in2, tcg_ctx->regs[get_field(s, r3)], 32); } #define SPEC_in2_r3_sr32 0 @@ -6126,7 +6120,7 @@ static void in2_r3_32u(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in2 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_ext32u_i64(tcg_ctx, o->in2, regs[get_field(s, r3)]); + tcg_gen_ext32u_i64(tcg_ctx, o->in2, tcg_ctx->regs[get_field(s, r3)]); } #define SPEC_in2_r3_32u 0 @@ -6134,7 +6128,7 @@ static void in2_r2_32s(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in2 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_ext32s_i64(tcg_ctx, o->in2, regs[get_field(s, r2)]); + tcg_gen_ext32s_i64(tcg_ctx, o->in2, tcg_ctx->regs[get_field(s, r2)]); } #define SPEC_in2_r2_32s 0 @@ -6142,7 +6136,7 @@ static void in2_r2_32u(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in2 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_ext32u_i64(tcg_ctx, o->in2, regs[get_field(s, r2)]); + tcg_gen_ext32u_i64(tcg_ctx, o->in2, tcg_ctx->regs[get_field(s, r2)]); } #define SPEC_in2_r2_32u 0 @@ -6150,7 +6144,7 @@ static void in2_r2_sr32(DisasContext *s, DisasOps *o) { TCGContext *tcg_ctx = s->uc->tcg_ctx; o->in2 = tcg_temp_new_i64(tcg_ctx); - tcg_gen_shri_i64(tcg_ctx, o->in2, regs[get_field(s, r2)], 32); + tcg_gen_shri_i64(tcg_ctx, o->in2, tcg_ctx->regs[get_field(s, r2)], 32); } #define SPEC_in2_r2_sr32 0 @@ -6781,7 +6775,7 @@ static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s) if (s->base.tb->flags & FLAG_MASK_PER) { /* An exception might be triggered, save PSW if not already done. */ if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) { - tcg_gen_movi_i64(tcg_ctx, psw_addr, s->pc_tmp); + tcg_gen_movi_i64(tcg_ctx, tcg_ctx->psw_addr, s->pc_tmp); } /* Call the helper to check for a possible PER exception. 
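(PER tracing may raise a program exception after the instruction, so the PSW has to be committed before the helper runs.)

Taken together, the translate.c hunks above apply one mechanical rule: every TCG value that used to live in a file-scope static — cc_op, cc_src, cc_dst, psw_addr, psw_mask and the regs[] array — becomes a field of the per-instance TCGContext, reached through s->uc->tcg_ctx. A minimal before/after sketch of the pattern (schematic, not a literal excerpt of the file):

    /* before: one set of translator globals for the whole process */
    static TCGv_i64 psw_addr;
    static TCGv_i32 cc_op;
    static TCGv_i64 regs[16];

    tcg_gen_movi_i64(tcg_ctx, psw_addr, dest);

    /* after: the same values hang off the engine's TCGContext */
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    tcg_gen_movi_i64(tcg_ctx, tcg_ctx->psw_addr, dest);

With the globals gone, two uc_engine instances can translate s390x code side by side without trampling each other's PSW and condition-code temporaries — the item the TODO update just below ticks off.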
*/ From 607aff44a2e78c010e3525b2a430b5a367779256 Mon Sep 17 00:00:00 2001 From: Nguyen Anh Quynh Date: Wed, 8 Dec 2021 10:00:57 +0800 Subject: [PATCH 06/38] Update TODO-s390 --- TODO-s390 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/TODO-s390 b/TODO-s390 index f40f67ec..5e5402c4 100644 --- a/TODO-s390 +++ b/TODO-s390 @@ -7,7 +7,7 @@ Todo: - fix qemu/target/s390x/cpu.c, so sample_s390x works - enable building all arch to fix conflicts -- remove all static vars in qemu/target/s390x/translate.c +- remove all static vars in qemu/target/s390x/translate.c [DONE in branch "static-vars"] - support more registers in qemu/target/s390x/unicorn.c - storage-keys needed? - find & fix potential memory leaking with valgrind From e977f8181388a2964db690480c9de9807975edf6 Mon Sep 17 00:00:00 2001 From: mio Date: Sun, 26 Dec 2021 23:09:25 +0100 Subject: [PATCH 07/38] Make s390x build --- qemu/s390x.h | 5 ++++- qemu/target/s390x/cpu.c | 1 + qemu/target/s390x/helper.h | 1 + qemu/target/s390x/translate.c | 2 +- 4 files changed, 7 insertions(+), 2 deletions(-) diff --git a/qemu/s390x.h b/qemu/s390x.h index ffa6c59e..b14ea4cf 100644 --- a/qemu/s390x.h +++ b/qemu/s390x.h @@ -4,6 +4,9 @@ #ifndef UNICORN_ARCH_POSTFIX #define UNICORN_ARCH_POSTFIX _s390x #endif +#define uc_add_inline_hook uc_add_inline_hook_s390x +#define uc_del_inline_hook uc_del_inline_hook_s390x +#define tb_invalidate_phys_range tb_invalidate_phys_range_s390x #define use_idiv_instructions use_idiv_instructions_s390x #define arm_arch arm_arch_s390x #define tb_target_set_jmp_target tb_target_set_jmp_target_s390x @@ -48,7 +51,7 @@ #define address_space_dispatch_compact address_space_dispatch_compact_s390x #define flatview_translate flatview_translate_s390x #define address_space_translate_for_iotlb address_space_translate_for_iotlb_s390x -//#define qemu_get_cpu qemu_get_cpu_s390x +#define qemu_get_cpu qemu_get_cpu_s390x #define cpu_address_space_init cpu_address_space_init_s390x #define cpu_get_address_space cpu_get_address_space_s390x #define cpu_exec_unrealizefn cpu_exec_unrealizefn_s390x diff --git a/qemu/target/s390x/cpu.c b/qemu/target/s390x/cpu.c index 9f2fbf34..9573cb27 100644 --- a/qemu/target/s390x/cpu.c +++ b/qemu/target/s390x/cpu.c @@ -27,6 +27,7 @@ #include "sysemu/sysemu.h" #include "sysemu/tcg.h" #include "fpu/softfloat-helpers.h" +#include "exec/exec-all.h" #define CR0_RESET 0xE0UL #define CR14_RESET 0xC2000000UL; diff --git a/qemu/target/s390x/helper.h b/qemu/target/s390x/helper.h index 43eabb81..2ab70698 100644 --- a/qemu/target/s390x/helper.h +++ b/qemu/target/s390x/helper.h @@ -1,4 +1,5 @@ DEF_HELPER_4(uc_tracecode, void, i32, i32, ptr, i64) +DEF_HELPER_6(uc_traceopcode, void, ptr, i64, i64, i32, ptr, i64) DEF_HELPER_2(exception, noreturn, env, i32) DEF_HELPER_2(data_exception, noreturn, env, i32) diff --git a/qemu/target/s390x/translate.c b/qemu/target/s390x/translate.c index 5ec72e3f..79b83b0b 100644 --- a/qemu/target/s390x/translate.c +++ b/qemu/target/s390x/translate.c @@ -6844,7 +6844,7 @@ static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) DisasContext *dc = container_of(dcbase, DisasContext, base); // Unicorn: end address tells us to stop emulation - if (dcbase->pc_next == dc->uc->addr_end) { + if (uc_addr_is_exit(dc->uc, dcbase->pc_next)) { // imitate PGM exception to halt emulation dcbase->is_jmp = DISAS_UNICORN_HALT; } else { From a38151bf77757715c1ed519a48bbc04dd96bc727 Mon Sep 17 00:00:00 2001 From: mio Date: Mon, 27 Dec 2021 23:19:17 +0100 Subject: [PATCH 08/38] Make s390x skey 
work --- include/unicorn/s390x.h | 42 +++++ qemu/hw/s390x/s390-skeys.c | 228 +++++---------------------- qemu/include/hw/s390x/storage-keys.h | 21 ++- qemu/target/s390x/cpu.c | 32 ++-- qemu/target/s390x/cpu.h | 1 + qemu/target/s390x/cpu_models.c | 47 ++---- qemu/target/s390x/cpu_models.h | 6 + qemu/target/s390x/mmu_helper.c | 10 +- qemu/target/s390x/unicorn.c | 7 + 9 files changed, 144 insertions(+), 250 deletions(-) diff --git a/include/unicorn/s390x.h b/include/unicorn/s390x.h index 7cf3f8c4..68bb0210 100644 --- a/include/unicorn/s390x.h +++ b/include/unicorn/s390x.h @@ -12,6 +12,48 @@ extern "C" { #pragma warning(disable : 4201) #endif +//> S390X CPU +typedef enum uc_cpu_s390x { + UC_CPU_S390X_Z900 = 0, + UC_CPU_S390X_Z900_2, + UC_CPU_S390X_Z900_3, + UC_CPU_S390X_Z800, + UC_CPU_S390X_Z990, + UC_CPU_S390X_Z990_2, + UC_CPU_S390X_Z990_3, + UC_CPU_S390X_Z890, + UC_CPU_S390X_Z990_4, + UC_CPU_S390X_Z890_2, + UC_CPU_S390X_Z990_5, + UC_CPU_S390X_Z890_3, + UC_CPU_S390X_Z9EC, + UC_CPU_S390X_Z9EC_2, + UC_CPU_S390X_Z9BC, + UC_CPU_S390X_Z9EC_3, + UC_CPU_S390X_Z9BC_2, + UC_CPU_S390X_Z10EC, + UC_CPU_S390X_Z10EC_2, + UC_CPU_S390X_Z10BC, + UC_CPU_S390X_Z10EC_3, + UC_CPU_S390X_Z10BC_2, + UC_CPU_S390X_Z196, + UC_CPU_S390X_Z196_2, + UC_CPU_S390X_Z114, + UC_CPU_S390X_ZEC12, + UC_CPU_S390X_ZEC12_2, + UC_CPU_S390X_ZBC12, + UC_CPU_S390X_Z13, + UC_CPU_S390X_Z13_2, + UC_CPU_S390X_Z13S, + UC_CPU_S390X_Z14, + UC_CPU_S390X_Z14_2, + UC_CPU_S390X_Z14ZR1, + UC_CPU_S390X_GEN15A, + UC_CPU_S390X_GEN15B, + UC_CPU_S390X_QEMU, + UC_CPU_S390X_MAX +} uc_cpu_s390x; + //> S390X registers typedef enum uc_s390x_reg { UC_S390X_REG_INVALID = 0, diff --git a/qemu/hw/s390x/s390-skeys.c b/qemu/hw/s390x/s390-skeys.c index 3f943f14..04985d7b 100644 --- a/qemu/hw/s390x/s390-skeys.c +++ b/qemu/hw/s390x/s390-skeys.c @@ -19,34 +19,39 @@ #define S390_SKEYS_SAVE_FLAG_SKEYS 0x02 #define S390_SKEYS_SAVE_FLAG_ERROR 0x04 -#if 0 -S390SKeysState *s390_get_skeys_device(void) -{ - S390SKeysState *ss; +static void s390_skeys_class_init(uc_engine *uc, S390SKeysClass* class); +static void qemu_s390_skeys_class_init(uc_engine *uc, S390SKeysClass* skeyclass); +static void s390_skeys_instance_init(uc_engine *uc, S390SKeysState* ss); +static void qemu_s390_skeys_init(uc_engine *uc, QEMUS390SKeysState *skey); - ss = S390_SKEYS(object_resolve_path_type("", TYPE_S390_SKEYS, NULL)); - assert(ss); - return ss; +S390SKeysState *s390_get_skeys_device(uc_engine *uc) +{ + S390CPU *cpu = S390_CPU(uc->cpu); + + return (S390SKeysState*)&cpu->ss; } -void s390_skeys_init(void) +void s390_skeys_init(uc_engine *uc) { - Object *obj; + S390CPU *cpu = S390_CPU(uc->cpu); - obj = object_new(TYPE_QEMU_S390_SKEYS); - object_property_add_child(qdev_get_machine(), TYPE_S390_SKEYS, - obj, NULL); - object_unref(obj); + s390_skeys_class_init(uc, &cpu->skey); + qemu_s390_skeys_class_init(uc, &cpu->skey); - qdev_init_nofail(DEVICE(obj)); + s390_skeys_instance_init(uc, (S390SKeysState*)&cpu->ss); + qemu_s390_skeys_init(uc, &cpu->ss); + + cpu->ss.class = &cpu->skey; } -static void qemu_s390_skeys_init(Object *obj) +static void qemu_s390_skeys_init(uc_engine *uc, QEMUS390SKeysState *skeys) { - QEMUS390SKeysState *skeys = QEMU_S390_SKEYS(obj); - MachineState *machine = MACHINE(qdev_get_machine()); + //QEMUS390SKeysState *skeys = QEMU_S390_SKEYS(obj); + //MachineState *machine = MACHINE(qdev_get_machine()); - skeys->key_count = machine->ram_size / TARGET_PAGE_SIZE; + //skeys->key_count = machine->ram_size / TARGET_PAGE_SIZE; + // Unicorn: Allow users to configure this value? 
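+ // 512 MiB (0x20000000) at the 4 KiB s390x page size = 0x20000 one-byte keys.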
+ skeys->key_count = 0x20000000 / TARGET_PAGE_SIZE; skeys->keydata = g_malloc0(skeys->key_count); } @@ -68,9 +73,9 @@ static int qemu_s390_skeys_set(S390SKeysState *ss, uint64_t start_gfn, /* Check for uint64 overflow and access beyond end of key data */ if (start_gfn + count > skeydev->key_count || start_gfn + count < count) { - error_report("Error: Setting storage keys for page beyond the end " - "of memory: gfn=%" PRIx64 " count=%" PRId64, - start_gfn, count); + // error_report("Error: Setting storage keys for page beyond the end " + // "of memory: gfn=%" PRIx64 " count=%" PRId64, + // start_gfn, count); return -EINVAL; } @@ -88,9 +93,9 @@ static int qemu_s390_skeys_get(S390SKeysState *ss, uint64_t start_gfn, /* Check for uint64 overflow and access beyond end of key data */ if (start_gfn + count > skeydev->key_count || start_gfn + count < count) { - error_report("Error: Getting storage keys for page beyond the end " - "of memory: gfn=%" PRIx64 " count=%" PRId64, - start_gfn, count); + // error_report("Error: Getting storage keys for page beyond the end " + // "of memory: gfn=%" PRIx64 " count=%" PRId64, + // start_gfn, count); return -EINVAL; } @@ -100,183 +105,28 @@ static int qemu_s390_skeys_get(S390SKeysState *ss, uint64_t start_gfn, return 0; } -static void qemu_s390_skeys_class_init(ObjectClass *oc, void *data) +static void qemu_s390_skeys_class_init(uc_engine *uc, S390SKeysClass* skeyclass) { - S390SKeysClass *skeyclass = S390_SKEYS_CLASS(oc); - DeviceClass *dc = DEVICE_CLASS(oc); + // S390SKeysClass *skeyclass = S390_SKEYS_CLASS(oc); + // DeviceClass *dc = DEVICE_CLASS(oc); skeyclass->skeys_enabled = qemu_s390_skeys_enabled; skeyclass->get_skeys = qemu_s390_skeys_get; skeyclass->set_skeys = qemu_s390_skeys_set; /* Reason: Internal device (only one skeys device for the whole memory) */ - dc->user_creatable = false; + // dc->user_creatable = false; } -static const TypeInfo qemu_s390_skeys_info = { - .name = TYPE_QEMU_S390_SKEYS, - .parent = TYPE_S390_SKEYS, - .instance_init = qemu_s390_skeys_init, - .instance_size = sizeof(QEMUS390SKeysState), - .class_init = qemu_s390_skeys_class_init, - .class_size = sizeof(S390SKeysClass), -}; - -static void write_keys(FILE *f, uint8_t *keys, uint64_t startgfn, - uint64_t count, Error **errp) +static void s390_skeys_instance_init(uc_engine *uc, S390SKeysState* ss) { - uint64_t curpage = startgfn; - uint64_t maxpage = curpage + count - 1; - - for (; curpage <= maxpage; curpage++) { - uint8_t acc = (*keys & 0xF0) >> 4; - int fp = (*keys & 0x08); - int ref = (*keys & 0x04); - int ch = (*keys & 0x02); - int res = (*keys & 0x01); - - fprintf(f, "page=%03" PRIx64 ": key(%d) => ACC=%X, FP=%d, REF=%d," - " ch=%d, reserved=%d\n", - curpage, *keys, acc, fp, ref, ch, res); - keys++; - } + ss->migration_enabled = true; } -static void s390_storage_keys_save(QEMUFile *f, void *opaque) +static void s390_skeys_class_init(uc_engine *uc, S390SKeysClass* class) { - S390SKeysState *ss = S390_SKEYS(opaque); - S390SKeysClass *skeyclass = S390_SKEYS_GET_CLASS(ss); - uint64_t pages_left = ram_size / TARGET_PAGE_SIZE; - uint64_t read_count, eos = S390_SKEYS_SAVE_FLAG_EOS; - vaddr cur_gfn = 0; - int error = 0; - uint8_t *buf; + // DeviceClass *dc = DEVICE_CLASS(oc); - if (!skeyclass->skeys_enabled(ss)) { - goto end_stream; - } - - buf = g_try_malloc(S390_SKEYS_BUFFER_SIZE); - if (!buf) { - error_report("storage key save could not allocate memory"); - goto end_stream; - } - - /* We only support initial memory. Standby memory is not handled yet. 
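(Everything from here down is migration-stream plumbing — the save/load handlers plus the QOM type registrations — and Unicorn never serializes guest state, so the patch deletes it outright rather than stubbing it.)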
*/ - qemu_put_be64(f, (cur_gfn * TARGET_PAGE_SIZE) | S390_SKEYS_SAVE_FLAG_SKEYS); - qemu_put_be64(f, pages_left); - - while (pages_left) { - read_count = MIN(pages_left, S390_SKEYS_BUFFER_SIZE); - - if (!error) { - error = skeyclass->get_skeys(ss, cur_gfn, read_count, buf); - if (error) { - /* - * If error: we want to fill the stream with valid data instead - * of stopping early so we pad the stream with 0x00 values and - * use S390_SKEYS_SAVE_FLAG_ERROR to indicate failure to the - * reading side. - */ - error_report("S390_GET_KEYS error %d", error); - memset(buf, 0, S390_SKEYS_BUFFER_SIZE); - eos = S390_SKEYS_SAVE_FLAG_ERROR; - } - } - - qemu_put_buffer(f, buf, read_count); - cur_gfn += read_count; - pages_left -= read_count; - } - - g_free(buf); -end_stream: - qemu_put_be64(f, eos); + // dc->hotpluggable = false; + // set_bit(DEVICE_CATEGORY_MISC, dc->categories); } - -static int s390_storage_keys_load(QEMUFile *f, void *opaque, int version_id) -{ - S390SKeysState *ss = S390_SKEYS(opaque); - S390SKeysClass *skeyclass = S390_SKEYS_GET_CLASS(ss); - int ret = 0; - - while (!ret) { - ram_addr_t addr; - int flags; - - addr = qemu_get_be64(f); - flags = addr & ~TARGET_PAGE_MASK; - addr &= TARGET_PAGE_MASK; - - switch (flags) { - case S390_SKEYS_SAVE_FLAG_SKEYS: { - const uint64_t total_count = qemu_get_be64(f); - uint64_t handled_count = 0, cur_count; - uint64_t cur_gfn = addr / TARGET_PAGE_SIZE; - uint8_t *buf = g_try_malloc(S390_SKEYS_BUFFER_SIZE); - - if (!buf) { - error_report("storage key load could not allocate memory"); - ret = -ENOMEM; - break; - } - - while (handled_count < total_count) { - cur_count = MIN(total_count - handled_count, - S390_SKEYS_BUFFER_SIZE); - qemu_get_buffer(f, buf, cur_count); - - ret = skeyclass->set_skeys(ss, cur_gfn, cur_count, buf); - if (ret < 0) { - error_report("S390_SET_KEYS error %d", ret); - break; - } - handled_count += cur_count; - cur_gfn += cur_count; - } - g_free(buf); - break; - } - case S390_SKEYS_SAVE_FLAG_ERROR: { - error_report("Storage key data is incomplete"); - ret = -EINVAL; - break; - } - case S390_SKEYS_SAVE_FLAG_EOS: - /* normal exit */ - return 0; - default: - error_report("Unexpected storage key flag data: %#x", flags); - ret = -EINVAL; - } - } - - return ret; -} - -static void s390_skeys_class_init(ObjectClass *oc, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(oc); - - dc->hotpluggable = false; - set_bit(DEVICE_CATEGORY_MISC, dc->categories); -} - -static const TypeInfo s390_skeys_info = { - .name = TYPE_S390_SKEYS, - .parent = TYPE_DEVICE, - .instance_init = NULL, - .instance_size = sizeof(S390SKeysState), - .class_init = s390_skeys_class_init, - .class_size = sizeof(S390SKeysClass), - .abstract = true, -}; - -static void qemu_s390_skeys_register_types(void) -{ - type_register_static(&s390_skeys_info); - type_register_static(&qemu_s390_skeys_info); -} - -type_init(qemu_s390_skeys_register_types) -#endif diff --git a/qemu/include/hw/s390x/storage-keys.h b/qemu/include/hw/s390x/storage-keys.h index 427b6774..811d616e 100644 --- a/qemu/include/hw/s390x/storage-keys.h +++ b/qemu/include/hw/s390x/storage-keys.h @@ -12,6 +12,8 @@ #ifndef S390_STORAGE_KEYS_H #define S390_STORAGE_KEYS_H +#include "uc_priv.h" + /* #define TYPE_S390_SKEYS "s390-skeys" #define S390_SKEYS(obj) \ @@ -23,7 +25,8 @@ */ typedef struct S390SKeysState { - CPUState parent_obj; + //CPUState parent_obj; + bool migration_enabled; // Unicorn: Dummy struct member } S390SKeysState; /* @@ -36,11 +39,11 @@ typedef struct S390SKeysState { #define S390_SKEYS_GET_CLASS(obj) \ 
OBJECT_GET_CLASS(S390SKeysClass, (obj), TYPE_S390_SKEYS) */ -#define S390_SKEYS_GET_CLASS(obj) (&((S390CPU *)obj)->skey) +#define S390_SKEYS_GET_CLASS(obj) (((QEMUS390SKeysState *)obj)->class) typedef struct S390SKeysClass { - CPUClass parent_class; + //CPUClass parent_class; int (*skeys_enabled)(S390SKeysState *ks); int (*get_skeys)(S390SKeysState *ks, uint64_t start_gfn, uint64_t count, uint8_t *keys); @@ -50,17 +53,21 @@ typedef struct S390SKeysClass { #define TYPE_KVM_S390_SKEYS "s390-skeys-kvm" #define TYPE_QEMU_S390_SKEYS "s390-skeys-qemu" +// #define QEMU_S390_SKEYS(obj) \ +// OBJECT_CHECK(QEMUS390SKeysState, (obj), TYPE_QEMU_S390_SKEYS) #define QEMU_S390_SKEYS(obj) \ - OBJECT_CHECK(QEMUS390SKeysState, (obj), TYPE_QEMU_S390_SKEYS) - + (QEMUS390SKeysState*)(obj) typedef struct QEMUS390SKeysState { S390SKeysState parent_obj; uint8_t *keydata; uint32_t key_count; + + // Unicorn + S390SKeysClass *class; } QEMUS390SKeysState; -void s390_skeys_init(void); +void s390_skeys_init(uc_engine *uc); -S390SKeysState *s390_get_skeys_device(void); +S390SKeysState *s390_get_skeys_device(uc_engine *uc); #endif /* S390_STORAGE_KEYS_H */ diff --git a/qemu/target/s390x/cpu.c b/qemu/target/s390x/cpu.c index 9573cb27..dd824580 100644 --- a/qemu/target/s390x/cpu.c +++ b/qemu/target/s390x/cpu.c @@ -137,6 +137,8 @@ static void s390_cpu_initfn(struct uc_struct *uc, CPUState *obj) // cpu->env.cpu_timer = // timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_cpu_timer, cpu); s390_cpu_set_state(S390_CPU_STATE_STOPPED, cpu); + + cpu->env.uc = uc; } static unsigned s390_count_running_cpus(void) @@ -255,6 +257,13 @@ S390CPU *cpu_s390_init(struct uc_struct *uc, const char *cpu_model) return NULL; } + if (uc->cpu_model == INT_MAX) { + uc->cpu_model = 36; // qemu-s390x-cpu + } else if (uc->cpu_model >= 38) { + free(cpu); + return NULL; + } + cs = (CPUState *)cpu; cc = (CPUClass *)&cpu->cc; cs->cc = cc; @@ -267,29 +276,18 @@ S390CPU *cpu_s390_init(struct uc_struct *uc, const char *cpu_model) /* init CPUClass */ s390_cpu_class_init(uc, cc); + // init skeys + s390_skeys_init(uc); + + // init s390 models + s390_init_cpu_model(uc, uc->cpu_model); + /* init CPUState */ cpu_common_initfn(uc, cs); /* init CPU */ s390_cpu_initfn(uc, cs); - /* init specific CPU model */ -/* - for (i = 0; i < ARRAY_SIZE(cpu_models); i++) { - if (strcmp(cpu_model, cpu_models[i].name) == 0) { - cpu_models[i].initfn(cs); - - if (arm_cpus[i].class_init) { - arm_cpus[i].class_init(uc, cc, uc); - } - if (arm_cpus[i].initfn) { - arm_cpus[i].initfn(uc, cs); - } - break; - } - } -*/ - /* realize CPU */ s390_cpu_realizefn(uc, cs); diff --git a/qemu/target/s390x/cpu.h b/qemu/target/s390x/cpu.h index 2548d653..368b0ef7 100644 --- a/qemu/target/s390x/cpu.h +++ b/qemu/target/s390x/cpu.h @@ -166,6 +166,7 @@ struct S390CPU { // unicorn struct S390CPUClass cc; struct S390SKeysClass skey; + struct QEMUS390SKeysState ss; }; diff --git a/qemu/target/s390x/cpu_models.c b/qemu/target/s390x/cpu_models.c index a9bc83f7..37192fe0 100644 --- a/qemu/target/s390x/cpu_models.c +++ b/qemu/target/s390x/cpu_models.c @@ -409,7 +409,7 @@ static void s390_max_cpu_model_initfn(CPUState *obj) memcpy(cpu->model, max_model, sizeof(*cpu->model)); } -static void s390_cpu_model_finalize(CPUState *obj) +void s390_cpu_model_finalize(CPUState *obj) { S390CPU *cpu = S390_CPU(obj); @@ -424,7 +424,7 @@ static void s390_base_cpu_model_class_init(struct uc_struct *uc, CPUClass *oc, v /* all base models are migration safe */ xcc->cpu_def = (const S390CPUDef *) data; xcc->is_static = true; - //xcc->desc = 
xcc->cpu_def->desc; + // xcc->desc = xcc->cpu_def->desc; } static void s390_cpu_model_class_init(struct uc_struct *uc, CPUClass *oc, void *data) @@ -433,7 +433,8 @@ static void s390_cpu_model_class_init(struct uc_struct *uc, CPUClass *oc, void * /* model that can change between QEMU versions */ xcc->cpu_def = (const S390CPUDef *) data; - //xcc->desc = xcc->cpu_def->desc; + // xcc->is_migration_safe = true; + // xcc->desc = xcc->cpu_def->desc; } static void s390_qemu_cpu_model_class_init(struct uc_struct *uc, CPUClass *oc, void *data) @@ -524,9 +525,8 @@ static void init_ignored_base_feat(void) } } -static void register_types(void) +void s390_init_cpu_model(uc_engine *uc, uc_cpu_s390x cpu_model) { -#if 0 static const S390FeatInit qemu_latest_init = { S390_FEAT_LIST_QEMU_LATEST }; int i; @@ -547,33 +547,16 @@ static void register_types(void) s390_set_qemu_cpu_model(QEMU_MAX_CPU_TYPE, QEMU_MAX_CPU_GEN, QEMU_MAX_CPU_EC_GA, qemu_latest_init); - for (i = 0; i < ARRAY_SIZE(s390_cpu_defs); i++) { - char *base_name = s390_base_cpu_type_name(s390_cpu_defs[i].name); - TypeInfo ti_base = { - .name = base_name, - .parent = TYPE_S390_CPU, - .instance_init = s390_cpu_model_initfn, - .instance_finalize = s390_cpu_model_finalize, - .class_init = s390_base_cpu_model_class_init, - .class_data = (void *) &s390_cpu_defs[i], - }; - char *name = s390_cpu_type_name(s390_cpu_defs[i].name); - TypeInfo ti = { - .name = name, - .parent = TYPE_S390_CPU, - .instance_init = s390_cpu_model_initfn, - .instance_finalize = s390_cpu_model_finalize, - .class_init = s390_cpu_model_class_init, - .class_data = (void *) &s390_cpu_defs[i], - }; - - type_register_static(&ti_base); - type_register_static(&ti); - g_free(base_name); - g_free(name); + if (cpu_model < ARRAY_SIZE(s390_cpu_defs)) { + s390_base_cpu_model_class_init(uc, uc->cpu->cc, (void *) &s390_cpu_defs[cpu_model]); + s390_cpu_model_class_init(uc, uc->cpu->cc, (void *) &s390_cpu_defs[cpu_model]); + s390_cpu_model_initfn(uc->cpu); + } else if (cpu_model == UC_CPU_S390X_MAX) { + s390_max_cpu_model_class_init(uc, uc->cpu->cc, NULL); + s390_max_cpu_model_initfn(uc->cpu); + } else if (cpu_model == UC_CPU_S390X_QEMU) { + s390_qemu_cpu_model_class_init(uc, uc->cpu->cc, NULL); + s390_qemu_cpu_model_initfn(uc->cpu); } - type_register_static(&qemu_s390_cpu_type_info); - type_register_static(&max_s390_cpu_type_info); -#endif } diff --git a/qemu/target/s390x/cpu_models.h b/qemu/target/s390x/cpu_models.h index 23e53796..be3655ab 100644 --- a/qemu/target/s390x/cpu_models.h +++ b/qemu/target/s390x/cpu_models.h @@ -16,6 +16,8 @@ #include "cpu_features.h" #include "gen-features.h" #include "hw/core/cpu.h" +#include "unicorn/s390x.h" +#include "uc_priv.h" /* static CPU definition */ struct S390CPUDef { @@ -106,4 +108,8 @@ static inline uint64_t s390_cpuid_from_cpu_model(const S390CPUModel *model) S390CPUDef const *s390_find_cpu_def(uint16_t type, uint8_t gen, uint8_t ec_ga, S390FeatBitmap features); +void s390_init_cpu_model(uc_engine *uc, uc_cpu_s390x cpu_model); + +void s390_cpu_model_finalize(CPUState *obj); + #endif /* TARGET_S390X_CPU_MODELS_H */ diff --git a/qemu/target/s390x/mmu_helper.c b/qemu/target/s390x/mmu_helper.c index f9dc73e8..829dec55 100644 --- a/qemu/target/s390x/mmu_helper.c +++ b/qemu/target/s390x/mmu_helper.c @@ -282,7 +282,7 @@ static int mmu_translate_asce(CPUS390XState *env, target_ulong vaddr, return 0; } -static void mmu_handle_skey(target_ulong addr, int rw, int *flags) +static void mmu_handle_skey(uc_engine *uc, target_ulong addr, int rw, int *flags) { static 
S390SKeysClass *skeyclass; static S390SKeysState *ss; @@ -296,8 +296,8 @@ static void mmu_handle_skey(target_ulong addr, int rw, int *flags) #endif if (unlikely(!ss)) { - // ss = s390_get_skeys_device(); - // skeyclass = S390_SKEYS_GET_CLASS(ss); + ss = s390_get_skeys_device(uc); + skeyclass = S390_SKEYS_GET_CLASS(ss); } /* @@ -437,7 +437,7 @@ nodat: /* Convert real address -> absolute address */ *raddr = mmu_real2abs(env, *raddr); - mmu_handle_skey(*raddr, rw, flags); + mmu_handle_skey(env->uc, *raddr, rw, flags); return 0; } @@ -549,6 +549,6 @@ int mmu_translate_real(CPUS390XState *env, target_ulong raddr, int rw, *addr = mmu_real2abs(env, raddr & TARGET_PAGE_MASK); - mmu_handle_skey(*addr, rw, flags); + mmu_handle_skey(env->uc, *addr, rw, flags); return 0; } diff --git a/qemu/target/s390x/unicorn.c b/qemu/target/s390x/unicorn.c index df9731ec..5f1ace7f 100644 --- a/qemu/target/s390x/unicorn.c +++ b/qemu/target/s390x/unicorn.c @@ -33,6 +33,13 @@ static void s390_release(void *ctx) g_free(fast->table); } #endif + TCGContext *tcg_ctx = (TCGContext *)ctx; + S390CPU *cpu = (S390CPU *)tcg_ctx->uc->cpu; + + release_common(ctx); + + s390_cpu_model_finalize(cpu); + // TODO: Anymore to free? } void s390_reg_reset(struct uc_struct *uc) From 034a1aa5f2f9968a5c3c90c25f6c1a52ee7ad79a Mon Sep 17 00:00:00 2001 From: mio Date: Mon, 27 Dec 2021 23:48:20 +0100 Subject: [PATCH 09/38] Make s390x stopping mechanism work --- qemu/s390x.h | 1 + qemu/target/s390x/excp_helper.c | 9 +++++++++ qemu/target/s390x/helper.h | 1 + qemu/target/s390x/translate.c | 9 +++++---- qemu/target/s390x/unicorn.c | 2 +- samples/sample_s390x.c | 3 ++- symbols.sh | 3 +++ 7 files changed, 22 insertions(+), 6 deletions(-) diff --git a/qemu/s390x.h b/qemu/s390x.h index b14ea4cf..4de8fd9d 100644 --- a/qemu/s390x.h +++ b/qemu/s390x.h @@ -1277,4 +1277,5 @@ #define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_s390x #define gen_helper_cpsr_read gen_helper_cpsr_read_s390x #define gen_helper_cpsr_write gen_helper_cpsr_write_s390x +#define helper_uc_s390x_exit helper_uc_s390x_exit_s390x #endif diff --git a/qemu/target/s390x/excp_helper.c b/qemu/target/s390x/excp_helper.c index 83433271..30c61359 100644 --- a/qemu/target/s390x/excp_helper.c +++ b/qemu/target/s390x/excp_helper.c @@ -598,3 +598,12 @@ void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr, tcg_s390_program_interrupt(env, PGM_SPECIFICATION, retaddr); } + +void helper_uc_s390x_exit(CPUS390XState *env) +{ + CPUState *cs = env_cpu(env); + + cs->exception_index = EXCP_HLT; + cs->halted = 1; + cpu_loop_exit(cs); +} \ No newline at end of file diff --git a/qemu/target/s390x/helper.h b/qemu/target/s390x/helper.h index 2ab70698..abd8dd2a 100644 --- a/qemu/target/s390x/helper.h +++ b/qemu/target/s390x/helper.h @@ -1,5 +1,6 @@ DEF_HELPER_4(uc_tracecode, void, i32, i32, ptr, i64) DEF_HELPER_6(uc_traceopcode, void, ptr, i64, i64, i32, ptr, i64) +DEF_HELPER_1(uc_s390x_exit, void, env) DEF_HELPER_2(exception, noreturn, env, i32) DEF_HELPER_2(data_exception, noreturn, env, i32) diff --git a/qemu/target/s390x/translate.c b/qemu/target/s390x/translate.c index 79b83b0b..3d819f23 100644 --- a/qemu/target/s390x/translate.c +++ b/qemu/target/s390x/translate.c @@ -6652,6 +6652,9 @@ static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s) /* Search for the insn in the table. */ insn = extract_insn(env, s); + /* Emit insn_start now that we know the ILEN. 
*/ + tcg_gen_insn_start(tcg_ctx, s->base.pc_next, s->cc_op, s->ilen); + // Unicorn: trace this instruction on request if (HOOK_EXISTS_BOUNDED(s->uc, UC_HOOK_CODE, s->base.pc_next)) { gen_uc_tracecode(tcg_ctx, s->ilen, UC_HOOK_CODE_IDX, s->uc, s->base.pc_next); @@ -6659,9 +6662,6 @@ static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s) check_exit_request(tcg_ctx); } - /* Emit insn_start now that we know the ILEN. */ - tcg_gen_insn_start(tcg_ctx, s->base.pc_next, s->cc_op, s->ilen); - /* Not found means unimplemented/illegal opcode. */ if (insn == NULL) { // qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n", @@ -6867,7 +6867,8 @@ static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) switch (dc->base.is_jmp) { case DISAS_UNICORN_HALT: - gen_exception(tcg_ctx, EXCP_PGM); + tcg_gen_insn_start(tcg_ctx, dc->base.pc_next, 0, 0); + gen_helper_uc_s390x_exit(tcg_ctx, tcg_ctx->cpu_env); break; case DISAS_GOTO_TB: case DISAS_NORETURN: diff --git a/qemu/target/s390x/unicorn.c b/qemu/target/s390x/unicorn.c index 5f1ace7f..9a718043 100644 --- a/qemu/target/s390x/unicorn.c +++ b/qemu/target/s390x/unicorn.c @@ -38,7 +38,7 @@ static void s390_release(void *ctx) release_common(ctx); - s390_cpu_model_finalize(cpu); + s390_cpu_model_finalize((CPUState *)cpu); // TODO: Anymore to free? } diff --git a/samples/sample_s390x.c b/samples/sample_s390x.c index da15965c..d8ac3ec5 100644 --- a/samples/sample_s390x.c +++ b/samples/sample_s390x.c @@ -65,7 +65,8 @@ static void test_s390x(void) // finishing all the code. err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(S390X_CODE) - 1, 0, 0); if (err) { - printf("Failed on uc_emu_start() with error returned: %u\n", err); + printf("Failed on uc_emu_start() with error returned: %u (%s)\n", err, + uc_strerror(err)); } // now print out some registers diff --git a/symbols.sh b/symbols.sh index 5ace5c89..32d615f1 100755 --- a/symbols.sh +++ b/symbols.sh @@ -6271,6 +6271,9 @@ ppc_irq_reset \ ppc64_SYMBOLS=${ppc_SYMBOLS} +s390x_SYMBOLS="helper_uc_s390x_exit \ +" + ARCHS="x86_64 arm armeb aarch64 aarch64eb riscv32 riscv64 mips mipsel mips64 mips64el sparc sparc64 m68k ppc ppc64 s390x" for arch in $ARCHS; do From 3e674718b4d813219a1645ecf0969cd3289ba14b Mon Sep 17 00:00:00 2001 From: mio Date: Mon, 27 Dec 2021 23:51:05 +0100 Subject: [PATCH 10/38] Update TODO --- TODO-s390 | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/TODO-s390 b/TODO-s390 index b5f45677..28726409 100644 --- a/TODO-s390 +++ b/TODO-s390 @@ -1,13 +1,9 @@ current status: -- only build s390x arch (see CMakeLists.txt) -- sample_s390x crash, due to qemu/target/s390x/cpu.c :: cpu_s390_init() still has bugs +- probably memory leak +- more registers Todo: -- fix qemu/target/s390x/cpu.c, so sample_s390x works -- enable building all arch to fix conflicts - support more registers in qemu/target/s390x/unicorn.c -- storage-keys needed? 
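Context for PATCH 09's stopping mechanism: a pending exit request, for example uc_emu_stop() called from a hook, is observed by check_exit_request() at the next instruction boundary; the translated block then ends in DISAS_UNICORN_HALT, and the new helper_uc_s390x_exit() halts the vCPU with EXCP_HLT so that uc_emu_start() returns cleanly instead of raising a program exception. A minimal caller-side sketch of that path follows; the addresses and the hook body are illustrative assumptions, while the API calls themselves are the public Unicorn interface:

    #include <unicorn/unicorn.h>

    /* Request a stop from inside a code hook: check_exit_request() picks the
     * request up at the next instruction and ends the block with
     * DISAS_UNICORN_HALT, which now calls helper_uc_s390x_exit(). */
    static void stop_hook(uc_engine *uc, uint64_t address, uint32_t size,
                          void *user_data)
    {
        uc_emu_stop(uc);
    }

    static void demo_stop(void)
    {
        const char code[] = "\x18\x23"; /* lr %r2, %r3 */
        uint64_t r3 = 0x7890;
        uc_engine *uc;
        uc_hook hook;

        uc_open(UC_ARCH_S390X, UC_MODE_BIG_ENDIAN, &uc);
        uc_mem_map(uc, 0x10000, 2 * 1024 * 1024, UC_PROT_ALL);
        uc_mem_write(uc, 0x10000, code, sizeof(code) - 1);
        uc_reg_write(uc, UC_S390X_REG_R3, &r3);
        uc_hook_add(uc, &hook, UC_HOOK_CODE, stop_hook, NULL, 1, 0);

        /* Returns after the first hooked instruction instead of hanging. */
        uc_emu_start(uc, 0x10000, 0x10000 + sizeof(code) - 1, 0, 0);
        uc_close(uc);
    }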
- find & fix potential memory leaking with valgrind -- sync with "dev" branch of github unicorn From a1e6d64118728f8351de4728426e7c3226c83e72 Mon Sep 17 00:00:00 2001 From: mio Date: Mon, 27 Dec 2021 23:55:24 +0100 Subject: [PATCH 11/38] Update TODO --- TODO-s390 | 1 + 1 file changed, 1 insertion(+) diff --git a/TODO-s390 b/TODO-s390 index 28726409..75470b0e 100644 --- a/TODO-s390 +++ b/TODO-s390 @@ -5,5 +5,6 @@ current status: Todo: +- win32(mingw32) build - support more registers in qemu/target/s390x/unicorn.c - find & fix potential memory leaking with valgrind From 849325b9c61654bf57b372fc2ec3481d9339fc78 Mon Sep 17 00:00:00 2001 From: mio Date: Mon, 27 Dec 2021 23:59:53 +0100 Subject: [PATCH 12/38] Add unit test for s390x --- tests/unit/test_s390x.c | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/tests/unit/test_s390x.c b/tests/unit/test_s390x.c index f699ff9f..354656d6 100644 --- a/tests/unit/test_s390x.c +++ b/tests/unit/test_s390x.c @@ -3,4 +3,32 @@ const uint64_t code_start = 0x1000; const uint64_t code_len = 0x4000; -TEST_LIST = {{NULL, NULL}}; +static void uc_common_setup(uc_engine **uc, uc_arch arch, uc_mode mode, + const char *code, uint64_t size) +{ + OK(uc_open(arch, mode, uc)); + OK(uc_mem_map(*uc, code_start, code_len, UC_PROT_ALL)); + OK(uc_mem_write(*uc, code_start, code, size)); +} + +static void test_s390x_lr() +{ + char code[] = "\x18\x23"; // lr %r2, %r3 + uint64_t r_r2, r_r3 = 0x114514; + uc_engine *uc; + + uc_common_setup(&uc, UC_ARCH_S390X, UC_MODE_BIG_ENDIAN, code, + sizeof(code) - 1); + + OK(uc_reg_write(uc, UC_S390X_REG_R3, &r_r3)); + + OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); + + OK(uc_reg_read(uc, UC_S390X_REG_R2, &r_r2)); + + TEST_CHECK(r_r2 == 0x114514); + + OK(uc_close(uc)); +} + +TEST_LIST = {{"test_s390x_lr", test_s390x_lr}}; From 8fc836c5fa82499aeb63dd4498fd6b1250d4e081 Mon Sep 17 00:00:00 2001 From: mio Date: Wed, 29 Dec 2021 23:10:21 +0100 Subject: [PATCH 13/38] Fix tests list not marked with NULL --- tests/unit/test_s390x.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/test_s390x.c b/tests/unit/test_s390x.c index 354656d6..98d8c3ed 100644 --- a/tests/unit/test_s390x.c +++ b/tests/unit/test_s390x.c @@ -31,4 +31,4 @@ static void test_s390x_lr() OK(uc_close(uc)); } -TEST_LIST = {{"test_s390x_lr", test_s390x_lr}}; +TEST_LIST = {{"test_s390x_lr", test_s390x_lr}, {NULL, NULL}}; From 298795a9f8558cd7ddf169d4182b26c3d3027981 Mon Sep 17 00:00:00 2001 From: mio Date: Wed, 29 Dec 2021 23:18:49 +0100 Subject: [PATCH 14/38] Fix build on MSVC --- qemu/include/qemu-common.h | 1 + 1 file changed, 1 insertion(+) diff --git a/qemu/include/qemu-common.h b/qemu/include/qemu-common.h index 1cda0555..30d897fd 100644 --- a/qemu/include/qemu-common.h +++ b/qemu/include/qemu-common.h @@ -11,6 +11,7 @@ #define QEMU_COMMON_H #include +#include #define TFR(expr) do { if ((expr) != -1) break; } while (errno == EINTR) From ab4ef2e1deb2d023aeb92d9e01cc039f62dc9fee Mon Sep 17 00:00:00 2001 From: mio Date: Thu, 30 Dec 2021 00:26:25 +0100 Subject: [PATCH 15/38] Fix MSVC build and remove warning about unused functions --- qemu/include/hw/s390x/ioinst.h | 8 ++++---- qemu/include/hw/s390x/sclp.h | 36 +++++++++++++++++----------------- qemu/target/s390x/cc_helper.c | 4 ++-- qemu/target/s390x/internal.h | 4 ++-- qemu/target/s390x/ioinst.c | 14 +++++++++---- 5 files changed, 36 insertions(+), 30 deletions(-) diff --git a/qemu/include/hw/s390x/ioinst.h 
b/qemu/include/hw/s390x/ioinst.h index c6737a30..389ed201 100644 --- a/qemu/include/hw/s390x/ioinst.h +++ b/qemu/include/hw/s390x/ioinst.h @@ -116,12 +116,12 @@ QEMU_BUILD_BUG_MSG(sizeof(PMCW) != 28, "size of PMCW is wrong"); #define PMCW_CHARS_MASK_INVALID 0xff1ffff8 /* subchannel information block */ -typedef struct SCHIB { +QEMU_PACK(typedef struct SCHIB { PMCW pmcw; SCSW scsw; uint64_t mba; uint8_t mda[4]; -} QEMU_PACKED SCHIB; +}) SCHIB; /* interruption response block */ typedef struct IRB { @@ -224,11 +224,11 @@ QEMU_BUILD_BUG_MSG(sizeof(CRW) != 4, "size of CRW is wrong"); #define CRW_RSC_CSS 0xb /* I/O interruption code */ -typedef struct IOIntCode { +QEMU_PACK(typedef struct IOIntCode { uint32_t subsys_id; uint32_t intparm; uint32_t interrupt_id; -} QEMU_PACKED IOIntCode; +}) IOIntCode; /* schid disintegration */ #define IOINST_SCHID_ONE(_schid) ((_schid & 0x00010000) >> 16) diff --git a/qemu/include/hw/s390x/sclp.h b/qemu/include/hw/s390x/sclp.h index 92084838..95b80233 100644 --- a/qemu/include/hw/s390x/sclp.h +++ b/qemu/include/hw/s390x/sclp.h @@ -90,27 +90,27 @@ * So we have to double check that all users of sclp data structures use the * right endianness wrappers. */ -typedef struct SCCBHeader { +QEMU_PACK(typedef struct SCCBHeader { uint16_t length; uint8_t function_code; uint8_t control_mask[3]; uint16_t response_code; -} QEMU_PACKED SCCBHeader; +}) SCCBHeader; #define SCCB_DATA_LEN (SCCB_SIZE - sizeof(SCCBHeader)) #define SCCB_CPU_FEATURE_LEN 6 /* CPU information */ -typedef struct CPUEntry { +QEMU_PACK(typedef struct CPUEntry { uint8_t address; uint8_t reserved0; uint8_t features[SCCB_CPU_FEATURE_LEN]; uint8_t reserved2[6]; uint8_t type; uint8_t reserved1; -} QEMU_PACKED CPUEntry; +}) CPUEntry; -typedef struct ReadInfo { +QEMU_PACK(typedef struct ReadInfo { SCCBHeader h; uint16_t rnmax; uint8_t rnsize; @@ -133,9 +133,9 @@ typedef struct ReadInfo { uint8_t _reserved5[124 - 122]; /* 122-123 */ uint32_t hmfai; struct CPUEntry entries[]; -} QEMU_PACKED ReadInfo; +}) ReadInfo; -typedef struct ReadCpuInfo { +QEMU_PACK(typedef struct ReadCpuInfo { SCCBHeader h; uint16_t nr_configured; /* 8-9 */ uint16_t offset_configured; /* 10-11 */ @@ -143,42 +143,42 @@ typedef struct ReadCpuInfo { uint16_t offset_standby; /* 14-15 */ uint8_t reserved0[24-16]; /* 16-23 */ struct CPUEntry entries[]; -} QEMU_PACKED ReadCpuInfo; +}) ReadCpuInfo; -typedef struct ReadStorageElementInfo { +QEMU_PACK(typedef struct ReadStorageElementInfo { SCCBHeader h; uint16_t max_id; uint16_t assigned; uint16_t standby; uint8_t _reserved0[16 - 14]; /* 14-15 */ uint32_t entries[]; -} QEMU_PACKED ReadStorageElementInfo; +}) ReadStorageElementInfo; -typedef struct AttachStorageElement { +QEMU_PACK(typedef struct AttachStorageElement { SCCBHeader h; uint8_t _reserved0[10 - 8]; /* 8-9 */ uint16_t assigned; uint8_t _reserved1[16 - 12]; /* 12-15 */ uint32_t entries[]; -} QEMU_PACKED AttachStorageElement; +}) AttachStorageElement; -typedef struct AssignStorage { +QEMU_PACK(typedef struct AssignStorage { SCCBHeader h; uint16_t rn; -} QEMU_PACKED AssignStorage; +}) AssignStorage; -typedef struct IoaCfgSccb { +QEMU_PACK(typedef struct IoaCfgSccb { SCCBHeader header; uint8_t atype; uint8_t reserved1; uint16_t reserved2; uint32_t aid; -} QEMU_PACKED IoaCfgSccb; +}) IoaCfgSccb; -typedef struct SCCB { +QEMU_PACK(typedef struct SCCB { SCCBHeader h; char data[SCCB_DATA_LEN]; - } QEMU_PACKED SCCB; +}) SCCB; #define TYPE_SCLP "sclp" #define SCLP(obj) OBJECT_CHECK(SCLPDevice, (obj), TYPE_SCLP) diff --git 
a/qemu/target/s390x/cc_helper.c b/qemu/target/s390x/cc_helper.c index 44731e4a..fe20830e 100644 --- a/qemu/target/s390x/cc_helper.c +++ b/qemu/target/s390x/cc_helper.c @@ -28,9 +28,9 @@ /* #define DEBUG_HELPER */ #ifdef DEBUG_HELPER -#define HELPER_LOG(x...) qemu_log(x) +#define HELPER_LOG(x, ...) qemu_log(x) #else -#define HELPER_LOG(x...) +#define HELPER_LOG(x, ...) #endif static uint32_t cc_calc_ltgt_32(int32_t src, int32_t dst) diff --git a/qemu/target/s390x/internal.h b/qemu/target/s390x/internal.h index fc497589..6bfd72e5 100644 --- a/qemu/target/s390x/internal.h +++ b/qemu/target/s390x/internal.h @@ -13,7 +13,7 @@ #include "cpu.h" #ifndef CONFIG_USER_ONLY -typedef struct LowCore { +QEMU_PACK(typedef struct LowCore { /* prefix area: defined by architecture */ uint32_t ccw1[2]; /* 0x000 */ uint32_t ccw2[4]; /* 0x008 */ @@ -91,7 +91,7 @@ typedef struct LowCore { /* align to the top of the prefix area */ uint8_t pad18[0x2000 - 0x1400]; /* 0x1400 */ -} QEMU_PACKED LowCore; +}) LowCore; QEMU_BUILD_BUG_ON(sizeof(LowCore) != 8192); #endif /* CONFIG_USER_ONLY */ diff --git a/qemu/target/s390x/ioinst.c b/qemu/target/s390x/ioinst.c index 9466411a..765c18b7 100644 --- a/qemu/target/s390x/ioinst.c +++ b/qemu/target/s390x/ioinst.c @@ -97,6 +97,7 @@ void ioinst_handle_hsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra) #endif } +#if 0 static int ioinst_schib_valid(SCHIB *schib) { if ((be16_to_cpu(schib->pmcw.flags) & PMCW_FLAGS_MASK_INVALID) || @@ -109,6 +110,7 @@ static int ioinst_schib_valid(SCHIB *schib) } return 1; } +#endif void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra) { @@ -144,6 +146,7 @@ void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra) #endif } +#if 0 static void copy_orb_from_guest(ORB *dest, const ORB *src) { dest->intparm = be32_to_cpu(src->intparm); @@ -168,6 +171,7 @@ static int ioinst_orb_valid(ORB *orb) } return 1; } +#endif void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra) { @@ -350,20 +354,20 @@ int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra) return 0; } -typedef struct ChscReq { +QEMU_PACK(typedef struct ChscReq { uint16_t len; uint16_t command; uint32_t param0; uint32_t param1; uint32_t param2; -} QEMU_PACKED ChscReq; +}) ChscReq; -typedef struct ChscResp { +QEMU_PACK(typedef struct ChscResp { uint16_t len; uint16_t code; uint32_t param; char data[]; -} QEMU_PACKED ChscResp; +}) ChscResp; #define CHSC_MIN_RESP_LEN 0x0008 @@ -546,6 +550,7 @@ out: #endif } +#if 0 static int chsc_sei_nt0_get_event(void *res) { /* no events yet */ @@ -557,6 +562,7 @@ static int chsc_sei_nt0_have_event(void) /* no events yet */ return 0; } +#endif #if 0 static int chsc_sei_nt2_get_event(void *res) From dc402d78ecc8f203a99a25627ac4046b23feef45 Mon Sep 17 00:00:00 2001 From: mio Date: Thu, 30 Dec 2021 00:28:24 +0100 Subject: [PATCH 16/38] Ignore QEMU_BUILD_BUG_MSG on MSVC --- qemu/include/qemu/compiler.h | 1 + 1 file changed, 1 insertion(+) diff --git a/qemu/include/qemu/compiler.h b/qemu/include/qemu/compiler.h index 66341749..971aa127 100644 --- a/qemu/include/qemu/compiler.h +++ b/qemu/include/qemu/compiler.h @@ -77,6 +77,7 @@ static union MSVC_FLOAT_HACK __NAN = {{0x00, 0x00, 0xC0, 0x7F}}; #define cat2(x,y) cat(x,y) #define QEMU_BUILD_BUG_ON(x) #define QEMU_BUILD_BUG_ON_ZERO(x) +#define QEMU_BUILD_BUG_MSG(x, msg) #define GCC_FMT_ATTR(n, m) From 03f9dd8b61e84a303252b7025709406eaddcac9c Mon Sep 17 00:00:00 2001 From: mio Date: Thu, 30 Dec 2021 00:42:13 +0100 Subject: [PATCH 
17/38] Expand case ranges to build on MSVC --- qemu/target/s390x/fpu_helper.c | 4 ++-- qemu/target/s390x/int_helper.c | 4 ++-- qemu/target/s390x/mem_helper.c | 20 +++++++++++++++---- qemu/target/s390x/misc_helper.c | 4 ++-- qemu/target/s390x/mmu_helper.c | 2 +- qemu/target/s390x/translate.c | 34 ++++++++++++++++++++++++++++++--- 6 files changed, 54 insertions(+), 14 deletions(-) diff --git a/qemu/target/s390x/fpu_helper.c b/qemu/target/s390x/fpu_helper.c index 1325bc57..0fc39d71 100644 --- a/qemu/target/s390x/fpu_helper.c +++ b/qemu/target/s390x/fpu_helper.c @@ -29,9 +29,9 @@ /* #define DEBUG_HELPER */ #ifdef DEBUG_HELPER -#define HELPER_LOG(x...) qemu_log(x) +#define HELPER_LOG(x, ...) qemu_log(x) #else -#define HELPER_LOG(x...) +#define HELPER_LOG(x, ...) #endif #define RET128(F) (env->retxl = F.low, F.high) diff --git a/qemu/target/s390x/int_helper.c b/qemu/target/s390x/int_helper.c index 658507dd..7bd6ac95 100644 --- a/qemu/target/s390x/int_helper.c +++ b/qemu/target/s390x/int_helper.c @@ -28,9 +28,9 @@ /* #define DEBUG_HELPER */ #ifdef DEBUG_HELPER -#define HELPER_LOG(x...) qemu_log(x) +#define HELPER_LOG(x, ...) qemu_log(x) #else -#define HELPER_LOG(x...) +#define HELPER_LOG(x, ...) #endif /* 64/32 -> 32 signed division */ diff --git a/qemu/target/s390x/mem_helper.c b/qemu/target/s390x/mem_helper.c index 3a313d51..92277634 100644 --- a/qemu/target/s390x/mem_helper.c +++ b/qemu/target/s390x/mem_helper.c @@ -36,9 +36,9 @@ /* #define DEBUG_HELPER */ #ifdef DEBUG_HELPER -#define HELPER_LOG(x...) qemu_log(x) +#define HELPER_LOG(x, ...) qemu_log(x) #else -#define HELPER_LOG(x...) +#define HELPER_LOG(x, ...) #endif static inline bool psw_key_valid(CPUS390XState *env, uint8_t psw_key) @@ -1425,7 +1425,9 @@ static inline uint32_t do_unpkau(CPUS390XState *env, uint64_t dest, switch (b & 0xf) { case 0xa: case 0xc: - case 0xe ... 0xf: + // case 0xe ... 0xf: + case 0xe: + case 0xf: cc = 0; /* plus */ break; case 0xb: @@ -1433,7 +1435,17 @@ static inline uint32_t do_unpkau(CPUS390XState *env, uint64_t dest, cc = 1; /* minus */ break; default: - case 0x0 ... 0x9: + // case 0x0 ... 0x9: + case 0x0: + case 0x1: + case 0x2: + case 0x3: + case 0x4: + case 0x5: + case 0x6: + case 0x7: + case 0x8: + case 0x9: cc = 3; /* invalid */ break; } diff --git a/qemu/target/s390x/misc_helper.c b/qemu/target/s390x/misc_helper.c index 53a34cc6..3f84403f 100644 --- a/qemu/target/s390x/misc_helper.c +++ b/qemu/target/s390x/misc_helper.c @@ -41,9 +41,9 @@ /* #define DEBUG_HELPER */ #ifdef DEBUG_HELPER -#define HELPER_LOG(x...) qemu_log(x) +#define HELPER_LOG(x, ...) qemu_log(x) #else -#define HELPER_LOG(x...) +#define HELPER_LOG(x, ...) #endif /* Raise an exception statically from a TB. */ diff --git a/qemu/target/s390x/mmu_helper.c b/qemu/target/s390x/mmu_helper.c index 829dec55..4a6bef89 100644 --- a/qemu/target/s390x/mmu_helper.c +++ b/qemu/target/s390x/mmu_helper.c @@ -508,7 +508,7 @@ int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf, cpu_physical_memory_rw(env_cpu(env)->as, pages[i] | (laddr & ~TARGET_PAGE_MASK), hostbuf, currlen, is_write); laddr += currlen; - hostbuf += currlen; + hostbuf = (void *)(((char *)hostbuf) + currlen); len -= currlen; } } diff --git a/qemu/target/s390x/translate.c b/qemu/target/s390x/translate.c index 3d819f23..9903b0ca 100644 --- a/qemu/target/s390x/translate.c +++ b/qemu/target/s390x/translate.c @@ -6595,14 +6595,42 @@ static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s) break; case 0xc5: /* MII */ case 0xc7: /* SMI */ - case 0xd0 ... 
0xdf: /* SS */
+    case 0xd0:
+    case 0xd1:
+    case 0xd2:
+    case 0xd3:
+    case 0xd4:
+    case 0xd5:
+    case 0xd6:
+    case 0xd7:
+    case 0xd8:
+    case 0xd9:
+    case 0xda:
+    case 0xdb:
+    case 0xdc:
+    case 0xdd:
+    case 0xde:
+    case 0xdf:
+    // case 0xd0 ... 0xdf: /* SS */
     case 0xe1: /* SS */
     case 0xe2: /* SS */
     case 0xe8: /* SS */
     case 0xe9: /* SS */
     case 0xea: /* SS */
-    case 0xee ... 0xf3: /* SS */
-    case 0xf8 ... 0xfd: /* SS */
+    case 0xee:
+    case 0xef:
+    case 0xf0:
+    case 0xf1:
+    case 0xf2:
+    case 0xf3:
+    // case 0xee ... 0xf3: /* SS */
+    case 0xf8:
+    case 0xf9:
+    case 0xfa:
+    case 0xfb:
+    case 0xfc:
+    case 0xfd:
+    // case 0xf8 ... 0xfd: /* SS */
         op2 = 0;
         break;
     default:

From a72cbda6de5052e539feedd77972a42fce9e514a Mon Sep 17 00:00:00 2001
From: mio
Date: Thu, 30 Dec 2021 00:51:07 +0100
Subject: [PATCH 18/38] Initialize empty structs explicitly to build on MSVC

---
 qemu/target/s390x/translate.c         |  4 ++--
 qemu/target/s390x/vec_fpu_helper.c    | 12 ++++++------
 qemu/target/s390x/vec_helper.c        |  4 ++--
 qemu/target/s390x/vec_int_helper.c    |  2 +-
 qemu/target/s390x/vec_string_helper.c |  2 +-
 5 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/qemu/target/s390x/translate.c b/qemu/target/s390x/translate.c
index 9903b0ca..76c031c1 100644
--- a/qemu/target/s390x/translate.c
+++ b/qemu/target/s390x/translate.c
@@ -1132,7 +1132,7 @@ typedef struct DisasFormatInfo {
 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
 
-#define F0(N) { { } },
+#define F0(N) { { 0 } },
 #define F1(N, X1) { { X1 } },
 #define F2(N, X1, X2) { { X1, X2 } },
 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
@@ -6675,7 +6675,7 @@ static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
     const DisasInsn *insn;
     DisasJumpType ret = DISAS_NEXT;
-    DisasOps o = {};
+    DisasOps o = { 0 };
 
     /* Search for the insn in the table. 
*/ insn = extract_insn(env, s); diff --git a/qemu/target/s390x/vec_fpu_helper.c b/qemu/target/s390x/vec_fpu_helper.c index a48bd704..e87ef56f 100644 --- a/qemu/target/s390x/vec_fpu_helper.c +++ b/qemu/target/s390x/vec_fpu_helper.c @@ -84,7 +84,7 @@ static void vop64_2(S390Vector *v1, const S390Vector *v2, CPUS390XState *env, uintptr_t retaddr) { uint8_t vxc, vec_exc = 0; - S390Vector tmp = {}; + S390Vector tmp = { 0 }; int i, old_mode; old_mode = s390_swap_bfp_rounding_mode(env, erm); @@ -108,7 +108,7 @@ static void vop64_3(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, uintptr_t retaddr) { uint8_t vxc, vec_exc = 0; - S390Vector tmp = {}; + S390Vector tmp = { 0 }; int i; for (i = 0; i < 2; i++) { @@ -179,7 +179,7 @@ static int vfc64(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, CPUS390XState *env, bool s, vfc64_fn fn, uintptr_t retaddr) { uint8_t vxc, vec_exc = 0; - S390Vector tmp = {}; + S390Vector tmp = { 0 }; int match = 0; int i; @@ -414,7 +414,7 @@ static void vfll32(S390Vector *v1, const S390Vector *v2, CPUS390XState *env, bool s, uintptr_t retaddr) { uint8_t vxc, vec_exc = 0; - S390Vector tmp = {}; + S390Vector tmp = { 0 }; int i; for (i = 0; i < 2; i++) { @@ -449,7 +449,7 @@ static void vflr64(S390Vector *v1, const S390Vector *v2, CPUS390XState *env, bool s, bool XxC, uint8_t erm, uintptr_t retaddr) { uint8_t vxc, vec_exc = 0; - S390Vector tmp = {}; + S390Vector tmp = { 0 }; int i, old_mode; old_mode = s390_swap_bfp_rounding_mode(env, erm); @@ -510,7 +510,7 @@ static void vfma64(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, uintptr_t retaddr) { uint8_t vxc, vec_exc = 0; - S390Vector tmp = {}; + S390Vector tmp = { 0 }; int i; for (i = 0; i < 2; i++) { diff --git a/qemu/target/s390x/vec_helper.c b/qemu/target/s390x/vec_helper.c index 986e7cc8..fb774da3 100644 --- a/qemu/target/s390x/vec_helper.c +++ b/qemu/target/s390x/vec_helper.c @@ -30,7 +30,7 @@ void HELPER(vll)(CPUS390XState *env, void *v1, uint64_t addr, uint64_t bytes) s390_vec_write_element64(v1, 0, t0); s390_vec_write_element64(v1, 1, t1); } else { - S390Vector tmp = {}; + S390Vector tmp = { 0 }; int i; for (i = 0; i < bytes; i++) { @@ -178,7 +178,7 @@ void HELPER(vstl)(CPUS390XState *env, const void *v1, uint64_t addr, addr = wrap_address(env, addr + 8); cpu_stq_data_ra(env, addr, s390_vec_read_element64(v1, 1), GETPC()); } else { - S390Vector tmp = {}; + S390Vector tmp = { 0 }; int i; for (i = 0; i < bytes; i++) { diff --git a/qemu/target/s390x/vec_int_helper.c b/qemu/target/s390x/vec_int_helper.c index 0d6bc13d..b8144139 100644 --- a/qemu/target/s390x/vec_int_helper.c +++ b/qemu/target/s390x/vec_int_helper.c @@ -186,7 +186,7 @@ DEF_GALOIS_MULTIPLY(32, 64) static S390Vector galois_multiply64(uint64_t a, uint64_t b) { - S390Vector res = {}; + S390Vector res = { 0 }; S390Vector va = { .doubleword[1] = a, }; diff --git a/qemu/target/s390x/vec_string_helper.c b/qemu/target/s390x/vec_string_helper.c index c516c0ce..c75152fe 100644 --- a/qemu/target/s390x/vec_string_helper.c +++ b/qemu/target/s390x/vec_string_helper.c @@ -350,7 +350,7 @@ static int vstrc(void *v1, const void *v2, const void *v3, const void *v4, uint64_t a0 = s390_vec_read_element64(v2, 0); uint64_t a1 = s390_vec_read_element64(v2, 1); int first_zero = 16, first_match = 16; - S390Vector rt_result = {}; + S390Vector rt_result = { 0 }; uint64_t z0, z1; int i, j; From fdbd743c21539c4d1c4bf6716b12f7ac52fde0ad Mon Sep 17 00:00:00 2001 From: mio Date: Thu, 30 Dec 2021 00:54:55 +0100 Subject: [PATCH 19/38] Remove hard-coded cpu 
model --- qemu/target/s390x/cpu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/qemu/target/s390x/cpu.c b/qemu/target/s390x/cpu.c index dd824580..576f8fc9 100644 --- a/qemu/target/s390x/cpu.c +++ b/qemu/target/s390x/cpu.c @@ -258,8 +258,8 @@ S390CPU *cpu_s390_init(struct uc_struct *uc, const char *cpu_model) } if (uc->cpu_model == INT_MAX) { - uc->cpu_model = 36; // qemu-s390x-cpu - } else if (uc->cpu_model >= 38) { + uc->cpu_model = UC_CPU_S390X_QEMU; // qemu-s390x-cpu + } else if (uc->cpu_model > UC_CPU_S390X_MAX) { free(cpu); return NULL; } From 4c312d90954a43492cf1676cb820c29e6366723f Mon Sep 17 00:00:00 2001 From: mio Date: Thu, 30 Dec 2021 01:12:38 +0100 Subject: [PATCH 20/38] Update TODO --- TODO-s390 | 1 - 1 file changed, 1 deletion(-) diff --git a/TODO-s390 b/TODO-s390 index 75470b0e..28726409 100644 --- a/TODO-s390 +++ b/TODO-s390 @@ -5,6 +5,5 @@ current status: Todo: -- win32(mingw32) build - support more registers in qemu/target/s390x/unicorn.c - find & fix potential memory leaking with valgrind From fa3fb82c9cbad0e2dc7c196c1ff3e1deb15b9717 Mon Sep 17 00:00:00 2001 From: Nguyen Anh Quynh Date: Thu, 30 Dec 2021 17:17:49 +0800 Subject: [PATCH 21/38] s390x: fix warning on commented code --- qemu/include/hw/s390x/storage-keys.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/qemu/include/hw/s390x/storage-keys.h b/qemu/include/hw/s390x/storage-keys.h index 811d616e..1b2e819c 100644 --- a/qemu/include/hw/s390x/storage-keys.h +++ b/qemu/include/hw/s390x/storage-keys.h @@ -53,8 +53,6 @@ typedef struct S390SKeysClass { #define TYPE_KVM_S390_SKEYS "s390-skeys-kvm" #define TYPE_QEMU_S390_SKEYS "s390-skeys-qemu" -// #define QEMU_S390_SKEYS(obj) \ -// OBJECT_CHECK(QEMUS390SKeysState, (obj), TYPE_QEMU_S390_SKEYS) #define QEMU_S390_SKEYS(obj) \ (QEMUS390SKeysState*)(obj) typedef struct QEMUS390SKeysState { From 3b667338cfd3d9b69e47c7052f69a79d7cf6ef9d Mon Sep 17 00:00:00 2001 From: lazymio Date: Fri, 31 Dec 2021 00:10:50 +0100 Subject: [PATCH 22/38] Fix s390x warnings --- qemu/target/s390x/translate.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/qemu/target/s390x/translate.c b/qemu/target/s390x/translate.c index 76c031c1..993f2150 100644 --- a/qemu/target/s390x/translate.c +++ b/qemu/target/s390x/translate.c @@ -1132,7 +1132,7 @@ typedef struct DisasFormatInfo { #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N } #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N } -#define F0(N) { { 0 } }, +#define F0(N) { { { 0 } } }, #define F1(N, X1) { { X1 } }, #define F2(N, X1, X2) { { X1, X2 } }, #define F3(N, X1, X2, X3) { { X1, X2, X3 } }, From a06563ecdd94978e3c0075206b88612c65ee23c1 Mon Sep 17 00:00:00 2001 From: lazymio Date: Fri, 31 Dec 2021 00:24:18 +0100 Subject: [PATCH 23/38] Fix memory leak --- qemu/target/s390x/cpu.c | 2 +- qemu/target/s390x/cpu_models.c | 1 + qemu/target/s390x/unicorn.c | 9 ++------- 3 files changed, 4 insertions(+), 8 deletions(-) diff --git a/qemu/target/s390x/cpu.c b/qemu/target/s390x/cpu.c index 576f8fc9..95a45d87 100644 --- a/qemu/target/s390x/cpu.c +++ b/qemu/target/s390x/cpu.c @@ -294,7 +294,7 @@ S390CPU *cpu_s390_init(struct uc_struct *uc, const char *cpu_model) // init addresss space cpu_address_space_init(cs, 0, cs->memory); - qemu_init_vcpu(cs); + //qemu_init_vcpu(cs); return cpu; } diff --git a/qemu/target/s390x/cpu_models.c b/qemu/target/s390x/cpu_models.c index 37192fe0..b64f48b9 100644 --- a/qemu/target/s390x/cpu_models.c +++ b/qemu/target/s390x/cpu_models.c @@ -414,6 +414,7 @@ void 
s390_cpu_model_finalize(CPUState *obj) S390CPU *cpu = S390_CPU(obj); g_free(cpu->model); + g_free(cpu->ss.keydata); cpu->model = NULL; } diff --git a/qemu/target/s390x/unicorn.c b/qemu/target/s390x/unicorn.c index 9a718043..b53a1e07 100644 --- a/qemu/target/s390x/unicorn.c +++ b/qemu/target/s390x/unicorn.c @@ -16,10 +16,10 @@ static void s390_set_pc(struct uc_struct *uc, uint64_t address) static void s390_release(void *ctx) { -#if 0 + int i; TCGContext *tcg_ctx = (TCGContext *)ctx; - S390XCPU *cpu = (S390XCPU *)tcg_ctx->uc->cpu; + S390CPU *cpu = (S390CPU *)tcg_ctx->uc->cpu; CPUTLBDesc *d = cpu->neg.tlb.d; CPUTLBDescFast *f = cpu->neg.tlb.f; CPUTLBDesc *desc; @@ -32,11 +32,6 @@ static void s390_release(void *ctx) g_free(desc->iotlb); g_free(fast->table); } -#endif - TCGContext *tcg_ctx = (TCGContext *)ctx; - S390CPU *cpu = (S390CPU *)tcg_ctx->uc->cpu; - - release_common(ctx); s390_cpu_model_finalize((CPUState *)cpu); // TODO: Anymore to free? From 1a0f0d07683d92e4b4937056f01264182c65ce73 Mon Sep 17 00:00:00 2001 From: Nguyen Anh Quynh Date: Fri, 31 Dec 2021 09:48:16 +0800 Subject: [PATCH 24/38] s390x: remove some unused fields in S390CPU --- qemu/target/s390x/cpu.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/qemu/target/s390x/cpu.h b/qemu/target/s390x/cpu.h index 368b0ef7..426edb4d 100644 --- a/qemu/target/s390x/cpu.h +++ b/qemu/target/s390x/cpu.h @@ -160,8 +160,8 @@ struct S390CPU { CPUS390XState env; S390CPUModel *model; /* needed for live migration */ - void *irqstate; - uint32_t irqstate_saved_size; + // void *irqstate; + // uint32_t irqstate_saved_size; // unicorn struct S390CPUClass cc; From e55b76f057ef967933cc379f64eb5235a550d87d Mon Sep 17 00:00:00 2001 From: Nguyen Anh Quynh Date: Fri, 31 Dec 2021 10:05:05 +0800 Subject: [PATCH 25/38] s390x: cleanup & re-enable some skey code --- qemu/target/s390x/cpu.c | 7 ------ qemu/target/s390x/cpu_models.c | 41 ---------------------------------- qemu/target/s390x/mem_helper.c | 15 +++---------- 3 files changed, 3 insertions(+), 60 deletions(-) diff --git a/qemu/target/s390x/cpu.c b/qemu/target/s390x/cpu.c index 95a45d87..f6ccf9d1 100644 --- a/qemu/target/s390x/cpu.c +++ b/qemu/target/s390x/cpu.c @@ -219,13 +219,6 @@ void s390_enable_css_support(S390CPU *cpu) { } -#if 0 -static void s390_cpu_reset_full(CPUState *dev) -{ - return s390_cpu_reset(dev, S390_CPU_RESET_CLEAR); -} -#endif - static void s390_cpu_class_init(struct uc_struct *uc, CPUClass *oc) { S390CPUClass *scc = S390_CPU_CLASS(oc); diff --git a/qemu/target/s390x/cpu_models.c b/qemu/target/s390x/cpu_models.c index b64f48b9..d56635cc 100644 --- a/qemu/target/s390x/cpu_models.c +++ b/qemu/target/s390x/cpu_models.c @@ -460,47 +460,6 @@ static void s390_max_cpu_model_class_init(struct uc_struct *uc, CPUClass *oc, vo // "Enables all features supported by the accelerator in the current host"; } -#if 0 - -/* Generate type name for a cpu model. Caller has to free the string. */ -static char *s390_cpu_type_name(const char *model_name) -{ - return g_strdup_printf(S390_CPU_TYPE_NAME("%s"), model_name); -} - -/* Generate type name for a base cpu model. Caller has to free the string. 
*/ -static char *s390_base_cpu_type_name(const char *model_name) -{ - return g_strdup_printf(S390_CPU_TYPE_NAME("%s-base"), model_name); -} - -CPUClass *s390_cpu_class_by_name(const char *name) -{ - char *typename = s390_cpu_type_name(name); - CPUClass *oc; - - oc = object_class_by_name(typename); - g_free(typename); - return oc; -} - -static const TypeInfo qemu_s390_cpu_type_info = { - //.name = S390_CPU_TYPE_NAME("qemu"), - .parent = TYPE_S390_CPU, - .instance_init = s390_qemu_cpu_model_initfn, - .instance_finalize = s390_cpu_model_finalize, - .class_init = s390_qemu_cpu_model_class_init, -}; - -static const TypeInfo max_s390_cpu_type_info = { - //.name = S390_CPU_TYPE_NAME("max"), - .parent = TYPE_S390_CPU, - .instance_init = s390_max_cpu_model_initfn, - .instance_finalize = s390_cpu_model_finalize, - .class_init = s390_max_cpu_model_class_init, -}; -#endif - static void init_ignored_base_feat(void) { static const int feats[] = { diff --git a/qemu/target/s390x/mem_helper.c b/qemu/target/s390x/mem_helper.c index 92277634..206410ae 100644 --- a/qemu/target/s390x/mem_helper.c +++ b/qemu/target/s390x/mem_helper.c @@ -2061,9 +2061,6 @@ uint32_t HELPER(tprot)(CPUS390XState *env, uint64_t a1, uint64_t a2) /* insert storage key extended */ uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2) { - return 0; - -#if 0 static S390SKeysState *ss; static S390SKeysClass *skeyclass; uint64_t addr = wrap_address(env, r2); @@ -2076,7 +2073,7 @@ uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2) #endif if (unlikely(!ss)) { - ss = s390_get_skeys_device(); + ss = s390_get_skeys_device(env->uc); skeyclass = S390_SKEYS_GET_CLASS(ss); } @@ -2084,13 +2081,11 @@ uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2) return 0; } return key; -#endif } /* set storage key extended */ void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2) { -#if 0 static S390SKeysState *ss; static S390SKeysClass *skeyclass; uint64_t addr = wrap_address(env, r2); @@ -2103,7 +2098,7 @@ void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2) #endif if (unlikely(!ss)) { - ss = s390_get_skeys_device(); + ss = s390_get_skeys_device(env->uc); skeyclass = S390_SKEYS_GET_CLASS(ss); } @@ -2114,14 +2109,11 @@ void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2) * that point to a physical address we have to flush the whole TLB. 
*/
     tlb_flush_all_cpus_synced(env_cpu(env));
-#endif
 }
 
 /* reset reference bit extended */
 uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2)
 {
-    return 0;
-#if 0
     static S390SKeysState *ss;
     static S390SKeysClass *skeyclass;
     uint8_t re, key;
@@ -2133,7 +2125,7 @@ uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2)
 #endif
 
     if (unlikely(!ss)) {
-        ss = s390_get_skeys_device();
+        ss = s390_get_skeys_device(env->uc);
         skeyclass = S390_SKEYS_GET_CLASS(ss);
     }
 
@@ -2163,7 +2155,6 @@
      */
     return re >> 1;
-#endif
 }
 
 uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)

From d5e0bff7d58fcb8049444d2f80ef912dd846e429 Mon Sep 17 00:00:00 2001
From: mio
Date: Sat, 1 Jan 2022 00:41:59 +0100
Subject: [PATCH 26/38] Fix rust build for s390x

---
 bindings/rust/build.rs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/bindings/rust/build.rs b/bindings/rust/build.rs
index 34a066c0..617d41d2 100644
--- a/bindings/rust/build.rs
+++ b/bindings/rust/build.rs
@@ -174,6 +174,7 @@ fn main() {
         "m68k",
         "ppc",
         "ppc64",
+        "s390x"
     ]
     .iter()
     {

From 8b0285e13521ad555df5640f90b7a1e225d58004 Mon Sep 17 00:00:00 2001
From: mio
Date: Sat, 1 Jan 2022 00:58:10 +0100
Subject: [PATCH 27/38] Add python support for s390x

---
 bindings/const_generator.py              |   3 +-
 bindings/python/sample_s390x.py          |  62 ++++++++++++
 bindings/python/unicorn/s390x_const.py   | 120 +++++++++++++++++++++++
 bindings/python/unicorn/unicorn_const.py |   3 +-
 4 files changed, 186 insertions(+), 2 deletions(-)
 create mode 100644 bindings/python/sample_s390x.py
 create mode 100644 bindings/python/unicorn/s390x_const.py

diff --git a/bindings/const_generator.py b/bindings/const_generator.py
index 64c518a8..9c2a15be 100644
--- a/bindings/const_generator.py
+++ b/bindings/const_generator.py
@@ -6,7 +6,7 @@ import sys, re, os
 
 INCL_DIR = os.path.join('..', 'include', 'unicorn')
 
-include = [ 'arm.h', 'arm64.h', 'mips.h', 'x86.h', 'sparc.h', 'm68k.h', 'ppc.h', 'riscv.h', 'unicorn.h' ]
+include = [ 'arm.h', 'arm64.h', 'mips.h', 'x86.h', 'sparc.h', 'm68k.h', 'ppc.h', 'riscv.h', 's390x.h', 'unicorn.h' ]
 
 template = {
     'python': {
@@ -23,6 +23,7 @@ template = {
         'm68k.h': 'm68k',
         'ppc.h': 'ppc',
         'riscv.h': 'riscv',
+        's390x.h' : "s390x",
         'unicorn.h': 'unicorn',
         'comment_open': '#',
         'comment_close': '',
diff --git a/bindings/python/sample_s390x.py b/bindings/python/sample_s390x.py
new file mode 100644
index 00000000..c2a3ec58
--- /dev/null
+++ b/bindings/python/sample_s390x.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+
+from unicorn import *
+from unicorn.s390x_const import *
+
+# lr %r2, %r3
+S390X_CODE = b"\x18\x23"
+
+# memory address where emulation starts
+ADDRESS = 0x10000
+
+
+# callback for tracing basic blocks
+def hook_block(uc, address, size, user_data):
+    print(">>> Tracing basic block at 0x%x, block size = 0x%x" %(address, size))
+
+
+# callback for tracing instructions
+def hook_code(uc, address, size, user_data):
+    print(">>> Tracing instruction at 0x%x, instruction size = 0x%x" %(address, size))
+
+
+# Test S390X
+def test_s390x():
+    print("Emulate S390X code")
+    try:
+        # Initialize emulator in big endian mode
+        mu = Uc(UC_ARCH_S390X, UC_MODE_BIG_ENDIAN)
+
+        # map 2MB memory for this emulation
+        mu.mem_map(ADDRESS, 2 * 1024 * 1024)
+
+        # write machine code to be emulated to memory
+        mu.mem_write(ADDRESS, S390X_CODE)
+
+        # initialize machine registers
+        mu.reg_write(UC_S390X_REG_R3, 0x7890)
+
+        # tracing all basic blocks with customized callback
+        mu.hook_add(UC_HOOK_BLOCK, hook_block)
+
+        # 
tracing all instructions with customized callback + mu.hook_add(UC_HOOK_CODE, hook_code) + + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + len(S390X_CODE)) + + # now print out some registers + print(">>> Emulation done. Below is the CPU context") + + r2 = mu.reg_read(UC_S390X_REG_R2) + r3 = mu.reg_read(UC_S390X_REG_R3) + print(">>> R2 = 0x%x" % r2) + print(">>> R3 = 0x%x" % r3) + + except UcError as e: + print("ERROR: %s" % e) + + +if __name__ == '__main__': + test_s390x() + diff --git a/bindings/python/unicorn/s390x_const.py b/bindings/python/unicorn/s390x_const.py new file mode 100644 index 00000000..ec96ae66 --- /dev/null +++ b/bindings/python/unicorn/s390x_const.py @@ -0,0 +1,120 @@ +# For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [s390x_const.py] + +# S390X CPU + +UC_CPU_S390X_Z900 = 0 +UC_CPU_S390X_Z900_2 = 1 +UC_CPU_S390X_Z900_3 = 2 +UC_CPU_S390X_Z800 = 3 +UC_CPU_S390X_Z990 = 4 +UC_CPU_S390X_Z990_2 = 5 +UC_CPU_S390X_Z990_3 = 6 +UC_CPU_S390X_Z890 = 7 +UC_CPU_S390X_Z990_4 = 8 +UC_CPU_S390X_Z890_2 = 9 +UC_CPU_S390X_Z990_5 = 10 +UC_CPU_S390X_Z890_3 = 11 +UC_CPU_S390X_Z9EC = 12 +UC_CPU_S390X_Z9EC_2 = 13 +UC_CPU_S390X_Z9BC = 14 +UC_CPU_S390X_Z9EC_3 = 15 +UC_CPU_S390X_Z9BC_2 = 16 +UC_CPU_S390X_Z10EC = 17 +UC_CPU_S390X_Z10EC_2 = 18 +UC_CPU_S390X_Z10BC = 19 +UC_CPU_S390X_Z10EC_3 = 20 +UC_CPU_S390X_Z10BC_2 = 21 +UC_CPU_S390X_Z196 = 22 +UC_CPU_S390X_Z196_2 = 23 +UC_CPU_S390X_Z114 = 24 +UC_CPU_S390X_ZEC12 = 25 +UC_CPU_S390X_ZEC12_2 = 26 +UC_CPU_S390X_ZBC12 = 27 +UC_CPU_S390X_Z13 = 28 +UC_CPU_S390X_Z13_2 = 29 +UC_CPU_S390X_Z13S = 30 +UC_CPU_S390X_Z14 = 31 +UC_CPU_S390X_Z14_2 = 32 +UC_CPU_S390X_Z14ZR1 = 33 +UC_CPU_S390X_GEN15A = 34 +UC_CPU_S390X_GEN15B = 35 +UC_CPU_S390X_QEMU = 36 +UC_CPU_S390X_MAX = 37 + +# S390X registers + +UC_S390X_REG_INVALID = 0 + +# General purpose registers +UC_S390X_REG_R0 = 1 +UC_S390X_REG_R1 = 2 +UC_S390X_REG_R2 = 3 +UC_S390X_REG_R3 = 4 +UC_S390X_REG_R4 = 5 +UC_S390X_REG_R5 = 6 +UC_S390X_REG_R6 = 7 +UC_S390X_REG_R7 = 8 +UC_S390X_REG_R8 = 9 +UC_S390X_REG_R9 = 10 +UC_S390X_REG_R10 = 11 +UC_S390X_REG_R11 = 12 +UC_S390X_REG_R12 = 13 +UC_S390X_REG_R13 = 14 +UC_S390X_REG_R14 = 15 +UC_S390X_REG_R15 = 16 + +# Floating point registers +UC_S390X_REG_F0 = 17 +UC_S390X_REG_F1 = 18 +UC_S390X_REG_F2 = 19 +UC_S390X_REG_F3 = 20 +UC_S390X_REG_F4 = 21 +UC_S390X_REG_F5 = 22 +UC_S390X_REG_F6 = 23 +UC_S390X_REG_F7 = 24 +UC_S390X_REG_F8 = 25 +UC_S390X_REG_F9 = 26 +UC_S390X_REG_F10 = 27 +UC_S390X_REG_F11 = 28 +UC_S390X_REG_F12 = 29 +UC_S390X_REG_F13 = 30 +UC_S390X_REG_F14 = 31 +UC_S390X_REG_F15 = 32 +UC_S390X_REG_F16 = 33 +UC_S390X_REG_F17 = 34 +UC_S390X_REG_F18 = 35 +UC_S390X_REG_F19 = 36 +UC_S390X_REG_F20 = 37 +UC_S390X_REG_F21 = 38 +UC_S390X_REG_F22 = 39 +UC_S390X_REG_F23 = 40 +UC_S390X_REG_F24 = 41 +UC_S390X_REG_F25 = 42 +UC_S390X_REG_F26 = 43 +UC_S390X_REG_F27 = 44 +UC_S390X_REG_F28 = 45 +UC_S390X_REG_F29 = 46 +UC_S390X_REG_F30 = 47 +UC_S390X_REG_F31 = 48 + +# Access registers +UC_S390X_REG_A0 = 49 +UC_S390X_REG_A1 = 50 +UC_S390X_REG_A2 = 51 +UC_S390X_REG_A3 = 52 +UC_S390X_REG_A4 = 53 +UC_S390X_REG_A5 = 54 +UC_S390X_REG_A6 = 55 +UC_S390X_REG_A7 = 56 +UC_S390X_REG_A8 = 57 +UC_S390X_REG_A9 = 58 +UC_S390X_REG_A10 = 59 +UC_S390X_REG_A11 = 60 +UC_S390X_REG_A12 = 61 +UC_S390X_REG_A13 = 62 +UC_S390X_REG_A14 = 63 +UC_S390X_REG_A15 = 64 +UC_S390X_REG_PC = 65 +UC_S390X_REG_ENDING = 66 + +# Alias registers diff --git a/bindings/python/unicorn/unicorn_const.py b/bindings/python/unicorn/unicorn_const.py index 9baf4f68..1737a2c8 100644 --- 
a/bindings/python/unicorn/unicorn_const.py +++ b/bindings/python/unicorn/unicorn_const.py @@ -21,7 +21,8 @@ UC_ARCH_PPC = 5 UC_ARCH_SPARC = 6 UC_ARCH_M68K = 7 UC_ARCH_RISCV = 8 -UC_ARCH_MAX = 9 +UC_ARCH_S390X = 9 +UC_ARCH_MAX = 10 UC_MODE_LITTLE_ENDIAN = 0 UC_MODE_BIG_ENDIAN = 1073741824 From 57699b69bbbad92b07b1f876f666a13525287aa4 Mon Sep 17 00:00:00 2001 From: Nguyen Anh Quynh Date: Sat, 1 Jan 2022 09:14:49 +0800 Subject: [PATCH 28/38] Delete TODO-s390 --- TODO-s390 | 9 --------- 1 file changed, 9 deletions(-) delete mode 100644 TODO-s390 diff --git a/TODO-s390 b/TODO-s390 deleted file mode 100644 index 28726409..00000000 --- a/TODO-s390 +++ /dev/null @@ -1,9 +0,0 @@ -current status: - -- probably memory leak -- more registers - -Todo: - -- support more registers in qemu/target/s390x/unicorn.c -- find & fix potential memory leaking with valgrind From 6813e4a0429f8cf5864777884c7fa19885e9ac16 Mon Sep 17 00:00:00 2001 From: Nguyen Anh Quynh Date: Sat, 1 Jan 2022 09:24:28 +0800 Subject: [PATCH 29/38] bindings: update const_generator.py, and update all binding constants --- bindings/const_generator.py | 7 +- .../dotnet/UnicornManaged/Const/Common.fs | 9 +- bindings/dotnet/UnicornManaged/Const/M68k.fs | 18 +- bindings/dotnet/UnicornManaged/Const/Riscv.fs | 259 +++++++++++++----- bindings/dotnet/UnicornManaged/Const/S390x.fs | 128 +++++++++ bindings/go/unicorn/m68k_const.go | 18 +- bindings/go/unicorn/riscv_const.go | 259 +++++++++++++----- bindings/go/unicorn/s390x_const.go | 123 +++++++++ bindings/go/unicorn/unicorn_const.go | 9 +- bindings/java/unicorn/M68kConst.java | 18 +- bindings/java/unicorn/RiscvConst.java | 259 +++++++++++++----- bindings/java/unicorn/S390xConst.java | 126 +++++++++ bindings/java/unicorn/UnicornConst.java | 9 +- bindings/pascal/unicorn/M68kConst.pas | 18 +- bindings/pascal/unicorn/RiscvConst.pas | 259 +++++++++++++----- bindings/pascal/unicorn/S390xConst.pas | 128 +++++++++ bindings/pascal/unicorn/UnicornConst.pas | 9 +- .../lib/unicorn_engine/m68k_const.rb | 18 +- .../lib/unicorn_engine/riscv_const.rb | 259 +++++++++++++----- .../lib/unicorn_engine/s390x_const.rb | 123 +++++++++ .../lib/unicorn_engine/unicorn_const.rb | 9 +- 21 files changed, 1679 insertions(+), 386 deletions(-) create mode 100644 bindings/dotnet/UnicornManaged/Const/S390x.fs create mode 100644 bindings/go/unicorn/s390x_const.go create mode 100644 bindings/java/unicorn/S390xConst.java create mode 100644 bindings/pascal/unicorn/S390xConst.pas create mode 100644 bindings/ruby/unicorn_gem/lib/unicorn_engine/s390x_const.rb diff --git a/bindings/const_generator.py b/bindings/const_generator.py index 9c2a15be..7052b2bf 100644 --- a/bindings/const_generator.py +++ b/bindings/const_generator.py @@ -23,7 +23,7 @@ template = { 'm68k.h': 'm68k', 'ppc.h': 'ppc', 'riscv.h': 'riscv', - 's390x.h' : "s390x", + 's390x.h' : 's390x', 'unicorn.h': 'unicorn', 'comment_open': '#', 'comment_close': '', @@ -42,6 +42,7 @@ template = { 'm68k.h': 'm68k', 'ppc.h': 'ppc', 'riscv.h': 'riscv', + 's390x.h' : 's390x', 'unicorn.h': 'unicorn', 'comment_open': '#', 'comment_close': '', @@ -60,6 +61,7 @@ template = { 'm68k.h': 'm68k', 'ppc.h': 'ppc', 'riscv.h': 'riscv', + 's390x.h' : 's390x', 'unicorn.h': 'unicorn', 'comment_open': '//', 'comment_close': '', @@ -78,6 +80,7 @@ template = { 'm68k.h': 'M68k', 'ppc.h': 'Ppc', 'riscv.h': 'Riscv', + 's390x.h' : 'S390x', 'unicorn.h': 'Unicorn', 'comment_open': '//', 'comment_close': '', @@ -96,6 +99,7 @@ template = { 'm68k.h': 'M68k', 'ppc.h': 'Ppc', 'riscv.h': 'Riscv', + 's390x.h' : 'S390x', 
'unicorn.h': 'Common', 'comment_open': ' //', 'comment_close': '', @@ -114,6 +118,7 @@ template = { 'm68k.h': 'M68k', 'ppc.h': 'Ppc', 'riscv.h': 'Riscv', + 's390x.h' : 'S390x', 'unicorn.h': 'Unicorn', 'comment_open': '//', 'comment_close': '', diff --git a/bindings/dotnet/UnicornManaged/Const/Common.fs b/bindings/dotnet/UnicornManaged/Const/Common.fs index 0eebf1ec..a8b85414 100644 --- a/bindings/dotnet/UnicornManaged/Const/Common.fs +++ b/bindings/dotnet/UnicornManaged/Const/Common.fs @@ -9,11 +9,15 @@ module Common = let UC_API_MAJOR = 2 let UC_API_MINOR = 0 + + let UC_API_PATCH = 0 + let UC_API_EXTRA = 5 let UC_VERSION_MAJOR = 2 let UC_VERSION_MINOR = 0 - let UC_VERSION_EXTRA = 0 + let UC_VERSION_PATCH = 0 + let UC_VERSION_EXTRA = 5 let UC_SECOND_SCALE = 1000000 let UC_MILISECOND_SCALE = 1000 let UC_ARCH_ARM = 1 @@ -24,7 +28,8 @@ module Common = let UC_ARCH_SPARC = 6 let UC_ARCH_M68K = 7 let UC_ARCH_RISCV = 8 - let UC_ARCH_MAX = 9 + let UC_ARCH_S390X = 9 + let UC_ARCH_MAX = 10 let UC_MODE_LITTLE_ENDIAN = 0 let UC_MODE_BIG_ENDIAN = 1073741824 diff --git a/bindings/dotnet/UnicornManaged/Const/M68k.fs b/bindings/dotnet/UnicornManaged/Const/M68k.fs index 8d55516e..7639d240 100644 --- a/bindings/dotnet/UnicornManaged/Const/M68k.fs +++ b/bindings/dotnet/UnicornManaged/Const/M68k.fs @@ -9,15 +9,15 @@ module M68k = // M68K CPU - let UC_CPU_M5206_CPU = 0 - let UC_CPU_M68000_CPU = 1 - let UC_CPU_M68020_CPU = 2 - let UC_CPU_M68030_CPU = 3 - let UC_CPU_M68040_CPU = 4 - let UC_CPU_M68060_CPU = 5 - let UC_CPU_M5208_CPU = 6 - let UC_CPU_CFV4E_CPU = 7 - let UC_CPU_ANY_CPU = 8 + let UC_CPU_M68K_M5206 = 0 + let UC_CPU_M68K_M68000 = 1 + let UC_CPU_M68K_M68020 = 2 + let UC_CPU_M68K_M68030 = 3 + let UC_CPU_M68K_M68040 = 4 + let UC_CPU_M68K_M68060 = 5 + let UC_CPU_M68K_M5208 = 6 + let UC_CPU_M68K_CFV4E = 7 + let UC_CPU_M68K_ANY = 8 // M68K registers diff --git a/bindings/dotnet/UnicornManaged/Const/Riscv.fs b/bindings/dotnet/UnicornManaged/Const/Riscv.fs index fa765e73..4d141e85 100644 --- a/bindings/dotnet/UnicornManaged/Const/Riscv.fs +++ b/bindings/dotnet/UnicornManaged/Const/Riscv.fs @@ -59,41 +59,168 @@ module Riscv = let UC_RISCV_REG_X30 = 31 let UC_RISCV_REG_X31 = 32 + // RISCV CSR + let UC_RISCV_REG_USTATUS = 33 + let UC_RISCV_REG_UIE = 34 + let UC_RISCV_REG_UTVEC = 35 + let UC_RISCV_REG_USCRATCH = 36 + let UC_RISCV_REG_UEPC = 37 + let UC_RISCV_REG_UCAUSE = 38 + let UC_RISCV_REG_UTVAL = 39 + let UC_RISCV_REG_UIP = 40 + let UC_RISCV_REG_FFLAGS = 41 + let UC_RISCV_REG_FRM = 42 + let UC_RISCV_REG_FCSR = 43 + let UC_RISCV_REG_CYCLE = 44 + let UC_RISCV_REG_TIME = 45 + let UC_RISCV_REG_INSTRET = 46 + let UC_RISCV_REG_HPMCOUNTER3 = 47 + let UC_RISCV_REG_HPMCOUNTER4 = 48 + let UC_RISCV_REG_HPMCOUNTER5 = 49 + let UC_RISCV_REG_HPMCOUNTER6 = 50 + let UC_RISCV_REG_HPMCOUNTER7 = 51 + let UC_RISCV_REG_HPMCOUNTER8 = 52 + let UC_RISCV_REG_HPMCOUNTER9 = 53 + let UC_RISCV_REG_HPMCOUNTER10 = 54 + let UC_RISCV_REG_HPMCOUNTER11 = 55 + let UC_RISCV_REG_HPMCOUNTER12 = 56 + let UC_RISCV_REG_HPMCOUNTER13 = 57 + let UC_RISCV_REG_HPMCOUNTER14 = 58 + let UC_RISCV_REG_HPMCOUNTER15 = 59 + let UC_RISCV_REG_HPMCOUNTER16 = 60 + let UC_RISCV_REG_HPMCOUNTER17 = 61 + let UC_RISCV_REG_HPMCOUNTER18 = 62 + let UC_RISCV_REG_HPMCOUNTER19 = 63 + let UC_RISCV_REG_HPMCOUNTER20 = 64 + let UC_RISCV_REG_HPMCOUNTER21 = 65 + let UC_RISCV_REG_HPMCOUNTER22 = 66 + let UC_RISCV_REG_HPMCOUNTER23 = 67 + let UC_RISCV_REG_HPMCOUNTER24 = 68 + let UC_RISCV_REG_HPMCOUNTER25 = 69 + let UC_RISCV_REG_HPMCOUNTER26 = 70 + let UC_RISCV_REG_HPMCOUNTER27 = 71 + let 
UC_RISCV_REG_HPMCOUNTER28 = 72 + let UC_RISCV_REG_HPMCOUNTER29 = 73 + let UC_RISCV_REG_HPMCOUNTER30 = 74 + let UC_RISCV_REG_HPMCOUNTER31 = 75 + let UC_RISCV_REG_CYCLEH = 76 + let UC_RISCV_REG_TIMEH = 77 + let UC_RISCV_REG_INSTRETH = 78 + let UC_RISCV_REG_HPMCOUNTER3H = 79 + let UC_RISCV_REG_HPMCOUNTER4H = 80 + let UC_RISCV_REG_HPMCOUNTER5H = 81 + let UC_RISCV_REG_HPMCOUNTER6H = 82 + let UC_RISCV_REG_HPMCOUNTER7H = 83 + let UC_RISCV_REG_HPMCOUNTER8H = 84 + let UC_RISCV_REG_HPMCOUNTER9H = 85 + let UC_RISCV_REG_HPMCOUNTER10H = 86 + let UC_RISCV_REG_HPMCOUNTER11H = 87 + let UC_RISCV_REG_HPMCOUNTER12H = 88 + let UC_RISCV_REG_HPMCOUNTER13H = 89 + let UC_RISCV_REG_HPMCOUNTER14H = 90 + let UC_RISCV_REG_HPMCOUNTER15H = 91 + let UC_RISCV_REG_HPMCOUNTER16H = 92 + let UC_RISCV_REG_HPMCOUNTER17H = 93 + let UC_RISCV_REG_HPMCOUNTER18H = 94 + let UC_RISCV_REG_HPMCOUNTER19H = 95 + let UC_RISCV_REG_HPMCOUNTER20H = 96 + let UC_RISCV_REG_HPMCOUNTER21H = 97 + let UC_RISCV_REG_HPMCOUNTER22H = 98 + let UC_RISCV_REG_HPMCOUNTER23H = 99 + let UC_RISCV_REG_HPMCOUNTER24H = 100 + let UC_RISCV_REG_HPMCOUNTER25H = 101 + let UC_RISCV_REG_HPMCOUNTER26H = 102 + let UC_RISCV_REG_HPMCOUNTER27H = 103 + let UC_RISCV_REG_HPMCOUNTER28H = 104 + let UC_RISCV_REG_HPMCOUNTER29H = 105 + let UC_RISCV_REG_HPMCOUNTER30H = 106 + let UC_RISCV_REG_HPMCOUNTER31H = 107 + let UC_RISCV_REG_MCYCLE = 108 + let UC_RISCV_REG_MINSTRET = 109 + let UC_RISCV_REG_MCYCLEH = 110 + let UC_RISCV_REG_MINSTRETH = 111 + let UC_RISCV_REG_MVENDORID = 112 + let UC_RISCV_REG_MARCHID = 113 + let UC_RISCV_REG_MIMPID = 114 + let UC_RISCV_REG_MHARTID = 115 + let UC_RISCV_REG_MSTATUS = 116 + let UC_RISCV_REG_MISA = 117 + let UC_RISCV_REG_MEDELEG = 118 + let UC_RISCV_REG_MIDELEG = 119 + let UC_RISCV_REG_MIE = 120 + let UC_RISCV_REG_MTVEC = 121 + let UC_RISCV_REG_MCOUNTEREN = 122 + let UC_RISCV_REG_MSTATUSH = 123 + let UC_RISCV_REG_MUCOUNTEREN = 124 + let UC_RISCV_REG_MSCOUNTEREN = 125 + let UC_RISCV_REG_MHCOUNTEREN = 126 + let UC_RISCV_REG_MSCRATCH = 127 + let UC_RISCV_REG_MEPC = 128 + let UC_RISCV_REG_MCAUSE = 129 + let UC_RISCV_REG_MTVAL = 130 + let UC_RISCV_REG_MIP = 131 + let UC_RISCV_REG_MBADADDR = 132 + let UC_RISCV_REG_SSTATUS = 133 + let UC_RISCV_REG_SEDELEG = 134 + let UC_RISCV_REG_SIDELEG = 135 + let UC_RISCV_REG_SIE = 136 + let UC_RISCV_REG_STVEC = 137 + let UC_RISCV_REG_SCOUNTEREN = 138 + let UC_RISCV_REG_SSCRATCH = 139 + let UC_RISCV_REG_SEPC = 140 + let UC_RISCV_REG_SCAUSE = 141 + let UC_RISCV_REG_STVAL = 142 + let UC_RISCV_REG_SIP = 143 + let UC_RISCV_REG_SBADADDR = 144 + let UC_RISCV_REG_SPTBR = 145 + let UC_RISCV_REG_SATP = 146 + let UC_RISCV_REG_HSTATUS = 147 + let UC_RISCV_REG_HEDELEG = 148 + let UC_RISCV_REG_HIDELEG = 149 + let UC_RISCV_REG_HIE = 150 + let UC_RISCV_REG_HCOUNTEREN = 151 + let UC_RISCV_REG_HTVAL = 152 + let UC_RISCV_REG_HIP = 153 + let UC_RISCV_REG_HTINST = 154 + let UC_RISCV_REG_HGATP = 155 + let UC_RISCV_REG_HTIMEDELTA = 156 + let UC_RISCV_REG_HTIMEDELTAH = 157 + // Floating-point registers - let UC_RISCV_REG_F0 = 33 - let UC_RISCV_REG_F1 = 34 - let UC_RISCV_REG_F2 = 35 - let UC_RISCV_REG_F3 = 36 - let UC_RISCV_REG_F4 = 37 - let UC_RISCV_REG_F5 = 38 - let UC_RISCV_REG_F6 = 39 - let UC_RISCV_REG_F7 = 40 - let UC_RISCV_REG_F8 = 41 - let UC_RISCV_REG_F9 = 42 - let UC_RISCV_REG_F10 = 43 - let UC_RISCV_REG_F11 = 44 - let UC_RISCV_REG_F12 = 45 - let UC_RISCV_REG_F13 = 46 - let UC_RISCV_REG_F14 = 47 - let UC_RISCV_REG_F15 = 48 - let UC_RISCV_REG_F16 = 49 - let UC_RISCV_REG_F17 = 50 - let UC_RISCV_REG_F18 = 51 - let UC_RISCV_REG_F19 = 52 - 
let UC_RISCV_REG_F20 = 53 - let UC_RISCV_REG_F21 = 54 - let UC_RISCV_REG_F22 = 55 - let UC_RISCV_REG_F23 = 56 - let UC_RISCV_REG_F24 = 57 - let UC_RISCV_REG_F25 = 58 - let UC_RISCV_REG_F26 = 59 - let UC_RISCV_REG_F27 = 60 - let UC_RISCV_REG_F28 = 61 - let UC_RISCV_REG_F29 = 62 - let UC_RISCV_REG_F30 = 63 - let UC_RISCV_REG_F31 = 64 - let UC_RISCV_REG_PC = 65 - let UC_RISCV_REG_ENDING = 66 + let UC_RISCV_REG_F0 = 158 + let UC_RISCV_REG_F1 = 159 + let UC_RISCV_REG_F2 = 160 + let UC_RISCV_REG_F3 = 161 + let UC_RISCV_REG_F4 = 162 + let UC_RISCV_REG_F5 = 163 + let UC_RISCV_REG_F6 = 164 + let UC_RISCV_REG_F7 = 165 + let UC_RISCV_REG_F8 = 166 + let UC_RISCV_REG_F9 = 167 + let UC_RISCV_REG_F10 = 168 + let UC_RISCV_REG_F11 = 169 + let UC_RISCV_REG_F12 = 170 + let UC_RISCV_REG_F13 = 171 + let UC_RISCV_REG_F14 = 172 + let UC_RISCV_REG_F15 = 173 + let UC_RISCV_REG_F16 = 174 + let UC_RISCV_REG_F17 = 175 + let UC_RISCV_REG_F18 = 176 + let UC_RISCV_REG_F19 = 177 + let UC_RISCV_REG_F20 = 178 + let UC_RISCV_REG_F21 = 179 + let UC_RISCV_REG_F22 = 180 + let UC_RISCV_REG_F23 = 181 + let UC_RISCV_REG_F24 = 182 + let UC_RISCV_REG_F25 = 183 + let UC_RISCV_REG_F26 = 184 + let UC_RISCV_REG_F27 = 185 + let UC_RISCV_REG_F28 = 186 + let UC_RISCV_REG_F29 = 187 + let UC_RISCV_REG_F30 = 188 + let UC_RISCV_REG_F31 = 189 + let UC_RISCV_REG_PC = 190 + let UC_RISCV_REG_ENDING = 191 // Alias registers let UC_RISCV_REG_ZERO = 1 @@ -129,36 +256,36 @@ module Riscv = let UC_RISCV_REG_T4 = 30 let UC_RISCV_REG_T5 = 31 let UC_RISCV_REG_T6 = 32 - let UC_RISCV_REG_FT0 = 33 - let UC_RISCV_REG_FT1 = 34 - let UC_RISCV_REG_FT2 = 35 - let UC_RISCV_REG_FT3 = 36 - let UC_RISCV_REG_FT4 = 37 - let UC_RISCV_REG_FT5 = 38 - let UC_RISCV_REG_FT6 = 39 - let UC_RISCV_REG_FT7 = 40 - let UC_RISCV_REG_FS0 = 41 - let UC_RISCV_REG_FS1 = 42 - let UC_RISCV_REG_FA0 = 43 - let UC_RISCV_REG_FA1 = 44 - let UC_RISCV_REG_FA2 = 45 - let UC_RISCV_REG_FA3 = 46 - let UC_RISCV_REG_FA4 = 47 - let UC_RISCV_REG_FA5 = 48 - let UC_RISCV_REG_FA6 = 49 - let UC_RISCV_REG_FA7 = 50 - let UC_RISCV_REG_FS2 = 51 - let UC_RISCV_REG_FS3 = 52 - let UC_RISCV_REG_FS4 = 53 - let UC_RISCV_REG_FS5 = 54 - let UC_RISCV_REG_FS6 = 55 - let UC_RISCV_REG_FS7 = 56 - let UC_RISCV_REG_FS8 = 57 - let UC_RISCV_REG_FS9 = 58 - let UC_RISCV_REG_FS10 = 59 - let UC_RISCV_REG_FS11 = 60 - let UC_RISCV_REG_FT8 = 61 - let UC_RISCV_REG_FT9 = 62 - let UC_RISCV_REG_FT10 = 63 - let UC_RISCV_REG_FT11 = 64 + let UC_RISCV_REG_FT0 = 158 + let UC_RISCV_REG_FT1 = 159 + let UC_RISCV_REG_FT2 = 160 + let UC_RISCV_REG_FT3 = 161 + let UC_RISCV_REG_FT4 = 162 + let UC_RISCV_REG_FT5 = 163 + let UC_RISCV_REG_FT6 = 164 + let UC_RISCV_REG_FT7 = 165 + let UC_RISCV_REG_FS0 = 166 + let UC_RISCV_REG_FS1 = 167 + let UC_RISCV_REG_FA0 = 168 + let UC_RISCV_REG_FA1 = 169 + let UC_RISCV_REG_FA2 = 170 + let UC_RISCV_REG_FA3 = 171 + let UC_RISCV_REG_FA4 = 172 + let UC_RISCV_REG_FA5 = 173 + let UC_RISCV_REG_FA6 = 174 + let UC_RISCV_REG_FA7 = 175 + let UC_RISCV_REG_FS2 = 176 + let UC_RISCV_REG_FS3 = 177 + let UC_RISCV_REG_FS4 = 178 + let UC_RISCV_REG_FS5 = 179 + let UC_RISCV_REG_FS6 = 180 + let UC_RISCV_REG_FS7 = 181 + let UC_RISCV_REG_FS8 = 182 + let UC_RISCV_REG_FS9 = 183 + let UC_RISCV_REG_FS10 = 184 + let UC_RISCV_REG_FS11 = 185 + let UC_RISCV_REG_FT8 = 186 + let UC_RISCV_REG_FT9 = 187 + let UC_RISCV_REG_FT10 = 188 + let UC_RISCV_REG_FT11 = 189 diff --git a/bindings/dotnet/UnicornManaged/Const/S390x.fs b/bindings/dotnet/UnicornManaged/Const/S390x.fs new file mode 100644 index 00000000..615a4241 --- /dev/null +++ 
b/bindings/dotnet/UnicornManaged/Const/S390x.fs @@ -0,0 +1,128 @@ +// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT + +namespace UnicornManaged.Const + +open System + +[<AutoOpen>] +module S390x = + + // S390X CPU + + let UC_CPU_S390X_Z900 = 0 + let UC_CPU_S390X_Z900_2 = 1 + let UC_CPU_S390X_Z900_3 = 2 + let UC_CPU_S390X_Z800 = 3 + let UC_CPU_S390X_Z990 = 4 + let UC_CPU_S390X_Z990_2 = 5 + let UC_CPU_S390X_Z990_3 = 6 + let UC_CPU_S390X_Z890 = 7 + let UC_CPU_S390X_Z990_4 = 8 + let UC_CPU_S390X_Z890_2 = 9 + let UC_CPU_S390X_Z990_5 = 10 + let UC_CPU_S390X_Z890_3 = 11 + let UC_CPU_S390X_Z9EC = 12 + let UC_CPU_S390X_Z9EC_2 = 13 + let UC_CPU_S390X_Z9BC = 14 + let UC_CPU_S390X_Z9EC_3 = 15 + let UC_CPU_S390X_Z9BC_2 = 16 + let UC_CPU_S390X_Z10EC = 17 + let UC_CPU_S390X_Z10EC_2 = 18 + let UC_CPU_S390X_Z10BC = 19 + let UC_CPU_S390X_Z10EC_3 = 20 + let UC_CPU_S390X_Z10BC_2 = 21 + let UC_CPU_S390X_Z196 = 22 + let UC_CPU_S390X_Z196_2 = 23 + let UC_CPU_S390X_Z114 = 24 + let UC_CPU_S390X_ZEC12 = 25 + let UC_CPU_S390X_ZEC12_2 = 26 + let UC_CPU_S390X_ZBC12 = 27 + let UC_CPU_S390X_Z13 = 28 + let UC_CPU_S390X_Z13_2 = 29 + let UC_CPU_S390X_Z13S = 30 + let UC_CPU_S390X_Z14 = 31 + let UC_CPU_S390X_Z14_2 = 32 + let UC_CPU_S390X_Z14ZR1 = 33 + let UC_CPU_S390X_GEN15A = 34 + let UC_CPU_S390X_GEN15B = 35 + let UC_CPU_S390X_QEMU = 36 + let UC_CPU_S390X_MAX = 37 + + // S390X registers + + let UC_S390X_REG_INVALID = 0 + + // General purpose registers + let UC_S390X_REG_R0 = 1 + let UC_S390X_REG_R1 = 2 + let UC_S390X_REG_R2 = 3 + let UC_S390X_REG_R3 = 4 + let UC_S390X_REG_R4 = 5 + let UC_S390X_REG_R5 = 6 + let UC_S390X_REG_R6 = 7 + let UC_S390X_REG_R7 = 8 + let UC_S390X_REG_R8 = 9 + let UC_S390X_REG_R9 = 10 + let UC_S390X_REG_R10 = 11 + let UC_S390X_REG_R11 = 12 + let UC_S390X_REG_R12 = 13 + let UC_S390X_REG_R13 = 14 + let UC_S390X_REG_R14 = 15 + let UC_S390X_REG_R15 = 16 + + // Floating point registers + let UC_S390X_REG_F0 = 17 + let UC_S390X_REG_F1 = 18 + let UC_S390X_REG_F2 = 19 + let UC_S390X_REG_F3 = 20 + let UC_S390X_REG_F4 = 21 + let UC_S390X_REG_F5 = 22 + let UC_S390X_REG_F6 = 23 + let UC_S390X_REG_F7 = 24 + let UC_S390X_REG_F8 = 25 + let UC_S390X_REG_F9 = 26 + let UC_S390X_REG_F10 = 27 + let UC_S390X_REG_F11 = 28 + let UC_S390X_REG_F12 = 29 + let UC_S390X_REG_F13 = 30 + let UC_S390X_REG_F14 = 31 + let UC_S390X_REG_F15 = 32 + let UC_S390X_REG_F16 = 33 + let UC_S390X_REG_F17 = 34 + let UC_S390X_REG_F18 = 35 + let UC_S390X_REG_F19 = 36 + let UC_S390X_REG_F20 = 37 + let UC_S390X_REG_F21 = 38 + let UC_S390X_REG_F22 = 39 + let UC_S390X_REG_F23 = 40 + let UC_S390X_REG_F24 = 41 + let UC_S390X_REG_F25 = 42 + let UC_S390X_REG_F26 = 43 + let UC_S390X_REG_F27 = 44 + let UC_S390X_REG_F28 = 45 + let UC_S390X_REG_F29 = 46 + let UC_S390X_REG_F30 = 47 + let UC_S390X_REG_F31 = 48 + + // Access registers + let UC_S390X_REG_A0 = 49 + let UC_S390X_REG_A1 = 50 + let UC_S390X_REG_A2 = 51 + let UC_S390X_REG_A3 = 52 + let UC_S390X_REG_A4 = 53 + let UC_S390X_REG_A5 = 54 + let UC_S390X_REG_A6 = 55 + let UC_S390X_REG_A7 = 56 + let UC_S390X_REG_A8 = 57 + let UC_S390X_REG_A9 = 58 + let UC_S390X_REG_A10 = 59 + let UC_S390X_REG_A11 = 60 + let UC_S390X_REG_A12 = 61 + let UC_S390X_REG_A13 = 62 + let UC_S390X_REG_A14 = 63 + let UC_S390X_REG_A15 = 64 + let UC_S390X_REG_PC = 65 + let UC_S390X_REG_ENDING = 66 + + // Alias registers + diff --git a/bindings/go/unicorn/m68k_const.go b/bindings/go/unicorn/m68k_const.go index 9b91860b..cdc5019a 100644 --- a/bindings/go/unicorn/m68k_const.go +++ b/bindings/go/unicorn/m68k_const.go @@ -4,15 +4,15 @@ const ( //
M68K CPU - CPU_M5206_CPU = 0 - CPU_M68000_CPU = 1 - CPU_M68020_CPU = 2 - CPU_M68030_CPU = 3 - CPU_M68040_CPU = 4 - CPU_M68060_CPU = 5 - CPU_M5208_CPU = 6 - CPU_CFV4E_CPU = 7 - CPU_ANY_CPU = 8 + CPU_M68K_M5206 = 0 + CPU_M68K_M68000 = 1 + CPU_M68K_M68020 = 2 + CPU_M68K_M68030 = 3 + CPU_M68K_M68040 = 4 + CPU_M68K_M68060 = 5 + CPU_M68K_M5208 = 6 + CPU_M68K_CFV4E = 7 + CPU_M68K_ANY = 8 // M68K registers diff --git a/bindings/go/unicorn/riscv_const.go b/bindings/go/unicorn/riscv_const.go index 70741ebc..42927d39 100644 --- a/bindings/go/unicorn/riscv_const.go +++ b/bindings/go/unicorn/riscv_const.go @@ -54,41 +54,168 @@ const ( RISCV_REG_X30 = 31 RISCV_REG_X31 = 32 +// RISCV CSR + RISCV_REG_USTATUS = 33 + RISCV_REG_UIE = 34 + RISCV_REG_UTVEC = 35 + RISCV_REG_USCRATCH = 36 + RISCV_REG_UEPC = 37 + RISCV_REG_UCAUSE = 38 + RISCV_REG_UTVAL = 39 + RISCV_REG_UIP = 40 + RISCV_REG_FFLAGS = 41 + RISCV_REG_FRM = 42 + RISCV_REG_FCSR = 43 + RISCV_REG_CYCLE = 44 + RISCV_REG_TIME = 45 + RISCV_REG_INSTRET = 46 + RISCV_REG_HPMCOUNTER3 = 47 + RISCV_REG_HPMCOUNTER4 = 48 + RISCV_REG_HPMCOUNTER5 = 49 + RISCV_REG_HPMCOUNTER6 = 50 + RISCV_REG_HPMCOUNTER7 = 51 + RISCV_REG_HPMCOUNTER8 = 52 + RISCV_REG_HPMCOUNTER9 = 53 + RISCV_REG_HPMCOUNTER10 = 54 + RISCV_REG_HPMCOUNTER11 = 55 + RISCV_REG_HPMCOUNTER12 = 56 + RISCV_REG_HPMCOUNTER13 = 57 + RISCV_REG_HPMCOUNTER14 = 58 + RISCV_REG_HPMCOUNTER15 = 59 + RISCV_REG_HPMCOUNTER16 = 60 + RISCV_REG_HPMCOUNTER17 = 61 + RISCV_REG_HPMCOUNTER18 = 62 + RISCV_REG_HPMCOUNTER19 = 63 + RISCV_REG_HPMCOUNTER20 = 64 + RISCV_REG_HPMCOUNTER21 = 65 + RISCV_REG_HPMCOUNTER22 = 66 + RISCV_REG_HPMCOUNTER23 = 67 + RISCV_REG_HPMCOUNTER24 = 68 + RISCV_REG_HPMCOUNTER25 = 69 + RISCV_REG_HPMCOUNTER26 = 70 + RISCV_REG_HPMCOUNTER27 = 71 + RISCV_REG_HPMCOUNTER28 = 72 + RISCV_REG_HPMCOUNTER29 = 73 + RISCV_REG_HPMCOUNTER30 = 74 + RISCV_REG_HPMCOUNTER31 = 75 + RISCV_REG_CYCLEH = 76 + RISCV_REG_TIMEH = 77 + RISCV_REG_INSTRETH = 78 + RISCV_REG_HPMCOUNTER3H = 79 + RISCV_REG_HPMCOUNTER4H = 80 + RISCV_REG_HPMCOUNTER5H = 81 + RISCV_REG_HPMCOUNTER6H = 82 + RISCV_REG_HPMCOUNTER7H = 83 + RISCV_REG_HPMCOUNTER8H = 84 + RISCV_REG_HPMCOUNTER9H = 85 + RISCV_REG_HPMCOUNTER10H = 86 + RISCV_REG_HPMCOUNTER11H = 87 + RISCV_REG_HPMCOUNTER12H = 88 + RISCV_REG_HPMCOUNTER13H = 89 + RISCV_REG_HPMCOUNTER14H = 90 + RISCV_REG_HPMCOUNTER15H = 91 + RISCV_REG_HPMCOUNTER16H = 92 + RISCV_REG_HPMCOUNTER17H = 93 + RISCV_REG_HPMCOUNTER18H = 94 + RISCV_REG_HPMCOUNTER19H = 95 + RISCV_REG_HPMCOUNTER20H = 96 + RISCV_REG_HPMCOUNTER21H = 97 + RISCV_REG_HPMCOUNTER22H = 98 + RISCV_REG_HPMCOUNTER23H = 99 + RISCV_REG_HPMCOUNTER24H = 100 + RISCV_REG_HPMCOUNTER25H = 101 + RISCV_REG_HPMCOUNTER26H = 102 + RISCV_REG_HPMCOUNTER27H = 103 + RISCV_REG_HPMCOUNTER28H = 104 + RISCV_REG_HPMCOUNTER29H = 105 + RISCV_REG_HPMCOUNTER30H = 106 + RISCV_REG_HPMCOUNTER31H = 107 + RISCV_REG_MCYCLE = 108 + RISCV_REG_MINSTRET = 109 + RISCV_REG_MCYCLEH = 110 + RISCV_REG_MINSTRETH = 111 + RISCV_REG_MVENDORID = 112 + RISCV_REG_MARCHID = 113 + RISCV_REG_MIMPID = 114 + RISCV_REG_MHARTID = 115 + RISCV_REG_MSTATUS = 116 + RISCV_REG_MISA = 117 + RISCV_REG_MEDELEG = 118 + RISCV_REG_MIDELEG = 119 + RISCV_REG_MIE = 120 + RISCV_REG_MTVEC = 121 + RISCV_REG_MCOUNTEREN = 122 + RISCV_REG_MSTATUSH = 123 + RISCV_REG_MUCOUNTEREN = 124 + RISCV_REG_MSCOUNTEREN = 125 + RISCV_REG_MHCOUNTEREN = 126 + RISCV_REG_MSCRATCH = 127 + RISCV_REG_MEPC = 128 + RISCV_REG_MCAUSE = 129 + RISCV_REG_MTVAL = 130 + RISCV_REG_MIP = 131 + RISCV_REG_MBADADDR = 132 + RISCV_REG_SSTATUS = 133 + RISCV_REG_SEDELEG = 134 + 
RISCV_REG_SIDELEG = 135 + RISCV_REG_SIE = 136 + RISCV_REG_STVEC = 137 + RISCV_REG_SCOUNTEREN = 138 + RISCV_REG_SSCRATCH = 139 + RISCV_REG_SEPC = 140 + RISCV_REG_SCAUSE = 141 + RISCV_REG_STVAL = 142 + RISCV_REG_SIP = 143 + RISCV_REG_SBADADDR = 144 + RISCV_REG_SPTBR = 145 + RISCV_REG_SATP = 146 + RISCV_REG_HSTATUS = 147 + RISCV_REG_HEDELEG = 148 + RISCV_REG_HIDELEG = 149 + RISCV_REG_HIE = 150 + RISCV_REG_HCOUNTEREN = 151 + RISCV_REG_HTVAL = 152 + RISCV_REG_HIP = 153 + RISCV_REG_HTINST = 154 + RISCV_REG_HGATP = 155 + RISCV_REG_HTIMEDELTA = 156 + RISCV_REG_HTIMEDELTAH = 157 + // Floating-point registers - RISCV_REG_F0 = 33 - RISCV_REG_F1 = 34 - RISCV_REG_F2 = 35 - RISCV_REG_F3 = 36 - RISCV_REG_F4 = 37 - RISCV_REG_F5 = 38 - RISCV_REG_F6 = 39 - RISCV_REG_F7 = 40 - RISCV_REG_F8 = 41 - RISCV_REG_F9 = 42 - RISCV_REG_F10 = 43 - RISCV_REG_F11 = 44 - RISCV_REG_F12 = 45 - RISCV_REG_F13 = 46 - RISCV_REG_F14 = 47 - RISCV_REG_F15 = 48 - RISCV_REG_F16 = 49 - RISCV_REG_F17 = 50 - RISCV_REG_F18 = 51 - RISCV_REG_F19 = 52 - RISCV_REG_F20 = 53 - RISCV_REG_F21 = 54 - RISCV_REG_F22 = 55 - RISCV_REG_F23 = 56 - RISCV_REG_F24 = 57 - RISCV_REG_F25 = 58 - RISCV_REG_F26 = 59 - RISCV_REG_F27 = 60 - RISCV_REG_F28 = 61 - RISCV_REG_F29 = 62 - RISCV_REG_F30 = 63 - RISCV_REG_F31 = 64 - RISCV_REG_PC = 65 - RISCV_REG_ENDING = 66 + RISCV_REG_F0 = 158 + RISCV_REG_F1 = 159 + RISCV_REG_F2 = 160 + RISCV_REG_F3 = 161 + RISCV_REG_F4 = 162 + RISCV_REG_F5 = 163 + RISCV_REG_F6 = 164 + RISCV_REG_F7 = 165 + RISCV_REG_F8 = 166 + RISCV_REG_F9 = 167 + RISCV_REG_F10 = 168 + RISCV_REG_F11 = 169 + RISCV_REG_F12 = 170 + RISCV_REG_F13 = 171 + RISCV_REG_F14 = 172 + RISCV_REG_F15 = 173 + RISCV_REG_F16 = 174 + RISCV_REG_F17 = 175 + RISCV_REG_F18 = 176 + RISCV_REG_F19 = 177 + RISCV_REG_F20 = 178 + RISCV_REG_F21 = 179 + RISCV_REG_F22 = 180 + RISCV_REG_F23 = 181 + RISCV_REG_F24 = 182 + RISCV_REG_F25 = 183 + RISCV_REG_F26 = 184 + RISCV_REG_F27 = 185 + RISCV_REG_F28 = 186 + RISCV_REG_F29 = 187 + RISCV_REG_F30 = 188 + RISCV_REG_F31 = 189 + RISCV_REG_PC = 190 + RISCV_REG_ENDING = 191 // Alias registers RISCV_REG_ZERO = 1 @@ -124,36 +251,36 @@ const ( RISCV_REG_T4 = 30 RISCV_REG_T5 = 31 RISCV_REG_T6 = 32 - RISCV_REG_FT0 = 33 - RISCV_REG_FT1 = 34 - RISCV_REG_FT2 = 35 - RISCV_REG_FT3 = 36 - RISCV_REG_FT4 = 37 - RISCV_REG_FT5 = 38 - RISCV_REG_FT6 = 39 - RISCV_REG_FT7 = 40 - RISCV_REG_FS0 = 41 - RISCV_REG_FS1 = 42 - RISCV_REG_FA0 = 43 - RISCV_REG_FA1 = 44 - RISCV_REG_FA2 = 45 - RISCV_REG_FA3 = 46 - RISCV_REG_FA4 = 47 - RISCV_REG_FA5 = 48 - RISCV_REG_FA6 = 49 - RISCV_REG_FA7 = 50 - RISCV_REG_FS2 = 51 - RISCV_REG_FS3 = 52 - RISCV_REG_FS4 = 53 - RISCV_REG_FS5 = 54 - RISCV_REG_FS6 = 55 - RISCV_REG_FS7 = 56 - RISCV_REG_FS8 = 57 - RISCV_REG_FS9 = 58 - RISCV_REG_FS10 = 59 - RISCV_REG_FS11 = 60 - RISCV_REG_FT8 = 61 - RISCV_REG_FT9 = 62 - RISCV_REG_FT10 = 63 - RISCV_REG_FT11 = 64 + RISCV_REG_FT0 = 158 + RISCV_REG_FT1 = 159 + RISCV_REG_FT2 = 160 + RISCV_REG_FT3 = 161 + RISCV_REG_FT4 = 162 + RISCV_REG_FT5 = 163 + RISCV_REG_FT6 = 164 + RISCV_REG_FT7 = 165 + RISCV_REG_FS0 = 166 + RISCV_REG_FS1 = 167 + RISCV_REG_FA0 = 168 + RISCV_REG_FA1 = 169 + RISCV_REG_FA2 = 170 + RISCV_REG_FA3 = 171 + RISCV_REG_FA4 = 172 + RISCV_REG_FA5 = 173 + RISCV_REG_FA6 = 174 + RISCV_REG_FA7 = 175 + RISCV_REG_FS2 = 176 + RISCV_REG_FS3 = 177 + RISCV_REG_FS4 = 178 + RISCV_REG_FS5 = 179 + RISCV_REG_FS6 = 180 + RISCV_REG_FS7 = 181 + RISCV_REG_FS8 = 182 + RISCV_REG_FS9 = 183 + RISCV_REG_FS10 = 184 + RISCV_REG_FS11 = 185 + RISCV_REG_FT8 = 186 + RISCV_REG_FT9 = 187 + RISCV_REG_FT10 = 188 + RISCV_REG_FT11 = 
189 ) \ No newline at end of file diff --git a/bindings/go/unicorn/s390x_const.go b/bindings/go/unicorn/s390x_const.go new file mode 100644 index 00000000..32abb2fe --- /dev/null +++ b/bindings/go/unicorn/s390x_const.go @@ -0,0 +1,123 @@ +package unicorn +// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [s390x_const.go] +const ( + +// S390X CPU + + CPU_S390X_Z900 = 0 + CPU_S390X_Z900_2 = 1 + CPU_S390X_Z900_3 = 2 + CPU_S390X_Z800 = 3 + CPU_S390X_Z990 = 4 + CPU_S390X_Z990_2 = 5 + CPU_S390X_Z990_3 = 6 + CPU_S390X_Z890 = 7 + CPU_S390X_Z990_4 = 8 + CPU_S390X_Z890_2 = 9 + CPU_S390X_Z990_5 = 10 + CPU_S390X_Z890_3 = 11 + CPU_S390X_Z9EC = 12 + CPU_S390X_Z9EC_2 = 13 + CPU_S390X_Z9BC = 14 + CPU_S390X_Z9EC_3 = 15 + CPU_S390X_Z9BC_2 = 16 + CPU_S390X_Z10EC = 17 + CPU_S390X_Z10EC_2 = 18 + CPU_S390X_Z10BC = 19 + CPU_S390X_Z10EC_3 = 20 + CPU_S390X_Z10BC_2 = 21 + CPU_S390X_Z196 = 22 + CPU_S390X_Z196_2 = 23 + CPU_S390X_Z114 = 24 + CPU_S390X_ZEC12 = 25 + CPU_S390X_ZEC12_2 = 26 + CPU_S390X_ZBC12 = 27 + CPU_S390X_Z13 = 28 + CPU_S390X_Z13_2 = 29 + CPU_S390X_Z13S = 30 + CPU_S390X_Z14 = 31 + CPU_S390X_Z14_2 = 32 + CPU_S390X_Z14ZR1 = 33 + CPU_S390X_GEN15A = 34 + CPU_S390X_GEN15B = 35 + CPU_S390X_QEMU = 36 + CPU_S390X_MAX = 37 + +// S390X registers + + S390X_REG_INVALID = 0 + +// General purpose registers + S390X_REG_R0 = 1 + S390X_REG_R1 = 2 + S390X_REG_R2 = 3 + S390X_REG_R3 = 4 + S390X_REG_R4 = 5 + S390X_REG_R5 = 6 + S390X_REG_R6 = 7 + S390X_REG_R7 = 8 + S390X_REG_R8 = 9 + S390X_REG_R9 = 10 + S390X_REG_R10 = 11 + S390X_REG_R11 = 12 + S390X_REG_R12 = 13 + S390X_REG_R13 = 14 + S390X_REG_R14 = 15 + S390X_REG_R15 = 16 + +// Floating point registers + S390X_REG_F0 = 17 + S390X_REG_F1 = 18 + S390X_REG_F2 = 19 + S390X_REG_F3 = 20 + S390X_REG_F4 = 21 + S390X_REG_F5 = 22 + S390X_REG_F6 = 23 + S390X_REG_F7 = 24 + S390X_REG_F8 = 25 + S390X_REG_F9 = 26 + S390X_REG_F10 = 27 + S390X_REG_F11 = 28 + S390X_REG_F12 = 29 + S390X_REG_F13 = 30 + S390X_REG_F14 = 31 + S390X_REG_F15 = 32 + S390X_REG_F16 = 33 + S390X_REG_F17 = 34 + S390X_REG_F18 = 35 + S390X_REG_F19 = 36 + S390X_REG_F20 = 37 + S390X_REG_F21 = 38 + S390X_REG_F22 = 39 + S390X_REG_F23 = 40 + S390X_REG_F24 = 41 + S390X_REG_F25 = 42 + S390X_REG_F26 = 43 + S390X_REG_F27 = 44 + S390X_REG_F28 = 45 + S390X_REG_F29 = 46 + S390X_REG_F30 = 47 + S390X_REG_F31 = 48 + +// Access registers + S390X_REG_A0 = 49 + S390X_REG_A1 = 50 + S390X_REG_A2 = 51 + S390X_REG_A3 = 52 + S390X_REG_A4 = 53 + S390X_REG_A5 = 54 + S390X_REG_A6 = 55 + S390X_REG_A7 = 56 + S390X_REG_A8 = 57 + S390X_REG_A9 = 58 + S390X_REG_A10 = 59 + S390X_REG_A11 = 60 + S390X_REG_A12 = 61 + S390X_REG_A13 = 62 + S390X_REG_A14 = 63 + S390X_REG_A15 = 64 + S390X_REG_PC = 65 + S390X_REG_ENDING = 66 + +// Alias registers +) \ No newline at end of file diff --git a/bindings/go/unicorn/unicorn_const.go b/bindings/go/unicorn/unicorn_const.go index 0da008ae..052cdd1d 100644 --- a/bindings/go/unicorn/unicorn_const.go +++ b/bindings/go/unicorn/unicorn_const.go @@ -4,11 +4,15 @@ const ( API_MAJOR = 2 API_MINOR = 0 + + API_PATCH = 0 + API_EXTRA = 5 VERSION_MAJOR = 2 VERSION_MINOR = 0 - VERSION_EXTRA = 0 + VERSION_PATCH = 0 + VERSION_EXTRA = 5 SECOND_SCALE = 1000000 MILISECOND_SCALE = 1000 ARCH_ARM = 1 @@ -19,7 +23,8 @@ const ( ARCH_SPARC = 6 ARCH_M68K = 7 ARCH_RISCV = 8 - ARCH_MAX = 9 + ARCH_S390X = 9 + ARCH_MAX = 10 MODE_LITTLE_ENDIAN = 0 MODE_BIG_ENDIAN = 1073741824 diff --git a/bindings/java/unicorn/M68kConst.java b/bindings/java/unicorn/M68kConst.java index 1be8cd3f..e16210f3 100644 --- a/bindings/java/unicorn/M68kConst.java +++ 
b/bindings/java/unicorn/M68kConst.java @@ -6,15 +6,15 @@ public interface M68kConst { // M68K CPU - public static final int UC_CPU_M5206_CPU = 0; - public static final int UC_CPU_M68000_CPU = 1; - public static final int UC_CPU_M68020_CPU = 2; - public static final int UC_CPU_M68030_CPU = 3; - public static final int UC_CPU_M68040_CPU = 4; - public static final int UC_CPU_M68060_CPU = 5; - public static final int UC_CPU_M5208_CPU = 6; - public static final int UC_CPU_CFV4E_CPU = 7; - public static final int UC_CPU_ANY_CPU = 8; + public static final int UC_CPU_M68K_M5206 = 0; + public static final int UC_CPU_M68K_M68000 = 1; + public static final int UC_CPU_M68K_M68020 = 2; + public static final int UC_CPU_M68K_M68030 = 3; + public static final int UC_CPU_M68K_M68040 = 4; + public static final int UC_CPU_M68K_M68060 = 5; + public static final int UC_CPU_M68K_M5208 = 6; + public static final int UC_CPU_M68K_CFV4E = 7; + public static final int UC_CPU_M68K_ANY = 8; // M68K registers diff --git a/bindings/java/unicorn/RiscvConst.java b/bindings/java/unicorn/RiscvConst.java index 39f0b2b6..f56315de 100644 --- a/bindings/java/unicorn/RiscvConst.java +++ b/bindings/java/unicorn/RiscvConst.java @@ -56,41 +56,168 @@ public interface RiscvConst { public static final int UC_RISCV_REG_X30 = 31; public static final int UC_RISCV_REG_X31 = 32; +// RISCV CSR + public static final int UC_RISCV_REG_USTATUS = 33; + public static final int UC_RISCV_REG_UIE = 34; + public static final int UC_RISCV_REG_UTVEC = 35; + public static final int UC_RISCV_REG_USCRATCH = 36; + public static final int UC_RISCV_REG_UEPC = 37; + public static final int UC_RISCV_REG_UCAUSE = 38; + public static final int UC_RISCV_REG_UTVAL = 39; + public static final int UC_RISCV_REG_UIP = 40; + public static final int UC_RISCV_REG_FFLAGS = 41; + public static final int UC_RISCV_REG_FRM = 42; + public static final int UC_RISCV_REG_FCSR = 43; + public static final int UC_RISCV_REG_CYCLE = 44; + public static final int UC_RISCV_REG_TIME = 45; + public static final int UC_RISCV_REG_INSTRET = 46; + public static final int UC_RISCV_REG_HPMCOUNTER3 = 47; + public static final int UC_RISCV_REG_HPMCOUNTER4 = 48; + public static final int UC_RISCV_REG_HPMCOUNTER5 = 49; + public static final int UC_RISCV_REG_HPMCOUNTER6 = 50; + public static final int UC_RISCV_REG_HPMCOUNTER7 = 51; + public static final int UC_RISCV_REG_HPMCOUNTER8 = 52; + public static final int UC_RISCV_REG_HPMCOUNTER9 = 53; + public static final int UC_RISCV_REG_HPMCOUNTER10 = 54; + public static final int UC_RISCV_REG_HPMCOUNTER11 = 55; + public static final int UC_RISCV_REG_HPMCOUNTER12 = 56; + public static final int UC_RISCV_REG_HPMCOUNTER13 = 57; + public static final int UC_RISCV_REG_HPMCOUNTER14 = 58; + public static final int UC_RISCV_REG_HPMCOUNTER15 = 59; + public static final int UC_RISCV_REG_HPMCOUNTER16 = 60; + public static final int UC_RISCV_REG_HPMCOUNTER17 = 61; + public static final int UC_RISCV_REG_HPMCOUNTER18 = 62; + public static final int UC_RISCV_REG_HPMCOUNTER19 = 63; + public static final int UC_RISCV_REG_HPMCOUNTER20 = 64; + public static final int UC_RISCV_REG_HPMCOUNTER21 = 65; + public static final int UC_RISCV_REG_HPMCOUNTER22 = 66; + public static final int UC_RISCV_REG_HPMCOUNTER23 = 67; + public static final int UC_RISCV_REG_HPMCOUNTER24 = 68; + public static final int UC_RISCV_REG_HPMCOUNTER25 = 69; + public static final int UC_RISCV_REG_HPMCOUNTER26 = 70; + public static final int UC_RISCV_REG_HPMCOUNTER27 = 71; + public static final int 
UC_RISCV_REG_HPMCOUNTER28 = 72; + public static final int UC_RISCV_REG_HPMCOUNTER29 = 73; + public static final int UC_RISCV_REG_HPMCOUNTER30 = 74; + public static final int UC_RISCV_REG_HPMCOUNTER31 = 75; + public static final int UC_RISCV_REG_CYCLEH = 76; + public static final int UC_RISCV_REG_TIMEH = 77; + public static final int UC_RISCV_REG_INSTRETH = 78; + public static final int UC_RISCV_REG_HPMCOUNTER3H = 79; + public static final int UC_RISCV_REG_HPMCOUNTER4H = 80; + public static final int UC_RISCV_REG_HPMCOUNTER5H = 81; + public static final int UC_RISCV_REG_HPMCOUNTER6H = 82; + public static final int UC_RISCV_REG_HPMCOUNTER7H = 83; + public static final int UC_RISCV_REG_HPMCOUNTER8H = 84; + public static final int UC_RISCV_REG_HPMCOUNTER9H = 85; + public static final int UC_RISCV_REG_HPMCOUNTER10H = 86; + public static final int UC_RISCV_REG_HPMCOUNTER11H = 87; + public static final int UC_RISCV_REG_HPMCOUNTER12H = 88; + public static final int UC_RISCV_REG_HPMCOUNTER13H = 89; + public static final int UC_RISCV_REG_HPMCOUNTER14H = 90; + public static final int UC_RISCV_REG_HPMCOUNTER15H = 91; + public static final int UC_RISCV_REG_HPMCOUNTER16H = 92; + public static final int UC_RISCV_REG_HPMCOUNTER17H = 93; + public static final int UC_RISCV_REG_HPMCOUNTER18H = 94; + public static final int UC_RISCV_REG_HPMCOUNTER19H = 95; + public static final int UC_RISCV_REG_HPMCOUNTER20H = 96; + public static final int UC_RISCV_REG_HPMCOUNTER21H = 97; + public static final int UC_RISCV_REG_HPMCOUNTER22H = 98; + public static final int UC_RISCV_REG_HPMCOUNTER23H = 99; + public static final int UC_RISCV_REG_HPMCOUNTER24H = 100; + public static final int UC_RISCV_REG_HPMCOUNTER25H = 101; + public static final int UC_RISCV_REG_HPMCOUNTER26H = 102; + public static final int UC_RISCV_REG_HPMCOUNTER27H = 103; + public static final int UC_RISCV_REG_HPMCOUNTER28H = 104; + public static final int UC_RISCV_REG_HPMCOUNTER29H = 105; + public static final int UC_RISCV_REG_HPMCOUNTER30H = 106; + public static final int UC_RISCV_REG_HPMCOUNTER31H = 107; + public static final int UC_RISCV_REG_MCYCLE = 108; + public static final int UC_RISCV_REG_MINSTRET = 109; + public static final int UC_RISCV_REG_MCYCLEH = 110; + public static final int UC_RISCV_REG_MINSTRETH = 111; + public static final int UC_RISCV_REG_MVENDORID = 112; + public static final int UC_RISCV_REG_MARCHID = 113; + public static final int UC_RISCV_REG_MIMPID = 114; + public static final int UC_RISCV_REG_MHARTID = 115; + public static final int UC_RISCV_REG_MSTATUS = 116; + public static final int UC_RISCV_REG_MISA = 117; + public static final int UC_RISCV_REG_MEDELEG = 118; + public static final int UC_RISCV_REG_MIDELEG = 119; + public static final int UC_RISCV_REG_MIE = 120; + public static final int UC_RISCV_REG_MTVEC = 121; + public static final int UC_RISCV_REG_MCOUNTEREN = 122; + public static final int UC_RISCV_REG_MSTATUSH = 123; + public static final int UC_RISCV_REG_MUCOUNTEREN = 124; + public static final int UC_RISCV_REG_MSCOUNTEREN = 125; + public static final int UC_RISCV_REG_MHCOUNTEREN = 126; + public static final int UC_RISCV_REG_MSCRATCH = 127; + public static final int UC_RISCV_REG_MEPC = 128; + public static final int UC_RISCV_REG_MCAUSE = 129; + public static final int UC_RISCV_REG_MTVAL = 130; + public static final int UC_RISCV_REG_MIP = 131; + public static final int UC_RISCV_REG_MBADADDR = 132; + public static final int UC_RISCV_REG_SSTATUS = 133; + public static final int UC_RISCV_REG_SEDELEG = 134; + public static 
final int UC_RISCV_REG_SIDELEG = 135; + public static final int UC_RISCV_REG_SIE = 136; + public static final int UC_RISCV_REG_STVEC = 137; + public static final int UC_RISCV_REG_SCOUNTEREN = 138; + public static final int UC_RISCV_REG_SSCRATCH = 139; + public static final int UC_RISCV_REG_SEPC = 140; + public static final int UC_RISCV_REG_SCAUSE = 141; + public static final int UC_RISCV_REG_STVAL = 142; + public static final int UC_RISCV_REG_SIP = 143; + public static final int UC_RISCV_REG_SBADADDR = 144; + public static final int UC_RISCV_REG_SPTBR = 145; + public static final int UC_RISCV_REG_SATP = 146; + public static final int UC_RISCV_REG_HSTATUS = 147; + public static final int UC_RISCV_REG_HEDELEG = 148; + public static final int UC_RISCV_REG_HIDELEG = 149; + public static final int UC_RISCV_REG_HIE = 150; + public static final int UC_RISCV_REG_HCOUNTEREN = 151; + public static final int UC_RISCV_REG_HTVAL = 152; + public static final int UC_RISCV_REG_HIP = 153; + public static final int UC_RISCV_REG_HTINST = 154; + public static final int UC_RISCV_REG_HGATP = 155; + public static final int UC_RISCV_REG_HTIMEDELTA = 156; + public static final int UC_RISCV_REG_HTIMEDELTAH = 157; + // Floating-point registers - public static final int UC_RISCV_REG_F0 = 33; - public static final int UC_RISCV_REG_F1 = 34; - public static final int UC_RISCV_REG_F2 = 35; - public static final int UC_RISCV_REG_F3 = 36; - public static final int UC_RISCV_REG_F4 = 37; - public static final int UC_RISCV_REG_F5 = 38; - public static final int UC_RISCV_REG_F6 = 39; - public static final int UC_RISCV_REG_F7 = 40; - public static final int UC_RISCV_REG_F8 = 41; - public static final int UC_RISCV_REG_F9 = 42; - public static final int UC_RISCV_REG_F10 = 43; - public static final int UC_RISCV_REG_F11 = 44; - public static final int UC_RISCV_REG_F12 = 45; - public static final int UC_RISCV_REG_F13 = 46; - public static final int UC_RISCV_REG_F14 = 47; - public static final int UC_RISCV_REG_F15 = 48; - public static final int UC_RISCV_REG_F16 = 49; - public static final int UC_RISCV_REG_F17 = 50; - public static final int UC_RISCV_REG_F18 = 51; - public static final int UC_RISCV_REG_F19 = 52; - public static final int UC_RISCV_REG_F20 = 53; - public static final int UC_RISCV_REG_F21 = 54; - public static final int UC_RISCV_REG_F22 = 55; - public static final int UC_RISCV_REG_F23 = 56; - public static final int UC_RISCV_REG_F24 = 57; - public static final int UC_RISCV_REG_F25 = 58; - public static final int UC_RISCV_REG_F26 = 59; - public static final int UC_RISCV_REG_F27 = 60; - public static final int UC_RISCV_REG_F28 = 61; - public static final int UC_RISCV_REG_F29 = 62; - public static final int UC_RISCV_REG_F30 = 63; - public static final int UC_RISCV_REG_F31 = 64; - public static final int UC_RISCV_REG_PC = 65; - public static final int UC_RISCV_REG_ENDING = 66; + public static final int UC_RISCV_REG_F0 = 158; + public static final int UC_RISCV_REG_F1 = 159; + public static final int UC_RISCV_REG_F2 = 160; + public static final int UC_RISCV_REG_F3 = 161; + public static final int UC_RISCV_REG_F4 = 162; + public static final int UC_RISCV_REG_F5 = 163; + public static final int UC_RISCV_REG_F6 = 164; + public static final int UC_RISCV_REG_F7 = 165; + public static final int UC_RISCV_REG_F8 = 166; + public static final int UC_RISCV_REG_F9 = 167; + public static final int UC_RISCV_REG_F10 = 168; + public static final int UC_RISCV_REG_F11 = 169; + public static final int UC_RISCV_REG_F12 = 170; + public static 
final int UC_RISCV_REG_F13 = 171; + public static final int UC_RISCV_REG_F14 = 172; + public static final int UC_RISCV_REG_F15 = 173; + public static final int UC_RISCV_REG_F16 = 174; + public static final int UC_RISCV_REG_F17 = 175; + public static final int UC_RISCV_REG_F18 = 176; + public static final int UC_RISCV_REG_F19 = 177; + public static final int UC_RISCV_REG_F20 = 178; + public static final int UC_RISCV_REG_F21 = 179; + public static final int UC_RISCV_REG_F22 = 180; + public static final int UC_RISCV_REG_F23 = 181; + public static final int UC_RISCV_REG_F24 = 182; + public static final int UC_RISCV_REG_F25 = 183; + public static final int UC_RISCV_REG_F26 = 184; + public static final int UC_RISCV_REG_F27 = 185; + public static final int UC_RISCV_REG_F28 = 186; + public static final int UC_RISCV_REG_F29 = 187; + public static final int UC_RISCV_REG_F30 = 188; + public static final int UC_RISCV_REG_F31 = 189; + public static final int UC_RISCV_REG_PC = 190; + public static final int UC_RISCV_REG_ENDING = 191; // Alias registers public static final int UC_RISCV_REG_ZERO = 1; @@ -126,37 +253,37 @@ public interface RiscvConst { public static final int UC_RISCV_REG_T4 = 30; public static final int UC_RISCV_REG_T5 = 31; public static final int UC_RISCV_REG_T6 = 32; - public static final int UC_RISCV_REG_FT0 = 33; - public static final int UC_RISCV_REG_FT1 = 34; - public static final int UC_RISCV_REG_FT2 = 35; - public static final int UC_RISCV_REG_FT3 = 36; - public static final int UC_RISCV_REG_FT4 = 37; - public static final int UC_RISCV_REG_FT5 = 38; - public static final int UC_RISCV_REG_FT6 = 39; - public static final int UC_RISCV_REG_FT7 = 40; - public static final int UC_RISCV_REG_FS0 = 41; - public static final int UC_RISCV_REG_FS1 = 42; - public static final int UC_RISCV_REG_FA0 = 43; - public static final int UC_RISCV_REG_FA1 = 44; - public static final int UC_RISCV_REG_FA2 = 45; - public static final int UC_RISCV_REG_FA3 = 46; - public static final int UC_RISCV_REG_FA4 = 47; - public static final int UC_RISCV_REG_FA5 = 48; - public static final int UC_RISCV_REG_FA6 = 49; - public static final int UC_RISCV_REG_FA7 = 50; - public static final int UC_RISCV_REG_FS2 = 51; - public static final int UC_RISCV_REG_FS3 = 52; - public static final int UC_RISCV_REG_FS4 = 53; - public static final int UC_RISCV_REG_FS5 = 54; - public static final int UC_RISCV_REG_FS6 = 55; - public static final int UC_RISCV_REG_FS7 = 56; - public static final int UC_RISCV_REG_FS8 = 57; - public static final int UC_RISCV_REG_FS9 = 58; - public static final int UC_RISCV_REG_FS10 = 59; - public static final int UC_RISCV_REG_FS11 = 60; - public static final int UC_RISCV_REG_FT8 = 61; - public static final int UC_RISCV_REG_FT9 = 62; - public static final int UC_RISCV_REG_FT10 = 63; - public static final int UC_RISCV_REG_FT11 = 64; + public static final int UC_RISCV_REG_FT0 = 158; + public static final int UC_RISCV_REG_FT1 = 159; + public static final int UC_RISCV_REG_FT2 = 160; + public static final int UC_RISCV_REG_FT3 = 161; + public static final int UC_RISCV_REG_FT4 = 162; + public static final int UC_RISCV_REG_FT5 = 163; + public static final int UC_RISCV_REG_FT6 = 164; + public static final int UC_RISCV_REG_FT7 = 165; + public static final int UC_RISCV_REG_FS0 = 166; + public static final int UC_RISCV_REG_FS1 = 167; + public static final int UC_RISCV_REG_FA0 = 168; + public static final int UC_RISCV_REG_FA1 = 169; + public static final int UC_RISCV_REG_FA2 = 170; + public static final int 
UC_RISCV_REG_FA3 = 171; + public static final int UC_RISCV_REG_FA4 = 172; + public static final int UC_RISCV_REG_FA5 = 173; + public static final int UC_RISCV_REG_FA6 = 174; + public static final int UC_RISCV_REG_FA7 = 175; + public static final int UC_RISCV_REG_FS2 = 176; + public static final int UC_RISCV_REG_FS3 = 177; + public static final int UC_RISCV_REG_FS4 = 178; + public static final int UC_RISCV_REG_FS5 = 179; + public static final int UC_RISCV_REG_FS6 = 180; + public static final int UC_RISCV_REG_FS7 = 181; + public static final int UC_RISCV_REG_FS8 = 182; + public static final int UC_RISCV_REG_FS9 = 183; + public static final int UC_RISCV_REG_FS10 = 184; + public static final int UC_RISCV_REG_FS11 = 185; + public static final int UC_RISCV_REG_FT8 = 186; + public static final int UC_RISCV_REG_FT9 = 187; + public static final int UC_RISCV_REG_FT10 = 188; + public static final int UC_RISCV_REG_FT11 = 189; } diff --git a/bindings/java/unicorn/S390xConst.java b/bindings/java/unicorn/S390xConst.java new file mode 100644 index 00000000..cc2b2a1b --- /dev/null +++ b/bindings/java/unicorn/S390xConst.java @@ -0,0 +1,126 @@ +// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT + +package unicorn; + +public interface S390xConst { + +// S390X CPU + + public static final int UC_CPU_S390X_Z900 = 0; + public static final int UC_CPU_S390X_Z900_2 = 1; + public static final int UC_CPU_S390X_Z900_3 = 2; + public static final int UC_CPU_S390X_Z800 = 3; + public static final int UC_CPU_S390X_Z990 = 4; + public static final int UC_CPU_S390X_Z990_2 = 5; + public static final int UC_CPU_S390X_Z990_3 = 6; + public static final int UC_CPU_S390X_Z890 = 7; + public static final int UC_CPU_S390X_Z990_4 = 8; + public static final int UC_CPU_S390X_Z890_2 = 9; + public static final int UC_CPU_S390X_Z990_5 = 10; + public static final int UC_CPU_S390X_Z890_3 = 11; + public static final int UC_CPU_S390X_Z9EC = 12; + public static final int UC_CPU_S390X_Z9EC_2 = 13; + public static final int UC_CPU_S390X_Z9BC = 14; + public static final int UC_CPU_S390X_Z9EC_3 = 15; + public static final int UC_CPU_S390X_Z9BC_2 = 16; + public static final int UC_CPU_S390X_Z10EC = 17; + public static final int UC_CPU_S390X_Z10EC_2 = 18; + public static final int UC_CPU_S390X_Z10BC = 19; + public static final int UC_CPU_S390X_Z10EC_3 = 20; + public static final int UC_CPU_S390X_Z10BC_2 = 21; + public static final int UC_CPU_S390X_Z196 = 22; + public static final int UC_CPU_S390X_Z196_2 = 23; + public static final int UC_CPU_S390X_Z114 = 24; + public static final int UC_CPU_S390X_ZEC12 = 25; + public static final int UC_CPU_S390X_ZEC12_2 = 26; + public static final int UC_CPU_S390X_ZBC12 = 27; + public static final int UC_CPU_S390X_Z13 = 28; + public static final int UC_CPU_S390X_Z13_2 = 29; + public static final int UC_CPU_S390X_Z13S = 30; + public static final int UC_CPU_S390X_Z14 = 31; + public static final int UC_CPU_S390X_Z14_2 = 32; + public static final int UC_CPU_S390X_Z14ZR1 = 33; + public static final int UC_CPU_S390X_GEN15A = 34; + public static final int UC_CPU_S390X_GEN15B = 35; + public static final int UC_CPU_S390X_QEMU = 36; + public static final int UC_CPU_S390X_MAX = 37; + +// S390X registers + + public static final int UC_S390X_REG_INVALID = 0; + +// General purpose registers + public static final int UC_S390X_REG_R0 = 1; + public static final int UC_S390X_REG_R1 = 2; + public static final int UC_S390X_REG_R2 = 3; + public static final int UC_S390X_REG_R3 = 4; + public static final int UC_S390X_REG_R4 = 5; + 
public static final int UC_S390X_REG_R5 = 6; + public static final int UC_S390X_REG_R6 = 7; + public static final int UC_S390X_REG_R7 = 8; + public static final int UC_S390X_REG_R8 = 9; + public static final int UC_S390X_REG_R9 = 10; + public static final int UC_S390X_REG_R10 = 11; + public static final int UC_S390X_REG_R11 = 12; + public static final int UC_S390X_REG_R12 = 13; + public static final int UC_S390X_REG_R13 = 14; + public static final int UC_S390X_REG_R14 = 15; + public static final int UC_S390X_REG_R15 = 16; + +// Floating point registers + public static final int UC_S390X_REG_F0 = 17; + public static final int UC_S390X_REG_F1 = 18; + public static final int UC_S390X_REG_F2 = 19; + public static final int UC_S390X_REG_F3 = 20; + public static final int UC_S390X_REG_F4 = 21; + public static final int UC_S390X_REG_F5 = 22; + public static final int UC_S390X_REG_F6 = 23; + public static final int UC_S390X_REG_F7 = 24; + public static final int UC_S390X_REG_F8 = 25; + public static final int UC_S390X_REG_F9 = 26; + public static final int UC_S390X_REG_F10 = 27; + public static final int UC_S390X_REG_F11 = 28; + public static final int UC_S390X_REG_F12 = 29; + public static final int UC_S390X_REG_F13 = 30; + public static final int UC_S390X_REG_F14 = 31; + public static final int UC_S390X_REG_F15 = 32; + public static final int UC_S390X_REG_F16 = 33; + public static final int UC_S390X_REG_F17 = 34; + public static final int UC_S390X_REG_F18 = 35; + public static final int UC_S390X_REG_F19 = 36; + public static final int UC_S390X_REG_F20 = 37; + public static final int UC_S390X_REG_F21 = 38; + public static final int UC_S390X_REG_F22 = 39; + public static final int UC_S390X_REG_F23 = 40; + public static final int UC_S390X_REG_F24 = 41; + public static final int UC_S390X_REG_F25 = 42; + public static final int UC_S390X_REG_F26 = 43; + public static final int UC_S390X_REG_F27 = 44; + public static final int UC_S390X_REG_F28 = 45; + public static final int UC_S390X_REG_F29 = 46; + public static final int UC_S390X_REG_F30 = 47; + public static final int UC_S390X_REG_F31 = 48; + +// Access registers + public static final int UC_S390X_REG_A0 = 49; + public static final int UC_S390X_REG_A1 = 50; + public static final int UC_S390X_REG_A2 = 51; + public static final int UC_S390X_REG_A3 = 52; + public static final int UC_S390X_REG_A4 = 53; + public static final int UC_S390X_REG_A5 = 54; + public static final int UC_S390X_REG_A6 = 55; + public static final int UC_S390X_REG_A7 = 56; + public static final int UC_S390X_REG_A8 = 57; + public static final int UC_S390X_REG_A9 = 58; + public static final int UC_S390X_REG_A10 = 59; + public static final int UC_S390X_REG_A11 = 60; + public static final int UC_S390X_REG_A12 = 61; + public static final int UC_S390X_REG_A13 = 62; + public static final int UC_S390X_REG_A14 = 63; + public static final int UC_S390X_REG_A15 = 64; + public static final int UC_S390X_REG_PC = 65; + public static final int UC_S390X_REG_ENDING = 66; + +// Alias registers + +} diff --git a/bindings/java/unicorn/UnicornConst.java b/bindings/java/unicorn/UnicornConst.java index 73cc4bff..4d8554f6 100644 --- a/bindings/java/unicorn/UnicornConst.java +++ b/bindings/java/unicorn/UnicornConst.java @@ -6,11 +6,15 @@ public interface UnicornConst { public static final int UC_API_MAJOR = 2; public static final int UC_API_MINOR = 0; + + public static final int UC_API_PATCH = 0; + public static final int UC_API_EXTRA = 5; public static final int UC_VERSION_MAJOR = 2; public static final int 
UC_VERSION_MINOR = 0; - public static final int UC_VERSION_EXTRA = 0; + public static final int UC_VERSION_PATCH = 0; + public static final int UC_VERSION_EXTRA = 5; public static final int UC_SECOND_SCALE = 1000000; public static final int UC_MILISECOND_SCALE = 1000; public static final int UC_ARCH_ARM = 1; @@ -21,7 +25,8 @@ public interface UnicornConst { public static final int UC_ARCH_SPARC = 6; public static final int UC_ARCH_M68K = 7; public static final int UC_ARCH_RISCV = 8; - public static final int UC_ARCH_MAX = 9; + public static final int UC_ARCH_S390X = 9; + public static final int UC_ARCH_MAX = 10; public static final int UC_MODE_LITTLE_ENDIAN = 0; public static final int UC_MODE_BIG_ENDIAN = 1073741824; diff --git a/bindings/pascal/unicorn/M68kConst.pas b/bindings/pascal/unicorn/M68kConst.pas index 072ed476..587ee15a 100644 --- a/bindings/pascal/unicorn/M68kConst.pas +++ b/bindings/pascal/unicorn/M68kConst.pas @@ -7,15 +7,15 @@ interface const // M68K CPU - UC_CPU_M5206_CPU = 0; - UC_CPU_M68000_CPU = 1; - UC_CPU_M68020_CPU = 2; - UC_CPU_M68030_CPU = 3; - UC_CPU_M68040_CPU = 4; - UC_CPU_M68060_CPU = 5; - UC_CPU_M5208_CPU = 6; - UC_CPU_CFV4E_CPU = 7; - UC_CPU_ANY_CPU = 8; + UC_CPU_M68K_M5206 = 0; + UC_CPU_M68K_M68000 = 1; + UC_CPU_M68K_M68020 = 2; + UC_CPU_M68K_M68030 = 3; + UC_CPU_M68K_M68040 = 4; + UC_CPU_M68K_M68060 = 5; + UC_CPU_M68K_M5208 = 6; + UC_CPU_M68K_CFV4E = 7; + UC_CPU_M68K_ANY = 8; // M68K registers diff --git a/bindings/pascal/unicorn/RiscvConst.pas b/bindings/pascal/unicorn/RiscvConst.pas index 7420f59f..39dcb405 100644 --- a/bindings/pascal/unicorn/RiscvConst.pas +++ b/bindings/pascal/unicorn/RiscvConst.pas @@ -57,41 +57,168 @@ const UC_RISCV_REG_X30 = 31; UC_RISCV_REG_X31 = 32; +// RISCV CSR + UC_RISCV_REG_USTATUS = 33; + UC_RISCV_REG_UIE = 34; + UC_RISCV_REG_UTVEC = 35; + UC_RISCV_REG_USCRATCH = 36; + UC_RISCV_REG_UEPC = 37; + UC_RISCV_REG_UCAUSE = 38; + UC_RISCV_REG_UTVAL = 39; + UC_RISCV_REG_UIP = 40; + UC_RISCV_REG_FFLAGS = 41; + UC_RISCV_REG_FRM = 42; + UC_RISCV_REG_FCSR = 43; + UC_RISCV_REG_CYCLE = 44; + UC_RISCV_REG_TIME = 45; + UC_RISCV_REG_INSTRET = 46; + UC_RISCV_REG_HPMCOUNTER3 = 47; + UC_RISCV_REG_HPMCOUNTER4 = 48; + UC_RISCV_REG_HPMCOUNTER5 = 49; + UC_RISCV_REG_HPMCOUNTER6 = 50; + UC_RISCV_REG_HPMCOUNTER7 = 51; + UC_RISCV_REG_HPMCOUNTER8 = 52; + UC_RISCV_REG_HPMCOUNTER9 = 53; + UC_RISCV_REG_HPMCOUNTER10 = 54; + UC_RISCV_REG_HPMCOUNTER11 = 55; + UC_RISCV_REG_HPMCOUNTER12 = 56; + UC_RISCV_REG_HPMCOUNTER13 = 57; + UC_RISCV_REG_HPMCOUNTER14 = 58; + UC_RISCV_REG_HPMCOUNTER15 = 59; + UC_RISCV_REG_HPMCOUNTER16 = 60; + UC_RISCV_REG_HPMCOUNTER17 = 61; + UC_RISCV_REG_HPMCOUNTER18 = 62; + UC_RISCV_REG_HPMCOUNTER19 = 63; + UC_RISCV_REG_HPMCOUNTER20 = 64; + UC_RISCV_REG_HPMCOUNTER21 = 65; + UC_RISCV_REG_HPMCOUNTER22 = 66; + UC_RISCV_REG_HPMCOUNTER23 = 67; + UC_RISCV_REG_HPMCOUNTER24 = 68; + UC_RISCV_REG_HPMCOUNTER25 = 69; + UC_RISCV_REG_HPMCOUNTER26 = 70; + UC_RISCV_REG_HPMCOUNTER27 = 71; + UC_RISCV_REG_HPMCOUNTER28 = 72; + UC_RISCV_REG_HPMCOUNTER29 = 73; + UC_RISCV_REG_HPMCOUNTER30 = 74; + UC_RISCV_REG_HPMCOUNTER31 = 75; + UC_RISCV_REG_CYCLEH = 76; + UC_RISCV_REG_TIMEH = 77; + UC_RISCV_REG_INSTRETH = 78; + UC_RISCV_REG_HPMCOUNTER3H = 79; + UC_RISCV_REG_HPMCOUNTER4H = 80; + UC_RISCV_REG_HPMCOUNTER5H = 81; + UC_RISCV_REG_HPMCOUNTER6H = 82; + UC_RISCV_REG_HPMCOUNTER7H = 83; + UC_RISCV_REG_HPMCOUNTER8H = 84; + UC_RISCV_REG_HPMCOUNTER9H = 85; + UC_RISCV_REG_HPMCOUNTER10H = 86; + UC_RISCV_REG_HPMCOUNTER11H = 87; + UC_RISCV_REG_HPMCOUNTER12H = 88; + 
UC_RISCV_REG_HPMCOUNTER13H = 89; + UC_RISCV_REG_HPMCOUNTER14H = 90; + UC_RISCV_REG_HPMCOUNTER15H = 91; + UC_RISCV_REG_HPMCOUNTER16H = 92; + UC_RISCV_REG_HPMCOUNTER17H = 93; + UC_RISCV_REG_HPMCOUNTER18H = 94; + UC_RISCV_REG_HPMCOUNTER19H = 95; + UC_RISCV_REG_HPMCOUNTER20H = 96; + UC_RISCV_REG_HPMCOUNTER21H = 97; + UC_RISCV_REG_HPMCOUNTER22H = 98; + UC_RISCV_REG_HPMCOUNTER23H = 99; + UC_RISCV_REG_HPMCOUNTER24H = 100; + UC_RISCV_REG_HPMCOUNTER25H = 101; + UC_RISCV_REG_HPMCOUNTER26H = 102; + UC_RISCV_REG_HPMCOUNTER27H = 103; + UC_RISCV_REG_HPMCOUNTER28H = 104; + UC_RISCV_REG_HPMCOUNTER29H = 105; + UC_RISCV_REG_HPMCOUNTER30H = 106; + UC_RISCV_REG_HPMCOUNTER31H = 107; + UC_RISCV_REG_MCYCLE = 108; + UC_RISCV_REG_MINSTRET = 109; + UC_RISCV_REG_MCYCLEH = 110; + UC_RISCV_REG_MINSTRETH = 111; + UC_RISCV_REG_MVENDORID = 112; + UC_RISCV_REG_MARCHID = 113; + UC_RISCV_REG_MIMPID = 114; + UC_RISCV_REG_MHARTID = 115; + UC_RISCV_REG_MSTATUS = 116; + UC_RISCV_REG_MISA = 117; + UC_RISCV_REG_MEDELEG = 118; + UC_RISCV_REG_MIDELEG = 119; + UC_RISCV_REG_MIE = 120; + UC_RISCV_REG_MTVEC = 121; + UC_RISCV_REG_MCOUNTEREN = 122; + UC_RISCV_REG_MSTATUSH = 123; + UC_RISCV_REG_MUCOUNTEREN = 124; + UC_RISCV_REG_MSCOUNTEREN = 125; + UC_RISCV_REG_MHCOUNTEREN = 126; + UC_RISCV_REG_MSCRATCH = 127; + UC_RISCV_REG_MEPC = 128; + UC_RISCV_REG_MCAUSE = 129; + UC_RISCV_REG_MTVAL = 130; + UC_RISCV_REG_MIP = 131; + UC_RISCV_REG_MBADADDR = 132; + UC_RISCV_REG_SSTATUS = 133; + UC_RISCV_REG_SEDELEG = 134; + UC_RISCV_REG_SIDELEG = 135; + UC_RISCV_REG_SIE = 136; + UC_RISCV_REG_STVEC = 137; + UC_RISCV_REG_SCOUNTEREN = 138; + UC_RISCV_REG_SSCRATCH = 139; + UC_RISCV_REG_SEPC = 140; + UC_RISCV_REG_SCAUSE = 141; + UC_RISCV_REG_STVAL = 142; + UC_RISCV_REG_SIP = 143; + UC_RISCV_REG_SBADADDR = 144; + UC_RISCV_REG_SPTBR = 145; + UC_RISCV_REG_SATP = 146; + UC_RISCV_REG_HSTATUS = 147; + UC_RISCV_REG_HEDELEG = 148; + UC_RISCV_REG_HIDELEG = 149; + UC_RISCV_REG_HIE = 150; + UC_RISCV_REG_HCOUNTEREN = 151; + UC_RISCV_REG_HTVAL = 152; + UC_RISCV_REG_HIP = 153; + UC_RISCV_REG_HTINST = 154; + UC_RISCV_REG_HGATP = 155; + UC_RISCV_REG_HTIMEDELTA = 156; + UC_RISCV_REG_HTIMEDELTAH = 157; + // Floating-point registers - UC_RISCV_REG_F0 = 33; - UC_RISCV_REG_F1 = 34; - UC_RISCV_REG_F2 = 35; - UC_RISCV_REG_F3 = 36; - UC_RISCV_REG_F4 = 37; - UC_RISCV_REG_F5 = 38; - UC_RISCV_REG_F6 = 39; - UC_RISCV_REG_F7 = 40; - UC_RISCV_REG_F8 = 41; - UC_RISCV_REG_F9 = 42; - UC_RISCV_REG_F10 = 43; - UC_RISCV_REG_F11 = 44; - UC_RISCV_REG_F12 = 45; - UC_RISCV_REG_F13 = 46; - UC_RISCV_REG_F14 = 47; - UC_RISCV_REG_F15 = 48; - UC_RISCV_REG_F16 = 49; - UC_RISCV_REG_F17 = 50; - UC_RISCV_REG_F18 = 51; - UC_RISCV_REG_F19 = 52; - UC_RISCV_REG_F20 = 53; - UC_RISCV_REG_F21 = 54; - UC_RISCV_REG_F22 = 55; - UC_RISCV_REG_F23 = 56; - UC_RISCV_REG_F24 = 57; - UC_RISCV_REG_F25 = 58; - UC_RISCV_REG_F26 = 59; - UC_RISCV_REG_F27 = 60; - UC_RISCV_REG_F28 = 61; - UC_RISCV_REG_F29 = 62; - UC_RISCV_REG_F30 = 63; - UC_RISCV_REG_F31 = 64; - UC_RISCV_REG_PC = 65; - UC_RISCV_REG_ENDING = 66; + UC_RISCV_REG_F0 = 158; + UC_RISCV_REG_F1 = 159; + UC_RISCV_REG_F2 = 160; + UC_RISCV_REG_F3 = 161; + UC_RISCV_REG_F4 = 162; + UC_RISCV_REG_F5 = 163; + UC_RISCV_REG_F6 = 164; + UC_RISCV_REG_F7 = 165; + UC_RISCV_REG_F8 = 166; + UC_RISCV_REG_F9 = 167; + UC_RISCV_REG_F10 = 168; + UC_RISCV_REG_F11 = 169; + UC_RISCV_REG_F12 = 170; + UC_RISCV_REG_F13 = 171; + UC_RISCV_REG_F14 = 172; + UC_RISCV_REG_F15 = 173; + UC_RISCV_REG_F16 = 174; + UC_RISCV_REG_F17 = 175; + UC_RISCV_REG_F18 = 176; + UC_RISCV_REG_F19 = 177; + UC_RISCV_REG_F20 
= 178; + UC_RISCV_REG_F21 = 179; + UC_RISCV_REG_F22 = 180; + UC_RISCV_REG_F23 = 181; + UC_RISCV_REG_F24 = 182; + UC_RISCV_REG_F25 = 183; + UC_RISCV_REG_F26 = 184; + UC_RISCV_REG_F27 = 185; + UC_RISCV_REG_F28 = 186; + UC_RISCV_REG_F29 = 187; + UC_RISCV_REG_F30 = 188; + UC_RISCV_REG_F31 = 189; + UC_RISCV_REG_PC = 190; + UC_RISCV_REG_ENDING = 191; // Alias registers UC_RISCV_REG_ZERO = 1; @@ -127,38 +254,38 @@ const UC_RISCV_REG_T4 = 30; UC_RISCV_REG_T5 = 31; UC_RISCV_REG_T6 = 32; - UC_RISCV_REG_FT0 = 33; - UC_RISCV_REG_FT1 = 34; - UC_RISCV_REG_FT2 = 35; - UC_RISCV_REG_FT3 = 36; - UC_RISCV_REG_FT4 = 37; - UC_RISCV_REG_FT5 = 38; - UC_RISCV_REG_FT6 = 39; - UC_RISCV_REG_FT7 = 40; - UC_RISCV_REG_FS0 = 41; - UC_RISCV_REG_FS1 = 42; - UC_RISCV_REG_FA0 = 43; - UC_RISCV_REG_FA1 = 44; - UC_RISCV_REG_FA2 = 45; - UC_RISCV_REG_FA3 = 46; - UC_RISCV_REG_FA4 = 47; - UC_RISCV_REG_FA5 = 48; - UC_RISCV_REG_FA6 = 49; - UC_RISCV_REG_FA7 = 50; - UC_RISCV_REG_FS2 = 51; - UC_RISCV_REG_FS3 = 52; - UC_RISCV_REG_FS4 = 53; - UC_RISCV_REG_FS5 = 54; - UC_RISCV_REG_FS6 = 55; - UC_RISCV_REG_FS7 = 56; - UC_RISCV_REG_FS8 = 57; - UC_RISCV_REG_FS9 = 58; - UC_RISCV_REG_FS10 = 59; - UC_RISCV_REG_FS11 = 60; - UC_RISCV_REG_FT8 = 61; - UC_RISCV_REG_FT9 = 62; - UC_RISCV_REG_FT10 = 63; - UC_RISCV_REG_FT11 = 64; + UC_RISCV_REG_FT0 = 158; + UC_RISCV_REG_FT1 = 159; + UC_RISCV_REG_FT2 = 160; + UC_RISCV_REG_FT3 = 161; + UC_RISCV_REG_FT4 = 162; + UC_RISCV_REG_FT5 = 163; + UC_RISCV_REG_FT6 = 164; + UC_RISCV_REG_FT7 = 165; + UC_RISCV_REG_FS0 = 166; + UC_RISCV_REG_FS1 = 167; + UC_RISCV_REG_FA0 = 168; + UC_RISCV_REG_FA1 = 169; + UC_RISCV_REG_FA2 = 170; + UC_RISCV_REG_FA3 = 171; + UC_RISCV_REG_FA4 = 172; + UC_RISCV_REG_FA5 = 173; + UC_RISCV_REG_FA6 = 174; + UC_RISCV_REG_FA7 = 175; + UC_RISCV_REG_FS2 = 176; + UC_RISCV_REG_FS3 = 177; + UC_RISCV_REG_FS4 = 178; + UC_RISCV_REG_FS5 = 179; + UC_RISCV_REG_FS6 = 180; + UC_RISCV_REG_FS7 = 181; + UC_RISCV_REG_FS8 = 182; + UC_RISCV_REG_FS9 = 183; + UC_RISCV_REG_FS10 = 184; + UC_RISCV_REG_FS11 = 185; + UC_RISCV_REG_FT8 = 186; + UC_RISCV_REG_FT9 = 187; + UC_RISCV_REG_FT10 = 188; + UC_RISCV_REG_FT11 = 189; implementation end. \ No newline at end of file diff --git a/bindings/pascal/unicorn/S390xConst.pas b/bindings/pascal/unicorn/S390xConst.pas new file mode 100644 index 00000000..8041a19b --- /dev/null +++ b/bindings/pascal/unicorn/S390xConst.pas @@ -0,0 +1,128 @@ +// For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT + +unit S390xConst; + +interface + +const +// S390X CPU + + UC_CPU_S390X_Z900 = 0; + UC_CPU_S390X_Z900_2 = 1; + UC_CPU_S390X_Z900_3 = 2; + UC_CPU_S390X_Z800 = 3; + UC_CPU_S390X_Z990 = 4; + UC_CPU_S390X_Z990_2 = 5; + UC_CPU_S390X_Z990_3 = 6; + UC_CPU_S390X_Z890 = 7; + UC_CPU_S390X_Z990_4 = 8; + UC_CPU_S390X_Z890_2 = 9; + UC_CPU_S390X_Z990_5 = 10; + UC_CPU_S390X_Z890_3 = 11; + UC_CPU_S390X_Z9EC = 12; + UC_CPU_S390X_Z9EC_2 = 13; + UC_CPU_S390X_Z9BC = 14; + UC_CPU_S390X_Z9EC_3 = 15; + UC_CPU_S390X_Z9BC_2 = 16; + UC_CPU_S390X_Z10EC = 17; + UC_CPU_S390X_Z10EC_2 = 18; + UC_CPU_S390X_Z10BC = 19; + UC_CPU_S390X_Z10EC_3 = 20; + UC_CPU_S390X_Z10BC_2 = 21; + UC_CPU_S390X_Z196 = 22; + UC_CPU_S390X_Z196_2 = 23; + UC_CPU_S390X_Z114 = 24; + UC_CPU_S390X_ZEC12 = 25; + UC_CPU_S390X_ZEC12_2 = 26; + UC_CPU_S390X_ZBC12 = 27; + UC_CPU_S390X_Z13 = 28; + UC_CPU_S390X_Z13_2 = 29; + UC_CPU_S390X_Z13S = 30; + UC_CPU_S390X_Z14 = 31; + UC_CPU_S390X_Z14_2 = 32; + UC_CPU_S390X_Z14ZR1 = 33; + UC_CPU_S390X_GEN15A = 34; + UC_CPU_S390X_GEN15B = 35; + UC_CPU_S390X_QEMU = 36; + UC_CPU_S390X_MAX = 37; + +// S390X registers + + UC_S390X_REG_INVALID = 0; + +// General purpose registers + UC_S390X_REG_R0 = 1; + UC_S390X_REG_R1 = 2; + UC_S390X_REG_R2 = 3; + UC_S390X_REG_R3 = 4; + UC_S390X_REG_R4 = 5; + UC_S390X_REG_R5 = 6; + UC_S390X_REG_R6 = 7; + UC_S390X_REG_R7 = 8; + UC_S390X_REG_R8 = 9; + UC_S390X_REG_R9 = 10; + UC_S390X_REG_R10 = 11; + UC_S390X_REG_R11 = 12; + UC_S390X_REG_R12 = 13; + UC_S390X_REG_R13 = 14; + UC_S390X_REG_R14 = 15; + UC_S390X_REG_R15 = 16; + +// Floating point registers + UC_S390X_REG_F0 = 17; + UC_S390X_REG_F1 = 18; + UC_S390X_REG_F2 = 19; + UC_S390X_REG_F3 = 20; + UC_S390X_REG_F4 = 21; + UC_S390X_REG_F5 = 22; + UC_S390X_REG_F6 = 23; + UC_S390X_REG_F7 = 24; + UC_S390X_REG_F8 = 25; + UC_S390X_REG_F9 = 26; + UC_S390X_REG_F10 = 27; + UC_S390X_REG_F11 = 28; + UC_S390X_REG_F12 = 29; + UC_S390X_REG_F13 = 30; + UC_S390X_REG_F14 = 31; + UC_S390X_REG_F15 = 32; + UC_S390X_REG_F16 = 33; + UC_S390X_REG_F17 = 34; + UC_S390X_REG_F18 = 35; + UC_S390X_REG_F19 = 36; + UC_S390X_REG_F20 = 37; + UC_S390X_REG_F21 = 38; + UC_S390X_REG_F22 = 39; + UC_S390X_REG_F23 = 40; + UC_S390X_REG_F24 = 41; + UC_S390X_REG_F25 = 42; + UC_S390X_REG_F26 = 43; + UC_S390X_REG_F27 = 44; + UC_S390X_REG_F28 = 45; + UC_S390X_REG_F29 = 46; + UC_S390X_REG_F30 = 47; + UC_S390X_REG_F31 = 48; + +// Access registers + UC_S390X_REG_A0 = 49; + UC_S390X_REG_A1 = 50; + UC_S390X_REG_A2 = 51; + UC_S390X_REG_A3 = 52; + UC_S390X_REG_A4 = 53; + UC_S390X_REG_A5 = 54; + UC_S390X_REG_A6 = 55; + UC_S390X_REG_A7 = 56; + UC_S390X_REG_A8 = 57; + UC_S390X_REG_A9 = 58; + UC_S390X_REG_A10 = 59; + UC_S390X_REG_A11 = 60; + UC_S390X_REG_A12 = 61; + UC_S390X_REG_A13 = 62; + UC_S390X_REG_A14 = 63; + UC_S390X_REG_A15 = 64; + UC_S390X_REG_PC = 65; + UC_S390X_REG_ENDING = 66; + +// Alias registers + +implementation +end. 
\ No newline at end of file diff --git a/bindings/pascal/unicorn/UnicornConst.pas b/bindings/pascal/unicorn/UnicornConst.pas index af67d5de..4ae08515 100644 --- a/bindings/pascal/unicorn/UnicornConst.pas +++ b/bindings/pascal/unicorn/UnicornConst.pas @@ -7,11 +7,15 @@ interface const UC_API_MAJOR = 2; UC_API_MINOR = 0; + + UC_API_PATCH = 0; + UC_API_EXTRA = 5; UC_VERSION_MAJOR = 2; UC_VERSION_MINOR = 0; - UC_VERSION_EXTRA = 0; + UC_VERSION_PATCH = 0; + UC_VERSION_EXTRA = 5; UC_SECOND_SCALE = 1000000; UC_MILISECOND_SCALE = 1000; UC_ARCH_ARM = 1; @@ -22,7 +26,8 @@ const UC_API_MAJOR = 2; UC_ARCH_SPARC = 6; UC_ARCH_M68K = 7; UC_ARCH_RISCV = 8; - UC_ARCH_MAX = 9; + UC_ARCH_S390X = 9; + UC_ARCH_MAX = 10; UC_MODE_LITTLE_ENDIAN = 0; UC_MODE_BIG_ENDIAN = 1073741824; diff --git a/bindings/ruby/unicorn_gem/lib/unicorn_engine/m68k_const.rb b/bindings/ruby/unicorn_gem/lib/unicorn_engine/m68k_const.rb index b4e72a04..dd59f78c 100644 --- a/bindings/ruby/unicorn_gem/lib/unicorn_engine/m68k_const.rb +++ b/bindings/ruby/unicorn_gem/lib/unicorn_engine/m68k_const.rb @@ -4,15 +4,15 @@ module UnicornEngine # M68K CPU - UC_CPU_M5206_CPU = 0 - UC_CPU_M68000_CPU = 1 - UC_CPU_M68020_CPU = 2 - UC_CPU_M68030_CPU = 3 - UC_CPU_M68040_CPU = 4 - UC_CPU_M68060_CPU = 5 - UC_CPU_M5208_CPU = 6 - UC_CPU_CFV4E_CPU = 7 - UC_CPU_ANY_CPU = 8 + UC_CPU_M68K_M5206 = 0 + UC_CPU_M68K_M68000 = 1 + UC_CPU_M68K_M68020 = 2 + UC_CPU_M68K_M68030 = 3 + UC_CPU_M68K_M68040 = 4 + UC_CPU_M68K_M68060 = 5 + UC_CPU_M68K_M5208 = 6 + UC_CPU_M68K_CFV4E = 7 + UC_CPU_M68K_ANY = 8 # M68K registers diff --git a/bindings/ruby/unicorn_gem/lib/unicorn_engine/riscv_const.rb b/bindings/ruby/unicorn_gem/lib/unicorn_engine/riscv_const.rb index 636686e2..59623bcf 100644 --- a/bindings/ruby/unicorn_gem/lib/unicorn_engine/riscv_const.rb +++ b/bindings/ruby/unicorn_gem/lib/unicorn_engine/riscv_const.rb @@ -54,41 +54,168 @@ module UnicornEngine UC_RISCV_REG_X30 = 31 UC_RISCV_REG_X31 = 32 +# RISCV CSR + UC_RISCV_REG_USTATUS = 33 + UC_RISCV_REG_UIE = 34 + UC_RISCV_REG_UTVEC = 35 + UC_RISCV_REG_USCRATCH = 36 + UC_RISCV_REG_UEPC = 37 + UC_RISCV_REG_UCAUSE = 38 + UC_RISCV_REG_UTVAL = 39 + UC_RISCV_REG_UIP = 40 + UC_RISCV_REG_FFLAGS = 41 + UC_RISCV_REG_FRM = 42 + UC_RISCV_REG_FCSR = 43 + UC_RISCV_REG_CYCLE = 44 + UC_RISCV_REG_TIME = 45 + UC_RISCV_REG_INSTRET = 46 + UC_RISCV_REG_HPMCOUNTER3 = 47 + UC_RISCV_REG_HPMCOUNTER4 = 48 + UC_RISCV_REG_HPMCOUNTER5 = 49 + UC_RISCV_REG_HPMCOUNTER6 = 50 + UC_RISCV_REG_HPMCOUNTER7 = 51 + UC_RISCV_REG_HPMCOUNTER8 = 52 + UC_RISCV_REG_HPMCOUNTER9 = 53 + UC_RISCV_REG_HPMCOUNTER10 = 54 + UC_RISCV_REG_HPMCOUNTER11 = 55 + UC_RISCV_REG_HPMCOUNTER12 = 56 + UC_RISCV_REG_HPMCOUNTER13 = 57 + UC_RISCV_REG_HPMCOUNTER14 = 58 + UC_RISCV_REG_HPMCOUNTER15 = 59 + UC_RISCV_REG_HPMCOUNTER16 = 60 + UC_RISCV_REG_HPMCOUNTER17 = 61 + UC_RISCV_REG_HPMCOUNTER18 = 62 + UC_RISCV_REG_HPMCOUNTER19 = 63 + UC_RISCV_REG_HPMCOUNTER20 = 64 + UC_RISCV_REG_HPMCOUNTER21 = 65 + UC_RISCV_REG_HPMCOUNTER22 = 66 + UC_RISCV_REG_HPMCOUNTER23 = 67 + UC_RISCV_REG_HPMCOUNTER24 = 68 + UC_RISCV_REG_HPMCOUNTER25 = 69 + UC_RISCV_REG_HPMCOUNTER26 = 70 + UC_RISCV_REG_HPMCOUNTER27 = 71 + UC_RISCV_REG_HPMCOUNTER28 = 72 + UC_RISCV_REG_HPMCOUNTER29 = 73 + UC_RISCV_REG_HPMCOUNTER30 = 74 + UC_RISCV_REG_HPMCOUNTER31 = 75 + UC_RISCV_REG_CYCLEH = 76 + UC_RISCV_REG_TIMEH = 77 + UC_RISCV_REG_INSTRETH = 78 + UC_RISCV_REG_HPMCOUNTER3H = 79 + UC_RISCV_REG_HPMCOUNTER4H = 80 + UC_RISCV_REG_HPMCOUNTER5H = 81 + UC_RISCV_REG_HPMCOUNTER6H = 82 + UC_RISCV_REG_HPMCOUNTER7H = 83 + UC_RISCV_REG_HPMCOUNTER8H = 84 + 
UC_RISCV_REG_HPMCOUNTER9H = 85 + UC_RISCV_REG_HPMCOUNTER10H = 86 + UC_RISCV_REG_HPMCOUNTER11H = 87 + UC_RISCV_REG_HPMCOUNTER12H = 88 + UC_RISCV_REG_HPMCOUNTER13H = 89 + UC_RISCV_REG_HPMCOUNTER14H = 90 + UC_RISCV_REG_HPMCOUNTER15H = 91 + UC_RISCV_REG_HPMCOUNTER16H = 92 + UC_RISCV_REG_HPMCOUNTER17H = 93 + UC_RISCV_REG_HPMCOUNTER18H = 94 + UC_RISCV_REG_HPMCOUNTER19H = 95 + UC_RISCV_REG_HPMCOUNTER20H = 96 + UC_RISCV_REG_HPMCOUNTER21H = 97 + UC_RISCV_REG_HPMCOUNTER22H = 98 + UC_RISCV_REG_HPMCOUNTER23H = 99 + UC_RISCV_REG_HPMCOUNTER24H = 100 + UC_RISCV_REG_HPMCOUNTER25H = 101 + UC_RISCV_REG_HPMCOUNTER26H = 102 + UC_RISCV_REG_HPMCOUNTER27H = 103 + UC_RISCV_REG_HPMCOUNTER28H = 104 + UC_RISCV_REG_HPMCOUNTER29H = 105 + UC_RISCV_REG_HPMCOUNTER30H = 106 + UC_RISCV_REG_HPMCOUNTER31H = 107 + UC_RISCV_REG_MCYCLE = 108 + UC_RISCV_REG_MINSTRET = 109 + UC_RISCV_REG_MCYCLEH = 110 + UC_RISCV_REG_MINSTRETH = 111 + UC_RISCV_REG_MVENDORID = 112 + UC_RISCV_REG_MARCHID = 113 + UC_RISCV_REG_MIMPID = 114 + UC_RISCV_REG_MHARTID = 115 + UC_RISCV_REG_MSTATUS = 116 + UC_RISCV_REG_MISA = 117 + UC_RISCV_REG_MEDELEG = 118 + UC_RISCV_REG_MIDELEG = 119 + UC_RISCV_REG_MIE = 120 + UC_RISCV_REG_MTVEC = 121 + UC_RISCV_REG_MCOUNTEREN = 122 + UC_RISCV_REG_MSTATUSH = 123 + UC_RISCV_REG_MUCOUNTEREN = 124 + UC_RISCV_REG_MSCOUNTEREN = 125 + UC_RISCV_REG_MHCOUNTEREN = 126 + UC_RISCV_REG_MSCRATCH = 127 + UC_RISCV_REG_MEPC = 128 + UC_RISCV_REG_MCAUSE = 129 + UC_RISCV_REG_MTVAL = 130 + UC_RISCV_REG_MIP = 131 + UC_RISCV_REG_MBADADDR = 132 + UC_RISCV_REG_SSTATUS = 133 + UC_RISCV_REG_SEDELEG = 134 + UC_RISCV_REG_SIDELEG = 135 + UC_RISCV_REG_SIE = 136 + UC_RISCV_REG_STVEC = 137 + UC_RISCV_REG_SCOUNTEREN = 138 + UC_RISCV_REG_SSCRATCH = 139 + UC_RISCV_REG_SEPC = 140 + UC_RISCV_REG_SCAUSE = 141 + UC_RISCV_REG_STVAL = 142 + UC_RISCV_REG_SIP = 143 + UC_RISCV_REG_SBADADDR = 144 + UC_RISCV_REG_SPTBR = 145 + UC_RISCV_REG_SATP = 146 + UC_RISCV_REG_HSTATUS = 147 + UC_RISCV_REG_HEDELEG = 148 + UC_RISCV_REG_HIDELEG = 149 + UC_RISCV_REG_HIE = 150 + UC_RISCV_REG_HCOUNTEREN = 151 + UC_RISCV_REG_HTVAL = 152 + UC_RISCV_REG_HIP = 153 + UC_RISCV_REG_HTINST = 154 + UC_RISCV_REG_HGATP = 155 + UC_RISCV_REG_HTIMEDELTA = 156 + UC_RISCV_REG_HTIMEDELTAH = 157 + # Floating-point registers - UC_RISCV_REG_F0 = 33 - UC_RISCV_REG_F1 = 34 - UC_RISCV_REG_F2 = 35 - UC_RISCV_REG_F3 = 36 - UC_RISCV_REG_F4 = 37 - UC_RISCV_REG_F5 = 38 - UC_RISCV_REG_F6 = 39 - UC_RISCV_REG_F7 = 40 - UC_RISCV_REG_F8 = 41 - UC_RISCV_REG_F9 = 42 - UC_RISCV_REG_F10 = 43 - UC_RISCV_REG_F11 = 44 - UC_RISCV_REG_F12 = 45 - UC_RISCV_REG_F13 = 46 - UC_RISCV_REG_F14 = 47 - UC_RISCV_REG_F15 = 48 - UC_RISCV_REG_F16 = 49 - UC_RISCV_REG_F17 = 50 - UC_RISCV_REG_F18 = 51 - UC_RISCV_REG_F19 = 52 - UC_RISCV_REG_F20 = 53 - UC_RISCV_REG_F21 = 54 - UC_RISCV_REG_F22 = 55 - UC_RISCV_REG_F23 = 56 - UC_RISCV_REG_F24 = 57 - UC_RISCV_REG_F25 = 58 - UC_RISCV_REG_F26 = 59 - UC_RISCV_REG_F27 = 60 - UC_RISCV_REG_F28 = 61 - UC_RISCV_REG_F29 = 62 - UC_RISCV_REG_F30 = 63 - UC_RISCV_REG_F31 = 64 - UC_RISCV_REG_PC = 65 - UC_RISCV_REG_ENDING = 66 + UC_RISCV_REG_F0 = 158 + UC_RISCV_REG_F1 = 159 + UC_RISCV_REG_F2 = 160 + UC_RISCV_REG_F3 = 161 + UC_RISCV_REG_F4 = 162 + UC_RISCV_REG_F5 = 163 + UC_RISCV_REG_F6 = 164 + UC_RISCV_REG_F7 = 165 + UC_RISCV_REG_F8 = 166 + UC_RISCV_REG_F9 = 167 + UC_RISCV_REG_F10 = 168 + UC_RISCV_REG_F11 = 169 + UC_RISCV_REG_F12 = 170 + UC_RISCV_REG_F13 = 171 + UC_RISCV_REG_F14 = 172 + UC_RISCV_REG_F15 = 173 + UC_RISCV_REG_F16 = 174 + UC_RISCV_REG_F17 = 175 + UC_RISCV_REG_F18 = 176 + UC_RISCV_REG_F19 = 177 + 
UC_RISCV_REG_F20 = 178 + UC_RISCV_REG_F21 = 179 + UC_RISCV_REG_F22 = 180 + UC_RISCV_REG_F23 = 181 + UC_RISCV_REG_F24 = 182 + UC_RISCV_REG_F25 = 183 + UC_RISCV_REG_F26 = 184 + UC_RISCV_REG_F27 = 185 + UC_RISCV_REG_F28 = 186 + UC_RISCV_REG_F29 = 187 + UC_RISCV_REG_F30 = 188 + UC_RISCV_REG_F31 = 189 + UC_RISCV_REG_PC = 190 + UC_RISCV_REG_ENDING = 191 # Alias registers UC_RISCV_REG_ZERO = 1 @@ -124,36 +251,36 @@ module UnicornEngine UC_RISCV_REG_T4 = 30 UC_RISCV_REG_T5 = 31 UC_RISCV_REG_T6 = 32 - UC_RISCV_REG_FT0 = 33 - UC_RISCV_REG_FT1 = 34 - UC_RISCV_REG_FT2 = 35 - UC_RISCV_REG_FT3 = 36 - UC_RISCV_REG_FT4 = 37 - UC_RISCV_REG_FT5 = 38 - UC_RISCV_REG_FT6 = 39 - UC_RISCV_REG_FT7 = 40 - UC_RISCV_REG_FS0 = 41 - UC_RISCV_REG_FS1 = 42 - UC_RISCV_REG_FA0 = 43 - UC_RISCV_REG_FA1 = 44 - UC_RISCV_REG_FA2 = 45 - UC_RISCV_REG_FA3 = 46 - UC_RISCV_REG_FA4 = 47 - UC_RISCV_REG_FA5 = 48 - UC_RISCV_REG_FA6 = 49 - UC_RISCV_REG_FA7 = 50 - UC_RISCV_REG_FS2 = 51 - UC_RISCV_REG_FS3 = 52 - UC_RISCV_REG_FS4 = 53 - UC_RISCV_REG_FS5 = 54 - UC_RISCV_REG_FS6 = 55 - UC_RISCV_REG_FS7 = 56 - UC_RISCV_REG_FS8 = 57 - UC_RISCV_REG_FS9 = 58 - UC_RISCV_REG_FS10 = 59 - UC_RISCV_REG_FS11 = 60 - UC_RISCV_REG_FT8 = 61 - UC_RISCV_REG_FT9 = 62 - UC_RISCV_REG_FT10 = 63 - UC_RISCV_REG_FT11 = 64 + UC_RISCV_REG_FT0 = 158 + UC_RISCV_REG_FT1 = 159 + UC_RISCV_REG_FT2 = 160 + UC_RISCV_REG_FT3 = 161 + UC_RISCV_REG_FT4 = 162 + UC_RISCV_REG_FT5 = 163 + UC_RISCV_REG_FT6 = 164 + UC_RISCV_REG_FT7 = 165 + UC_RISCV_REG_FS0 = 166 + UC_RISCV_REG_FS1 = 167 + UC_RISCV_REG_FA0 = 168 + UC_RISCV_REG_FA1 = 169 + UC_RISCV_REG_FA2 = 170 + UC_RISCV_REG_FA3 = 171 + UC_RISCV_REG_FA4 = 172 + UC_RISCV_REG_FA5 = 173 + UC_RISCV_REG_FA6 = 174 + UC_RISCV_REG_FA7 = 175 + UC_RISCV_REG_FS2 = 176 + UC_RISCV_REG_FS3 = 177 + UC_RISCV_REG_FS4 = 178 + UC_RISCV_REG_FS5 = 179 + UC_RISCV_REG_FS6 = 180 + UC_RISCV_REG_FS7 = 181 + UC_RISCV_REG_FS8 = 182 + UC_RISCV_REG_FS9 = 183 + UC_RISCV_REG_FS10 = 184 + UC_RISCV_REG_FS11 = 185 + UC_RISCV_REG_FT8 = 186 + UC_RISCV_REG_FT9 = 187 + UC_RISCV_REG_FT10 = 188 + UC_RISCV_REG_FT11 = 189 end \ No newline at end of file diff --git a/bindings/ruby/unicorn_gem/lib/unicorn_engine/s390x_const.rb b/bindings/ruby/unicorn_gem/lib/unicorn_engine/s390x_const.rb new file mode 100644 index 00000000..e089c090 --- /dev/null +++ b/bindings/ruby/unicorn_gem/lib/unicorn_engine/s390x_const.rb @@ -0,0 +1,123 @@ +# For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT [s390x_const.rb] + +module UnicornEngine + +# S390X CPU + + UC_CPU_S390X_Z900 = 0 + UC_CPU_S390X_Z900_2 = 1 + UC_CPU_S390X_Z900_3 = 2 + UC_CPU_S390X_Z800 = 3 + UC_CPU_S390X_Z990 = 4 + UC_CPU_S390X_Z990_2 = 5 + UC_CPU_S390X_Z990_3 = 6 + UC_CPU_S390X_Z890 = 7 + UC_CPU_S390X_Z990_4 = 8 + UC_CPU_S390X_Z890_2 = 9 + UC_CPU_S390X_Z990_5 = 10 + UC_CPU_S390X_Z890_3 = 11 + UC_CPU_S390X_Z9EC = 12 + UC_CPU_S390X_Z9EC_2 = 13 + UC_CPU_S390X_Z9BC = 14 + UC_CPU_S390X_Z9EC_3 = 15 + UC_CPU_S390X_Z9BC_2 = 16 + UC_CPU_S390X_Z10EC = 17 + UC_CPU_S390X_Z10EC_2 = 18 + UC_CPU_S390X_Z10BC = 19 + UC_CPU_S390X_Z10EC_3 = 20 + UC_CPU_S390X_Z10BC_2 = 21 + UC_CPU_S390X_Z196 = 22 + UC_CPU_S390X_Z196_2 = 23 + UC_CPU_S390X_Z114 = 24 + UC_CPU_S390X_ZEC12 = 25 + UC_CPU_S390X_ZEC12_2 = 26 + UC_CPU_S390X_ZBC12 = 27 + UC_CPU_S390X_Z13 = 28 + UC_CPU_S390X_Z13_2 = 29 + UC_CPU_S390X_Z13S = 30 + UC_CPU_S390X_Z14 = 31 + UC_CPU_S390X_Z14_2 = 32 + UC_CPU_S390X_Z14ZR1 = 33 + UC_CPU_S390X_GEN15A = 34 + UC_CPU_S390X_GEN15B = 35 + UC_CPU_S390X_QEMU = 36 + UC_CPU_S390X_MAX = 37 + +# S390X registers + + UC_S390X_REG_INVALID = 0 + +# General purpose registers + UC_S390X_REG_R0 = 1 + UC_S390X_REG_R1 = 2 + UC_S390X_REG_R2 = 3 + UC_S390X_REG_R3 = 4 + UC_S390X_REG_R4 = 5 + UC_S390X_REG_R5 = 6 + UC_S390X_REG_R6 = 7 + UC_S390X_REG_R7 = 8 + UC_S390X_REG_R8 = 9 + UC_S390X_REG_R9 = 10 + UC_S390X_REG_R10 = 11 + UC_S390X_REG_R11 = 12 + UC_S390X_REG_R12 = 13 + UC_S390X_REG_R13 = 14 + UC_S390X_REG_R14 = 15 + UC_S390X_REG_R15 = 16 + +# Floating point registers + UC_S390X_REG_F0 = 17 + UC_S390X_REG_F1 = 18 + UC_S390X_REG_F2 = 19 + UC_S390X_REG_F3 = 20 + UC_S390X_REG_F4 = 21 + UC_S390X_REG_F5 = 22 + UC_S390X_REG_F6 = 23 + UC_S390X_REG_F7 = 24 + UC_S390X_REG_F8 = 25 + UC_S390X_REG_F9 = 26 + UC_S390X_REG_F10 = 27 + UC_S390X_REG_F11 = 28 + UC_S390X_REG_F12 = 29 + UC_S390X_REG_F13 = 30 + UC_S390X_REG_F14 = 31 + UC_S390X_REG_F15 = 32 + UC_S390X_REG_F16 = 33 + UC_S390X_REG_F17 = 34 + UC_S390X_REG_F18 = 35 + UC_S390X_REG_F19 = 36 + UC_S390X_REG_F20 = 37 + UC_S390X_REG_F21 = 38 + UC_S390X_REG_F22 = 39 + UC_S390X_REG_F23 = 40 + UC_S390X_REG_F24 = 41 + UC_S390X_REG_F25 = 42 + UC_S390X_REG_F26 = 43 + UC_S390X_REG_F27 = 44 + UC_S390X_REG_F28 = 45 + UC_S390X_REG_F29 = 46 + UC_S390X_REG_F30 = 47 + UC_S390X_REG_F31 = 48 + +# Access registers + UC_S390X_REG_A0 = 49 + UC_S390X_REG_A1 = 50 + UC_S390X_REG_A2 = 51 + UC_S390X_REG_A3 = 52 + UC_S390X_REG_A4 = 53 + UC_S390X_REG_A5 = 54 + UC_S390X_REG_A6 = 55 + UC_S390X_REG_A7 = 56 + UC_S390X_REG_A8 = 57 + UC_S390X_REG_A9 = 58 + UC_S390X_REG_A10 = 59 + UC_S390X_REG_A11 = 60 + UC_S390X_REG_A12 = 61 + UC_S390X_REG_A13 = 62 + UC_S390X_REG_A14 = 63 + UC_S390X_REG_A15 = 64 + UC_S390X_REG_PC = 65 + UC_S390X_REG_ENDING = 66 + +# Alias registers +end \ No newline at end of file diff --git a/bindings/ruby/unicorn_gem/lib/unicorn_engine/unicorn_const.rb b/bindings/ruby/unicorn_gem/lib/unicorn_engine/unicorn_const.rb index 6c22c744..e12c7be4 100644 --- a/bindings/ruby/unicorn_gem/lib/unicorn_engine/unicorn_const.rb +++ b/bindings/ruby/unicorn_gem/lib/unicorn_engine/unicorn_const.rb @@ -4,11 +4,15 @@ module UnicornEngine UC_API_MAJOR = 2 UC_API_MINOR = 0 + + UC_API_PATCH = 0 + UC_API_EXTRA = 5 UC_VERSION_MAJOR = 2 UC_VERSION_MINOR = 0 - UC_VERSION_EXTRA = 0 + UC_VERSION_PATCH = 0 + UC_VERSION_EXTRA = 5 UC_SECOND_SCALE = 1000000 UC_MILISECOND_SCALE = 1000 UC_ARCH_ARM = 1 @@ -19,7 +23,8 @@ module UnicornEngine UC_ARCH_SPARC = 6 UC_ARCH_M68K = 7 UC_ARCH_RISCV = 8 - UC_ARCH_MAX = 9 + UC_ARCH_S390X = 9 + UC_ARCH_MAX 
= 10 UC_MODE_LITTLE_ENDIAN = 0 UC_MODE_BIG_ENDIAN = 1073741824 From 5bb40c5fafb03dbdf50253c256e94b1576792fcc Mon Sep 17 00:00:00 2001 From: Nguyen Anh Quynh Date: Sat, 1 Jan 2022 10:15:18 +0800 Subject: [PATCH 30/38] s390x: cleanup CMakeLists.txt --- CMakeLists.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 4fc88cd8..d1d9d8a7 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -986,7 +986,6 @@ add_library(s390x-softmmu qemu/target/s390x/sigp.c qemu/target/s390x/tcg-stub.c qemu/target/s390x/translate.c - # qemu/target/s390x/translate_vx.inc.c qemu/target/s390x/vec_fpu_helper.c qemu/target/s390x/vec_helper.c qemu/target/s390x/vec_int_helper.c From 441afe17e6ba0e68e603667d14b1913ab1c7cf63 Mon Sep 17 00:00:00 2001 From: lazymio Date: Mon, 10 Jan 2022 15:34:04 +0100 Subject: [PATCH 31/38] Add psw.mask register --- include/unicorn/s390x.h | 1 + qemu/target/s390x/unicorn.c | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/include/unicorn/s390x.h b/include/unicorn/s390x.h index 68bb0210..02e53ae0 100644 --- a/include/unicorn/s390x.h +++ b/include/unicorn/s390x.h @@ -128,6 +128,7 @@ typedef enum uc_s390x_reg { UC_S390X_REG_A15, UC_S390X_REG_PC, // PC register + UC_S390X_REG_PSWM, UC_S390X_REG_ENDING, // <-- mark the end of the list or registers diff --git a/qemu/target/s390x/unicorn.c b/qemu/target/s390x/unicorn.c index b53a1e07..81d64e0c 100644 --- a/qemu/target/s390x/unicorn.c +++ b/qemu/target/s390x/unicorn.c @@ -6,6 +6,7 @@ #include "unicorn_common.h" #include "uc_priv.h" #include "unicorn.h" +#include "internal.h" S390CPU *cpu_s390_init(struct uc_struct *uc, const char *cpu_model); @@ -65,6 +66,9 @@ static void reg_read(CPUS390XState *env, unsigned int regid, void *value) case UC_S390X_REG_PC: *(uint64_t *)value = env->psw.addr; break; + case UC_S390X_REG_PSWM: + *(uint64_t *)value = get_psw_mask(env); + break; } } @@ -86,6 +90,10 @@ static void reg_write(CPUS390XState *env, unsigned int regid, const void *value) case UC_S390X_REG_PC: env->psw.addr = *(uint64_t *)value; break; + case UC_S390X_REG_PSWM: + env->psw.mask = *(uint64_t *)value; + env->cc_op = (env->psw.mask >> 44) & 3; + break; } } From 980eae7f442d050c02adec62b56c16ee2d09e0d6 Mon Sep 17 00:00:00 2001 From: lazymio Date: Mon, 10 Jan 2022 15:45:56 +0100 Subject: [PATCH 32/38] Sync PC at the end of emulation --- qemu/target/s390x/translate.c | 1 + tests/unit/test_s390x.c | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/qemu/target/s390x/translate.c b/qemu/target/s390x/translate.c index 993f2150..39b9821f 100644 --- a/qemu/target/s390x/translate.c +++ b/qemu/target/s390x/translate.c @@ -6896,6 +6896,7 @@ static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) switch (dc->base.is_jmp) { case DISAS_UNICORN_HALT: tcg_gen_insn_start(tcg_ctx, dc->base.pc_next, 0, 0); + update_psw_addr(dc); gen_helper_uc_s390x_exit(tcg_ctx, tcg_ctx->cpu_env); break; case DISAS_GOTO_TB: diff --git a/tests/unit/test_s390x.c b/tests/unit/test_s390x.c index 98d8c3ed..e3337235 100644 --- a/tests/unit/test_s390x.c +++ b/tests/unit/test_s390x.c @@ -14,7 +14,7 @@ static void uc_common_setup(uc_engine **uc, uc_arch arch, uc_mode mode, static void test_s390x_lr() { char code[] = "\x18\x23"; // lr %r2, %r3 - uint64_t r_r2, r_r3 = 0x114514; + uint64_t r_pc, r_r2, r_r3 = 0x114514; uc_engine *uc; uc_common_setup(&uc, UC_ARCH_S390X, UC_MODE_BIG_ENDIAN, code, @@ -25,8 +25,10 @@ static void test_s390x_lr() OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0)); 
OK(uc_reg_read(uc, UC_S390X_REG_R2, &r_r2)); + OK(uc_reg_read(uc, UC_S390X_REG_PC, &r_pc)); TEST_CHECK(r_r2 == 0x114514); + TEST_CHECK(r_pc == code_start + sizeof(code) - 1); OK(uc_close(uc)); } From 9ac796531abdf52ea82b4574ebacbb50ff8d17f8 Mon Sep 17 00:00:00 2001 From: lazymio Date: Mon, 10 Jan 2022 19:18:52 +0100 Subject: [PATCH 33/38] Don't cache S390SkeyState and S390SkeysClass --- qemu/hw/s390x/s390-skeys.c | 7 ------- qemu/include/hw/s390x/storage-keys.h | 2 -- qemu/target/s390x/mem_helper.c | 27 ++++++--------------------- qemu/target/s390x/mmu_helper.c | 11 +++-------- 4 files changed, 9 insertions(+), 38 deletions(-) diff --git a/qemu/hw/s390x/s390-skeys.c b/qemu/hw/s390x/s390-skeys.c index 04985d7b..1aa538c4 100644 --- a/qemu/hw/s390x/s390-skeys.c +++ b/qemu/hw/s390x/s390-skeys.c @@ -24,13 +24,6 @@ static void qemu_s390_skeys_class_init(uc_engine *uc, S390SKeysClass* skeyclass) static void s390_skeys_instance_init(uc_engine *uc, S390SKeysState* ss); static void qemu_s390_skeys_init(uc_engine *uc, QEMUS390SKeysState *skey); -S390SKeysState *s390_get_skeys_device(uc_engine *uc) -{ - S390CPU *cpu = S390_CPU(uc->cpu); - - return (S390SKeysState*)&cpu->ss; -} - void s390_skeys_init(uc_engine *uc) { S390CPU *cpu = S390_CPU(uc->cpu); diff --git a/qemu/include/hw/s390x/storage-keys.h b/qemu/include/hw/s390x/storage-keys.h index 1b2e819c..97473043 100644 --- a/qemu/include/hw/s390x/storage-keys.h +++ b/qemu/include/hw/s390x/storage-keys.h @@ -66,6 +66,4 @@ typedef struct QEMUS390SKeysState { void s390_skeys_init(uc_engine *uc); -S390SKeysState *s390_get_skeys_device(uc_engine *uc); - #endif /* S390_STORAGE_KEYS_H */ diff --git a/qemu/target/s390x/mem_helper.c b/qemu/target/s390x/mem_helper.c index 206410ae..7058ae45 100644 --- a/qemu/target/s390x/mem_helper.c +++ b/qemu/target/s390x/mem_helper.c @@ -2061,8 +2061,8 @@ uint32_t HELPER(tprot)(CPUS390XState *env, uint64_t a1, uint64_t a2) /* insert storage key extended */ uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2) { - static S390SKeysState *ss; - static S390SKeysClass *skeyclass; + S390SKeysState *ss = (S390SKeysState *)(&((S390CPU *)env->uc->cpu)->ss); + S390SKeysClass *skeyclass = S390_SKEYS_GET_CLASS(ss); uint64_t addr = wrap_address(env, r2); uint8_t key; @@ -2072,11 +2072,6 @@ uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2) } #endif - if (unlikely(!ss)) { - ss = s390_get_skeys_device(env->uc); - skeyclass = S390_SKEYS_GET_CLASS(ss); - } - if (skeyclass->get_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key)) { return 0; } @@ -2086,8 +2081,8 @@ uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2) /* set storage key extended */ void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2) { - static S390SKeysState *ss; - static S390SKeysClass *skeyclass; + S390SKeysState *ss = (S390SKeysState *)(&((S390CPU *)env->uc->cpu)->ss); + S390SKeysClass *skeyclass = S390_SKEYS_GET_CLASS(ss); uint64_t addr = wrap_address(env, r2); uint8_t key; @@ -2097,11 +2092,6 @@ void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2) } #endif - if (unlikely(!ss)) { - ss = s390_get_skeys_device(env->uc); - skeyclass = S390_SKEYS_GET_CLASS(ss); - } - key = (uint8_t) r1; skeyclass->set_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key); /* @@ -2114,8 +2104,8 @@ void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2) /* reset reference bit extended */ uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2) { - static S390SKeysState *ss; - static S390SKeysClass *skeyclass; + S390SKeysState *ss = (S390SKeysState *)(&((S390CPU 
*)env->uc->cpu)->ss);
+    S390SKeysClass *skeyclass = S390_SKEYS_GET_CLASS(ss);
     uint8_t re, key;
 
 #if 0
@@ -2124,11 +2114,6 @@ uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2)
     }
 #endif
 
-    if (unlikely(!ss)) {
-        ss = s390_get_skeys_device(env->uc);
-        skeyclass = S390_SKEYS_GET_CLASS(ss);
-    }
-
     if (skeyclass->get_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) {
         return 0;
     }
diff --git a/qemu/target/s390x/mmu_helper.c b/qemu/target/s390x/mmu_helper.c
index 4a6bef89..753f71df 100644
--- a/qemu/target/s390x/mmu_helper.c
+++ b/qemu/target/s390x/mmu_helper.c
@@ -283,9 +283,9 @@ static int mmu_translate_asce(CPUS390XState *env, target_ulong vaddr,
 }
 
 static void mmu_handle_skey(uc_engine *uc, target_ulong addr, int rw, int *flags)
-{
-    static S390SKeysClass *skeyclass;
-    static S390SKeysState *ss;
+{
+    S390SKeysState *ss = (S390SKeysState *)(&((S390CPU *)uc->cpu)->ss);
+    S390SKeysClass *skeyclass = S390_SKEYS_GET_CLASS(ss);
     uint8_t key;
     int rc;
 
@@ -295,11 +295,6 @@ static void mmu_handle_skey(uc_engine *uc, target_ulong addr, int rw, int *flags)
     }
 #endif
 
-    if (unlikely(!ss)) {
-        ss = s390_get_skeys_device(uc);
-        skeyclass = S390_SKEYS_GET_CLASS(ss);
-    }
-
     /*
      * Whenever we create a new TLB entry, we set the storage key reference
      * bit. In case we allow write accesses, we set the storage key change

From 33afdcf87244553c67f46c64ca6aabb980e3cc08 Mon Sep 17 00:00:00 2001
From: lazymio
Date: Mon, 10 Jan 2022 21:48:03 +0100
Subject: [PATCH 34/38] Save CC at the end of emulation

---
 qemu/target/s390x/translate.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/qemu/target/s390x/translate.c b/qemu/target/s390x/translate.c
index 39b9821f..987890cd 100644
--- a/qemu/target/s390x/translate.c
+++ b/qemu/target/s390x/translate.c
@@ -6897,6 +6897,7 @@ static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
     case DISAS_UNICORN_HALT:
         tcg_gen_insn_start(tcg_ctx, dc->base.pc_next, 0, 0);
         update_psw_addr(dc);
+        update_cc_op(dc);
         gen_helper_uc_s390x_exit(tcg_ctx, tcg_ctx->cpu_env);
         break;
     case DISAS_GOTO_TB:

From 4f1aeb83caeceaf07686fe518e1f12e1ad7077f5 Mon Sep 17 00:00:00 2001
From: mio
Date: Tue, 18 Jan 2022 21:16:01 +0100
Subject: [PATCH 35/38] Add fuzz_emu_s390x_be.c

---
 CMakeLists.txt                 |  2 +-
 tests/fuzz/fuzz_emu_s390x_be.c | 56 ++++++++++++++++++++++++++++++++++
 tests/fuzz/gentargets.sh       |  2 ++
 3 files changed, 59 insertions(+), 1 deletion(-)
 create mode 100644 tests/fuzz/fuzz_emu_s390x_be.c

diff --git a/CMakeLists.txt b/CMakeLists.txt
index dbd652ff..0cb138cc 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1319,7 +1319,7 @@ endif()
 
 if(UNICORN_FUZZ)
-    set(UNICORN_FUZZ_SUFFIX "arm_arm;arm_armbe;arm_thumb;arm64_arm;arm64_armbe;m68k_be;mips_32be;mips_32le;sparc_32be;x86_16;x86_32;x86_64;s390x")
+    set(UNICORN_FUZZ_SUFFIX "arm_arm;arm_armbe;arm_thumb;arm64_arm;arm64_armbe;m68k_be;mips_32be;mips_32le;sparc_32be;x86_16;x86_32;x86_64;s390x_be")
     set(SAMPLES_LIB ${SAMPLES_LIB} rt)
     foreach(SUFFIX ${UNICORN_FUZZ_SUFFIX})
         add_executable(fuzz_emu_${SUFFIX}
diff --git a/tests/fuzz/fuzz_emu_s390x_be.c b/tests/fuzz/fuzz_emu_s390x_be.c
new file mode 100644
index 00000000..88d4873a
--- /dev/null
+++ b/tests/fuzz/fuzz_emu_s390x_be.c
@@ -0,0 +1,56 @@
+#include <unicorn/unicorn.h>
+
+
+// memory address where emulation starts
+#define ADDRESS 0x1000000
+
+uc_engine *uc;
+int initialized = 0;
+FILE * outfile = NULL;
+
+
+int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
+    uc_err err;
+
+    if (initialized == 0) {
+        if (outfile == NULL) {
+            // we compute the output
+            outfile = fopen("/dev/null", "w");
+            if (outfile == NULL) {
+
printf("failed opening /dev/null\n"); + abort(); + return 0; + } + } + + initialized = 1; + } + + // Not global as we must reset this structure + // Initialize emulator in supplied mode + err = uc_open(UC_ARCH_S390X, UC_MODE_BIG_ENDIAN, &uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_open() with error returned: %u\n", err); + abort(); + } + + // map 4MB memory for this emulation + uc_mem_map(uc, ADDRESS, 4 * 1024 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + if (uc_mem_write(uc, ADDRESS, Data, Size)) { + printf("Failed to write emulation code to memory, quit!\n"); + abort(); + } + + // emulate code in infinite time & 4096 instructions + // avoid timeouts with infinite loops + err=uc_emu_start(uc, ADDRESS, ADDRESS + Size, 0, 0x1000); + if (err) { + fprintf(outfile, "Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); + } + + uc_close(uc); + + return 0; +} diff --git a/tests/fuzz/gentargets.sh b/tests/fuzz/gentargets.sh index 92385051..ac53ace8 100644 --- a/tests/fuzz/gentargets.sh +++ b/tests/fuzz/gentargets.sh @@ -19,3 +19,5 @@ sed 's/UC_ARCH_X86/UC_ARCH_ARM/' fuzz_emu_x86_32.c | sed 's/UC_MODE_32/UC_MODE_A sed 's/UC_ARCH_X86/UC_ARCH_ARM/' fuzz_emu_x86_32.c | sed 's/UC_MODE_32/UC_MODE_THUMB/' > fuzz_emu_arm_thumb.c sed 's/UC_ARCH_X86/UC_ARCH_ARM/' fuzz_emu_x86_32.c | sed 's/UC_MODE_32/UC_MODE_ARM + UC_MODE_BIG_ENDIAN/' > fuzz_emu_arm_armbe.c #sed 's/UC_ARCH_X86/UC_ARCH_ARM/' fuzz_emu_x86_32.c | sed 's/UC_MODE_32/UC_MODE_THUMB + UC_MODE_BIG_ENDIAN/' > fuzz_emu_arm_thumbbe.c + +sed 's/UC_ARCH_X86/UC_ARCH_S390X/' fuzz_emu_x86_32.c | sed 's/UC_MODE_32/UC_MODE_BIG_ENDIAN/' > fuzz_emu_s390x_be.c \ No newline at end of file From f57467e7ed29b0daeb43f5d76c6d424a658ab1d1 Mon Sep 17 00:00:00 2001 From: mio Date: Wed, 19 Jan 2022 20:10:09 +0100 Subject: [PATCH 36/38] Generate bindings --- bindings/dotnet/UnicornManaged/Const/S390x.fs | 3 ++- bindings/go/unicorn/s390x_const.go | 3 ++- bindings/java/unicorn/S390xConst.java | 3 ++- bindings/pascal/unicorn/S390xConst.pas | 3 ++- bindings/python/unicorn/s390x_const.py | 3 ++- bindings/ruby/unicorn_gem/lib/unicorn_engine/s390x_const.rb | 3 ++- 6 files changed, 12 insertions(+), 6 deletions(-) diff --git a/bindings/dotnet/UnicornManaged/Const/S390x.fs b/bindings/dotnet/UnicornManaged/Const/S390x.fs index 615a4241..405c5dc7 100644 --- a/bindings/dotnet/UnicornManaged/Const/S390x.fs +++ b/bindings/dotnet/UnicornManaged/Const/S390x.fs @@ -122,7 +122,8 @@ module S390x = let UC_S390X_REG_A14 = 63 let UC_S390X_REG_A15 = 64 let UC_S390X_REG_PC = 65 - let UC_S390X_REG_ENDING = 66 + let UC_S390X_REG_PSWM = 66 + let UC_S390X_REG_ENDING = 67 // Alias registers diff --git a/bindings/go/unicorn/s390x_const.go b/bindings/go/unicorn/s390x_const.go index 32abb2fe..e3802478 100644 --- a/bindings/go/unicorn/s390x_const.go +++ b/bindings/go/unicorn/s390x_const.go @@ -117,7 +117,8 @@ const ( S390X_REG_A14 = 63 S390X_REG_A15 = 64 S390X_REG_PC = 65 - S390X_REG_ENDING = 66 + S390X_REG_PSWM = 66 + S390X_REG_ENDING = 67 // Alias registers ) \ No newline at end of file diff --git a/bindings/java/unicorn/S390xConst.java b/bindings/java/unicorn/S390xConst.java index cc2b2a1b..8d9a343d 100644 --- a/bindings/java/unicorn/S390xConst.java +++ b/bindings/java/unicorn/S390xConst.java @@ -119,7 +119,8 @@ public interface S390xConst { public static final int UC_S390X_REG_A14 = 63; public static final int UC_S390X_REG_A15 = 64; public static final int UC_S390X_REG_PC = 65; - public static final int UC_S390X_REG_ENDING = 66; + 
public static final int UC_S390X_REG_PSWM = 66;
+    public static final int UC_S390X_REG_ENDING = 67;
 
     // Alias registers
 
diff --git a/bindings/pascal/unicorn/S390xConst.pas b/bindings/pascal/unicorn/S390xConst.pas
index 8041a19b..6ac09f19 100644
--- a/bindings/pascal/unicorn/S390xConst.pas
+++ b/bindings/pascal/unicorn/S390xConst.pas
@@ -120,7 +120,8 @@ const
   UC_S390X_REG_A14 = 63;
   UC_S390X_REG_A15 = 64;
   UC_S390X_REG_PC = 65;
-  UC_S390X_REG_ENDING = 66;
+  UC_S390X_REG_PSWM = 66;
+  UC_S390X_REG_ENDING = 67;
 
 // Alias registers
 
diff --git a/bindings/python/unicorn/s390x_const.py b/bindings/python/unicorn/s390x_const.py
index ec96ae66..a75a0158 100644
--- a/bindings/python/unicorn/s390x_const.py
+++ b/bindings/python/unicorn/s390x_const.py
@@ -115,6 +115,7 @@ UC_S390X_REG_A13 = 62
 UC_S390X_REG_A14 = 63
 UC_S390X_REG_A15 = 64
 UC_S390X_REG_PC = 65
-UC_S390X_REG_ENDING = 66
+UC_S390X_REG_PSWM = 66
+UC_S390X_REG_ENDING = 67
 
 # Alias registers
diff --git a/bindings/ruby/unicorn_gem/lib/unicorn_engine/s390x_const.rb b/bindings/ruby/unicorn_gem/lib/unicorn_engine/s390x_const.rb
index e089c090..8851026c 100644
--- a/bindings/ruby/unicorn_gem/lib/unicorn_engine/s390x_const.rb
+++ b/bindings/ruby/unicorn_gem/lib/unicorn_engine/s390x_const.rb
@@ -117,7 +117,8 @@ module UnicornEngine
     UC_S390X_REG_A14 = 63
     UC_S390X_REG_A15 = 64
     UC_S390X_REG_PC = 65
-    UC_S390X_REG_ENDING = 66
+    UC_S390X_REG_PSWM = 66
+    UC_S390X_REG_ENDING = 67
 
 # Alias registers
 end
\ No newline at end of file

From 67c437d8b8ffd77ad450f64977c3d8da03d8a30f Mon Sep 17 00:00:00 2001
From: mio
Date: Wed, 19 Jan 2022 22:02:26 +0100
Subject: [PATCH 37/38] Enable s390x in default arch

This was overridden in a previous PR
---
 CMakeLists.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index df89060d..055c3ca9 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -27,7 +27,7 @@ option(BUILD_SHARED_LIBS "Build shared instead of static library" ${PROJECT_IS_T
 option(UNICORN_FUZZ "Enable fuzzing" OFF)
 option(UNICORN_BUILD_TESTS "Build unicorn tests" ${PROJECT_IS_TOP_LEVEL})
 option(UNICORN_INSTALL "Enable unicorn installation" ${PROJECT_IS_TOP_LEVEL})
-set(UNICORN_ARCH "x86;arm;aarch64;riscv;mips;sparc;m68k;ppc" CACHE STRING "Enabled unicorn architectures")
+set(UNICORN_ARCH "x86;arm;aarch64;riscv;mips;sparc;m68k;ppc;s390x" CACHE STRING "Enabled unicorn architectures")
 option(UNICORN_TRACER "Trace unicorn execution" OFF)
 
 foreach(ARCH_LOOP ${UNICORN_ARCH})

From 5a592c753e4bbae13ee98ab41f341e6d85e747cd Mon Sep 17 00:00:00 2001
From: mio
Date: Wed, 19 Jan 2022 22:32:00 +0100
Subject: [PATCH 38/38] Set s390x-softmmu to STATIC

---
 CMakeLists.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 055c3ca9..7f235f54 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1053,7 +1053,7 @@ endif()
 endif()
 
 if (UNICORN_HAS_S390X)
-add_library(s390x-softmmu
+add_library(s390x-softmmu STATIC
     ${UNICORN_ARCH_COMMON}
 
     qemu/hw/s390x/s390-skeys.c
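
Taken together, patches 30-38 leave the s390x backend usable end to end: the target is built by default, PC and CC are synchronized when emulation stops (PATCH 32 and 34), and the PSW mask is exposed as UC_S390X_REG_PSWM (PATCH 31). The program below is a minimal usage sketch distilled from tests/unit/test_s390x.c and the fuzz harness above; it is not part of the patch series itself, and the 0x10000 base address, the main() wrapper, and the omitted error checks on the memory calls are illustrative choices only.

    #include <unicorn/unicorn.h>
    #include <inttypes.h>
    #include <stdio.h>

    #define BASE 0x10000 /* arbitrary page-aligned address for this sketch */

    int main(void)
    {
        uc_engine *uc;
        char code[] = "\x18\x23"; /* lr %r2, %r3 -- same instruction as the unit test */
        uint64_t r2 = 0, r3 = 0x114514, pc = 0, pswm = 0;

        /* s390x runs big-endian only (UC_MODE_S390X_MASK is UC_MODE_BIG_ENDIAN) */
        if (uc_open(UC_ARCH_S390X, UC_MODE_BIG_ENDIAN, &uc) != UC_ERR_OK) {
            return 1;
        }

        uc_mem_map(uc, BASE, 0x1000, UC_PROT_ALL);
        uc_mem_write(uc, BASE, code, sizeof(code) - 1);
        uc_reg_write(uc, UC_S390X_REG_R3, &r3);

        uc_emu_start(uc, BASE, BASE + sizeof(code) - 1, 0, 0);

        uc_reg_read(uc, UC_S390X_REG_R2, &r2);
        uc_reg_read(uc, UC_S390X_REG_PC, &pc);     /* synced at stop by PATCH 32 */
        uc_reg_read(uc, UC_S390X_REG_PSWM, &pswm); /* exposed by PATCH 31 */

        /* The condition code sits in bits 44-45 of the PSW mask value (counting
         * from the least-significant bit), mirroring the (psw.mask >> 44) & 3
         * extraction performed by reg_write() in qemu/target/s390x/unicorn.c. */
        printf("r2=%" PRIx64 " pc=%" PRIx64 " cc=%" PRIu64 "\n",
               r2, pc, (pswm >> 44) & 3);

        uc_close(uc);
        return 0;
    }

Note that writing UC_S390X_REG_PSWM also refreshes cc_op from the new mask, so a guest context restored through this register resumes with a consistent condition code.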